From 0edac7e22984121969b4766448b6703d09bf9566 Mon Sep 17 00:00:00 2001
From: Sharon Yu
Date: Tue, 9 Dec 2025 19:48:43 +0000
Subject: [PATCH] add ml-diagnostic guide

---
 docs/guides/monitoring_and_debugging.md |  1 +
 .../ml_workload_diagnostics.md           | 44 +++++++++++++++++++
 docs/run_maxtext/run_maxtext_via_xpk.md  |  2 +-
 3 files changed, 46 insertions(+), 1 deletion(-)
 create mode 100644 docs/guides/monitoring_and_debugging/ml_workload_diagnostics.md

diff --git a/docs/guides/monitoring_and_debugging.md b/docs/guides/monitoring_and_debugging.md
index f7c32d19d..40892b9a6 100644
--- a/docs/guides/monitoring_and_debugging.md
+++ b/docs/guides/monitoring_and_debugging.md
@@ -26,4 +26,5 @@ monitoring_and_debugging/monitor_goodput.md
 monitoring_and_debugging/understand_logs_and_metrics.md
 monitoring_and_debugging/use_vertex_ai_tensorboard.md
 monitoring_and_debugging/xprof_user_guide.md
+monitoring_and_debugging/ml_workload_diagnostics.md
 ```
diff --git a/docs/guides/monitoring_and_debugging/ml_workload_diagnostics.md b/docs/guides/monitoring_and_debugging/ml_workload_diagnostics.md
new file mode 100644
index 000000000..4254c5c23
--- /dev/null
+++ b/docs/guides/monitoring_and_debugging/ml_workload_diagnostics.md
@@ -0,0 +1,44 @@
+
+# Running a Workload with Google Cloud ML Diagnostics Enabled
+
+This guide explains how to enable ML Diagnostics for your MaxText workload.
+
+## Overview
+
+Google Cloud ML Diagnostics is an end-to-end managed platform that helps ML engineers optimize and diagnose their AI/ML workloads on Google Cloud. It lets you collect and visualize all of your workload metrics, configs, and profiles in a single UI. The product focuses on workloads running on XLA-based frameworks (JAX, PyTorch/XLA, TensorFlow/Keras) on Google Cloud TPUs and GPUs; currently, only JAX on Google Cloud TPUs is supported.
+
+## Enabling ML Diagnostics on a MaxText Workload
+
+MaxText has the ML Diagnostics SDK integrated in its code. You can enable ML Diagnostics with the **managed_mldiagnostics** flag. When the flag is enabled, MaxText will:
+
+- create a managed machine-learning run with all the MaxText configs
+- upload profiling traces, if profiling is enabled with `profiler=xplane`
+- upload training metrics at the interval defined by `log_period`
+
+### Examples
+
+1. Enable ML Diagnostics to capture only MaxText metrics and configs:
+
+       python3 -m MaxText.train src/MaxText/configs/base.yml run_name=${USER}-tpu-job base_output_directory="gs://your-output-bucket/" dataset_path="gs://your-dataset-bucket/" steps=100 log_period=10 managed_mldiagnostics=True
+
+2. Enable ML Diagnostics to capture MaxText metrics, configs, and single-host profiles (on the first TPU device):
+
+       python3 -m MaxText.train src/MaxText/configs/base.yml run_name=${USER}-tpu-job base_output_directory="gs://your-output-bucket/" dataset_path="gs://your-dataset-bucket/" steps=100 log_period=10 profiler=xplane managed_mldiagnostics=True
+
+3. Enable ML Diagnostics to capture MaxText metrics, configs, and multi-host profiles (on all TPU devices):
+
+       python3 -m MaxText.train src/MaxText/configs/base.yml run_name=${USER}-tpu-job base_output_directory="gs://your-output-bucket/" dataset_path="gs://your-dataset-bucket/" steps=100 log_period=10 profiler=xplane upload_all_profiler_results=True managed_mldiagnostics=True
+
+You can deploy the workload in any supported environment, including the standard XPK workload types (**xpk workload create** or **xpk workload create-pathways**) and by running the workload directly on a standalone TPU VM.
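+For example, here is a minimal sketch of launching the first example above through **xpk workload create**. The workload name, docker image, TPU type, and slice count below are illustrative placeholders, not values prescribed by this guide; substitute values for your own environment:
+
+    # Submit the ML Diagnostics-enabled training command as an XPK workload.
+    xpk workload create \
+        --cluster ${CLUSTER_NAME} \
+        --workload ${USER}-mldiag-job \
+        --base-docker-image maxtext_base_image \
+        --tpu-type v5litepod-16 \
+        --num-slices 1 \
+        --command "python3 -m MaxText.train src/MaxText/configs/base.yml run_name=${USER}-tpu-job base_output_directory=gs://your-output-bucket/ dataset_path=gs://your-dataset-bucket/ steps=100 log_period=10 managed_mldiagnostics=True"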
\ No newline at end of file
diff --git a/docs/run_maxtext/run_maxtext_via_xpk.md b/docs/run_maxtext/run_maxtext_via_xpk.md
index f5d168fc6..543f5595e 100644
--- a/docs/run_maxtext/run_maxtext_via_xpk.md
+++ b/docs/run_maxtext/run_maxtext_via_xpk.md
@@ -225,4 +225,4 @@ For instance, to run a job across **four TPU slices**, you would change `--num-s
 
   ```
   xpk workload delete --cluster ${CLUSTER_NAME} --workload
-  ```
+  ```
\ No newline at end of file