diff --git a/src/components/custom_registry_components.py b/src/components/custom_registry_components.py index 09cf259..9bd632e 100644 --- a/src/components/custom_registry_components.py +++ b/src/components/custom_registry_components.py @@ -21,3 +21,6 @@ env_prefix="[PROD]" ) """ + * WHAT: Inspect boto3.upload_file usage + * WHY: Durable storage pattern in AWS + * Migration Planning: Replace with GCS client logic inside a diff --git a/src/components/custom_training_components.py b/src/components/custom_training_components.py index 869cd9f..c008653 100644 --- a/src/components/custom_training_components.py +++ b/src/components/custom_training_components.py @@ -1,24 +1,2 @@ -""" -Custom Training Components -========================= -Custom-built components for model training. - -Author: MLOps Team -Maintained by: ML Engineering Team - -Components: -- train_model_op: Train logistic regression model from BigQuery data - -Usage: - from components.custom_training_components import train_model_op - - train_task = train_model_op( - train_data=bq_train_task.outputs["destination_table"], - feature_columns=['Age', 'BMI', 'PlasmaGlucose'], # <-- Passed as parameter - target_column='Diabetic', - reg_rate=0.05, - project_id=project_id, - bq_location=region, - env_prefix="[DEV]" - ) -""" + #* WHAT: Identify boto3/joblib/ds_utils imports + #* WHY: These libraries enable artifact persistence in S3 diff --git a/src/components/model_input_ingest.py b/src/components/model_input_ingest.py new file mode 100644 index 0000000..e9f2df4 --- /dev/null +++ b/src/components/model_input_ingest.py @@ -0,0 +1,39 @@ + #* WHAT: Inspect select_sql_from_dict or pd.read_sql usage + #* WHY: Redshift or S3 → DataFrame conversion + #* Migration Planning: Equivalent logic would move into + # prebuilt_bigquery_components.py using BigQuery query components. 
# Imports
import logging
from typing import Optional

import pandas as pd

# NOTE(review): the scaffold uses an undefined `component` decorator; it needs the
# KFP import below once kfp is added to the project dependencies — confirm package.
# from kfp.dsl import component

# Module logger — the original referenced LOG without ever defining it, which would
# raise NameError on the fallback path.
LOG = logging.getLogger(__name__)


# Component Decorators
# Consider bigquery_query_job_op ((Google Managed Prebuilt Component that does not require the @component decorator))
@component(
    base_image="python:3.10",  # was "python:" — an empty tag is not a valid image reference; pin the tag your env uses
    packages_to_install=["placeholder for packages"]
)
def _read_from_redshift(sql_client, sql: str, params: Optional[dict] = None, chunksize: Optional[int] = None) -> pd.DataFrame:
    """
    Exploration helper to read data from Redshift using available SQL access object.

    WHERE: _read_from_redshift ((Place holder for reading from BigQuery))
    WHAT: example patterns using sql_client.select_sql_from_dict or pandas.read_sql
    WHY: Redshift is columnar and can be expensive to pull; record trade-offs and auth considerations

    Args:
        sql_client: SQL access object; either exposes ``select_sql_from_dict(query_dict)``
            or provides a DB-API connection at ``.conn`` usable by ``pandas.read_sql``.
        sql: query text to execute.
        params: optional bind parameters (was annotated plain ``dict`` with a ``None``
            default; annotation corrected to ``Optional[dict]`` to match the contract).
        chunksize: accepted but currently unused — TODO confirm whether chunked reads
            should be forwarded to ``pandas.read_sql``.

    Returns:
        DataFrame of results; an empty DataFrame on any read failure (deliberate
        best-effort fallback for the lab).
    """
    try:
        if hasattr(sql_client, "select_sql_from_dict"):
            # Preferred path: project helper takes a {"sql": ..., "params": ...} dict.
            q = {"sql": sql, "params": params or {}}
            df = sql_client.select_sql_from_dict(q)
        else:
            # Fallback: plain pandas read over the client's DB-API connection.
            df = pd.read_sql(sql, sql_client.conn, params=params)
    except Exception:
        # Broad catch is intentional: the lab prefers an empty frame over a crash.
        LOG.exception("Redshift read failed; returning empty DataFrame for lab fallback")
        df = pd.DataFrame()
    return df

# Intermediate solution. Considering Prebuilt or Custom components
#* WHERE: stage_table_to_s3() in ingest_model.py
#* WHAT: Inspect UNLOAD vs client-side upload patterns
#* WHY: Efficiency vs cost trade-offs in Redshift
#* Migration Planning: Replace with BigQuery export jobs inside
#  prebuilt_bigquery_components.py or custom_data_quality_components.py.
diff --git "a/workbench/BQV2-MachineLearningOperationsPlaybookAdoptionWorkshop\342\200\223DataServicesIntegration.ipynb" "b/workbench/BQV2-MachineLearningOperationsPlaybookAdoptionWorkshop\342\200\223DataServicesIntegration.ipynb" new file mode 100644 index 0000000..2984186 --- /dev/null +++ "b/workbench/BQV2-MachineLearningOperationsPlaybookAdoptionWorkshop\342\200\223DataServicesIntegration.ipynb" @@ -0,0 +1,747 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "copyright" + }, + "outputs": [], + "source": [ + "# Copyright 2025 by Sysco\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lab-6-4-intro" + }, + "source": [ + "# Lab 6.4: Cloud Storage Integration with Vertex AI Workflows that use BigQuery\n", + "\n", + "In this section, we will establish the necessary infrastructure for our ML operations. This includes:\n", + "1. Installing the Vertex AI and BigQuery SDKs.\n", + "2. Setting up Authentication and Project definitions.\n", + "3. Creating a Cloud Storage bucket for artifact management (Model exports, staging).\n", + "4. Configuring Service Accounts for pipeline execution." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "install_mlops" + }, + "source": [ + "### Install Vertex AI SDK for Python and other required packages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "install_mlops" + }, + "outputs": [], + "source": [ + "# Install the packages\n", + "! pip3 install --upgrade --quiet pyarrow \\\n", + " google-cloud-aiplatform \\\n", + " google-cloud-bigquery \\\n", + " google-cloud-bigquery-storage \\\n", + " db-dtypes" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "befa6ca14bc0" + }, + "source": [ + "### Authenticate your notebook\n", + "Authenticate your environment" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "7de6ef0fac42" + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "if \"google.colab\" in sys.modules:\n", + "\n", + " from google.colab import auth\n", + "\n", + " auth.authenticate_user()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yfEglUHQk9S3" + }, + "source": [ + "### Set Google Cloud project information\n", + "Learn more about [setting up a project and a development environment.](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "region" + }, + "outputs": [], + "source": [ + "PROJECT_ID = \"mfav2-374520\" # @param {type:\"string\"}\n", + "LOCATION = \"us-east1\" # @param {type:\"string\"}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bucket:mbsdk" + }, + "source": [ + "### Create a Cloud Storage bucket\n", + "\n", + "**Lab 6.4 Key Task:** Create a storage bucket to store intermediate artifacts such as datasets and exported BQML models." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bucket" + }, + "outputs": [], + "source": [ + "BUCKET_URI = f\"gs://churn-TODO_replacewithyourname-{PROJECT_ID}-unique\" # @param {type:\"string\"}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "create_bucket" + }, + "source": [ + "**Your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "create_bucket" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating gs://churn-user19-mfav2-374520-unique/...\n" + ] + } + ], + "source": [ + "! gsutil mb -l {LOCATION} {BUCKET_URI}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "set_service_account" + }, + "source": [ + "#### Service Account\n", + "\n", + "You use a service account to create Vertex AI Pipeline jobs. If you don't want to use your project's Compute Engine service account, set `SERVICE_ACCOUNT` to another service account ID." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "set_service_account" + }, + "outputs": [], + "source": [ + "SERVICE_ACCOUNT = \"vertex-pipeline-executor@mfav2-374520.iam.gserviceaccount.com\" # @param {type:\"string\"}" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "autoset_service_account" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Service Account: vertex-pipeline-executor@mfav2-374520.iam.gserviceaccount.com\n" + ] + } + ], + "source": [ + "import sys\n", + "\n", + "IS_COLAB = \"google.colab\" in sys.modules\n", + "\n", + "if (\n", + " SERVICE_ACCOUNT == \"\"\n", + " or SERVICE_ACCOUNT is None\n", + " or SERVICE_ACCOUNT == \"[your-service-account]\"\n", + "):\n", + " # Get your service account from gcloud\n", + " if not IS_COLAB:\n", + " shell_output = !gcloud auth list 2>/dev/null\n", + " SERVICE_ACCOUNT = shell_output[2].replace(\"*\", \"\").strip()\n", + "\n", + " if IS_COLAB:\n", + " shell_output = ! 
gcloud projects describe $PROJECT_ID\n", + " project_number = shell_output[-1].split(\":\")[1].strip().replace(\"'\", \"\")\n", + " SERVICE_ACCOUNT = f\"{project_number}-compute@developer.gserviceaccount.com\"\n", + "\n", + "# Always print, regardless of condition\n", + "print(\"Service Account:\", SERVICE_ACCOUNT)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "setup_vars" + }, + "source": [ + "### Import libraries and define constants" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "id": "import_aip:mbsdk" + }, + "outputs": [], + "source": [ + "import google.cloud.aiplatform as aiplatform\n", + "from google.cloud import bigquery" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "init_aip:mbsdk" + }, + "source": [ + "### Initialize Vertex AI and BigQuery SDKs for Python\n", + "\n", + "Initialize the Vertex AI SDK for Python and BigQuery SDK with your project and the created bucket." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "id": "init_aip:mbsdk" + }, + "outputs": [], + "source": [ + "aiplatform.init(project=PROJECT_ID, staging_bucket=BUCKET_URI)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "init_bq" + }, + "source": [ + "Create the BigQuery client." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "id": "init_bq" + }, + "outputs": [], + "source": [ + "bqclient = bigquery.Client(project=PROJECT_ID)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "accelerators:prediction,mbsdk" + }, + "source": [ + "### Set hardware accelerators\n", + "\n", + "You can set hardware accelerators for prediction.\n", + "\n", + "Set the variable `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. 
For example, to use a GPU container image with 4 Nvidia Tesla T4 GPUs allocated to each VM, you would specify:\n", + "\n", + " (aiplatform.AcceleratorType.NVIDIA_TESLA_T4, 4)\n", + "\n", + "Otherwise specify `(None, None)` to use a container image to run on a CPU.\n", + "\n", + "Learn more [about hardware accelerator support for your region.](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "id": "accelerators:prediction,mbsdk" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Deployment accelerator: None None\n" + ] + } + ], + "source": [ + "import os\n", + "from google.cloud import aiplatform\n", + "\n", + "if os.getenv(\"IS_TESTING_DEPLOY_GPU\"):\n", + " # If env var is set, use that GPU count\n", + " DEPLOY_GPU, DEPLOY_NGPU = (\n", + " aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_T4,\n", + " int(os.getenv(\"IS_TESTING_DEPLOY_GPU\")),\n", + " )\n", + "else:\n", + " # Default to CPU-only\n", + " DEPLOY_GPU, DEPLOY_NGPU = (None, None)\n", + "\n", + "print(\"Deployment accelerator:\", DEPLOY_GPU, DEPLOY_NGPU)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "container:prediction" + }, + "source": [ + "### Set prebuilt containers\n", + "\n", + "Set the prebuilt Docker container image for prediction.\n", + "\n", + "- Set the variable `TF` to the TensorFlow version of the container image. The following list shows some of the prebuilt images available:\n", + "\n", + "\n", + "For the latest list, see [prebuilt containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)."
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "id": "container:prediction" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Deployment: us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-5:latest None None\n" + ] + } + ], + "source": [ + "import os\n", + "from google.cloud import aiplatform\n", + "\n", + "# Example: set TF version from env var or default\n", + "if os.getenv(\"IS_TESTING_TF\"):\n", + " TF = os.getenv(\"IS_TESTING_TF\")\n", + "else:\n", + " TF = \"2.5\".replace(\".\", \"-\")\n", + "\n", + "# Configure accelerator (None, None for CPU-only)\n", + "DEPLOY_GPU, DEPLOY_NGPU = (None, None)\n", + "\n", + "# Build container version string\n", + "if TF[0] == \"2\":\n", + " if DEPLOY_GPU is not None:\n", + " DEPLOY_VERSION = \"tf2-gpu.{}\".format(TF)\n", + " else:\n", + " DEPLOY_VERSION = \"tf2-cpu.{}\".format(TF)\n", + "else:\n", + " if DEPLOY_GPU is not None:\n", + " DEPLOY_VERSION = \"tf-gpu.{}\".format(TF)\n", + " else:\n", + " DEPLOY_VERSION = \"tf-cpu.{}\".format(TF)\n", + "\n", + "# Construct full container URI\n", + "DEPLOY_IMAGE = \"{}-docker.pkg.dev/vertex-ai/prediction/{}:latest\".format(\n", + " LOCATION.split(\"-\")[0], DEPLOY_VERSION\n", + ")\n", + "\n", + "print(\"Deployment:\", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "machine:prediction" + }, + "source": [ + "### Set machine type\n", + "\n", + "Next, set the machine type to use for prediction.\n", + "\n", + "- Set the variable `DEPLOY_COMPUTE` to configure the compute resources for the VM which is used for prediction.\n", + " - `machine type`\n", + " - `n1-standard`: 3.75GB of memory per vCPU.\n", + " - `n1-highmem`: 6.5GB of memory per vCPU\n", + " - `n1-highcpu`: 0.9 GB of memory per vCPU\n", + " - `vCPUs`: number of \\[2, 4, 8, 16, 32, 64, 96 \\]\n", + "\n", + "*Note: You may also use n2 and e2 machine types for training and deployment, but they don't support 
GPUs*" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "id": "machine:prediction" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Deploy machine type: n1-standard-2\n" + ] + } + ], + "source": [ + "import os\n", + "\n", + "# Pick machine type from env var or default to n1-standard\n", + "if os.getenv(\"IS_TESTING_DEPLOY_MACHINE\"):\n", + " MACHINE_TYPE = os.getenv(\"IS_TESTING_DEPLOY_MACHINE\")\n", + "else:\n", + " MACHINE_TYPE = \"n1-standard\"\n", + "\n", + "# Set vCPUs explicitly to 2\n", + "VCPU = \"2\"\n", + "\n", + "# Build full machine type string\n", + "DEPLOY_COMPUTE = MACHINE_TYPE + \"-\" + VCPU\n", + "\n", + "print(\"Deploy machine type:\", DEPLOY_COMPUTE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bqml_intro" + }, + "source": [ + "# Lab 6.5: BigQuery ML and Vertex AI Integration Patterns\n", + "\n", + "In this section, we will utilize **BigQuery ML (BQML)** to train models using standard SQL syntax. We will also demonstrate how to integrate these BQML models with Vertex AI for Model Registry and Online Prediction.\n", + "\n", + "## BigQuery ML introduction\n", + "\n", + "BigQuery ML (BQML) provides the capability to train ML tabular models, such as classification and regression in BigQuery using SQL syntax.\n", + "\n", + "Learn more about [BigQuery ML documentation](https://cloud.google.com/bigquery-ml/docs)." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "id": "import_file:penguins,bq,lcn" + }, + "outputs": [], + "source": [ + "IMPORT_FILE = \"bq://bigquery-public-data.ml_datasets.penguins\"\n", + "BQ_TABLE = \"bigquery-public-data.ml_datasets.penguins\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bqml_create_dataset" + }, + "source": [ + "### Create BQ dataset resource\n", + "\n", + "First, you create an empty dataset resource in your project." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bqml_create_dataset" + }, + "outputs": [], + "source": [ + "BQ_DATASET_NAME = \"penguins_todo_replacewithyourname\" # @param {type:\"string\"}\n", + "DATASET_QUERY = f\"\"\"CREATE SCHEMA {BQ_DATASET_NAME}\n", + "\"\"\"\n", + "\n", + "job = bqclient.query(DATASET_QUERY)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6c390ee7c11a" + }, + "source": [ + "### Training and registering the model (Advanced Lab 6.5 & 6.6)\n", + "\n", + "Next, you train the model and automatically register the model to the Vertex AI Model Registry, by adding the following parameters as options:\n", + "\n", + "- `model_registry`: Set to `vertex_ai` to indicate automatic registration to Vertex AI Model Registry.\n", + "- `vertex_ai_model_id`: The human readable display name for the registered model.\n", + "- `vertex_ai_model_version_aliases`: Alternate name for the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "57db464f4c42" + }, + "outputs": [], + "source": [ + "MODEL_NAME = \"penguins_todo_replacewithyourname\" # @param {type:\"string\"}\n", + "MODEL_QUERY = f\"\"\"\n", + "CREATE OR REPLACE MODEL `{BQ_DATASET_NAME}.{MODEL_NAME}`\n", + "OPTIONS(\n", + " model_type='DNN_CLASSIFIER',\n", + " input_label_cols = ['species'],\n", + " model_registry=\"vertex_ai\",\n", + " vertex_ai_model_id=\"bqml_model_todo_replacewithyourname\",\n", + " vertex_ai_model_version_aliases=[\"1\"]\n", + " )\n", + "AS\n", + "SELECT *\n", + "FROM `{BQ_TABLE}`\n", + "\"\"\"\n", + "\n", + "job = bqclient.query(MODEL_QUERY)\n", + "print(job.errors, job.state)\n", + "\n", + "while job.running():\n", + " from time import sleep\n", + "\n", + " sleep(30)\n", + " print(\"Running ...\")\n", + "print(job.errors, job.state)\n", + "\n", + "try:\n", + " tblname = job.ddl_target_table\n", + " tblname = \"{}.{}\".format(tblname.dataset_id, tblname.table_id)\n", + " print(\"{} created in 
{}\".format(tblname, job.ended - job.started))\n", + "except Exception as e:\n", + " print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5b4970272040" + }, + "source": [ + "### Lab 6.6: Google Cloud Data and Analytics Architecture for ML utilizing the Vertex AI Model Registry\n", + "\n", + "Finally, you can use the Vertex AI model `list()` method with a filter query to find the automatically registered model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "76c22674ba99" + }, + "outputs": [], + "source": [ + "from google.cloud import aiplatform\n", + "\n", + "# Initialize in the correct region\n", + "aiplatform.init(project=PROJECT_ID, location=\"us-central1\")\n", + "\n", + "# List all models to confirm names\n", + "models = aiplatform.Model.list()\n", + "for m in models:\n", + " print(\"Display name:\", m.display_name, \"| Resource name:\", m.resource_name)\n", + "\n", + "# Filter by the actual display name you saw above\n", + "filtered_models = aiplatform.Model.list(filter='display_name=\"bqml_model_todo_replacewithyourname\"')\n", + "\n", + "if filtered_models:\n", + " model = filtered_models[0]\n", + " print(\"\\n=== Filtered model details ===\")\n", + " print(model.gca_resource)\n", + "else:\n", + " print(\"\\nNo models found with that display name in us-central1\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "48e6ef5d5ffa" + }, + "outputs": [], + "source": [ + "models = aiplatform.Model.list()\n", + "for model in models:\n", + " if model.gca_resource.display_name.startswith(\"bqml\"):\n", + " print(model.gca_resource.display_name)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "EVAL_QUERY = f\"\"\"\n", + "SELECT *\n", + "FROM\n", + " ML.EVALUATE(MODEL {BQ_DATASET_NAME}.{MODEL_NAME})\n", + "ORDER BY roc_auc desc\n", + "LIMIT 1\"\"\"\n", + "\n", + "try:\n", + " job = 
bqclient.query(EVAL_QUERY)\n", + " results = job.result().to_dataframe()\n", + " print(results)\n", + "except Exception as e:\n", + " print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ef61354b1a5f" + }, + "source": [ + "### Delete the BigQuery ML model\n", + "\n", + "Next, delete the BigQuery ML instance of the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "f6004d1ce59d" + }, + "outputs": [], + "source": [ + "MODEL_QUERY = f\"\"\"\n", + "DROP MODEL `{BQ_DATASET_NAME}.{MODEL_NAME}`\n", + "\"\"\"\n", + "\n", + "job = bqclient.query(MODEL_QUERY)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cleanup:mbsdk" + }, + "source": [ + "# Cleaning up\n", + "\n", + "To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud\n", + "project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n", + "\n", + "Otherwise, you can delete the individual resources you created in this tutorial.\n", + "\n", + "Set `delete_storage` to `True` to delete the Cloud Storage bucket used in this notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "cleanup:mbsdk" + }, + "outputs": [], + "source": [ + "# Delete the endpoint using the Vertex endpoint object\n", + "try:\n", + " endpoint.undeploy_all()\n", + " endpoint.delete()\n", + "except Exception as e:\n", + " print(e)\n", + "\n", + "# Delete the model using the Vertex model object\n", + "try:\n", + " model.delete()\n", + "except Exception as e:\n", + " print(e)\n", + "\n", + "# Delete the created BigQuery dataset\n", + "! bq rm -r -f $PROJECT_ID:$BQ_DATASET_NAME\n", + "\n", + "delete_storage = False\n", + "if delete_storage:\n", + " # Delete the created GCS bucket\n", + " ! 
gsutil rm -r $BUCKET_URI" + ] + } + ], + "metadata": { + "colab": { + "name": "get_started_with_bqml_training.ipynb", + "toc_visible": true + }, + "environment": { + "kernel": "conda-base-py", + "name": "workbench-notebooks.m137", + "type": "gcloud", + "uri": "us-docker.pkg.dev/deeplearning-platform-release/gcr.io/workbench-notebooks:m137" + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel) (Local)", + "language": "python", + "name": "conda-base-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.19" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +}