diff --git a/acceptance/bundle/resources/model_serving_endpoints/running-endpoint/output.txt b/acceptance/bundle/resources/model_serving_endpoints/running-endpoint/output.txt index e52b751632..470af7f02d 100644 --- a/acceptance/bundle/resources/model_serving_endpoints/running-endpoint/output.txt +++ b/acceptance/bundle/resources/model_serving_endpoints/running-endpoint/output.txt @@ -33,6 +33,10 @@ Deployment complete! "creator": "[USERNAME]" } +=== verify there's no persistent drift +>>> [CLI] bundle plan +Plan: 0 to add, 0 to change, 0 to delete, 1 unchanged + >>> [CLI] bundle destroy --auto-approve The following resources will be deleted: delete resources.model_serving_endpoints.my_endpoint diff --git a/acceptance/bundle/resources/model_serving_endpoints/running-endpoint/script b/acceptance/bundle/resources/model_serving_endpoints/running-endpoint/script index 32742e9675..8b420559c3 100644 --- a/acceptance/bundle/resources/model_serving_endpoints/running-endpoint/script +++ b/acceptance/bundle/resources/model_serving_endpoints/running-endpoint/script @@ -14,4 +14,8 @@ trace $CLI bundle deploy trace print_requests.py //serving-endpoints endpoint_name=$($CLI bundle summary -o json | jq -r '.resources.model_serving_endpoints.my_endpoint.name') +endpoint_id=$($CLI bundle summary -o json | jq -r '.resources.model_serving_endpoints.my_endpoint.id') trace $CLI serving-endpoints get $endpoint_name | jq '{name, creator}' + +title "verify there's no persistent drift" +trace $CLI bundle plan diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/databricks.yml.tmpl b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/databricks.yml.tmpl new file mode 100644 index 0000000000..69626482a5 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/databricks.yml.tmpl @@ -0,0 +1,22 @@ +bundle: + name: test-mse-update-ai-gateway-$UNIQUE_NAME + +workspace: + root_path: ~/.bundle/$UNIQUE_NAME + +resources: + model_serving_endpoints: + test_endpoint: + name: test-endpoint-$UNIQUE_NAME + config: + served_entities: + - name: prod + external_model: + name: gpt-4o-mini + provider: openai + task: llm/v1/chat + openai_config: + openai_api_key: "{{secrets/test-scope/openai-key}}" + ai_gateway: + inference_table_config: + catalog_name: "first-inference-catalog" diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/out.plan.direct.json b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/out.plan.direct.json new file mode 100644 index 0000000000..82b0c5e8ab --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/out.plan.direct.json @@ -0,0 +1,70 @@ +{ + "plan": { + "resources.model_serving_endpoints.test_endpoint": { + "action": "update", + "new_state": { + "value": { + "ai_gateway": { + "inference_table_config": { + "catalog_name": "second-inference-catalog" + } + }, + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "name": "[ENDPOINT_ID]" + } + }, + "remote_state": { + "endpoint_details": { + "ai_gateway": { + "inference_table_config": { + "catalog_name": "first-inference-catalog" + } + }, + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + 
}, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "creator": "[USERNAME]", + "id": "[UUID]", + "name": "[ENDPOINT_ID]", + "state": { + "config_update": "NOT_UPDATING" + } + }, + "endpoint_id": "[UUID]" + }, + "changes": { + "local": { + "ai_gateway.inference_table_config.catalog_name": { + "action": "update" + } + } + } + } + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/out.plan.terraform.json b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/out.plan.terraform.json new file mode 100644 index 0000000000..bc939a91b2 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/out.plan.terraform.json @@ -0,0 +1,7 @@ +{ + "plan": { + "resources.model_serving_endpoints.test_endpoint": { + "action": "update" + } + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/out.test.toml b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/out.test.toml new file mode 100644 index 0000000000..d560f1de04 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/out.test.toml @@ -0,0 +1,5 @@ +Local = true +Cloud = false + +[EnvMatrix] + DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct"] diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/output.txt b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/output.txt new file mode 100644 index 0000000000..a9be82769c --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/output.txt @@ -0,0 +1,77 @@ + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> [CLI] serving-endpoints get [ENDPOINT_ID] +{ + "inference_table_config": { + "catalog_name": "first-inference-catalog" + } +} + +>>> update_file.py databricks.yml catalog_name: "first-inference-catalog" catalog_name: "second-inference-catalog" + +>>> [CLI] bundle plan -o json + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> print_requests.py //serving-endpoints +{ + "method": "POST", + "path": "/api/2.0/serving-endpoints", + "body": { + "ai_gateway": { + "inference_table_config": { + "catalog_name": "first-inference-catalog" + } + }, + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "name": "[ENDPOINT_ID]" + } +} +{ + "method": "PUT", + "path": "/api/2.0/serving-endpoints/[ENDPOINT_ID]/ai-gateway", + "body": { + "inference_table_config": { + "catalog_name": "second-inference-catalog" + } + } +} + +>>> [CLI] serving-endpoints get [ENDPOINT_ID] +{ + "inference_table_config": { + "catalog_name": "second-inference-catalog" + } +} + +>>> [CLI] bundle destroy --auto-approve +The following resources will be deleted: + delete resources.model_serving_endpoints.test_endpoint + +All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME] + +Deleting files... +Destroy complete! 
diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/script b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/script new file mode 100755 index 0000000000..89877faef6 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/ai-gateway/script @@ -0,0 +1,23 @@ +#!/bin/bash +envsubst < databricks.yml.tmpl > databricks.yml + +cleanup() { + trace $CLI bundle destroy --auto-approve + rm -f out.requests.txt +} +trap cleanup EXIT + +trace $CLI bundle deploy + +ENDPOINT_ID=$($CLI bundle summary -o json | jq -r '.resources.model_serving_endpoints.test_endpoint.id') +echo "$ENDPOINT_ID:ENDPOINT_ID" >> ACC_REPLS +trace $CLI serving-endpoints get "${ENDPOINT_ID}" | jq '.ai_gateway' + +trace update_file.py databricks.yml 'catalog_name: "first-inference-catalog"' 'catalog_name: "second-inference-catalog"' + +trace $CLI bundle plan -o json > out.plan.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy + +trace print_requests.py //serving-endpoints + +trace $CLI serving-endpoints get "${ENDPOINT_ID}" | jq '.ai_gateway' diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/databricks.yml.tmpl b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/databricks.yml.tmpl new file mode 100644 index 0000000000..776d41401b --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/databricks.yml.tmpl @@ -0,0 +1,26 @@ +bundle: + name: test-mse-update-both-gateway-and-tags-$UNIQUE_NAME + +workspace: + root_path: ~/.bundle/$UNIQUE_NAME + +resources: + model_serving_endpoints: + test_endpoint: + name: test-endpoint-$UNIQUE_NAME + config: + served_entities: + - name: prod + external_model: + name: gpt-4o-mini + provider: openai + task: llm/v1/chat + openai_config: + openai_api_key: "{{secrets/test-scope/openai-key}}" + ai_gateway: + inference_table_config: + catalog_name: "first-inference-catalog" + + tags: + - key: team + value: my-team-one diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/out.plan.direct.json b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/out.plan.direct.json new file mode 100644 index 0000000000..2c2b7bdee0 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/out.plan.direct.json @@ -0,0 +1,85 @@ +{ + "plan": { + "resources.model_serving_endpoints.test_endpoint": { + "action": "update", + "new_state": { + "value": { + "ai_gateway": { + "inference_table_config": { + "catalog_name": "second-inference-catalog" + } + }, + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "name": "[ENDPOINT_ID]", + "tags": [ + { + "key": "team", + "value": "my-team-two" + } + ] + } + }, + "remote_state": { + "endpoint_details": { + "ai_gateway": { + "inference_table_config": { + "catalog_name": "first-inference-catalog" + } + }, + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "creator": "[USERNAME]", + "id": "[UUID]", + "name": "[ENDPOINT_ID]", + "state": { + "config_update": "NOT_UPDATING" + }, + "tags": [ + { + "key":
"team", + "value": "my-team-one" + } + ] + }, + "endpoint_id": "[UUID]" + }, + "changes": { + "local": { + "ai_gateway.inference_table_config.catalog_name": { + "action": "update" + }, + "tags[0].value": { + "action": "update" + } + } + } + } + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/out.plan.terraform.json b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/out.plan.terraform.json new file mode 100644 index 0000000000..bc939a91b2 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/out.plan.terraform.json @@ -0,0 +1,7 @@ +{ + "plan": { + "resources.model_serving_endpoints.test_endpoint": { + "action": "update" + } + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/out.test.toml b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/out.test.toml new file mode 100644 index 0000000000..d560f1de04 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/out.test.toml @@ -0,0 +1,5 @@ +Local = true +Cloud = false + +[EnvMatrix] + DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct"] diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/output.txt b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/output.txt new file mode 100644 index 0000000000..c4a7725fe9 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/output.txt @@ -0,0 +1,117 @@ + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> [CLI] serving-endpoints get [ENDPOINT_ID] +{ + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] +} + +>>> update_file.py databricks.yml value: my-team-one value: my-team-two + +>>> update_file.py databricks.yml catalog_name: "first-inference-catalog" catalog_name: "second-inference-catalog" + +>>> [CLI] bundle plan -o json + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! 
+ +>>> print_requests.py //serving-endpoints +{ + "method": "POST", + "path": "/api/2.0/serving-endpoints", + "body": { + "ai_gateway": { + "inference_table_config": { + "catalog_name": "first-inference-catalog" + } + }, + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "name": "[ENDPOINT_ID]", + "tags": [ + { + "key": "team", + "value": "my-team-one" + } + ] + } +} +{ + "method": "PATCH", + "path": "/api/2.0/serving-endpoints/[ENDPOINT_ID]/tags", + "body": { + "add_tags": [ + { + "key": "team", + "value": "my-team-two" + } + ] + } +} +{ + "method": "PUT", + "path": "/api/2.0/serving-endpoints/[ENDPOINT_ID]/ai-gateway", + "body": { + "inference_table_config": { + "catalog_name": "second-inference-catalog" + } + } +} + +>>> [CLI] serving-endpoints get [ENDPOINT_ID] +{ + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] +} + +>>> [CLI] bundle destroy --auto-approve +The following resources will be deleted: + delete resources.model_serving_endpoints.test_endpoint + +All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME] + +Deleting files... +Destroy complete! diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/script b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/script new file mode 100755 index 0000000000..d0743fbfb7 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/both_gateway_and_tags/script @@ -0,0 +1,25 @@ +#!/bin/bash +envsubst < databricks.yml.tmpl > databricks.yml + +cleanup() { + trace $CLI bundle destroy --auto-approve + rm -f out.requests.txt +} +trap cleanup EXIT + +trace $CLI bundle deploy + +ENDPOINT_ID=$($CLI bundle summary -o json | jq -r '.resources.model_serving_endpoints.test_endpoint.id') +echo "$ENDPOINT_ID:ENDPOINT_ID" >> ACC_REPLS +trace $CLI serving-endpoints get "${ENDPOINT_ID}" | jq '.config' + +# Update two fields. An update call should be issued for each one (PATCH for tags, PUT for the AI gateway).
+trace update_file.py databricks.yml "value: my-team-one" "value: my-team-two" +trace update_file.py databricks.yml 'catalog_name: "first-inference-catalog"' 'catalog_name: "second-inference-catalog"' + +trace $CLI bundle plan -o json > out.plan.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy + +trace print_requests.py //serving-endpoints + +trace $CLI serving-endpoints get "${ENDPOINT_ID}" | jq '.config' diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/config/databricks.yml.tmpl b/acceptance/bundle/resources/model_serving_endpoints/update/config/databricks.yml.tmpl new file mode 100644 index 0000000000..726f5dce1f --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/config/databricks.yml.tmpl @@ -0,0 +1,19 @@ +bundle: + name: test-mse-update-config-$UNIQUE_NAME + +workspace: + root_path: ~/.bundle/$UNIQUE_NAME + +resources: + model_serving_endpoints: + test_endpoint: + name: test-endpoint-$UNIQUE_NAME + config: + served_entities: + - name: prod + external_model: + name: gpt-4o-mini + provider: openai + task: llm/v1/chat + openai_config: + openai_api_key: "{{secrets/test-scope/openai-key}}" diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/config/out.plan.direct.json b/acceptance/bundle/resources/model_serving_endpoints/update/config/out.plan.direct.json new file mode 100644 index 0000000000..d824bcf469 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/config/out.plan.direct.json @@ -0,0 +1,60 @@ +{ + "plan": { + "resources.model_serving_endpoints.test_endpoint": { + "action": "update", + "new_state": { + "value": { + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-5o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "name": "[ENDPOINT_ID]" + } + }, + "remote_state": { + "endpoint_details": { + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "creator": "[USERNAME]", + "id": "[UUID]", + "name": "[ENDPOINT_ID]", + "state": { + "config_update": "NOT_UPDATING" + } + }, + "endpoint_id": "[UUID]" + }, + "changes": { + "local": { + "config.served_entities[0].external_model.name": { + "action": "update" + } + } + } + } + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/config/out.plan.terraform.json b/acceptance/bundle/resources/model_serving_endpoints/update/config/out.plan.terraform.json new file mode 100644 index 0000000000..bc939a91b2 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/config/out.plan.terraform.json @@ -0,0 +1,7 @@ +{ + "plan": { + "resources.model_serving_endpoints.test_endpoint": { + "action": "update" + } + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/config/out.test.toml b/acceptance/bundle/resources/model_serving_endpoints/update/config/out.test.toml new file mode 100644 index 0000000000..d560f1de04 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/config/out.test.toml @@ -0,0 +1,5 @@ +Local = true +Cloud = false + +[EnvMatrix] + DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct"] diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/config/output.txt 
b/acceptance/bundle/resources/model_serving_endpoints/update/config/output.txt new file mode 100644 index 0000000000..156936aa5f --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/config/output.txt @@ -0,0 +1,102 @@ + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> [CLI] serving-endpoints get [ENDPOINT_ID] +{ + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] +} + +>>> update_file.py databricks.yml name: gpt-4o-mini name: gpt-5o-mini + +>>> [CLI] bundle plan -o json + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> print_requests.py //serving-endpoints +{ + "method": "POST", + "path": "/api/2.0/serving-endpoints", + "body": { + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "name": "[ENDPOINT_ID]" + } +} +{ + "method": "PUT", + "path": "/api/2.0/serving-endpoints/[ENDPOINT_ID]/config", + "body": { + "served_entities": [ + { + "external_model": { + "name": "gpt-5o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + } +} + +>>> [CLI] serving-endpoints get [ENDPOINT_ID] +{ + "served_entities": [ + { + "external_model": { + "name": "gpt-5o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] +} + +>>> [CLI] bundle destroy --auto-approve +The following resources will be deleted: + delete resources.model_serving_endpoints.test_endpoint + +All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME] + +Deleting files... +Destroy complete! 
diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/config/script b/acceptance/bundle/resources/model_serving_endpoints/update/config/script new file mode 100755 index 0000000000..3f28edc843 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/config/script @@ -0,0 +1,23 @@ +#!/bin/bash +envsubst < databricks.yml.tmpl > databricks.yml + +cleanup() { + trace $CLI bundle destroy --auto-approve + rm -f out.requests.txt +} +trap cleanup EXIT + +trace $CLI bundle deploy + +ENDPOINT_ID=$($CLI bundle summary -o json | jq -r '.resources.model_serving_endpoints.test_endpoint.id') +echo "$ENDPOINT_ID:ENDPOINT_ID" >> ACC_REPLS +trace $CLI serving-endpoints get "${ENDPOINT_ID}" | jq '.config' + +trace update_file.py databricks.yml "name: gpt-4o-mini" "name: gpt-5o-mini" + +trace $CLI bundle plan -o json > out.plan.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy + +trace print_requests.py //serving-endpoints + +trace $CLI serving-endpoints get "${ENDPOINT_ID}" | jq '.config' diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/databricks.yml.tmpl b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/databricks.yml.tmpl new file mode 100644 index 0000000000..f852f7b078 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/databricks.yml.tmpl @@ -0,0 +1,21 @@ +bundle: + name: test-mse-update-email-notifications-$UNIQUE_NAME + +workspace: + root_path: ~/.bundle/$UNIQUE_NAME + +resources: + model_serving_endpoints: + test_endpoint: + name: test-endpoint-$UNIQUE_NAME + config: + served_entities: + - name: prod + external_model: + name: gpt-4o-mini + provider: openai + task: llm/v1/chat + openai_config: + openai_api_key: "{{secrets/test-scope/openai-key}}" + email_notifications: + on_update_success: ["user1@example.com"] diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.get_email_notifications.direct.json b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.get_email_notifications.direct.json new file mode 100644 index 0000000000..d0b3899865 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.get_email_notifications.direct.json @@ -0,0 +1,5 @@ +{ + "on_update_success": [ + "user2@example.com" + ] +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.get_email_notifications.terraform.json b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.get_email_notifications.terraform.json new file mode 100644 index 0000000000..76b5ef08c2 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.get_email_notifications.terraform.json @@ -0,0 +1,5 @@ +{ + "on_update_success": [ + "user1@example.com" + ] +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.plan.direct.json b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.plan.direct.json new file mode 100644 index 0000000000..32b2b72b9b --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.plan.direct.json @@ -0,0 +1,70 @@ +{ + "plan": { + "resources.model_serving_endpoints.test_endpoint": { + "action": "update", + "new_state": { + "value": { + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + 
"openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "email_notifications": { + "on_update_success": [ + "user2@example.com" + ] + }, + "name": "[ENDPOINT_ID]" + } + }, + "remote_state": { + "endpoint_details": { + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "creator": "[USERNAME]", + "email_notifications": { + "on_update_success": [ + "user1@example.com" + ] + }, + "id": "[UUID]", + "name": "[ENDPOINT_ID]", + "state": { + "config_update": "NOT_UPDATING" + } + }, + "endpoint_id": "[UUID]" + }, + "changes": { + "local": { + "email_notifications.on_update_success[0]": { + "action": "update" + } + } + } + } + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.plan.terraform.json b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.plan.terraform.json new file mode 100644 index 0000000000..bc939a91b2 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.plan.terraform.json @@ -0,0 +1,7 @@ +{ + "plan": { + "resources.model_serving_endpoints.test_endpoint": { + "action": "update" + } + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.requests.direct.json b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.requests.direct.json new file mode 100644 index 0000000000..bdf4a85b8b --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.requests.direct.json @@ -0,0 +1,38 @@ +{ + "method": "POST", + "path": "/api/2.0/serving-endpoints", + "body": { + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "email_notifications": { + "on_update_success": [ + "user1@example.com" + ] + }, + "name": "[ENDPOINT_ID]" + } +} +{ + "method": "PATCH", + "path": "/api/2.0/serving-endpoints/[ENDPOINT_ID]/notifications", + "body": { + "email_notifications": { + "on_update_success": [ + "user2@example.com" + ] + } + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.requests.terraform.json b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.requests.terraform.json new file mode 100644 index 0000000000..01f2571626 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.requests.terraform.json @@ -0,0 +1,27 @@ +{ + "method": "POST", + "path": "/api/2.0/serving-endpoints", + "body": { + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "email_notifications": { + "on_update_success": [ + "user1@example.com" + ] + }, + "name": "[ENDPOINT_ID]" + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.test.toml 
b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.test.toml new file mode 100644 index 0000000000..d560f1de04 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/out.test.toml @@ -0,0 +1,5 @@ +Local = true +Cloud = false + +[EnvMatrix] + DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct"] diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/output.txt b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/output.txt new file mode 100644 index 0000000000..1bb4647dc0 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/output.txt @@ -0,0 +1,37 @@ + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> [CLI] serving-endpoints get [ENDPOINT_ID] +{ + "on_update_success": [ + "user1@example.com" + ] +} + +>>> update_file.py databricks.yml user1@example.com user2@example.com + +>>> [CLI] bundle plan -o json + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +=== There is a bug in TF where it does not actually update the email notifications for a serving endpoint. +>>> print_requests.py //serving-endpoints + +>>> [CLI] serving-endpoints get [ENDPOINT_ID] + +>>> [CLI] bundle destroy --auto-approve +The following resources will be deleted: + delete resources.model_serving_endpoints.test_endpoint + +All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME] + +Deleting files... +Destroy complete! diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/script b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/script new file mode 100755 index 0000000000..15b43b4ffd --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/email-notifications/script @@ -0,0 +1,25 @@ +#!/bin/bash +envsubst < databricks.yml.tmpl > databricks.yml + +cleanup() { + trace $CLI bundle destroy --auto-approve + rm -f out.requests.txt +} +trap cleanup EXIT + +trace $CLI bundle deploy + +ENDPOINT_ID=$($CLI bundle summary -o json | jq -r '.resources.model_serving_endpoints.test_endpoint.id') +echo "$ENDPOINT_ID:ENDPOINT_ID" >> ACC_REPLS +trace $CLI serving-endpoints get "${ENDPOINT_ID}" | jq '.email_notifications' + +# Update email_notifications - change the email address +trace update_file.py databricks.yml "user1@example.com" "user2@example.com" + +trace $CLI bundle plan -o json > out.plan.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy + +title "There is a bug in TF where it does not actually update the email notifications for a serving endpoint."
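+# As a result, the recorded requests and the final GET differ per engine, so both are captured in per-engine files below: the direct engine sends PATCH /api/2.0/serving-endpoints/[ENDPOINT_ID]/notifications and the endpoint ends up with user2@example.com, while terraform sends no notifications update and the endpoint keeps user1@example.com.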
+trace print_requests.py //serving-endpoints > out.requests.$DATABRICKS_BUNDLE_ENGINE.json + +trace $CLI serving-endpoints get "${ENDPOINT_ID}" | jq '.email_notifications' > out.get_email_notifications.$DATABRICKS_BUNDLE_ENGINE.json diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/tags/databricks.yml.tmpl b/acceptance/bundle/resources/model_serving_endpoints/update/tags/databricks.yml.tmpl new file mode 100644 index 0000000000..80cbe6b591 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/tags/databricks.yml.tmpl @@ -0,0 +1,22 @@ +bundle: + name: test-mse-update-tags-$UNIQUE_NAME + +workspace: + root_path: ~/.bundle/$UNIQUE_NAME + +resources: + model_serving_endpoints: + test_endpoint: + name: test-endpoint-$UNIQUE_NAME + config: + served_entities: + - name: prod + external_model: + name: gpt-4o-mini + provider: openai + task: llm/v1/chat + openai_config: + openai_api_key: "{{secrets/test-scope/openai-key}}" + tags: + - key: team + value: my-team-one diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/tags/out.plan.direct.json b/acceptance/bundle/resources/model_serving_endpoints/update/tags/out.plan.direct.json new file mode 100644 index 0000000000..7d08c85c59 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/tags/out.plan.direct.json @@ -0,0 +1,72 @@ +{ + "plan": { + "resources.model_serving_endpoints.test_endpoint": { + "action": "update", + "new_state": { + "value": { + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "name": "[ENDPOINT_ID]", + "tags": [ + { + "key": "team", + "value": "my-team-two" + } + ] + } + }, + "remote_state": { + "endpoint_details": { + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "creator": "[USERNAME]", + "id": "[UUID]", + "name": "[ENDPOINT_ID]", + "state": { + "config_update": "NOT_UPDATING" + }, + "tags": [ + { + "key": "team", + "value": "my-team-one" + } + ] + }, + "endpoint_id": "[UUID]" + }, + "changes": { + "local": { + "tags[0].value": { + "action": "update" + } + } + } + } + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/tags/out.plan.terraform.json b/acceptance/bundle/resources/model_serving_endpoints/update/tags/out.plan.terraform.json new file mode 100644 index 0000000000..bc939a91b2 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/tags/out.plan.terraform.json @@ -0,0 +1,7 @@ +{ + "plan": { + "resources.model_serving_endpoints.test_endpoint": { + "action": "update" + } + } +} diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/tags/out.test.toml b/acceptance/bundle/resources/model_serving_endpoints/update/tags/out.test.toml new file mode 100644 index 0000000000..d560f1de04 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/tags/out.test.toml @@ -0,0 +1,5 @@ +Local = true +Cloud = false + +[EnvMatrix] + DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct"] diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/tags/output.txt b/acceptance/bundle/resources/model_serving_endpoints/update/tags/output.txt new file mode 100644 
index 0000000000..c651dfa5cc --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/tags/output.txt @@ -0,0 +1,83 @@ + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> [CLI] serving-endpoints get [ENDPOINT_ID] +[ + { + "key": "team", + "value": "my-team-one" + } +] + +>>> update_file.py databricks.yml value: my-team-one value: my-team-two + +>>> [CLI] bundle plan -o json + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> print_requests.py //serving-endpoints +{ + "method": "POST", + "path": "/api/2.0/serving-endpoints", + "body": { + "config": { + "served_entities": [ + { + "external_model": { + "name": "gpt-4o-mini", + "openai_config": { + "openai_api_key": "{{secrets/test-scope/openai-key}}" + }, + "provider": "openai", + "task": "llm/v1/chat" + }, + "name": "prod" + } + ] + }, + "name": "[ENDPOINT_ID]", + "tags": [ + { + "key": "team", + "value": "my-team-one" + } + ] + } +} +{ + "method": "PATCH", + "path": "/api/2.0/serving-endpoints/[ENDPOINT_ID]/tags", + "body": { + "add_tags": [ + { + "key": "team", + "value": "my-team-two" + } + ] + } +} + +>>> [CLI] serving-endpoints get [ENDPOINT_ID] +[ + { + "key": "team", + "value": "my-team-two" + } +] + +>>> [CLI] bundle destroy --auto-approve +The following resources will be deleted: + delete resources.model_serving_endpoints.test_endpoint + +All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME] + +Deleting files... +Destroy complete! 
diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/tags/script b/acceptance/bundle/resources/model_serving_endpoints/update/tags/script new file mode 100755 index 0000000000..cb1286e3aa --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/tags/script @@ -0,0 +1,23 @@ +#!/bin/bash +envsubst < databricks.yml.tmpl > databricks.yml + +cleanup() { + trace $CLI bundle destroy --auto-approve + rm -f out.requests.txt +} +trap cleanup EXIT + +trace $CLI bundle deploy + +ENDPOINT_ID=$($CLI bundle summary -o json | jq -r '.resources.model_serving_endpoints.test_endpoint.id') +echo "$ENDPOINT_ID:ENDPOINT_ID" >> ACC_REPLS +trace $CLI serving-endpoints get "${ENDPOINT_ID}" | jq '.tags' + +trace update_file.py databricks.yml "value: my-team-one" "value: my-team-two" + +trace $CLI bundle plan -o json > out.plan.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy + +trace print_requests.py //serving-endpoints + +trace $CLI serving-endpoints get "${ENDPOINT_ID}" | jq '.tags' diff --git a/acceptance/bundle/resources/model_serving_endpoints/update/test.toml b/acceptance/bundle/resources/model_serving_endpoints/update/test.toml new file mode 100644 index 0000000000..3aa4a19050 --- /dev/null +++ b/acceptance/bundle/resources/model_serving_endpoints/update/test.toml @@ -0,0 +1,7 @@ +Local = true +Cloud = false +RecordRequests = true + +Ignore = [ + "databricks.yml", +] diff --git a/bundle/deployplan/plan.go b/bundle/deployplan/plan.go index a02bd41315..710131b40b 100644 --- a/bundle/deployplan/plan.go +++ b/bundle/deployplan/plan.go @@ -6,6 +6,7 @@ import ( "slices" "sync" + "github.com/databricks/cli/libs/structs/structpath" "github.com/databricks/cli/libs/structs/structvar" ) @@ -53,6 +54,33 @@ type Trigger struct { Reason string `json:"reason,omitempty"` } +// HasChange checks if there are any changes for fields with the given prefix. +// This function is path-aware and correctly handles path component boundaries. +// For example: +// - HasChange("a") matches "a" and "a.b" but not "aa" +// - HasChange("config") matches "config" and "config.name" but not "configuration" +// +// Note: This function does not support wildcard patterns. 
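+// +// Illustrative sketch (the change set below is hypothetical, not taken from a real plan): +// +//	// suppose changes.Local contains the key "config.served_entities[0].name" +//	changes.HasChange("config")        // true: "config" is a prefix at a component boundary +//	changes.HasChange("configuration") // false: "config" is only a substring of "configuration" +//	changes.HasChange("tags")          // false: no such prefix recorded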
+func (c *Changes) HasChange(fieldPath string) bool { + if c == nil { + return false + } + + for field := range c.Local { + if structpath.HasPrefix(field, fieldPath) { + return true + } + } + + for field := range c.Remote { + if structpath.HasPrefix(field, fieldPath) { + return true + } + } + + return false +} + func (p *Plan) GetActions() []Action { actions := make([]Action, 0, len(p.Plan)) for key, entry := range p.Plan { diff --git a/bundle/direct/dresources/model_serving_endpoint.go b/bundle/direct/dresources/model_serving_endpoint.go index c01300bec3..6795df6407 100644 --- a/bundle/direct/dresources/model_serving_endpoint.go +++ b/bundle/direct/dresources/model_serving_endpoint.go @@ -10,7 +10,6 @@ import ( "github.com/databricks/cli/libs/utils" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/serving" - "golang.org/x/sync/errgroup" ) type ResourceModelServingEndpoint struct { @@ -141,6 +140,10 @@ func (r *ResourceModelServingEndpoint) WaitAfterCreate(ctx context.Context, conf return r.waitForEndpointReady(ctx, config.Name) } +func (r *ResourceModelServingEndpoint) WaitAfterUpdate(ctx context.Context, config *serving.CreateServingEndpoint) (*RefreshOutput, error) { + return r.waitForEndpointReady(ctx, config.Name) +} + func (r *ResourceModelServingEndpoint) updateAiGateway(ctx context.Context, id string, aiGateway *serving.AiGatewayConfig) error { if aiGateway == nil { req := serving.PutAiGatewayRequest{ @@ -273,25 +276,41 @@ func (r *ResourceModelServingEndpoint) updateTags(ctx context.Context, id string return nil } -func (r *ResourceModelServingEndpoint) DoUpdate(ctx context.Context, id string, config *serving.CreateServingEndpoint, _ *Changes) (*RefreshOutput, error) { - errGroup := errgroup.Group{} - errGroup.Go(func() error { - return r.updateAiGateway(ctx, id, config.AiGateway) - }) - errGroup.Go(func() error { - return r.updateConfig(ctx, id, config.Config) - }) - errGroup.Go(func() error { - return r.updateNotifications(ctx, id, config.EmailNotifications) - }) - errGroup.Go(func() error { - return r.updateTags(ctx, id, config.Tags) - }) - return nil, errGroup.Wait() -} +func (r *ResourceModelServingEndpoint) DoUpdate(ctx context.Context, id string, config *serving.CreateServingEndpoint, changes *Changes) (*RefreshOutput, error) { + var err error -func (r *ResourceModelServingEndpoint) WaitAfterUpdate(ctx context.Context, config *serving.CreateServingEndpoint) (*RefreshOutput, error) { - return r.waitForEndpointReady(ctx, config.Name) + // Terraform makes these API calls sequentially, and we do the same here. + // As of 1 Dec 2025 it is unknown whether these APIs are safe to call in parallel (we did not check). + // https://github.com/databricks/terraform-provider-databricks/blob/c61a32300445f84efb2bb6827dee35e6e523f4ff/serving/resource_model_serving.go#L373 + if changes.HasChange("tags") { + err = r.updateTags(ctx, id, config.Tags) + if err != nil { + return nil, err + } + } + + if changes.HasChange("ai_gateway") { + err = r.updateAiGateway(ctx, id, config.AiGateway) + if err != nil { + return nil, err + } + } + + if changes.HasChange("config") { + err = r.updateConfig(ctx, id, config.Config) + if err != nil { + return nil, err + } + } + + if changes.HasChange("email_notifications") { + err = r.updateNotifications(ctx, id, config.EmailNotifications) + if err != nil { + return nil, err + } + } + + return nil, nil } func (r *ResourceModelServingEndpoint) DoDelete(ctx context.Context, id string) error { diff --git a/libs/structs/structpath/path.go b/libs/structs/structpath/path.go index 1627ae6f7d..a7870a4b0e 100644 --- a/libs/structs/structpath/path.go +++ b/libs/structs/structpath/path.go @@ -623,3 +623,37 @@ func (p *PathNode) Prefix(n int) *PathNode { return current } + +// HasPrefix tests whether the path string s begins with prefix. +// Unlike strings.HasPrefix, this function is path-aware and correctly handles +// path component boundaries. For example: +// - HasPrefix("a.b", "a") returns true (matches field "a") +// - HasPrefix("aa", "a") returns false (field "aa" is different from "a") +// - HasPrefix("a[0]", "a") returns true (matches field "a") +// - HasPrefix("config.name", "config") returns true +func HasPrefix(s, prefix string) bool { + // Handle edge cases + if prefix == "" { + return true + } + if s == "" { + return false + } + if s == prefix { + return true + } + + // Check if s starts with prefix using string comparison + if !strings.HasPrefix(s, prefix) { + return false + } + + // Ensure the match is at a path boundary + // The character after the prefix must be a path separator: '.', '[', or end of string + if len(s) > len(prefix) { + nextChar := s[len(prefix)] + return nextChar == '.'
|| nextChar == '[' + } + + return true +} diff --git a/libs/structs/structpath/path_test.go b/libs/structs/structpath/path_test.go index c4e427666d..6ab75b542d 100644 --- a/libs/structs/structpath/path_test.go +++ b/libs/structs/structpath/path_test.go @@ -655,3 +655,99 @@ func TestPureReferenceToPath(t *testing.T) { }) } } + +func TestHasPrefix(t *testing.T) { + tests := []struct { + name string + s string + prefix string + expected bool + }{ + // Edge cases + { + name: "empty prefix", + s: "a.b.c", + prefix: "", + expected: true, + }, + { + name: "empty string", + s: "", + prefix: "a", + expected: false, + }, + { + name: "exact match", + s: "config", + prefix: "config", + expected: true, + }, + + // Correct matches - path boundary aware + { + name: "simple field match", + s: "a.b", + prefix: "a", + expected: true, + }, + { + name: "nested field match", + s: "config.database.name", + prefix: "config.database", + expected: true, + }, + { + name: "field with array index", + s: "items[3].name", + prefix: "items", + expected: true, + }, + { + name: "array with prefix match", + s: "items[0].name", + prefix: "items[0]", + expected: true, + }, + { + name: "field with bracket notation", + s: "config['spark.conf'].value", + prefix: "config['spark.conf']", + expected: true, + }, + + // Incorrect matches - should NOT match + { + name: "substring match without boundary", + s: "ai_gateway", + prefix: "ai", + expected: false, + }, + { + name: "different nested field", + s: "configuration.name", + prefix: "config", + expected: false, + }, + + // wildcard patterns are NOT supported - treated as literals + { + name: "wildcard pattern not respected - star", + s: "aaa", + prefix: "a*", + expected: false, + }, + { + name: "wildcard pattern not respected - bracket", + s: "a[1]", + prefix: "a[*]", + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := HasPrefix(tt.s, tt.prefix) + assert.Equal(t, tt.expected, result, "HasPrefix(%q, %q)", tt.s, tt.prefix) + }) + } +}
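+ +// A small companion test (an illustrative addition, a sketch rather than part +// of the original change) covering a few more boundary cases implied by the +// implementation above. +func TestHasPrefixBoundaries(t *testing.T) { +	// 'c' after the matched prefix is not a component boundary. +	assert.False(t, HasPrefix("a.bc", "a.b")) +	// A prefix longer than the path never matches. +	assert.False(t, HasPrefix("a", "a.b")) +	// An index bracket immediately after the prefix is a valid boundary. +	assert.True(t, HasPrefix("tags[0].value", "tags")) +}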