Skip to content

Commit f2859cf

Browse files
committed
test for model deletion steps
1 parent bfa38fb commit f2859cf

File tree

7 files changed

+235
-101
lines changed

7 files changed

+235
-101
lines changed

tests/integration/godog/features/model/explicit_model_deployment.feature renamed to tests/integration/godog/features/model/custom_model_deployment.feature

Lines changed: 40 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,25 @@
1-
@ModelDeployment @Functional @Models @Explicit
1+
@ModelDeployment @Functional @Models @CustomModelSpec
22
Feature: Custom Model deployment
33
I deploy a custom model spec, wait for model to be deployed to the servers
4-
and send an inference request to that model
4+
and send an inference request to that model and expect a successful response.
5+
I then delete the model and send inference requests and expect them to fail.
56

67
Scenario: Load model and send inference request to envoy
7-
Given I deploy model spec:
8+
Given I deploy model spec with timeout "10s":
89
"""
910
apiVersion: mlops.seldon.io/v1alpha1
1011
kind: Model
1112
metadata:
12-
name: iris
13+
name: alpha-1
1314
spec:
1415
replicas: 1
1516
requirements:
1617
- sklearn
1718
- mlserver
1819
storageUri: gs://seldon-models/scv2/samples/mlserver_1.3.5/iris-sklearn
1920
"""
20-
When the model "iris" should eventually become Ready with timeout "20s"
21-
Then send HTTP inference request with timeout "20s" to model "iris" with payload:
21+
When the model "alpha-1" should eventually become Ready with timeout "20s"
22+
Then send HTTP inference request with timeout "20s" to model "alpha-1" with payload:
2223
"""
2324
{
2425
"inputs": [
@@ -51,7 +52,7 @@ Feature: Explicit Model deployment
5152
}
5253
] }
5354
"""
54-
Then send gRPC inference request with timeout "20s" to model "iris" with payload:
55+
Then send gRPC inference request with timeout "20s" to model "alpha-1" with payload:
5556
"""
5657
{
5758
"inputs": [
@@ -82,4 +83,35 @@ Feature: Explicit Model deployment
8283
"contents": {"int64_contents" : [2]}
8384
}
8485
] }
85-
"""
86+
"""
87+
Then delete the model "alpha-1" with timeout "10s"
88+
Then send HTTP inference request with timeout "20s" to model "alpha-1" with payload:
89+
"""
90+
{
91+
"inputs": [
92+
{
93+
"name": "predict",
94+
"shape": [1, 4],
95+
"datatype": "FP32",
96+
"data": [[1, 2, 3, 4]]
97+
}
98+
]
99+
}
100+
"""
101+
And expect http response status code "404"
102+
Then send gRPC inference request with timeout "20s" to model "alpha-1" with payload:
103+
"""
104+
{
105+
"inputs": [
106+
{
107+
"name": "predict",
108+
"shape": [1, 4],
109+
"datatype": "FP32",
110+
"contents": {
111+
"int64_contents" : [1, 2, 3, 4]
112+
}
113+
}
114+
]
115+
}
116+
"""
117+
And expect gRPC response error to contain "Unimplemented"
Lines changed: 114 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,114 @@
1+
/*
2+
Copyright (c) 2024 Seldon Technologies Ltd.
3+
4+
Use of this software is governed by
5+
(1) the license included in the LICENSE file or
6+
(2) if the license included in the LICENSE file is the Business Source License 1.1,
7+
the Change License after the Change Date as each is defined in accordance with the LICENSE file.
8+
*/
9+
10+
package steps
11+
12+
import (
13+
"context"
14+
"errors"
15+
"fmt"
16+
17+
"github.com/seldonio/seldon-core/operator/v2/apis/mlops/v1alpha1"
18+
k8serrors "k8s.io/apimachinery/pkg/api/errors"
19+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
20+
"k8s.io/apimachinery/pkg/watch"
21+
)
22+
23+
// deleteModel we have to wait for model to be deleted, as there is a finalizer attached so the scheduler can confirm
24+
// when model has been unloaded from inference pod, model-gw, dataflow-engine, pipeline-gw and controller will remove
25+
// finalizer so deletion can complete.
26+
func (m *Model) deleteModel(ctx context.Context, model string) error {
27+
modelCR, err := m.k8sClient.MlopsV1alpha1().Models(m.namespace).Get(ctx, model, metav1.GetOptions{})
28+
if err != nil {
29+
if k8serrors.IsNotFound(err) {
30+
return fmt.Errorf("model %s can't be deleted, does not exist", model)
31+
}
32+
return fmt.Errorf("failed to get model %s", model)
33+
}
34+
35+
if err := m.k8sClient.MlopsV1alpha1().Models(m.namespace).Delete(ctx, model, metav1.DeleteOptions{}); err != nil {
36+
return fmt.Errorf("failed deleting model: %w", err)
37+
}
38+
m.log.Debugf("Delete request for model %s sent", model)
39+
40+
watcher, err := m.k8sClient.MlopsV1alpha1().Models(m.namespace).Watch(ctx, metav1.ListOptions{
41+
FieldSelector: fmt.Sprintf("metadata.name=%s", model),
42+
ResourceVersion: modelCR.ResourceVersion,
43+
})
44+
if err != nil {
45+
return fmt.Errorf("failed watching model: %w", err)
46+
}
47+
defer watcher.Stop()
48+
49+
m.log.Debugf("Waiting for %s model deletion confirmation", model)
50+
51+
for {
52+
select {
53+
case <-ctx.Done():
54+
return ctx.Err()
55+
case event, ok := <-watcher.ResultChan():
56+
if !ok {
57+
return errors.New("watcher channel closed")
58+
}
59+
if event.Type == watch.Error {
60+
return fmt.Errorf("watch error: %v", event.Object)
61+
}
62+
if event.Type == watch.Deleted {
63+
return nil
64+
}
65+
}
66+
}
67+
}
68+
69+
func (m *Model) waitForModelReady(ctx context.Context, model string) error {
70+
foundModel, err := m.k8sClient.MlopsV1alpha1().Models(m.namespace).Get(ctx, model, metav1.GetOptions{})
71+
if err != nil {
72+
return fmt.Errorf("failed getting model: %w", err)
73+
}
74+
75+
if foundModel.Status.IsReady() {
76+
return nil
77+
}
78+
79+
watcher, err := m.k8sClient.MlopsV1alpha1().Models(m.namespace).Watch(ctx, metav1.ListOptions{
80+
FieldSelector: fmt.Sprintf("metadata.name=%s", model),
81+
ResourceVersion: foundModel.ResourceVersion,
82+
Watch: true,
83+
})
84+
if err != nil {
85+
return fmt.Errorf("failed subscribed to watch model: %w", err)
86+
}
87+
defer watcher.Stop()
88+
89+
for {
90+
select {
91+
case <-ctx.Done():
92+
return ctx.Err()
93+
case event, ok := <-watcher.ResultChan():
94+
if !ok {
95+
return fmt.Errorf("watch channel closed")
96+
}
97+
98+
if event.Type == watch.Error {
99+
return fmt.Errorf("watch error: %v", event.Object)
100+
}
101+
102+
if event.Type == watch.Added || event.Type == watch.Modified {
103+
model := event.Object.(*v1alpha1.Model)
104+
if model.Status.IsReady() {
105+
return nil
106+
}
107+
}
108+
109+
if event.Type == watch.Deleted {
110+
return fmt.Errorf("resource was deleted")
111+
}
112+
}
113+
}
114+
}

tests/integration/godog/steps/explicit_model_steps.go

Lines changed: 0 additions & 63 deletions
This file was deleted.

tests/integration/godog/steps/infer.go

Lines changed: 16 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -61,10 +61,10 @@ func (i *inference) sendGRPCModelInferenceRequest(ctx context.Context, model str
6161
ctx = metadata.NewOutgoingContext(context.Background(), md)
6262
resp, err := i.grpc.ModelInfer(ctx, msg)
6363
if err != nil {
64-
return fmt.Errorf("could not send grpc model inference: %w", err)
64+
i.lastGRPCResponse.err = err
6565
}
6666

67-
i.lastGRPCResponse = resp
67+
i.lastGRPCResponse.response = resp
6868
return nil
6969
}
7070

@@ -126,12 +126,24 @@ func jsonContainsObjectSubset(jsonStr, needleStr string) (bool, error) {
126126
return containsSubset(needle, hay), nil
127127
}
128128

129+
func (i *inference) gRPCRespContainsError(err string) error {
130+
if i.lastGRPCResponse.err == nil {
131+
return errors.New("no gRPC response error found")
132+
}
133+
134+
if strings.Contains(i.lastGRPCResponse.err.Error(), err) {
135+
return nil
136+
}
137+
138+
return fmt.Errorf("error %s does not contain %s", i.lastGRPCResponse.err.Error(), err)
139+
}
140+
129141
func (i *inference) gRPCRespCheckBodyContainsJSON(expectJSON *godog.DocString) error {
130-
if i.lastGRPCResponse == nil {
142+
if i.lastGRPCResponse.response == nil {
131143
return errors.New("no gRPC response found")
132144
}
133145

134-
gotJson, err := json.Marshal(i.lastGRPCResponse)
146+
gotJson, err := json.Marshal(i.lastGRPCResponse.response)
135147
if err != nil {
136148
return fmt.Errorf("could not marshal gRPC json: %w", err)
137149
}

0 commit comments

Comments
 (0)