Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 10 additions & 8 deletions tests/integration/godog/features/model/inference.feature
Original file line number Diff line number Diff line change
Expand Up @@ -7,14 +7,16 @@ Feature: Basic model inferencing
Then the model should eventually become Ready
When I send a valid HTTP inference request with timeout "20s"
Then expect http response status code "200"
And expect http response body to contain valid JSON
When I send a valid gRPC inference request with timeout "20s"
And expect gRPC response to not return an error

Examples:
| model |
| iris |
# | income-xgb | having errors with GRPC
# | mnist-onnx |
# | income-lgb | having errors with response
| tfsimple1 |
| wine |
# | mnist-pytorch | having errors with response
| model |
| mnist-pytorch |
| wine |
| tfsimple1 |
| iris |
| income-xgb |
| income-lgb |
| mnist-onnx |
29 changes: 22 additions & 7 deletions tests/integration/godog/features/model/server_setup.feature
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
@Server
@ServerSetup
Feature: Server setup
Deploys an mlserver with one replica. We ensure the pods
become ready and remove any other server pods for different
servers.

@ServerSetup
@ServerSetup @ServerSetupMLServer
Scenario: Deploy mlserver Server and remove other servers
Given I deploy server spec with timeout "10s":
"""
Expand All @@ -15,14 +15,29 @@ Feature: Server setup
spec:
replicas: 1
serverConfig: mlserver
requirements:
- sklearn
- mlserver
storageUri: gs://seldon-models/scv2/samples/mlserver_1.3.5/iris-sklearn
"""
When the server should eventually become Ready with timeout "30s"
Then ensure only "1" pod(s) are deployed for server and they are Ready
And remove any other server deployments

@ServerSetup @ServerSetupTritonServer
Scenario: Deploy triton Server
Given I deploy server spec with timeout "10s":
"""
apiVersion: mlops.seldon.io/v1alpha1
kind: Server
metadata:
name: godog-triton
spec:
replicas: 1
serverConfig: triton
"""
When the server should eventually become Ready with timeout "30s"
Then ensure only "1" pod(s) are deployed for server and they are Ready


@ServerSetup @ServerClean
Scenario: Remove any other pre-existing servers
Given I remove any other server deployments which are not "godog-mlserver,godog-triton"

# TODO decide if we want to keep this, if we keep testers will need to ensure they don't run this tag when running all
# all features in this directory, as tests will fail when server is deleted. We can not delete and it's up to the
Expand Down
42 changes: 38 additions & 4 deletions tests/integration/godog/steps/infer_steps.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,9 @@ import (

"github.com/cucumber/godog"
"github.com/seldonio/seldon-core/apis/go/v2/mlops/v2_dataplane"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/metadata"
"google.golang.org/protobuf/encoding/protojson"
)

type inference struct {
Expand All @@ -33,6 +35,7 @@ type inference struct {
httpPort uint
lastHTTPResponse *http.Response
lastGRPCResponse lastGRPCResponse
log logrus.FieldLogger
}

func LoadInferenceSteps(scenario *godog.ScenarioContext, w *World) {
Expand Down Expand Up @@ -61,6 +64,14 @@ func LoadInferenceSteps(scenario *godog.ScenarioContext, w *World) {
scenario.Step(`^expect http response body to contain JSON:$`, w.infer.httpRespCheckBodyContainsJSON)
scenario.Step(`^expect gRPC response body to contain JSON:$`, w.infer.gRPCRespCheckBodyContainsJSON)
scenario.Step(`^expect gRPC response error to contain "([^"]+)"`, w.infer.gRPCRespContainsError)
scenario.Step(`^expect gRPC response to not return an error$`, w.infer.gRPCRespContainsNoError)
scenario.Step(`^expect http response body to contain valid JSON$`, func() error {
testModel, ok := testModels[w.currentModel.modelType]
if !ok {
return fmt.Errorf("model %s not found", w.currentModel.modelType)
}
return w.infer.doHttpRespCheckBodyContainsJSON(testModel.ValidJSONResponse)
})
}

func (i *inference) doHTTPModelInferenceRequest(ctx context.Context, modelName, body string) error {
Expand Down Expand Up @@ -102,7 +113,7 @@ func (i *inference) sendHTTPModelInferenceRequestFromModel(ctx context.Context,
return fmt.Errorf("could not find test model %s", m.model.Name)
}

return i.doHTTPModelInferenceRequest(ctx, m.modelName, testModel.ValidInferenceRequest)
return i.doHTTPModelInferenceRequest(ctx, m.modelName, testModel.ValidHTTPInferenceRequest)
}

func httpScheme(useSSL bool) string {
Expand All @@ -121,7 +132,7 @@ func (i *inference) sendGRPCModelInferenceRequestFromModel(ctx context.Context,
if !ok {
return fmt.Errorf("could not find test model %s", m.model.Name)
}
return i.doGRPCModelInferenceRequest(ctx, m.modelName, testModel.ValidInferenceRequest)
return i.doGRPCModelInferenceRequest(ctx, m.modelName, testModel.ValidGRPCInferenceRequest)
}

func (i *inference) doGRPCModelInferenceRequest(
Expand All @@ -130,15 +141,19 @@ func (i *inference) doGRPCModelInferenceRequest(
payload string,
) error {
var req v2_dataplane.ModelInferRequest
if err := json.Unmarshal([]byte(payload), &req); err != nil {
if err := protojson.Unmarshal([]byte(payload), &req); err != nil {
return fmt.Errorf("could not unmarshal gRPC json payload: %w", err)
}
req.ModelName = model

md := metadata.Pairs("seldon-model", model)
ctx = metadata.NewOutgoingContext(ctx, md)

i.log.Debugf("sending gRPC model inference %+v", &req)

resp, err := i.grpc.ModelInfer(ctx, &req)
i.log.Debugf("grpc model infer response: %+v", resp)
i.log.Debugf("grpc model infer error: %+v", err)

i.lastGRPCResponse.response = resp
i.lastGRPCResponse.err = err
Expand Down Expand Up @@ -212,6 +227,16 @@ func jsonContainsObjectSubset(jsonStr, needleStr string) (bool, error) {
return containsSubset(needle, hay), nil
}

// gRPCRespContainsNoError verifies the most recent gRPC inference call
// succeeded: it must have produced no error and a non-nil response.
// It is the assertion behind the "expect gRPC response to not return an
// error" godog step.
func (i *inference) gRPCRespContainsNoError() error {
	last := i.lastGRPCResponse
	switch {
	case last.err != nil:
		// The transport/server reported a failure; surface it wrapped.
		return fmt.Errorf("grpc response contains error: %w", last.err)
	case last.response == nil:
		// No error, but also nothing came back — still a failed step.
		return errors.New("grpc contains no response")
	default:
		return nil
	}
}

func (i *inference) gRPCRespContainsError(err string) error {
if i.lastGRPCResponse.err == nil {
return errors.New("no gRPC response error found")
Expand All @@ -226,6 +251,9 @@ func (i *inference) gRPCRespContainsError(err string) error {

func (i *inference) gRPCRespCheckBodyContainsJSON(expectJSON *godog.DocString) error {
if i.lastGRPCResponse.response == nil {
if i.lastGRPCResponse.err != nil {
return fmt.Errorf("no gRPC response, error found: %s", i.lastGRPCResponse.err.Error())
}
return errors.New("no gRPC response found")
}

Expand All @@ -234,6 +262,7 @@ func (i *inference) gRPCRespCheckBodyContainsJSON(expectJSON *godog.DocString) e
return fmt.Errorf("could not marshal gRPC json: %w", err)
}

i.log.Debugf("checking gRPC response: %s contains %s", string(gotJson), expectJSON.Content)
ok, err := jsonContainsObjectSubset(string(gotJson), expectJSON.Content)
if err != nil {
return fmt.Errorf("could not check if json contains object: %w", err)
Expand All @@ -247,6 +276,10 @@ func (i *inference) gRPCRespCheckBodyContainsJSON(expectJSON *godog.DocString) e
}

// httpRespCheckBodyContainsJSON adapts the godog DocString step argument to
// the plain-string checker, so the same body-subset comparison can be reused
// by steps that supply expected JSON programmatically.
func (i *inference) httpRespCheckBodyContainsJSON(expectJSON *godog.DocString) error {
	expected := expectJSON.Content
	return i.doHttpRespCheckBodyContainsJSON(expected)
}

func (i *inference) doHttpRespCheckBodyContainsJSON(expectJSON string) error {
if i.lastHTTPResponse == nil {
return errors.New("no http response found")
}
Expand All @@ -256,7 +289,8 @@ func (i *inference) httpRespCheckBodyContainsJSON(expectJSON *godog.DocString) e
return fmt.Errorf("could not read response body: %w", err)
}

ok, err := jsonContainsObjectSubset(string(body), expectJSON.Content)
i.log.Debugf("checking HTTP response: %s contains %s", string(body), expectJSON)
ok, err := jsonContainsObjectSubset(string(body), expectJSON)
if err != nil {
return fmt.Errorf("could not check if json contains object: %w", err)
}
Expand Down
Loading
Loading