We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 8c1c6bd commit bee8fde — Copy full SHA for bee8fde
src/libtorch.cc
@@ -554,12 +554,9 @@ class ModelInstanceState : public BackendModelInstance {
554
// If the model supports batching.
555
bool supports_batching_;
556
557
-#ifdef TRITON_ENABLE_GPU
558
- // PyTorch stream used for execution of inferences.
559
cudaEvent_t compute_input_start_event_;
560
cudaEvent_t compute_infer_start_event_;
561
cudaEvent_t compute_output_start_event_;
562
-#endif
563
};
564
565
TRITONSERVER_Error*
@@ -616,7 +613,6 @@ ModelInstanceState::ModelInstanceState(
616
613
}
617
614
618
615
619
-
620
// If this is a sequence model then make sure that the required
621
// inputs are present in the model and have the correct shape and
622
// datatype.
0 commit comments