diff --git a/src/model_instance_state.cc b/src/model_instance_state.cc
index 19cae27..d634f3b 100644
--- a/src/model_instance_state.cc
+++ b/src/model_instance_state.cc
@@ -240,6 +240,27 @@ ModelInstanceState::Create(
   return nullptr;  // success
 }
 
+void
+ModelInstanceState::CreateCudaEvents(const int32_t& device_id)
+{
+#ifdef TRITON_ENABLE_GPU
+  // Need to set the CUDA context so that the context that events are
+  // created on match with contexts that events are recorded with.
+  THROW_IF_BACKEND_INSTANCE_ERROR(ConvertCUDAStatusToTritonError(
+      cudaSetDevice(device_id), TRITONSERVER_ERROR_INTERNAL,
+      "Failed to set the device"));
+  THROW_IF_BACKEND_INSTANCE_ERROR(ConvertCUDAStatusToTritonError(
+      cudaEventCreate(&compute_input_start_event_), TRITONSERVER_ERROR_INTERNAL,
+      "Failed to create cuda event"));
+  THROW_IF_BACKEND_INSTANCE_ERROR(ConvertCUDAStatusToTritonError(
+      cudaEventCreate(&compute_infer_start_event_), TRITONSERVER_ERROR_INTERNAL,
+      "Failed to create cuda event"));
+  THROW_IF_BACKEND_INSTANCE_ERROR(ConvertCUDAStatusToTritonError(
+      cudaEventCreate(&compute_output_start_event_),
+      TRITONSERVER_ERROR_INTERNAL, "Failed to create cuda event"));
+#endif
+}
+
 void
 ModelInstanceState::Execute(
     std::vector<TRITONBACKEND_Response*>* responses,
@@ -1230,6 +1251,12 @@ ModelInstanceState::SetInputTensors(
   return nullptr;
 }
 
+ModelState*
+ModelInstanceState::StateForModel() const
+{
+  return model_state_;
+}
+
 TRITONSERVER_Error*
 ModelInstanceState::ValidateBooleanSequenceControl(
     triton::common::TritonJson::Value& sequence_batching,
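
The diff above creates the three phase-start events but does not show how they are consumed. As a minimal, hypothetical sketch (not part of this change), the events could later be recorded on the instance's CUDA stream at the start of each compute phase and the elapsed intervals read back with the standard CUDA event APIs; the helper name ElapsedMs and the stream shown in the comments are assumptions for illustration only, and error codes are ignored for brevity.

#ifdef TRITON_ENABLE_GPU
#include <cuda_runtime_api.h>

// Hypothetical helper: milliseconds elapsed between two events that were
// recorded on the same stream, e.g.
//   cudaEventRecord(compute_input_start_event_, stream);   // before input copies
//   cudaEventRecord(compute_infer_start_event_, stream);   // before inference
//   cudaEventRecord(compute_output_start_event_, stream);  // before output copies
static float
ElapsedMs(cudaEvent_t start, cudaEvent_t end)
{
  // Block until the later event has completed so its timestamp is valid.
  cudaEventSynchronize(end);
  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, end);
  return ms;
}

// Example: compute-input time is the gap between the input and infer events,
// compute-infer time is the gap between the infer and output events.
//   float input_ms = ElapsedMs(compute_input_start_event_, compute_infer_start_event_);
//   float infer_ms = ElapsedMs(compute_infer_start_event_, compute_output_start_event_);
#endif  // TRITON_ENABLE_GPU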