My model initialization and model release code are as follows:
```cpp
int OBJDetecor::ModelInit(){
    // ModelPath_ = "/***/***.mnn";
    std::cout << "Current state: " << stateToString(getCurrentState()) << std::endl;
    if (!transitionTo(current_state_, ModelState::INITIALIZING)){
        std::lock_guard<std::mutex> lock(mutex_);
        DoInit = 0;
        return -1;
    }
    auto cleanupOnFailure = [this]() {
        // Release the manually created resources
        if (nhwcTensor_) {
            // Assuming MNN::Tensor has a corresponding destroy function;
            // if not, make sure the tensor is released the correct way
            // nhwcTensor_.reset();
            MNN::Tensor::destroy(nhwcTensor_);
            nhwcTensor_ = nullptr;
        }
        // Transition the state back to UNINITIALIZED
        transitionTo(current_state_, ModelState::UNINITIALIZED);
    };
    try{
        // Create the interpreter
        auto new_net = std::shared_ptr<MNN::Interpreter>(
            MNN::Interpreter::createFromFile(ModelPath_.c_str()));
        if (!new_net) {
            std::cerr << "Failed to create interpreter. Model file might be invalid or not found: " << ModelPath_ << std::endl;
            cleanupOnFailure();
            return -1;
        }
        // Configure the backend
        MNN::ScheduleConfig NetConfig_;
        MNN::BackendConfig BackConfig_;
        NetConfig_.type = MNN_FORWARD_OPENCL;
        NetConfig_.numThread = 1;
        BackConfig_.precision = static_cast<MNN::BackendConfig::PrecisionMode>(
            MNN::BackendConfig::Precision_Low);
        NetConfig_.backendConfig = &BackConfig_;
        std::cout << "create session..." << std::endl;
        // Create the session
        new_net->setSessionMode(MNN::Interpreter::SessionMode::Session_Release);
        auto new_session = new_net->createSession(NetConfig_);
        if (!new_session) {
            std::cerr << "Failed to create MNN session. Check backend configuration." << std::endl;
            cleanupOnFailure();
            return -1;
        }
        std::cout << "create session finished..." << std::endl;
        // Get the input and output tensors
        auto new_input_tensor = new_net->getSessionInput(new_session, "input");
        if (!new_input_tensor) {
            std::cerr << "Failed to get input tensor with name 'input'" << std::endl;
            cleanupOnFailure();
            return -1;
        }
        std::cout << "get input tensor finished..." << std::endl;
        auto shape = new_input_tensor->shape();
        std::cout << "InputTensor_ dimensions: ["
                  << shape[0] << ", " << shape[1] << ", " << shape[2] << ", " << shape[3] << "]"
                  << std::endl;
        auto new_output_tensor_p3 = new_net->getSessionOutput(new_session, "tf.concat_2");
        if (!new_output_tensor_p3) {
            std::cerr << "Failed to get output tensor p3" << std::endl;
            cleanupOnFailure();
            return -1;
        }
        auto new_output_tensor_p4 = new_net->getSessionOutput(new_session, "tf.concat_5");
        if (!new_output_tensor_p4) {
            std::cerr << "Failed to get output tensor p4" << std::endl;
            cleanupOnFailure();
            return -1;
        }
        auto new_output_tensor_p5 = new_net->getSessionOutput(new_session, "tf.concat_8");
        if (!new_output_tensor_p5) {
            std::cerr << "Failed to get output tensor p5" << std::endl;
            cleanupOnFailure();
            return -1;
        }
        std::cout << "get output tensor finished..." << std::endl;
        // Create the tensor used for preprocessing
        std::vector<int> dims{1, ModelSize_, ModelSize_, 3};
        auto new_nhwc_tensor = MNN::Tensor::create<float>(dims, nullptr, MNN::Tensor::CAFFE);
        if (!new_nhwc_tensor) {
            std::cerr << "Failed to create nhwcTensor_" << std::endl;
            cleanupOnFailure();
            return -1;
        }
        std::cout << "init nhwc tensor finished..." << std::endl;
        // All steps succeeded; update the member variables
        OBJNet_ = std::move(new_net);
        Session_ = new_session;  // Note: this is a raw pointer whose lifetime is managed by OBJNet_
        InputTensor_ = new_input_tensor;
        HeadTensorP3_ = new_output_tensor_p3;
        HeadTensorP4_ = new_output_tensor_p4;
        HeadTensorP5_ = new_output_tensor_p5;
        nhwcTensor_ = std::move(new_nhwc_tensor);
        std::cout << "modify local variables finished..." << std::endl;
        // Transition the state to READY
        if (!transitionTo(current_state_, ModelState::READY)) {
            cleanupOnFailure();
            return -1;
        }
        {
            std::lock_guard<std::mutex> lock(mutex_);
            DoInit = 0;
        }
        std::cout << "Finish OBJdetector configuration" << std::endl;
        return 0;
    }catch(...){
        std::cerr << "OBJDetector init failed!!! unknown error!!!" << std::endl;
        cleanupOnFailure();
        return -1;
    }
}
```
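(`ModelState`, `transitionTo`, and `stateToString` are not shown in this issue; the sketch below is a hypothetical reconstruction of what they are assumed to look like, based only on how the functions above use them. The exact types and transition rules in the real project may differ.)

```cpp
// Hypothetical reconstruction of the state machine used above; the real
// definitions are not part of this issue, so names and semantics are assumed.
#include <atomic>
#include <string>

enum class ModelState { UNINITIALIZED, INITIALIZING, READY, RELEASING };

// Assumed semantics: move to `next` only if it is a legal transition from
// the current state, and report whether the transition happened.
static bool transitionTo(std::atomic<ModelState>& state, ModelState next) {
    ModelState cur = state.load();
    if (next == ModelState::INITIALIZING && cur != ModelState::UNINITIALIZED) return false;
    if (next == ModelState::RELEASING && cur != ModelState::READY) return false;
    return state.compare_exchange_strong(cur, next);
}

static std::string stateToString(ModelState s) {
    switch (s) {
        case ModelState::UNINITIALIZED: return "UNINITIALIZED";
        case ModelState::INITIALIZING:  return "INITIALIZING";
        case ModelState::READY:         return "READY";
        case ModelState::RELEASING:     return "RELEASING";
    }
    return "UNKNOWN";
}
```

The model release code is as follows: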
```cpp
int OBJDetecor::ModelRelease(){
    std::cout << "model release......, Current state: " << stateToString(getCurrentState()) << std::endl;
    // Check and transition the state
    if (!transitionTo(current_state_, ModelState::RELEASING)) {
        std::lock_guard<std::mutex> lock(mutex_);
        DoRelease = 0;
        return -1;
    }
    try{
        // 1. Release the session
        if (Session_) {
            std::cout << "Releasing session..." << std::endl;
            OBJNet_->releaseSession(Session_);
            Session_ = nullptr;
        }
        if (nhwcTensor_) {
            std::cout << "Deleting nhwcTensor_..." << std::endl;
            // delete nhwcTensor_;
            MNN::Tensor::destroy(nhwcTensor_);
            nhwcTensor_ = nullptr;
        }
        InputTensor_ = nullptr;
        HeadTensorP3_ = nullptr;
        HeadTensorP4_ = nullptr;
        HeadTensorP5_ = nullptr;
        if(OBJNet_){
            OBJNet_->releaseModel();
        }
        // Transition the state to UNINITIALIZED
        transitionTo(current_state_, ModelState::UNINITIALIZED);
        {
            std::lock_guard<std::mutex> lock(mutex_);
            DoRelease = 0;
        }
        return 0;
    }catch(...){
        std::cerr << "model release failed! unknown error!" << std::endl;
        transitionTo(current_state_, ModelState::UNINITIALIZED);
        return -1;
    }
}
```
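(The calling code is not part of this issue; the snippet below is a minimal, assumed usage of the two functions, with the actual inference step omitted. `runDetectorOnce` is a made-up name for illustration.)

```cpp
#include <iostream>

// Hypothetical driver code showing the assumed init -> use -> release order.
int runDetectorOnce(OBJDetecor& detector) {
    if (detector.ModelInit() != 0) {
        std::cerr << "ModelInit failed" << std::endl;
        return -1;
    }
    // ... run detection on input frames here ...
    if (detector.ModelRelease() != 0) {
        std::cerr << "ModelRelease failed" << std::endl;
        return -1;
    }
    return 0;
}
```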
However, while the project is actually running, I found that even though my ModelRelease function is executed, the VmRSS memory usage reported by `cat /proc/pid/status` does not decrease (where pid is the process ID).
Is there a problem with my release code? If so, please tell me where the problem is; if not, I would like to know why the release does not seem to take effect.
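(For reference, the same VmRSS figure that `cat /proc/pid/status` reports can also be read from inside the process; below is a minimal sketch assuming a Linux environment. `readVmRssKb` is a made-up helper name, not part of the project above.)

```cpp
// Minimal sketch: read the VmRSS line of /proc/self/status (Linux only).
#include <fstream>
#include <iostream>
#include <string>

static long readVmRssKb() {
    std::ifstream status("/proc/self/status");
    std::string line;
    while (std::getline(status, line)) {
        if (line.rfind("VmRSS:", 0) == 0) {
            // The line looks like: "VmRSS:    123456 kB"
            return std::stol(line.substr(6));
        }
    }
    return -1;  // VmRSS line not found
}

// Example: log resident memory around the release call.
// std::cout << "VmRSS before release: " << readVmRssKb() << " kB" << std::endl;
// detector.ModelRelease();
// std::cout << "VmRSS after release:  " << readVmRssKb() << " kB" << std::endl;
```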