-
Notifications
You must be signed in to change notification settings - Fork 244
Open
Labels
question — Question(s) from user.
Description
// Debug helper: copies an NHWC uint8 tensor from device memory and writes it
// to disk with OpenCV. Assumes layout [N, H, W, C]; only the first image of
// the batch is written.
void saveCrop(const nvcv::Tensor& crop, const std::string& filename)
{
    auto dataCuda = crop.exportData<nvcv::TensorDataStridedCuda>();
    auto sizes = crop.shape(); // [N, H, W, C]
    int N = sizes[0], H = sizes[1], W = sizes[2], C = sizes[3];
    std::cout << "Input image N: " << N << std::endl;
    std::cout << "Input image H: " << H << std::endl;
    std::cout << "Input image W: " << W << std::endl;
    std::cout << "Input image C: " << C << std::endl;

    // FIX 1: the producing kernel (e.g. cvcuda::CvtColor) was enqueued on a
    // CUDA stream; synchronize before reading, or the copy below can observe
    // stale/partial results.
    cudaDeviceSynchronize();

    // FIX 2: rows of a strided tensor may be padded (row pitch > W*C bytes).
    // Copy row by row with the tensor's actual row stride instead of assuming
    // a densely packed buffer. For NHWC, stride(1) is the byte distance
    // between consecutive rows.
    std::vector<uint8_t> host(static_cast<size_t>(H) * W * C);
    cudaMemcpy2D(host.data(), static_cast<size_t>(W) * C,
                 dataCuda->basePtr(), dataCuda->stride(1),
                 static_cast<size_t>(W) * C, H, cudaMemcpyDeviceToHost);

    // FIX 3: CV_8U means CV_8UC1. Interpreting an interleaved C-channel
    // buffer as a single-channel Mat produces exactly the kind of stretched /
    // garbled image reported in this issue. Use the actual channel count.
    cv::Mat img(H, W, CV_8UC(C), host.data());
    cv::imwrite(filename, img);
}
// Maximum per-image geometry used to size one "slot" in the batched buffer.
int maxImageWidth = extParams.maxImageWidth;
int maxImageHeight = extParams.maxImageHeight;
int maxChannels = extParams.maxChannels;
// tag: Create the cuda stream
cudaStream_t stream;
CHECK_CUDA_ERROR(cudaStreamCreate(&stream));
// Manually computed NHWC strides for one max-sized image: innermost (C)
// stride is one byte, then W, H, and finally N (one full image per slot).
nvcv::TensorDataStridedCuda::Buffer inBuf;
inBuf.strides[3] = sizeof(uint8_t);
inBuf.strides[2] = maxChannels * inBuf.strides[3];
inBuf.strides[1] = maxImageWidth * inBuf.strides[2];
inBuf.strides[0] = maxImageHeight * inBuf.strides[1];
// Device allocation big enough for batchSize max-sized images, enqueued on
// the stream created above.
CHECK_CUDA_ERROR(cudaMallocAsync(&inBuf.basePtr, batchSize * inBuf.strides[0], stream));
// NOTE(review): the strides above are derived from maxChannels, but the
// tensor is created with FMT_RGB8, which is 3 interleaved channels. If
// maxChannels != 3 the wrapped tensor's layout will not match the buffer —
// confirm the two agree.
nvcv::Tensor::Requirements inReqs
= nvcv::Tensor::CalcRequirements(batchSize, {maxImageWidth, maxImageHeight}, nvcv::FMT_RGB8);
nvcv::TensorDataStridedCuda inData(nvcv::TensorShape{inReqs.shape, inReqs.rank, inReqs.layout},
nvcv::DataType{inReqs.dtype}, inBuf);
// Wrap the externally managed device buffer as a tensor (no copy is made;
// inBuf.basePtr must stay alive as long as inTensor is used).
nvcv::Tensor inTensor = nvcv::TensorWrapData(inData);
uint8_t *gpuInput = reinterpret_cast<uint8_t *>(inBuf.basePtr);
uint32_t totalImages = batchSize;
// Decode to interleaved RGB (RGBRGB...) directly into the device buffer.
nvjpegOutputFormat_t outputFormat = NVJPEG_OUTPUT_RGBI;
std::vector<int> w(batchSize);
std::vector<int> h(batchSize);
// NvDecode fills gpuInput with the decoded pixels and reports each image's
// width/height in w/h. The per-image placement (tightly packed back-to-back
// vs one max-sized slot per image, i.e. inBuf.strides[0] apart) is not
// visible here — TODO confirm, since the offset arithmetic in the loop
// below depends on it.
NvDecode(images_dir, batchSize, totalImages, outputFormat, gpuInput, w, h);
int offset = 0;
for(int i=0;i<batchSize;i++){
int wOrigin = w[I];
int hOrigin = h[I];
nvcv::TensorDataStridedCuda::Buffer inputBuf;
// 设置 NHWC 的 stride
inputBuf.strides[3] = sizeof(uint8_t); // C
inputBuf.strides[2] = 3 * inputBuf.strides[3]; // W
inputBuf.strides[1] = wOrigin * inputBuf.strides[2]; // H
inputBuf.strides[0] = hOrigin * inputBuf.strides[1]; // N
inputBuf.basePtr = reinterpret_cast<NVCVByte *>(gpuInput+offset);
offset += wOrigin * hOrigin * 3;
nvcv::Tensor::Requirements inReqs1
= nvcv::Tensor::CalcRequirements(1, {wOrigin, hOrigin}, nvcv::FMT_RGB8);
nvcv::TensorDataStridedCuda inData1(nvcv::TensorShape{inReqs1.shape, inReqs1.rank, inReqs1.layout},
nvcv::DataType{inReqs1.dtype}, inputBuf);
// Wrap 成 Tensor
nvcv::Tensor inputTensor = nvcv::TensorWrapData(inData1);
nvcv::Tensor gray(1, {w, h}, nvcv::FMT_U8);
cvcuda::CvtColor cvtOp;
cvtOp(stream, inputTensor, gray, NVCV_COLOR_RGB2GRAY);
saveCrop(gray, "gray.jpeg");
}
cvtOp(stream, inputTensor, gray, NVCV_COLOR_RGB2GRAY);
Why does my grayscale image look like this after the conversion?
Metadata
Metadata
Assignees
Labels
question — Question(s) from user.
