@@ -567,7 +567,6 @@ SetStringInputTensor(
     cudaStream_t stream, const char* host_policy_name)
 {
   bool cuda_copy = false;
-  size_t element_idx = 0;
 
   // For string data type, we always need to have the data on CPU so
   // that we can read string length and construct the string
@@ -582,11 +581,7 @@ SetStringInputTensor(
       &contiguous_buffer, stream, &cuda_copy);
   if (err != nullptr) {
     RESPOND_AND_SET_NULL_IF_ERROR(response, err);
-    if (element_idx < request_element_cnt) {
-      FillStringTensor(
-          tensor, tensor_offset + element_idx,
-          request_element_cnt - element_idx);
-    }
+    FillStringTensor(tensor, tensor_offset, request_element_cnt);
     free(contiguous_buffer);
     return cuda_copy;
   }
@@ -602,15 +597,16 @@ SetStringInputTensor(
   err = ValidateStringBuffer(
       content, content_byte_size, request_element_cnt, name, &str_list);
   // Set string values.
-  for (; element_idx < str_list.size(); ++element_idx) {
+  for (size_t element_idx = 0; element_idx < str_list.size(); ++element_idx) {
     const auto& [addr, len] = str_list[element_idx];
     TRITONTF_TensorSetString(tensor, tensor_offset + element_idx, addr, len);
   }
 
+  size_t element_cnt = str_list.size();
   if (err != nullptr) {
     RESPOND_AND_SET_NULL_IF_ERROR(response, err);
     FillStringTensor(
-        tensor, tensor_offset + element_idx, request_element_cnt - element_idx);
+        tensor, tensor_offset + element_cnt, request_element_cnt - element_cnt);
   }
   free(contiguous_buffer);
   return cuda_copy;
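The change drops the function-scope `element_idx` counter: when the contiguous-buffer copy fails, the whole range `[tensor_offset, tensor_offset + request_element_cnt)` is padded, and after validation only the tail past the strings actually set (`str_list.size()`) is padded. Below is a minimal, self-contained sketch of that tail-padding pattern, not the backend code itself: it assumes a `std::vector<std::string>` stand-in for the TRITONTF string tensor and a hypothetical `fill_empty()` helper in place of `FillStringTensor()`.

```cpp
// Sketch of the padding pattern in this change (assumptions: a
// std::vector<std::string> stands in for the string tensor, and fill_empty()
// mimics FillStringTensor() by writing empty strings into a range).
#include <cstddef>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for FillStringTensor(): pad `count` elements starting
// at `offset` with empty strings so the tensor region stays well-formed.
static void
fill_empty(std::vector<std::string>& tensor, size_t offset, size_t count)
{
  for (size_t i = 0; i < count; ++i) {
    tensor[offset + i] = "";
  }
}

int
main()
{
  const size_t tensor_offset = 2;
  const size_t request_element_cnt = 4;
  std::vector<std::string> tensor(8);

  // Pretend validation recovered only 2 of the 4 expected strings.
  std::vector<std::pair<const char*, size_t>> str_list = {{"ab", 2}, {"cde", 3}};

  // Set the recovered strings (mirrors the TRITONTF_TensorSetString loop).
  for (size_t element_idx = 0; element_idx < str_list.size(); ++element_idx) {
    const auto& [addr, len] = str_list[element_idx];
    tensor[tensor_offset + element_idx] = std::string(addr, len);
  }

  // Pad only the tail that was never set, as the patched code does.
  const size_t element_cnt = str_list.size();
  fill_empty(tensor, tensor_offset + element_cnt, request_element_cnt - element_cnt);

  for (const auto& s : tensor) {
    std::cout << '[' << s << "] ";
  }
  std::cout << '\n';  // prints: [] [] [ab] [cde] [] [] [] []
}
```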