
Commit 3f4f500

Use std::optional (#12707)
### Summary

Move remaining usage of optional to `std::optional`.

---------

Signed-off-by: cyy <[email protected]>
Signed-off-by: Yuanyuan Chen <[email protected]>
1 parent 72d8082 commit 3f4f500
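
The change is a mechanical spelling migration: parameters and locals that previously used the project-specific aliases (`torch::executor::optional`, `exec_aten::optional`, `executorch::aten::optional`, `c10::optional`) now use `std::optional` and `std::nullopt` directly. A minimal sketch of the resulting calling pattern, with a placeholder `Tensor` struct standing in for `executorch::aten::Tensor` so the snippet compiles on its own:

```cpp
// Illustrative sketch only: `Tensor` is a stand-in for executorch::aten::Tensor.
#include <iostream>
#include <optional>

struct Tensor {};

// Shape of the migrated signatures: optional operands are std::optional<T>.
void quantized_op(const Tensor& input, const std::optional<Tensor>& bias) {
  (void)input;
  // Callees still branch on presence; only the type's spelling changed.
  std::cout << (bias.has_value() ? "bias provided\n" : "no bias\n");
}

int main() {
  Tensor x;
  quantized_op(x, std::nullopt);  // omit the optional operand
  quantized_op(x, Tensor{});      // provide one (implicit conversion)
  return 0;
}
```

Because `std::optional<T>` implicitly constructs from a `T`, existing call sites that pass a concrete tensor or `std::nullopt` keep working unchanged.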

File tree

81 files changed: +519 -495 lines changed


backends/cadence/hifi/operators/op_mean.cpp

Lines changed: 1 addition & 1 deletion
@@ -17,8 +17,8 @@ using executorch::aten::RuntimeContext;
 using executorch::aten::ScalarType;
 using executorch::aten::Tensor;
 using executorch::runtime::ArrayRef;
+using std::optional;
 using torch::executor::Error;
-using torch::executor::optional;
 
 namespace impl {
 namespace HiFi {

backends/cadence/hifi/operators/op_quantized_matmul_out.cpp

Lines changed: 2 additions & 2 deletions
@@ -29,7 +29,7 @@ void inline _typed_quantized_matmul(
 int64_t X_zero_point,
 const Tensor& Y,
 int64_t Y_zero_point,
-const exec_aten::optional<Tensor>& bias,
+const std::optional<Tensor>& bias,
 int64_t out_multiplier,
 int64_t out_shift,
 int64_t out_zero_point,
@@ -182,7 +182,7 @@ void quantized_matmul_out(
 int64_t X_zero_point,
 const Tensor& Y,
 int64_t Y_zero_point,
-const exec_aten::optional<Tensor>& bias,
+const std::optional<Tensor>& bias,
 int64_t out_multiplier,
 int64_t out_shift,
 int64_t out_zero_point,

backends/cadence/hifi/operators/op_slice_copy.cpp

Lines changed: 2 additions & 2 deletions
@@ -29,8 +29,8 @@ Tensor& slice_copy_Tensor_out(
 KernelRuntimeContext& ctx,
 const Tensor& in,
 int64_t dim,
-exec_aten::optional<int64_t> start_val,
-exec_aten::optional<int64_t> end_val,
+std::optional<int64_t> start_val,
+std::optional<int64_t> end_val,
 int64_t step,
 Tensor& out) {
 (void)ctx;
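
Kernels that take `std::optional<int64_t>` bounds like `start_val` and `end_val` typically resolve them to concrete indices with `value_or`; the sketch below shows only that standard-library idiom, not the actual slice kernel, and the chosen defaults (0 and the dimension size) are assumptions for illustration.

```cpp
// Generic illustration of resolving optional slice bounds; not the real kernel.
#include <cstdint>
#include <iostream>
#include <optional>

int64_t resolve_start(std::optional<int64_t> start_val) {
  return start_val.value_or(0);  // missing start -> beginning
}

int64_t resolve_end(std::optional<int64_t> end_val, int64_t dim_size) {
  return end_val.value_or(dim_size);  // missing end -> end of the dimension
}

int main() {
  std::cout << resolve_start(std::nullopt) << "\n";    // 0
  std::cout << resolve_end(7, 10) << "\n";             // 7
  std::cout << resolve_end(std::nullopt, 10) << "\n";  // 10
  return 0;
}
```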

backends/cadence/hifi/operators/operators.h

Lines changed: 2 additions & 2 deletions
@@ -67,7 +67,7 @@ void quantized_linear_out(
 const ::executorch::aten::Tensor& out_multiplier,
 const ::executorch::aten::Tensor& out_shift,
 int64_t out_zero_point,
-const ::executorch::aten::optional<::executorch::aten::Tensor>& offset,
+const ::std::optional<::executorch::aten::Tensor>& offset,
 ::executorch::aten::Tensor& out);
 
 void quantized_linear_per_tensor_out(
@@ -80,7 +80,7 @@ void quantized_linear_per_tensor_out(
 int64_t out_multiplier,
 int64_t out_shift,
 int64_t out_zero_point,
-const ::executorch::aten::optional<::executorch::aten::Tensor>& offset,
+const ::std::optional<::executorch::aten::Tensor>& offset,
 ::executorch::aten::Tensor& out);
 
 void quantized_conv2d_nhwc_out(

backends/cortex_m/ops/cmsis_scratch_buffer_context.h

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ class CMSISScratchBufferContext final {
 Tensor& scratch_buffer,
 const Tensor& weights,
 const Tensor& weight_zero_point,
-const torch::executor::optional<Tensor>& bias)
+const ::std::optional<Tensor>& bias)
 : scratch_ptr_(scratch_buffer.mutable_data_ptr<int8_t>()),
 total_size_(scratch_buffer.size(0)),
 base_ptr_(reinterpret_cast<uint8_t*>(scratch_ptr_)),

backends/cortex_m/ops/op_quantized_linear.cpp

Lines changed: 2 additions & 2 deletions
@@ -27,7 +27,7 @@ Tensor& quantized_linear_out(
 const Tensor& weight_zero_point,
 const Tensor& weight_multiplier,
 const Tensor& weight_shift,
-const torch::executor::optional<Tensor>& bias,
+const ::std::optional<Tensor>& bias,
 const Tensor& bias_multiplier,
 const Tensor& bias_shift,
 const Tensor& scratch_buffer,
@@ -155,7 +155,7 @@ Tensor quantized_linear(
 const Tensor& weight_zero_point,
 const Tensor& weight_multiplier,
 const Tensor& weight_shift,
-const torch::executor::optional<Tensor>& bias,
+const ::std::optional<Tensor>& bias,
 const Tensor& bias_multiplier,
 const Tensor& bias_shift,
 const Tensor& scratch_buffer,

backends/vulkan/test/op_tests/dequantize_test.cpp

Lines changed: 9 additions & 9 deletions
@@ -36,7 +36,7 @@ Tensor& dequantize_per_tensor_out(
 int64_t quant_min,
 int64_t quant_max,
 ScalarType dtype,
-executorch::aten::optional<ScalarType> out_dtype,
+std::optional<ScalarType> out_dtype,
 Tensor& out);
 
 Tensor& dequantize_per_token_out(
@@ -57,7 +57,7 @@ Tensor& dequantize_per_channel_out(
 int64_t quant_min,
 int64_t quant_max,
 ScalarType dtype,
-executorch::aten::optional<ScalarType> out_dtype,
+std::optional<ScalarType> out_dtype,
 Tensor& out);
 
 Tensor& dequantize_per_tensor_tensor_args_out(
@@ -67,7 +67,7 @@ Tensor& dequantize_per_tensor_tensor_args_out(
 int64_t quant_min,
 int64_t quant_max,
 ScalarType dtype,
-executorch::aten::optional<ScalarType> out_dtype,
+std::optional<ScalarType> out_dtype,
 Tensor& out);
 
 // Wrapper function for dequantize_per_tensor_out without context
@@ -78,7 +78,7 @@ Tensor& dequantize_per_tensor_out_no_context(
 int64_t quant_min,
 int64_t quant_max,
 ScalarType dtype,
-executorch::aten::optional<ScalarType> out_dtype,
+std::optional<ScalarType> out_dtype,
 Tensor& out) {
 return torch::executor::native::dequantize_per_tensor_out(
 input, scale, zero_point, quant_min, quant_max, dtype, out_dtype, out);
@@ -107,7 +107,7 @@ Tensor& dequantize_per_channel_out_no_context(
 int64_t quant_min,
 int64_t quant_max,
 ScalarType dtype,
-executorch::aten::optional<ScalarType> out_dtype,
+std::optional<ScalarType> out_dtype,
 Tensor& out) {
 return torch::executor::native::dequantize_per_channel_out(
 input,
@@ -129,7 +129,7 @@ Tensor& dequantize_per_tensor_tensor_args_out_no_context(
 int64_t quant_min,
 int64_t quant_max,
 ScalarType dtype,
-executorch::aten::optional<ScalarType> out_dtype,
+std::optional<ScalarType> out_dtype,
 Tensor& out) {
 return torch::executor::native::dequantize_per_tensor_tensor_args_out(
 input, scale, zero_point, quant_min, quant_max, dtype, out_dtype, out);
@@ -149,7 +149,7 @@ at::Tensor dequantize_per_tensor_aten(
 ScalarType et_dtype = at_scalartype_to_et_scalartype(dtype);
 ScalarType et_out_dtype = at_scalartype_to_et_scalartype(out_dtype);
 
-executorch::aten::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
+std::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
 
 WRAP_TO_ATEN(dequantize_per_tensor_out_no_context, 7)
 (input,
@@ -204,7 +204,7 @@ at::Tensor dequantize_per_channel_aten(
 ScalarType et_dtype = at_scalartype_to_et_scalartype(dtype);
 ScalarType et_out_dtype = at_scalartype_to_et_scalartype(out_dtype);
 
-executorch::aten::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
+std::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
 
 WRAP_TO_ATEN(dequantize_per_channel_out_no_context, 8)
 (input,
@@ -233,7 +233,7 @@ at::Tensor dequantize_per_tensor_tensor_args_aten(
 ScalarType et_dtype = at_scalartype_to_et_scalartype(dtype);
 ScalarType et_out_dtype = at_scalartype_to_et_scalartype(out_dtype);
 
-executorch::aten::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
+std::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
 
 WRAP_TO_ATEN(dequantize_per_tensor_tensor_args_out_no_context, 7)
 (input,

backends/vulkan/test/op_tests/quantize_affine_test.cpp

Lines changed: 7 additions & 7 deletions
@@ -35,11 +35,11 @@ at::Tensor quantize_affine_reference_impl(
 const at::Tensor& input_,
 const std::vector<int64_t>& block_size,
 const at::Tensor& scale,
-const c10::optional<at::Tensor>& zero_point_opt,
+const std::optional<at::Tensor>& zero_point_opt,
 int64_t quant_min,
 int64_t quant_max,
 at::ScalarType out_dtype,
-c10::optional<std::string> zero_point_domain_opt = std::string("INT")) {
+std::optional<std::string> zero_point_domain_opt = std::string("INT")) {
 constexpr float kEps = 1e-7f;
 
 const int64_t ndim = input_.dim();
@@ -138,11 +138,11 @@ at::Tensor dequantize_affine_reference_impl(
 const at::Tensor& input_,
 const std::vector<int64_t>& block_size,
 const at::Tensor& scale,
-const c10::optional<at::Tensor>& zero_point_opt,
+const std::optional<at::Tensor>& zero_point_opt,
 int64_t quant_min,
 int64_t quant_max,
 at::ScalarType out_dtype,
-c10::optional<std::string> zero_point_domain_opt = std::string("INT")) {
+std::optional<std::string> zero_point_domain_opt = std::string("INT")) {
 const int64_t ndim = input_.dim();
 _check_dims("input", block_size.size(), ndim);
 
@@ -252,7 +252,7 @@ at::Tensor quantize_affine_reference_impl(
 input,
 block_size,
 scale,
-c10::optional<at::Tensor>(zero_point),
+std::optional<at::Tensor>(zero_point),
 quant_min,
 quant_max,
 dtype,
@@ -272,7 +272,7 @@ at::Tensor dequantize_affine_reference_impl(
 input,
 block_size,
 scale,
-c10::optional<at::Tensor>(zero_point),
+std::optional<at::Tensor>(zero_point),
 quant_min,
 quant_max,
 dtype,
@@ -1373,4 +1373,4 @@ TEST(VulkanChooseQParamsAffineTest, test_symmetric_no_clipping_narrow_range) {
 10, // quant_max (narrow range)
 1e-5, // eps
 at::kFloat); // input dtype
-}
+}
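
The reference helpers above also show the default-argument side of the migration: `std::optional<std::string> zero_point_domain_opt = std::string("INT")` defaults to a concrete value that callers can still override or suppress. A small standalone illustration of that pattern (the names here are placeholders, not taken from the test file):

```cpp
// Standalone illustration of an optional parameter with a concrete default.
#include <iostream>
#include <optional>
#include <string>

void describe(std::optional<std::string> zero_point_domain = std::string("INT")) {
  std::cout << zero_point_domain.value_or("NONE") << "\n";
}

int main() {
  describe();                      // prints "INT" (the default)
  describe(std::string("FLOAT"));  // prints "FLOAT"
  describe(std::nullopt);          // prints "NONE"
  return 0;
}
```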

codegen/api/et_cpp.py

Lines changed: 2 additions & 2 deletions
@@ -243,7 +243,7 @@ def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequenc
 JIT_TO_CPP_DEFAULT = {
 "False": "false",
 "True": "true",
-"None": "torch::execustd::nullopt", # UGH this one is type directed
+"None": "std::nullopt", # UGH this one is type directed
 "[]": "{}",
 "contiguous_format": "torch::executorch::MemoryFormat::Contiguous",
 "long": "torch::executorch::kLong",
@@ -278,7 +278,7 @@ def default_expr(d: str, t: Type) -> str:
 
 if isinstance(t, OptionalType):
 if d == "None":
-return "torch::executor::nullopt"
+return "std::nullopt"
 
 return default_expr(d, t.elem)
 

codegen/api/types/types.py

Lines changed: 1 addition & 1 deletion
@@ -59,7 +59,7 @@ class OptionalCType(CType):
 
 def cpp_type(self, *, strip_ref: bool = False) -> str:
 # Do not pass `strip_ref` recursively.
-return f"torch::executor::optional<{self.elem.cpp_type()}>"
+return f"std::optional<{self.elem.cpp_type()}>"
 
 def remove_const_ref(self) -> CType:
 return OptionalCType(self.elem.remove_const_ref())
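
Together, the two codegen changes (`JIT_TO_CPP_DEFAULT["None"]` in et_cpp.py and `OptionalCType.cpp_type` here) mean generated C++ spells optional arguments as `std::optional<...>` and their `None` defaults as `std::nullopt`. A hypothetical declaration showing that output shape, purely for illustration and not an actual generated header:

```cpp
// Hypothetical generated declaration; `Tensor` is a stand-in for the real type.
#include <optional>

struct Tensor {};

// OptionalCType now renders as std::optional<T>; a schema default of None
// becomes std::nullopt.
Tensor clamp(
    const Tensor& in,
    std::optional<double> min = std::nullopt,
    std::optional<double> max = std::nullopt);
```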
