@@ -36,7 +36,7 @@ Tensor& dequantize_per_tensor_out(
     int64_t quant_min,
     int64_t quant_max,
     ScalarType dtype,
-    executorch::aten::optional<ScalarType> out_dtype,
+    std::optional<ScalarType> out_dtype,
     Tensor& out);
 
 Tensor& dequantize_per_token_out(
@@ -57,7 +57,7 @@ Tensor& dequantize_per_channel_out(
     int64_t quant_min,
     int64_t quant_max,
     ScalarType dtype,
-    executorch::aten::optional<ScalarType> out_dtype,
+    std::optional<ScalarType> out_dtype,
     Tensor& out);
 
 Tensor& dequantize_per_tensor_tensor_args_out(
@@ -67,7 +67,7 @@ Tensor& dequantize_per_tensor_tensor_args_out(
     int64_t quant_min,
     int64_t quant_max,
     ScalarType dtype,
-    executorch::aten::optional<ScalarType> out_dtype,
+    std::optional<ScalarType> out_dtype,
     Tensor& out);
 
 // Wrapper function for dequantize_per_tensor_out without context
@@ -78,7 +78,7 @@ Tensor& dequantize_per_tensor_out_no_context(
     int64_t quant_min,
     int64_t quant_max,
     ScalarType dtype,
-    executorch::aten::optional<ScalarType> out_dtype,
+    std::optional<ScalarType> out_dtype,
     Tensor& out) {
   return torch::executor::native::dequantize_per_tensor_out(
       input, scale, zero_point, quant_min, quant_max, dtype, out_dtype, out);
@@ -107,7 +107,7 @@ Tensor& dequantize_per_channel_out_no_context(
     int64_t quant_min,
     int64_t quant_max,
     ScalarType dtype,
-    executorch::aten::optional<ScalarType> out_dtype,
+    std::optional<ScalarType> out_dtype,
     Tensor& out) {
   return torch::executor::native::dequantize_per_channel_out(
       input,
@@ -129,7 +129,7 @@ Tensor& dequantize_per_tensor_tensor_args_out_no_context(
     int64_t quant_min,
     int64_t quant_max,
     ScalarType dtype,
-    executorch::aten::optional<ScalarType> out_dtype,
+    std::optional<ScalarType> out_dtype,
     Tensor& out) {
   return torch::executor::native::dequantize_per_tensor_tensor_args_out(
       input, scale, zero_point, quant_min, quant_max, dtype, out_dtype, out);
@@ -149,7 +149,7 @@ at::Tensor dequantize_per_tensor_aten(
   ScalarType et_dtype = at_scalartype_to_et_scalartype(dtype);
   ScalarType et_out_dtype = at_scalartype_to_et_scalartype(out_dtype);
 
-  executorch::aten::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
+  std::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
 
   WRAP_TO_ATEN(dequantize_per_tensor_out_no_context, 7)
   (input,
@@ -204,7 +204,7 @@ at::Tensor dequantize_per_channel_aten(
   ScalarType et_dtype = at_scalartype_to_et_scalartype(dtype);
   ScalarType et_out_dtype = at_scalartype_to_et_scalartype(out_dtype);
 
-  executorch::aten::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
+  std::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
 
   WRAP_TO_ATEN(dequantize_per_channel_out_no_context, 8)
   (input,
@@ -233,7 +233,7 @@ at::Tensor dequantize_per_tensor_tensor_args_aten(
   ScalarType et_dtype = at_scalartype_to_et_scalartype(dtype);
   ScalarType et_out_dtype = at_scalartype_to_et_scalartype(out_dtype);
 
-  executorch::aten::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
+  std::optional<ScalarType> opt_et_out_dtype(et_out_dtype);
 
   WRAP_TO_ATEN(dequantize_per_tensor_tensor_args_out_no_context, 7)
   (input,