
Commit 3375de1

r-barnes authored and facebook-github-bot committed
c10::nullopt -> std::nullopt
Summary:
X-link: meta-pytorch/torchrec#2515
X-link: pytorch/executorch#6461
X-link: pytorch/audio#3848
X-link: pytorch/ao#1151
X-link: facebookincubator/AITemplate#1032

Reviewed By: houseroad

Differential Revision: D64835967

fbshipit-source-id: 9f9f65335aaf5497680561027ef9314e4b36f8d9
1 parent 2834998 commit 3375de1

20 files changed: +59 -59 lines changed
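The substitution is purely mechanical: every empty-optional default previously written `c10::nullopt` (PyTorch's historical spelling) becomes the standard `std::nullopt`, which fits the `std::optional` parameter types these signatures already use. A minimal sketch of the pattern; the function below is illustrative and not part of this diff:

    #include <iostream>
    #include <optional>

    // Before this change the default here would have read "= c10::nullopt";
    // with a std::optional parameter the standard spelling is std::nullopt.
    double scaled(double x, std::optional<double> scale = std::nullopt) {
      // Fall back to 1.0 when no scale was supplied.
      return x * scale.value_or(1.0);
    }

    int main() {
      std::cout << scaled(2.0) << "\n";      // 2, uses the std::nullopt default
      std::cout << scaled(2.0, 3.0) << "\n"; // 6, explicit scale
    }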

fbgemm_gpu/codegen/training/backward/embedding_backward_dense_host_cpu.cpp

Lines changed: 3 additions & 3 deletions
@@ -164,11 +164,11 @@ Tensor split_embedding_codegen_lookup_dense_function(
     const std::optional<Tensor>& indice_weights,
     const std::optional<Tensor>& feature_requires_grad,
     int64_t /* output_dtype = static_cast<int64_t>(SparseType::FP32) */,
-    const std::optional<Tensor>& /* B_offsets = c10::nullopt */,
+    const std::optional<Tensor>& /* B_offsets = std::nullopt */,
     const std::optional<
-        Tensor>& /* vbe_output_offsets_feature_rank = c10::nullopt */,
+        Tensor>& /* vbe_output_offsets_feature_rank = std::nullopt */,
     const std::optional<
-        Tensor>& /* vbe_B_offsets_rank_per_feature = c10::nullopt */,
+        Tensor>& /* vbe_B_offsets_rank_per_feature = std::nullopt */,
     c10::SymInt /* max_B = -1 */,
     c10::SymInt /* max_B_feature_rank = -1 */,
     c10::SymInt /* vbe_output_size = -1 */) {

fbgemm_gpu/codegen/training/backward/embedding_backward_split_host_template.cpp

Lines changed: 6 additions & 6 deletions
@@ -1006,26 +1006,26 @@ Tensor {{ bwd_mdesc }}_embedding_codegen_lookup_{{ optimizer }}_function(
     {{ args.split_function_args | join(", ") }},
     {%- endif %}
     const int64_t output_dtype = static_cast<int64_t>(SparseType::FP32),
-    const std::optional<Tensor>& B_offsets = c10::nullopt,
-    const std::optional<Tensor>& vbe_output_offsets_feature_rank = c10::nullopt,
-    const std::optional<Tensor>& vbe_B_offsets_rank_per_feature = c10::nullopt,
+    const std::optional<Tensor>& B_offsets = std::nullopt,
+    const std::optional<Tensor>& vbe_output_offsets_feature_rank = std::nullopt,
+    const std::optional<Tensor>& vbe_B_offsets_rank_per_feature = std::nullopt,
     const c10::SymInt max_B = -1,
     const c10::SymInt max_B_feature_rank = -1,
     {%- if not dense %}
     const c10::SymInt vbe_output_size = -1,
     const bool is_experimental_tbe = false, // formerly named is_experimental
     const bool use_uniq_cache_locations_bwd = false,
     const bool use_homogeneous_placements = false,
-    const std::optional<Tensor>& uvm_cache_stats = c10::nullopt,
+    const std::optional<Tensor>& uvm_cache_stats = std::nullopt,
     {%- if "prev_iter_dev" not in args.split_function_arg_names %}
-    const std::optional<Tensor>& prev_iter_dev = c10::nullopt,
+    const std::optional<Tensor>& prev_iter_dev = std::nullopt,
     {%- endif %}
     {%- if "iter" not in args.split_function_arg_names %}
     const int64_t iter = 0,
     {%- endif %}
     const bool apply_global_weight_decay = false,
     {%- if ssd %}
-    const std::optional<at::TensorList>& ssd_tensors = c10::nullopt,
+    const std::optional<at::TensorList>& ssd_tensors = std::nullopt,
     {%- endif %}
     const double gwd_lower_bound = 0
     {%- else %}

fbgemm_gpu/codegen/training/pt2/embedding_split_host_pt2_autograd_template.cpp

Lines changed: 6 additions & 6 deletions
@@ -987,25 +987,25 @@ Tensor {{ bwd_mdesc }}_embedding_codegen_lookup_{{ optimizer }}_function_pt2(
     {%- endif %}
     {{ args_pt2.unified_pt2.split_function_args | join(", ") }},
     const int64_t output_dtype = static_cast<int64_t>(SparseType::FP32),
-    const std::optional<Tensor>& B_offsets = c10::nullopt,
-    const std::optional<Tensor>& vbe_output_offsets_feature_rank = c10::nullopt,
-    const std::optional<Tensor>& vbe_B_offsets_rank_per_feature = c10::nullopt,
+    const std::optional<Tensor>& B_offsets = std::nullopt,
+    const std::optional<Tensor>& vbe_output_offsets_feature_rank = std::nullopt,
+    const std::optional<Tensor>& vbe_B_offsets_rank_per_feature = std::nullopt,
     const c10::SymInt max_B = -1,
     const c10::SymInt max_B_feature_rank = -1,
     const c10::SymInt vbe_output_size = -1,
     const bool is_experimental_tbe = false, // formerly named is_experimental
     const bool use_uniq_cache_locations_bwd = false,
     const bool use_homogeneous_placements = false,
-    const std::optional<Tensor>& uvm_cache_stats = c10::nullopt,
+    const std::optional<Tensor>& uvm_cache_stats = std::nullopt,
     {%- if "prev_iter_dev" not in args_pt2.split_function_arg_names %}
-    const std::optional<Tensor>& prev_iter_dev = c10::nullopt,
+    const std::optional<Tensor>& prev_iter_dev = std::nullopt,
     {%- endif %}
     {%- if "iter" not in args_pt2.split_function_arg_names %}
     const int64_t iter = 0,
     {%- endif %}
     const bool apply_global_weight_decay = false,
     {%- if ssd %}
-    const std::optional<at::TensorList>& ssd_tensors = c10::nullopt,
+    const std::optional<at::TensorList>& ssd_tensors = std::nullopt,
     {%- endif %}
     const double gwd_lower_bound = 0
 ) {

fbgemm_gpu/experimental/gen_ai/src/gemm/ck_extensions.hip

Lines changed: 1 addition & 1 deletion
@@ -211,7 +211,7 @@ at::Tensor dispatch_bf16_gemm(at::Tensor A, at::Tensor B, std::optional<at::Tens
   }
 }

-at::Tensor bf16_gemm(at::Tensor A, at::Tensor B, std::optional<at::Tensor> bias = c10::nullopt) {
+at::Tensor bf16_gemm(at::Tensor A, at::Tensor B, std::optional<at::Tensor> bias = std::nullopt) {
   TORCH_CHECK(
       A.dtype() == at::kBFloat16 && B.dtype() == at::kBFloat16,
       "Inputs must be bfloat16.");

fbgemm_gpu/experimental/gen_ai/src/gemm/gemm.cpp

Lines changed: 2 additions & 2 deletions
@@ -14,7 +14,7 @@ namespace fbgemm_gpu {
 at::Tensor bf16_gemm(
     at::Tensor A,
     at::Tensor B,
-    std::optional<at::Tensor> bias = c10::nullopt);
+    std::optional<at::Tensor> bias = std::nullopt);

 TORCH_LIBRARY_FRAGMENT(fbgemm, m) {
 #ifdef USE_ROCM
@@ -31,7 +31,7 @@ TORCH_LIBRARY_IMPL(fbgemm, CUDA, m) {
 at::Tensor bf16_gemm_meta(
     at::Tensor A,
     at::Tensor B,
-    std::optional<at::Tensor> /* bias */ = c10::nullopt) {
+    std::optional<at::Tensor> /* bias */ = std::nullopt) {
   const at::SymInt M = A.sym_size(0);
   const at::SymInt N = B.sym_size(0);
   auto C = at::empty_symint({M, N}, A.options().dtype(at::kBFloat16));
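Caller-side code is unaffected except where an empty optional is spelled out explicitly. A hedged sketch of a call site for the bf16_gemm declaration above (assumes linking against fbgemm_gpu; the shapes are illustrative, following the meta kernel where C comes out M x N from an M x K A and an N x K B):

    #include <ATen/ATen.h>
    #include <optional>

    namespace fbgemm_gpu {
    // Declaration as in gemm.cpp above.
    at::Tensor bf16_gemm(
        at::Tensor A,
        at::Tensor B,
        std::optional<at::Tensor> bias = std::nullopt);
    } // namespace fbgemm_gpu

    at::Tensor example() {
      at::Tensor A = at::randn({4, 8}).to(at::kBFloat16);  // M x K
      at::Tensor B = at::randn({16, 8}).to(at::kBFloat16); // N x K
      // Explicitly empty bias is now spelled std::nullopt (was c10::nullopt);
      // omitting the argument entirely picks up the same default.
      return fbgemm_gpu::bf16_gemm(A, B, std::nullopt); // M x N, bfloat16
    }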

fbgemm_gpu/experimental/gen_ai/src/quantize/ck_extensions/fp8_rowwise_gemm.hip

Lines changed: 1 addition & 1 deletion
@@ -257,7 +257,7 @@ at::Tensor f8f8bf16_rowwise(
     at::Tensor w_scale,
     std::optional<at::Tensor> bias,
     bool use_fast_accum,
-    std::optional<at::Tensor> output = c10::nullopt) {
+    std::optional<at::Tensor> output = std::nullopt) {
   // Check that input datatypes are valid.
   TORCH_CHECK(
       (XQ.dtype() == at::kFloat8_e4m3fnuz) &&

fbgemm_gpu/experimental/gen_ai/src/quantize/cutlass_extensions/f8f8bf16_cublas.cu

Lines changed: 6 additions & 6 deletions
@@ -20,10 +20,10 @@ namespace fbgemm_gpu {
 at::Tensor f8f8bf16_cublas(
     at::Tensor A, // FP8
     at::Tensor B, // FP8
-    std::optional<at::Tensor> Ainvs = c10::nullopt,
-    std::optional<at::Tensor> Binvs = c10::nullopt,
+    std::optional<at::Tensor> Ainvs = std::nullopt,
+    std::optional<at::Tensor> Binvs = std::nullopt,
     bool use_fast_accum = true,
-    std::optional<at::Tensor> output = c10::nullopt) {
+    std::optional<at::Tensor> output = std::nullopt) {
   auto m = A.size(0);
   auto n = B.size(0);
   auto k = A.size(1);
@@ -167,10 +167,10 @@ at::Tensor f8f8bf16_cublas(
 at::Tensor f8f8bf16_cublas(
     at::Tensor A, // FP8
     at::Tensor B, // FP8
-    std::optional<at::Tensor> Ainvs = c10::nullopt,
-    std::optional<at::Tensor> Binvs = c10::nullopt,
+    std::optional<at::Tensor> Ainvs = std::nullopt,
+    std::optional<at::Tensor> Binvs = std::nullopt,
     bool use_fast_accum = true,
-    std::optional<at::Tensor> output = c10::nullopt) {
+    std::optional<at::Tensor> output = std::nullopt) {
   throw std::runtime_error(
       "CUDA version is older than 12.0"); // requires CUDA>=12
 }

fbgemm_gpu/experimental/gen_ai/src/quantize/cutlass_extensions/f8f8bf16_rowwise.cu

Lines changed: 4 additions & 4 deletions
@@ -361,9 +361,9 @@ at::Tensor f8f8bf16_rowwise(
     at::Tensor WQ, // FP8
     at::Tensor x_scale, // FP32
     at::Tensor w_scale, // FP32
-    std::optional<at::Tensor> bias = c10::nullopt,
+    std::optional<at::Tensor> bias = std::nullopt,
     bool use_fast_accum = true,
-    std::optional<at::Tensor> output = c10::nullopt) {
+    std::optional<at::Tensor> output = std::nullopt) {
   // Check datatypes.
   TORCH_CHECK(
       x_scale.dtype() == at::kFloat && w_scale.dtype() == at::kFloat,
@@ -482,9 +482,9 @@ at::Tensor f8f8bf16_rowwise(
     at::Tensor WQ, // FP8
     at::Tensor x_scale,
     at::Tensor w_scale,
-    std::optional<at::Tensor> bias = c10::nullopt,
+    std::optional<at::Tensor> bias = std::nullopt,
     bool use_fast_accum = true,
-    std::optional<at::Tensor> output = c10::nullopt) {
+    std::optional<at::Tensor> output = std::nullopt) {
   throw std::runtime_error(
       "CUDA version is older than 12.0"); // requires CUDA>=12
 }

fbgemm_gpu/experimental/gen_ai/src/quantize/cutlass_extensions/f8f8bf16_rowwise_batched.cu

Lines changed: 4 additions & 4 deletions
@@ -599,9 +599,9 @@ at::Tensor f8f8bf16_rowwise_batched(
     at::Tensor WQ, // FP8
     at::Tensor x_scale, // FP32
     at::Tensor w_scale, // FP32
-    std::optional<at::Tensor> bias = c10::nullopt,
+    std::optional<at::Tensor> bias = std::nullopt,
     bool use_fast_accum = true,
-    std::optional<at::Tensor> output = c10::nullopt) {
+    std::optional<at::Tensor> output = std::nullopt) {
   // Check datatypes.
   TORCH_CHECK(
       x_scale.dtype() == at::kFloat && w_scale.dtype() == at::kFloat,
@@ -720,9 +720,9 @@ at::Tensor f8f8bf16_rowwise_batched(
     at::Tensor WQ, // FP8
     at::Tensor x_scale,
     at::Tensor w_scale,
-    std::optional<at::Tensor> bias = c10::nullopt,
+    std::optional<at::Tensor> bias = std::nullopt,
     bool use_fast_accum = true,
-    std::optional<at::Tensor> output = c10::nullopt) {
+    std::optional<at::Tensor> output = std::nullopt) {
   throw std::runtime_error(
       "CUDA version is older than 12.0"); // requires CUDA>=12
 }

fbgemm_gpu/experimental/gen_ai/src/quantize/quantize.cpp

Lines changed: 7 additions & 7 deletions
@@ -59,17 +59,17 @@ at::Tensor f8f8bf16_rowwise(
     at::Tensor WQ,
     at::Tensor x_scale,
     at::Tensor w_scale,
-    std::optional<at::Tensor> bias = c10::nullopt,
+    std::optional<at::Tensor> bias = std::nullopt,
     bool use_fast_accum = true,
-    std::optional<at::Tensor> output = c10::nullopt);
+    std::optional<at::Tensor> output = std::nullopt);
 at::Tensor f8f8bf16_rowwise_batched(
     at::Tensor XQ,
     at::Tensor WQ,
     at::Tensor x_scale,
     at::Tensor w_scale,
-    std::optional<at::Tensor> bias = c10::nullopt,
+    std::optional<at::Tensor> bias = std::nullopt,
     bool use_fast_accum = true,
-    std::optional<at::Tensor> output = c10::nullopt);
+    std::optional<at::Tensor> output = std::nullopt);
 at::Tensor f8f8bf16_blockwise(
     at::Tensor XQ,
     at::Tensor WQ,
@@ -81,10 +81,10 @@ at::Tensor f8f8bf16_blockwise(
 at::Tensor f8f8bf16_cublas(
     at::Tensor A,
     at::Tensor B,
-    std::optional<at::Tensor> Ainvs = c10::nullopt,
-    std::optional<at::Tensor> Binvs = c10::nullopt,
+    std::optional<at::Tensor> Ainvs = std::nullopt,
+    std::optional<at::Tensor> Binvs = std::nullopt,
     bool use_fast_accum = true,
-    std::optional<at::Tensor> output = c10::nullopt);
+    std::optional<at::Tensor> output = std::nullopt);
 at::Tensor f8i4bf16_rowwise(
     at::Tensor XQ,
     at::Tensor WQ,
