Skip to content

Commit a5491a5

Browse files
committed
refactor(elementwise): remove unrelated comments
Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>
1 parent 605b549 commit a5491a5

File tree

1 file changed

+0
-13
lines changed

1 file changed

+0
-13
lines changed

core/conversion/converters/impl/element_wise.cpp

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -262,7 +262,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
262262
}})
263263
.pattern({"aten::ne.Tensor(Tensor self, Tensor other) -> (Tensor)",
264264
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
265-
// TODO: Remove with functionalization
266265
auto self = args[0].ITensorOrFreeze(ctx);
267266
auto other = args[1].ITensorOrFreeze(ctx);
268267
auto equal = add_elementwise(
@@ -326,7 +325,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
326325
}})
327326
.pattern({"aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> (Tensor)",
328327
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
329-
// TODO: Remove with functionalization
330328
auto self = args[0].ITensorOrFreeze(ctx);
331329
auto exponent = args[1].ITensorOrFreeze(ctx);
332330
auto pow =
@@ -341,7 +339,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
341339
}})
342340
.pattern({"aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> (Tensor)",
343341
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
344-
// TODO: Remove with functionalization
345342
auto self = args[0].ITensorOrFreeze(ctx);
346343
auto exponentScalar = args[1].unwrapToScalar().to<float>();
347344
auto exponent = tensor_to_const(ctx, torch::tensor({exponentScalar}));
@@ -357,7 +354,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
357354
}})
358355
.pattern({"aten::gt.Tensor(Tensor self, Tensor other) -> (Tensor)",
359356
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
360-
// TODO: Remove with functionalization
361357
auto self = args[0].ITensorOrFreeze(ctx);
362358
auto other = args[1].ITensorOrFreeze(ctx);
363359
auto gt =
@@ -371,7 +367,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
371367
}})
372368
.pattern({"aten::gt.Scalar(Tensor self, Scalar other) -> (Tensor)",
373369
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
374-
// TODO: Remove with functionalization
375370
auto self = args[0].ITensorOrFreeze(ctx);
376371
auto otherScalar = args[1].unwrapToScalar().to<float>();
377372
auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));
@@ -386,7 +381,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
386381
}})
387382
.pattern({"aten::lt.Tensor(Tensor self, Tensor other) -> (Tensor)",
388383
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
389-
// TODO: Remove with functionalization
390384
auto self = args[0].ITensorOrFreeze(ctx);
391385
auto other = args[1].ITensorOrFreeze(ctx);
392386
auto lt =
@@ -400,7 +394,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
400394
}})
401395
.pattern({"aten::lt.Scalar(Tensor self, Scalar other) -> (Tensor)",
402396
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
403-
// TODO: Remove with functionalization
404397
auto self = args[0].ITensorOrFreeze(ctx);
405398
auto otherScalar = args[1].unwrapToScalar().to<float>();
406399
auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));
@@ -415,7 +408,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
415408
}})
416409
.pattern({"aten::eq.Tensor(Tensor self, Tensor other) -> (Tensor)",
417410
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
418-
// TODO: Remove with functionalization
419411
auto self = args[0].ITensorOrFreeze(ctx);
420412
auto other = args[1].ITensorOrFreeze(ctx);
421413
auto eq =
@@ -429,7 +421,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
429421
}})
430422
.pattern({"aten::eq.Scalar(Tensor self, Scalar other) -> (Tensor)",
431423
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
432-
// TODO: Remove with functionalization
433424
auto self = args[0].ITensorOrFreeze(ctx);
434425
auto otherScalar = args[1].unwrapToScalar().to<float>();
435426
auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));
@@ -444,7 +435,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
444435
}})
445436
.pattern({"aten::ge.Tensor(Tensor self, Tensor other) -> (Tensor)",
446437
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
447-
// TODO: Remove with functionalization
448438
auto self = args[0].ITensorOrFreeze(ctx);
449439
auto other = args[1].ITensorOrFreeze(ctx);
450440

@@ -468,7 +458,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
468458
}})
469459
.pattern({"aten::ge.Scalar(Tensor self, Scalar other) -> (Tensor)",
470460
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
471-
// TODO: Remove with functionalization
472461
auto self = args[0].ITensorOrFreeze(ctx);
473462
auto otherScalar = args[1].unwrapToScalar().to<float>();
474463
auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));
@@ -493,7 +482,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
493482
}})
494483
.pattern({"aten::le.Tensor(Tensor self, Tensor other) -> (Tensor)",
495484
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
496-
// TODO: Remove with functionalization
497485
auto self = args[0].ITensorOrFreeze(ctx);
498486
auto other = args[1].ITensorOrFreeze(ctx);
499487

@@ -517,7 +505,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
517505
}})
518506
.pattern({"aten::le.Scalar(Tensor self, Scalar other) -> (Tensor)",
519507
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
520-
// TODO: Remove with functionalization
521508
auto self = args[0].ITensorOrFreeze(ctx);
522509
auto otherScalar = args[1].unwrapToScalar().to<float>();
523510
auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));

0 commit comments

Comments (0)