@@ -262,7 +262,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::ne.Tensor(Tensor self, Tensor other) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto other = args[1].ITensorOrFreeze(ctx);
                     auto equal = add_elementwise(
@@ -326,7 +325,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto exponent = args[1].ITensorOrFreeze(ctx);
                     auto pow =
@@ -341,7 +339,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto exponentScalar = args[1].unwrapToScalar().to<float>();
                     auto exponent = tensor_to_const(ctx, torch::tensor({exponentScalar}));
@@ -357,7 +354,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::gt.Tensor(Tensor self, Tensor other) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto other = args[1].ITensorOrFreeze(ctx);
                     auto gt =
@@ -371,7 +367,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::gt.Scalar(Tensor self, Scalar other) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto otherScalar = args[1].unwrapToScalar().to<float>();
                     auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));
@@ -386,7 +381,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::lt.Tensor(Tensor self, Tensor other) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto other = args[1].ITensorOrFreeze(ctx);
                     auto lt =
@@ -400,7 +394,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::lt.Scalar(Tensor self, Scalar other) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto otherScalar = args[1].unwrapToScalar().to<float>();
                     auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));
@@ -415,7 +408,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::eq.Tensor(Tensor self, Tensor other) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto other = args[1].ITensorOrFreeze(ctx);
                     auto eq =
@@ -429,7 +421,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::eq.Scalar(Tensor self, Scalar other) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto otherScalar = args[1].unwrapToScalar().to<float>();
                     auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));
@@ -444,7 +435,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::ge.Tensor(Tensor self, Tensor other) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto other = args[1].ITensorOrFreeze(ctx);
@@ -468,7 +458,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::ge.Scalar(Tensor self, Scalar other) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto otherScalar = args[1].unwrapToScalar().to<float>();
                     auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));
@@ -493,7 +482,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::le.Tensor(Tensor self, Tensor other) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto other = args[1].ITensorOrFreeze(ctx);
@@ -517,7 +505,6 @@ auto element_wise_registrations TRTORCH_UNUSED =
                   }})
         .pattern({"aten::le.Scalar(Tensor self, Scalar other) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    // TODO: Remove with functionalization
                     auto self = args[0].ITensorOrFreeze(ctx);
                     auto otherScalar = args[1].unwrapToScalar().to<float>();
                     auto other = tensor_to_const(ctx, torch::tensor({otherScalar}));