Commit b9f37cc

lucylq authored and facebook-github-bot committed
Remove redundant prim ops (#160)
Summary:
Pull Request resolved: #160

D47965096 added a bunch of prim ops that can operate on Scalars, so we should deprecate the ones that only work on a specific dtype (e.g., int).

Reviewed By: larryliu0820

Differential Revision: D48745658

fbshipit-source-id: 346c3f42b0c78af88e94d11c61ec6cbdcc01055c
1 parent 7ce2369 commit b9f37cc
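
For context, this commit relies on the Scalar-capable prim ops that the summary says D47965096 added; those kernels are not shown in this diff. The sketch below is an assumption about what such a replacement looks like for addition: the op name executorch_prim::add.Scalar, the dtype dispatch, and the int-to-double promotion rule are illustrative only, not taken from this commit.

// Hypothetical sketch only (not part of this commit): a Scalar-flavored
// kernel of the kind the summary says D47965096 added.
Kernel(
    "executorch_prim::add.Scalar",
    [](RuntimeContext& context, EValue** stack) {
      (void)context;
      EValue& a = *stack[0];
      EValue& b = *stack[1];
      EValue& out = *stack[2];
      if (a.isInt() && b.isInt()) {
        // Pure integer inputs: same result as the deprecated add.int.
        out = EValue(a.toInt() + b.toInt());
      } else {
        // At least one floating-point operand: promote to double (assumed rule).
        double x = a.isInt() ? static_cast<double>(a.toInt()) : a.toDouble();
        double y = b.isInt() ? static_cast<double>(b.toInt()) : b.toDouble();
        out = EValue(x + y);
      }
    }),

Because one Scalar kernel of this shape handles both int and double operands, the int-only add/sub/mul and comparison kernels removed below become redundant.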

File tree

1 file changed (+0, −90 lines)


kernels/prim_ops/register_prim_ops.cpp

Lines changed: 0 additions & 90 deletions
@@ -217,41 +217,6 @@ static Kernel prim_ops[] = {
           }
         }),
 
-    // TODO(T159977211): wait a little bit so older models with these ops are
-    // regenerated and then delete them
-    // executorch_prim::add.int(int, int) -> int
-    Kernel(
-        "executorch_prim::add.int",
-        [](RuntimeContext& context, EValue** stack) {
-          (void)context;
-          EValue& a = *stack[0];
-          EValue& b = *stack[1];
-          EValue& out = *stack[2];
-          out = EValue(a.toInt() + b.toInt());
-        }),
-
-    // executorch_prim::sub.int(int, int) -> int
-    Kernel(
-        "executorch_prim::sub.int",
-        [](RuntimeContext& context, EValue** stack) {
-          (void)context;
-          EValue& a = *stack[0];
-          EValue& b = *stack[1];
-          EValue& out = *stack[2];
-          out = EValue(a.toInt() - b.toInt());
-        }),
-
-    // executorch_prim::mul.int(int, int) -> int
-    Kernel(
-        "executorch_prim::mul.int",
-        [](RuntimeContext& context, EValue** stack) {
-          (void)context;
-          EValue& a = *stack[0];
-          EValue& b = *stack[1];
-          EValue& out = *stack[2];
-          out = EValue(a.toInt() * b.toInt());
-        }),
-
     // executorch_prim::floordiv.int(int, int) -> int
     Kernel(
         "executorch_prim::floordiv.int",
@@ -263,61 +228,6 @@ static Kernel prim_ops[] = {
           out = EValue(a.toInt() / b.toInt());
         }),
 
-    // executorch_prim::eq.int(int, int) -> bool
-    Kernel(
-        "executorch_prim::eq.int",
-        [](RuntimeContext& context, EValue** stack) {
-          (void)context;
-          EValue& a = *stack[0];
-          EValue& b = *stack[1];
-          EValue& out = *stack[2];
-          out = EValue(a.toInt() == b.toInt());
-        }),
-
-    // executorch_prim::gt.int(int, int) -> bool
-    Kernel(
-        "executorch_prim::gt.int",
-        [](RuntimeContext& context, EValue** stack) {
-          (void)context;
-          EValue& a = *stack[0];
-          EValue& b = *stack[1];
-          EValue& out = *stack[2];
-          out = EValue(a.toInt() > b.toInt());
-        }),
-
-    // executorch_prim::lt.int(int, int) -> bool
-    Kernel(
-        "executorch_prim::lt.int",
-        [](RuntimeContext& context, EValue** stack) {
-          (void)context;
-          EValue& a = *stack[0];
-          EValue& b = *stack[1];
-          EValue& out = *stack[2];
-          out = EValue(a.toInt() < b.toInt());
-        }),
-
-    // executorch_prim::ge.int(int, int) -> bool
-    Kernel(
-        "executorch_prim::ge.int",
-        [](RuntimeContext& context, EValue** stack) {
-          (void)context;
-          EValue& a = *stack[0];
-          EValue& b = *stack[1];
-          EValue& out = *stack[2];
-          out = EValue(a.toInt() >= b.toInt());
-        }),
-
-    // executorch_prim::le.int(int, int) -> bool
-    Kernel(
-        "executorch_prim::le.int",
-        [](RuntimeContext& context, EValue** stack) {
-          (void)context;
-          EValue& a = *stack[0];
-          EValue& b = *stack[1];
-          EValue& out = *stack[2];
-          out = EValue(a.toInt() <= b.toInt());
-        }),
-
     // executorch_prim::et_copy_index.tensor(tensor, tensor) -> tensor
     Kernel("executorch_prim::et_copy_index.tensor", &et_copy_index),

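A note on the removed comparison kernels: eq.int, gt.int, lt.int, ge.int, and le.int differ only in the operator they apply, so a single Scalar-aware helper can express all five. The following is a hypothetical sketch of that consolidation; the helper name, the .Scalar op name, and the double-promotion rule are assumptions, not code from this repository.

// Hypothetical sketch only: one helper covering the comparison pattern shared
// by the removed eq.int / gt.int / lt.int / ge.int / le.int kernels.
template <typename Op>
void compare_scalars(EValue** stack, Op op) {
  EValue& a = *stack[0];
  EValue& b = *stack[1];
  EValue& out = *stack[2];
  if (a.isInt() && b.isInt()) {
    // Integer-only comparison, matching the deprecated *.int behavior.
    out = EValue(op(a.toInt(), b.toInt()));
  } else {
    // Mixed or floating-point operands: compare as double (assumed rule).
    double x = a.isInt() ? static_cast<double>(a.toInt()) : a.toDouble();
    double y = b.isInt() ? static_cast<double>(b.toInt()) : b.toDouble();
    out = EValue(op(x, y));
  }
}

// Example registration for one comparison, mirroring the table entries above
// (the op name is an assumption):
Kernel(
    "executorch_prim::le.Scalar",
    [](RuntimeContext& context, EValue** stack) {
      (void)context;
      compare_scalars(stack, [](auto x, auto y) { return x <= y; });
    }),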