
Commit b162d19 (2 parents: 29a9163 + ab6a764)

Merge branch 'export-D57463419' of github.com:kirklandsign/executorch into export-D57463419
File tree

17 files changed: +69 −45 lines

backends/arm/tosa_utils.py

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ def dbg_node(node):
     logger.info("  node.meta = ")
     for k, v in node.meta.items():
         logger.info(f"    '{k}' = {v}")
-        if type([]) == type(v):
+        if isinstance(v, list):
            for i in v:
                logger.info(f"      {i} ")
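As background for this and the similar diffs below: `type(x) == T` rejects subclasses, while `isinstance` follows inheritance, which is the behavior `dbg_node` actually wants. A minimal standalone illustration (the `MyList` class is made up for the demo):

class MyList(list):
    pass

v = MyList([1, 2])

print(type(v) == list)       # False: exact-type comparison rejects the subclass
print(type([]) == type(v))   # False: the pattern this commit removes
print(isinstance(v, list))   # True: list and any subclass both pass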

backends/qualcomm/passes/convert_to_linear.py

Lines changed: 5 additions & 1 deletion
@@ -192,7 +192,11 @@ def _convert(self, graph_module: torch.fx.GraphModule):
         for _, src_partitions in partitions.items():
             for src_partition in src_partitions:
                 op_cnt = Counter(
-                    [n.target for n in src_partition.nodes if type(n.target) == edge_op]
+                    [
+                        n.target
+                        for n in src_partition.nodes
+                        if isinstance(n.target, edge_op)
+                    ]
                 )
                 if self.linear in op_cnt:
                     continue
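The `Counter` above tallies how often each edge op appears in a partition so partitions that already contain a linear op can be skipped. A standalone sketch of the same tallying idiom (the string targets and `is_edge` flag are stand-ins for real node targets and the `isinstance` filter):

from collections import Counter

# Stand-ins for src_partition.nodes: (target, passes-the-isinstance-filter) pairs.
nodes = [("aten.mm", True), ("aten.add", True), ("aten.mm", True), (0, False)]

op_cnt = Counter(target for target, is_edge in nodes if is_edge)

print(op_cnt)                    # Counter({'aten.mm': 2, 'aten.add': 1})
print("aten.linear" in op_cnt)   # False, so this partition would be converted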

backends/qualcomm/passes/fold_qdq.py

Lines changed: 1 addition & 1 deletion
@@ -45,7 +45,7 @@ def _fold(self, graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:

             # collecting quant nodes to be removed
             for i in range(1, len(n.args)):
-                if type(n.args[i]) == torch.fx.node.Node:
+                if isinstance(n.args[i], torch.fx.node.Node):
                     to_be_removed.append(n.args[i])
                     # could be a commonly shared attribute between q & dq
                     if n.args[i].target == exir_ops.edge.aten._to_copy.default:
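For context, `n.args` on an fx node mixes `Node` references with literal constants, which is why the pass filters with `isinstance` before collecting. A tiny traced graph that shows both kinds of args (a generic fx illustration, not the QDQ-folding pass itself):

import torch
import torch.fx


def f(x):
    return torch.add(x, 1)  # one Node arg (x), one literal arg (1)


gm = torch.fx.symbolic_trace(f)
for n in gm.graph.nodes:
    if n.op == "call_function":
        for a in n.args:
            # Only graph edges are Nodes; the constant 1 stays a plain int.
            print(a, isinstance(a, torch.fx.node.Node))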

backends/qualcomm/passes/insert_io_qdq.py

Lines changed: 3 additions & 1 deletion
@@ -47,7 +47,9 @@ def _ceate_args(self, target: torch.fx.node.Target, quant_attrs: Dict):
         if name == "out_dtype":
             continue
         value = quant_attrs[name]
-        if type(arg_schema.type) == torch.tensor and type(value) in [int, float]:
+        if isinstance(arg_schema.type, torch.tensor) and (
+            isinstance(value, int) or isinstance(value, float)
+        ):
             value = torch.tensor(value)
         ret.append(value)
     return ret
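Worth noting that `isinstance` also accepts a tuple of types, so `isinstance(value, (int, float))` is the tighter equivalent of the chained `or`. A minimal sketch of that scalar-to-tensor check in isolation (a hypothetical helper, not the committed code):

import torch


def to_tensor_if_scalar(value):
    # A tuple second argument covers both numeric cases in one call.
    if isinstance(value, (int, float)):
        return torch.tensor(value)
    return value


print(to_tensor_if_scalar(3))    # tensor(3)
print(to_tensor_if_scalar("x"))  # 'x' passes through unchanged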

backends/qualcomm/passes/utils.py

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ def get_quant_attrs(
         attr_n = quant_node.args[i]

         value = attr_n
-        if type(attr_n) == torch.fx.node.Node:
+        if isinstance(attr_n, torch.fx.node.Node):
             # could be a commonly shared attribute between q & dq
             if attr_n.target == exir_ops.edge.aten._to_copy.default:
                 value = get_parameter(attr_n.args[0], edge_program)

backends/qualcomm/quantizer/quantizer.py

Lines changed: 1 addition & 1 deletion
@@ -295,7 +295,7 @@ def _get_quant_config(self, op: str | OpOverload) -> Optional[QuantizationConfig
         1. is one of use_per_channel_weight_quant_ops
         2. int8 / int16 config
         """
-        if type(op) == str:
+        if isinstance(op, str):
             return

         if op in self.use_per_channel_weight_quant_ops:

backends/transforms/i64_to_i32.py

Lines changed: 10 additions & 2 deletions
@@ -14,6 +14,14 @@

 class I64toI32(ExportPass):

+    def __init__(self, _skip_dim_order=False):
+        super(I64toI32, self).__init__()
+        self.copy_op = (
+            exir_ops.edge.aten._to_copy.default
+            if _skip_dim_order
+            else exir_ops.edge.dim_order_ops._to_dim_order_copy.default
+        )
+
     def _is_i64_tensor(self, node_val):
         return isinstance(node_val, FakeTensor) and node_val.dtype == torch.int64

@@ -34,7 +42,7 @@ def _apply_to_i32(self, graph: torch.fx.Graph):
             args = (node,)
             node_i32 = graph.create_node(
                 "call_function",
-                exir_ops.edge.dim_order_ops._to_dim_order_copy.default,
+                self.copy_op,
                 args,
                 {"dtype": torch.int32},
             )
@@ -62,7 +70,7 @@ def _apply_to_i32(self, graph: torch.fx.Graph):
             args = (node.args[0][i],)
             node_i64 = graph.create_node(
                 "call_function",
-                exir_ops.edge.dim_order_ops._to_dim_order_copy.default,
+                self.copy_op,
                 args,
                 {"dtype": torch.int64},
             )
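The new constructor resolves the copy op once so both `create_node` call sites stay in sync when `_skip_dim_order` flips. A standalone sketch of that select-once pattern (the string op names stand in for the two `exir_ops` targets above):

class CopyOpSelector:
    def __init__(self, skip_dim_order: bool = False):
        # Resolve the choice once; every rewrite site reads the cached value.
        self.copy_op = "_to_copy" if skip_dim_order else "_to_dim_order_copy"

    def rewrite_site(self) -> str:
        return f"call_function({self.copy_op})"


print(CopyOpSelector().rewrite_site())                     # dim-order copy (default)
print(CopyOpSelector(skip_dim_order=True).rewrite_site())  # plain _to_copy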

backends/vulkan/runtime/graph/ops/glsl/view.glsl

Lines changed: 6 additions & 11 deletions
@@ -16,16 +16,11 @@ layout(std430) buffer;

 #include "indexing_utils.h"

-layout(set = 0, binding = 0, ${IMAGE_FORMAT[DTYPE]}) uniform PRECISION restrict writeonly ${IMAGE_T[NDIM][DTYPE]} image_out;
-layout(set = 0, binding = 1) uniform PRECISION sampler3D image_in;
+${layout_declare_tensor(0, "w", "t_out", DTYPE, STORAGE)}
+${layout_declare_tensor(1, "r", "t_in", DTYPE, STORAGE)}

-layout(set = 0, binding = 2) uniform PRECISION restrict OutSizes {
-  ivec4 out_sizes;
-};
-
-layout(set = 0, binding = 3) uniform PRECISION restrict InSizes {
-  ivec4 in_sizes;
-};
+${layout_declare_ubo(2, "ivec4", "out_sizes")}
+${layout_declare_ubo(3, "ivec4", "in_sizes")}

 layout(local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;

@@ -52,10 +47,10 @@ void main() {
     if (out_tensor_idx[out_packed_dim]++ < out_sizes[out_packed_dim]) {
       ivec4 user_coor = from_nchw_buffer_i(buf_indices[i], in_sizes);
       ivec4 in_pos_elem = to_texture_elem_pos(user_coor, in_sizes, in_packed_dim);
-      VEC4_T intex = texelFetch(image_in, in_pos_elem.xyz, 0);
+      VEC4_T intex = texelFetch(t_in, in_pos_elem.xyz, 0);
       value[i] = intex[in_pos_elem.w];
     }
   }

-  imageStore(image_out, out_pos, value);
+  imageStore(t_out, out_pos, value);
 }

backends/vulkan/runtime/graph/ops/glsl/view.yaml

Lines changed: 2 additions & 0 deletions
@@ -2,9 +2,11 @@ view:
   parameter_names_with_default_values:
     DTYPE: float
     NDIM: 3
+    STORAGE: texture3d
   generate_variant_forall:
     DTYPE:
       - VALUE: half
       - VALUE: float
+      - VALUE: int
   shader_variants:
     - NAME: view
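The two added lines give the view shader a STORAGE knob and an int variant. For intuition, a hedged sketch of how a `generate_variant_forall` expansion behaves (a generic illustration, not the actual ExecuTorch shader codegen):

from itertools import product

# Assumption: variants are the cross product of each generate_variant_forall axis.
defaults = {"DTYPE": "float", "NDIM": 3, "STORAGE": "texture3d"}
forall = {"DTYPE": ["half", "float", "int"]}

for values in product(*forall.values()):
    variant = {**defaults, **dict(zip(forall.keys(), values))}
    print("view:", variant)  # three variants now, one per DTYPE including int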

backends/vulkan/test/test_vulkan_delegate.py

Lines changed: 16 additions & 0 deletions
@@ -1083,6 +1083,22 @@ def forward(self, x):
             memory_layouts=[vk_graph_schema.VkMemoryLayout.TENSOR_CHANNELS_PACKED],
         )

+    def test_vulkan_backend_view_int(self):
+        class ViewModule(torch.nn.Module):
+            def __init__(self):
+                super().__init__()
+
+            def forward(self, x):
+                return x.view([-1, x.size(-1)])
+
+        sample_inputs = (torch.randint(size=(3, 6, 2, 7), high=100, dtype=torch.int32),)
+
+        self.lower_module_and_test_output(
+            ViewModule(),
+            sample_inputs,
+            memory_layouts=[vk_graph_schema.VkMemoryLayout.TENSOR_CHANNELS_PACKED],
+        )
+
     def test_vulkan_backend_unsqueeze(self):
         class UnsqueezeModule(torch.nn.Module):
             def __init__(self):
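The new test runs `view` on an int32 input, which the `int` DTYPE variant above makes lowerable. A quick eager-mode sanity check of the same shape math (plain PyTorch, no Vulkan delegate involved):

import torch

x = torch.randint(high=100, size=(3, 6, 2, 7), dtype=torch.int32)
y = x.view([-1, x.size(-1)])  # collapse leading dims, keep the last

print(y.shape)  # torch.Size([36, 7])
print(y.dtype)  # torch.int32, the dtype the new shader variant covers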
