diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..31f8fd314 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +18a852ad880473507191eb54f8a8608f811a784a84fe0bbffea60b8d2672dafc \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_net.json b/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_net.json new file mode 100644 index 000000000..d9c8238e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_7/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_7/input_meta.py new file mode 100644 index 000000000..b353fa323 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_7/input_meta.py @@ -0,0 +1,83 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 12800, 4] + dtype = "float32" + min_val = float("-5.16022") + max_val = float("9.99565") + mean = float("0.199758") + std = float("1.08608") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 9600, 4] + dtype = "float32" + min_val = float("-4.81517") + max_val = float("5.22088") + mean = float("-0.0449079") + std = float("1.06453") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 12800, 2] + dtype = "float32" + min_val = float("-7.10677") + max_val = float("7.10966") + mean = float("-0.00799267") + std = float("2.00454") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 9600, 2] + dtype = "float32" + min_val = float("-9.31402") + max_val = float("9.31491") + mean = float("9.33382e-05") + std = float("2.34683") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 90, 4] + dtype = "float32" + max_val = float("0.994016") + mean = float("0.151741") + std = float("0.272922") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [4, 90] + dtype = "int32" + min_val = 0 + max_val = 0 + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [12800, 4] + dtype = "float32" + min_val = float("-0.0125") + max_val = float("1.0125") + mean = float("0.5") + std = float("0.289092") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [9600, 4] + dtype = "float32" + min_val = float("-0.0875") + max_val = float("1.0875") + mean = float("0.5") + std = float("0.295452") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_7/model.py b/paddle_samples/PaddleX/BlazeFace/subgraph_7/model.py new file mode 100644 index 000000000..4e3c795d2 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_7/model.py @@ -0,0 +1,570 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # builtin.combine: ([4x12800x4xf32, 4x9600x4xf32]) <- (4x12800x4xf32, 4x9600x4xf32) + combine_0 = [data_0, data_1] + del data_0, data_1 + + # pd_op.concat: (4x22400x4xf32) <- 
([4x12800x4xf32, 4x9600x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # builtin.combine: ([4x12800x2xf32, 4x9600x2xf32]) <- (4x12800x2xf32, 4x9600x2xf32) + combine_1 = [data_2, data_3] + del data_2, data_3 + + # pd_op.concat: (4x22400x2xf32) <- ([4x12800x2xf32, 4x9600x2xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (4x90x1xi32) <- (4x90xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5 + + # pd_op.cast: (4x90x1xi64) <- (4x90x1xi32) + cast_0 = paddle._C_ops.cast(unsqueeze_0, paddle.int64) + del unsqueeze_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([12800x4xf32, 9600x4xf32]) <- (12800x4xf32, 9600x4xf32) + combine_2 = [data_6, data_7] + del data_6, data_7 + + # pd_op.concat: (22400x4xf32) <- ([12800x4xf32, 9600x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_1) + del combine_2, full_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (360x4xf32) <- (4x90x4xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(data_4, full_int_array_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.unsqueeze: (360x1x4xf32) <- (360x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(reshape_1, full_int_array_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.unsqueeze: (1x22400x4xf32) <- (22400x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(concat_2, full_int_array_3) + del concat_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.slice: (360x1x2xf32) <- (360x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (360x1x2xf32) <- (360x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_4, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (1x22400x2xf32) <- (1x22400x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [2], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1x22400x2xf32) <- (1x22400x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [2], full_int_array_4, full_int_array_5, [1], [] + ) + del full_int_array_5 + + # pd_op.maximum: (360x22400x2xf32) <- (360x1x2xf32, 1x22400x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (360x22400x2xf32) <- (360x1x2xf32, 1x22400x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (360x22400x2xf32) <- (360x22400x2xf32, 360x22400x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (360x22400x2xf32) <- (360x22400x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_2, full_3) + del subtract_0 + + # pd_op.prod: (360x22400xf32) <- (360x22400x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # 
pd_op.subtract: (360x1x2xf32) <- (360x1x2xf32, 360x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (360x1x2xf32) <- (360x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_2, full_3) + del subtract_1 + + # pd_op.prod: (360x1xf32) <- (360x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (1x22400x2xf32) <- (1x22400x2xf32, 1x22400x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x22400x2xf32) <- (1x22400x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_2, full_3) + del full_2, full_3, subtract_2 + + # pd_op.prod: (1x22400xf32) <- (1x22400x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (360x22400xf32) <- (360x1xf32, 1x22400xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (360x22400xf32) <- (360x22400xf32, 360x22400xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (360x22400xf32) <- (360x22400xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_4, float("1e-10"), True) + del full_4, subtract_3 + + # pd_op.divide: (360x22400xf32) <- (360x22400xf32, 360x22400xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [4, -1, 22400] + + # pd_op.reshape: (4x90x22400xf32) <- (360x22400xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(divide_0, full_int_array_6) + del divide_0, full_int_array_6 + + # pd_op.max: (4x22400xf32) <- (4x90x22400xf32, 1xi64) + max_0 = paddle._C_ops.max(reshape_2, full_int_array_2, False) + + # pd_op.full: (1xi64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x22400xi64) <- (4x90x22400xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(reshape_2, full_5, False, False, paddle.int64) + del full_5 + + # pd_op.max: (4x90xf32) <- (4x90x22400xf32, 1xi64) + max_1 = paddle._C_ops.max(reshape_2, full_int_array_4, False) + + # pd_op.full: (1xi64) <- () + full_6 = paddle._C_ops.full( + [1], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x90xi64) <- (4x90x22400xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(reshape_2, full_6, False, False, paddle.int64) + del full_6, reshape_2 + + # pd_op.full: (1xf64) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_8 = paddle._C_ops.full( + [1], float("4"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (4xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_7, full_8, full_9, dtype="int64") + del full_7, full_8, full_9 + + # pd_op.unsqueeze: (4x1xi64) <- (4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_7 = [1, 22400] + + # pd_op.tile: (4x22400xi64) <- (4x1xi64, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_3, full_int_array_7) + del full_int_array_7 + + # builtin.combine: ([4x22400xi64, 4x22400xi64]) <- (4x22400xi64, 4x22400xi64) + 
combine_3 = [tile_0, argmax_0] + del argmax_0, tile_0 + + # pd_op.stack: (4x22400x2xi64) <- ([4x22400xi64, 4x22400xi64]) + stack_0 = paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.gather_nd: (4x22400x4xf32) <- (4x90x4xf32, 4x22400x2xi64) + gather_nd_0 = paddle._C_ops.gather_nd(data_4, stack_0) + del data_4 + + # pd_op.gather_nd: (4x22400x1xi64) <- (4x90x1xi64, 4x22400x2xi64) + gather_nd_1 = paddle._C_ops.gather_nd(cast_0, stack_0) + del stack_0 + + # pd_op.full: (4x22400x1xi64) <- () + full_10 = paddle._C_ops.full( + [4, 22400, 1], + float("1"), + paddle.int64, + paddle.framework._current_expected_place(), + ) + + # pd_op.unsqueeze: (4x22400x1xf32) <- (4x22400xf32, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.full: (xf32) <- () + full_11 = paddle._C_ops.full( + [], + float("0.35"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.less_than: (4x22400x1xb) <- (4x22400x1xf32, xf32) + less_than_0 = paddle._C_ops.less_than(unsqueeze_4, full_11) + del full_11, unsqueeze_4 + + # pd_op.where: (4x22400x1xi64) <- (4x22400x1xb, 4x22400x1xi64, 4x22400x1xi64) + where_0 = paddle._C_ops.where(less_than_0, full_10, gather_nd_1) + del full_10, gather_nd_1, less_than_0 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("22400"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x1xi64) <- (4x1xi64, 1xf32) + scale_1 = paddle._C_ops.scale(unsqueeze_3, full_12, float("0"), True) + del full_12, unsqueeze_3 + + # pd_op.add: (4x90xi64) <- (4x1xi64, 4x90xi64) + add_1 = paddle._C_ops.add(scale_1, argmax_1) + del argmax_1, scale_1 + + # pd_op.flatten: (360xi64) <- (4x90xi64) + flatten_0 = paddle._C_ops.flatten(add_1, 0, 1) + del add_1 + + # pd_op.reshape: (89600x4xf32) <- (4x22400x4xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(gather_nd_0, full_int_array_1) + del gather_nd_0 + + # pd_op.scatter: (89600x4xf32) <- (89600x4xf32, 360xi64, 360x4xf32) + scatter_0 = paddle._C_ops.scatter(reshape_3, flatten_0, reshape_1, True) + del reshape_1, reshape_3 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_8 = [4, -1, 4] + + # pd_op.reshape: (4x22400x4xf32) <- (89600x4xf32, 3xi64) + reshape_4 = paddle._C_ops.reshape(scatter_0, full_int_array_8) + del scatter_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_9 = [-1, 1] + + # pd_op.reshape: (89600x1xi64) <- (4x22400x1xi64, 2xi64) + reshape_5 = paddle._C_ops.reshape(where_0, full_int_array_9) + del where_0 + + # pd_op.reshape: (360x1xi64) <- (4x90x1xi64, 2xi64) + reshape_6 = paddle._C_ops.reshape(cast_0, full_int_array_9) + del cast_0, full_int_array_9 + + # pd_op.scatter: (89600x1xi64) <- (89600x1xi64, 360xi64, 360x1xi64) + scatter_1 = paddle._C_ops.scatter(reshape_5, flatten_0, reshape_6, True) + del flatten_0, reshape_5, reshape_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_10 = [4, -1, 1] + + # pd_op.reshape: (4x22400x1xi64) <- (89600x1xi64, 3xi64) + reshape_7 = paddle._C_ops.reshape(scatter_1, full_int_array_10) + del full_int_array_10, scatter_1 + + # pd_op.set_value_: (4x22400x1xi64) <- (4x22400x1xi64, 1xi64, 1xi64, 1xi64) + set_value__0 = paddle._C_ops.set_value_( + reshape_7, + full_int_array_3, + full_int_array_2, + full_int_array_2, + [1], + [], + [], + [1], + [float("1")], + ) + del reshape_7 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_11 = [4, 1, 1] + + # pd_op.tile: (4x22400x4xf32) <- (1x22400x4xf32, 3xi64) + tile_1 = paddle._C_ops.tile(unsqueeze_2, 
full_int_array_11) + del full_int_array_11, unsqueeze_2 + + # pd_op.reshape: (89600x4xf32) <- (4x22400x4xf32, 2xi64) + reshape_8 = paddle._C_ops.reshape(tile_1, full_int_array_1) + del tile_1 + + # pd_op.reshape: (89600x4xf32) <- (4x22400x4xf32, 2xi64) + reshape_9 = paddle._C_ops.reshape(reshape_4, full_int_array_1) + del full_int_array_1, reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_12 = [3] + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + reshape_8, [1], full_int_array_4, full_int_array_12, [1], [1] + ) + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + reshape_8, [1], full_int_array_3, full_int_array_2, [1], [1] + ) + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_4 = paddle._C_ops.subtract(slice_4, slice_5) + del slice_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_13 = [4] + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + reshape_8, [1], full_int_array_12, full_int_array_13, [1], [1] + ) + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + reshape_8, [1], full_int_array_2, full_int_array_4, [1], [1] + ) + del reshape_8 + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_5 = paddle._C_ops.subtract(slice_6, slice_7) + del slice_6 + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_4, full_13, float("0"), True) + + # pd_op.add: (89600xf32) <- (89600xf32, 89600xf32) + add_2 = paddle._C_ops.add(slice_5, scale_2) + del scale_2, slice_5 + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_3 = paddle._C_ops.scale(subtract_5, full_13, float("0"), True) + + # pd_op.add: (89600xf32) <- (89600xf32, 89600xf32) + add_3 = paddle._C_ops.add(slice_7, scale_3) + del scale_3, slice_7 + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + reshape_9, [1], full_int_array_4, full_int_array_12, [1], [1] + ) + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + reshape_9, [1], full_int_array_3, full_int_array_2, [1], [1] + ) + del full_int_array_3 + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_6 = paddle._C_ops.subtract(slice_8, slice_9) + del slice_8 + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + reshape_9, [1], full_int_array_12, full_int_array_13, [1], [1] + ) + del full_int_array_12, full_int_array_13 + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + reshape_9, [1], full_int_array_2, full_int_array_4, [1], [1] + ) + del full_int_array_2, full_int_array_4, reshape_9 + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_7 = paddle._C_ops.subtract(slice_10, slice_11) + del slice_10 + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_4 = paddle._C_ops.scale(subtract_6, full_13, float("0"), True) + + # pd_op.add: (89600xf32) <- (89600xf32, 89600xf32) + add_4 = paddle._C_ops.add(slice_9, scale_4) + del scale_4, slice_9 + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_7, full_13, float("0"), True) + del full_13 + + # pd_op.add: (89600xf32) <- (89600xf32, 89600xf32) + add_5 = paddle._C_ops.add(slice_11, 
scale_5) + del scale_5, slice_11 + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_8 = paddle._C_ops.subtract(add_4, add_2) + del add_2, add_4 + + # pd_op.full: (1xf32) <- () + full_14 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_6 = paddle._C_ops.scale(subtract_8, full_14, float("0"), True) + del subtract_8 + + # pd_op.divide: (89600xf32) <- (89600xf32, 89600xf32) + divide_1 = paddle._C_ops.divide(scale_6, subtract_4) + del scale_6 + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_9 = paddle._C_ops.subtract(add_5, add_3) + del add_3, add_5 + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_7 = paddle._C_ops.scale(subtract_9, full_14, float("0"), True) + del full_14, subtract_9 + + # pd_op.divide: (89600xf32) <- (89600xf32, 89600xf32) + divide_2 = paddle._C_ops.divide(scale_7, subtract_5) + del scale_7 + + # pd_op.divide: (89600xf32) <- (89600xf32, 89600xf32) + divide_3 = paddle._C_ops.divide(subtract_6, subtract_4) + del subtract_4, subtract_6 + + # pd_op.log: (89600xf32) <- (89600xf32) + log_0 = paddle._C_ops.log(divide_3) + del divide_3 + + # pd_op.full: (1xf32) <- () + full_15 = paddle._C_ops.full( + [1], float("5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_8 = paddle._C_ops.scale(log_0, full_15, float("0"), True) + del log_0 + + # pd_op.divide: (89600xf32) <- (89600xf32, 89600xf32) + divide_4 = paddle._C_ops.divide(subtract_7, subtract_5) + del subtract_5, subtract_7 + + # pd_op.log: (89600xf32) <- (89600xf32) + log_1 = paddle._C_ops.log(divide_4) + del divide_4 + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_9 = paddle._C_ops.scale(log_1, full_15, float("0"), True) + del full_15, log_1 + + # builtin.combine: ([89600xf32, 89600xf32, 89600xf32, 89600xf32]) <- (89600xf32, 89600xf32, 89600xf32, 89600xf32) + combine_4 = [divide_1, divide_2, scale_8, scale_9] + del divide_1, divide_2, scale_8, scale_9 + + # pd_op.stack: (89600x4xf32) <- ([89600xf32, 89600xf32, 89600xf32, 89600xf32]) + stack_1 = paddle._C_ops.stack(combine_4, 1) + del combine_4 + + # pd_op.reshape: (4x22400x4xf32) <- (89600x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(stack_1, full_int_array_8) + del full_int_array_8, stack_1 + + # pd_op.full: (xi64) <- () + full_16 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (4x22400x1xb) <- (4x22400x1xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(set_value__0, full_16) + del full_16 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_14 = [1, 1, 4] + + # pd_op.tile: (4x22400x4xb) <- (4x22400x1xb, 3xi64) + tile_2 = paddle._C_ops.tile(not_equal_0, full_int_array_14) + del full_int_array_14, not_equal_0 + + # pd_op.cast: (4x22400x4xf32) <- (4x22400x4xb) + cast_1 = paddle._C_ops.cast(tile_2, paddle.float32) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_15 = [] + + # pd_op.sum: (xf32) <- (4x22400x4xf32, 0xi64) + sum_0 = paddle._C_ops.sum(cast_1, full_int_array_15, None, False) + del cast_1, full_int_array_15 + + # pd_op.full: (xf32) <- () + full_17 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(sum_0, full_17) + del assign_0, full_0, full_17, set_value__0, sum_0, tile_2 + + return greater_than_0, concat_0, 
reshape_0, concat_1 diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/graph_hash.txt new file mode 100644 index 000000000..20c143a93 --- /dev/null +++ b/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/graph_hash.txt @@ -0,0 +1 @@ +8ba29eb681f8ccf67551951fccafed8c182876cdbe2d806f67c0e390081861f1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/graph_net.json b/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/graph_net.json new file mode 100644 index 000000000..0090ee192 --- /dev/null +++ b/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "Co-DINO-R50", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/input_meta.py b/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/input_meta.py new file mode 100644 index 000000000..84cb300a6 --- /dev/null +++ b/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/input_meta.py @@ -0,0 +1,32 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 1, 45675] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000394089") + std = float("0.0198478") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 45675] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000394089") + std = float("0.0198478") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 1, 4] + dtype = "float32" + data = [234.667, 258.033, 330.667, 353.995] diff --git a/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/model.py b/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/model.py new file mode 100644 index 000000000..ee31d4abf --- /dev/null +++ b/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/model.py @@ -0,0 +1,152 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x45675xi64) <- (1x1x45675xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del data_0, full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_2, dtype="int32") + del full_1, full_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0, full_int_array_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = 
paddle._C_ops.scale(unsqueeze_0, full_3, float("0"), True) + del full_3, unsqueeze_0 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x45675xi64) <- (1x45675xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (1xi32) <- (1x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (45675xi64) <- (1x45675xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (45675xi32) <- (1xi32, 45675xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [1, 45675] + + # pd_op.reshape: (1x45675xi32) <- (45675xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x45675xb) <- (1x45675xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_5) + del data_2, full_5 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x45675xi32) <- (1x45675xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (1x45675xi32) <- (1x45675xb, 1x45675xi32, 1x45675xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (1x4xf32) <- (1x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del data_3, full_int_array_2 + + # pd_op.gather: (45675x4xf32) <- (1x4xf32, 45675xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [1, 45675, 4] + + # pd_op.reshape: (1x45675x4xf32) <- (45675x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x45675x5xf32) <- (1x45675xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_7, where_0.dtype), full_7 + ) + del full_7 + + # pd_op.full: (4xi64) <- () + full_8 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (1x45675x4xf32) <- (1x45675x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0, where_0 + + return reshape_0, index_select_0 diff --git a/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/weight_meta.py b/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ 
b/paddle_samples/PaddleX/Co-DINO-R50/subgraph_17/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/DETR-R50/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/DETR-R50/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..b9b2382f8 --- /dev/null +++ b/paddle_samples/PaddleX/DETR-R50/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +c5d023bf65a93977c70557fdb65ca8e6d9f54021218fdaaf7e84771804b852c1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/DETR-R50/subgraph_15/graph_net.json b/paddle_samples/PaddleX/DETR-R50/subgraph_15/graph_net.json new file mode 100644 index 000000000..9fc894bd5 --- /dev/null +++ b/paddle_samples/PaddleX/DETR-R50/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "DETR-R50", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/DETR-R50/subgraph_15/input_meta.py b/paddle_samples/PaddleX/DETR-R50/subgraph_15/input_meta.py new file mode 100644 index 000000000..35a4ff2ca --- /dev/null +++ b/paddle_samples/PaddleX/DETR-R50/subgraph_15/input_meta.py @@ -0,0 +1,430 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [256, 768] + dtype = "float32" + min_val = float("-1.63085") + max_val = float("1.267") + mean = float("-0.000355538") + std = float("0.0713932") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [768] + dtype = "float32" + min_val = float("-0.207461") + max_val = float("0.221246") + mean = float("-0.000491435") + std = float("0.0355132") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.618734") + max_val = float("0.627476") + mean = float("-0.000102626") + std = float("0.101776") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [768] + dtype = "float32" + min_val = float("-0.0815249") + max_val = float("0.126028") + mean = float("0.000600134") + std = float("0.0164441") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [256, 768] + dtype = "float32" + min_val = float("-1.20947") + max_val = float("1.03771") + mean = float("-0.000570685") + std = float("0.0955079") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [768] + dtype = "float32" + min_val = float("-0.100344") + max_val = float("0.217363") + mean = float("-0.000461502") + std = float("0.0206661") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [256, 768] + dtype = "float32" + min_val = float("-1.53645") + max_val = float("1.8139") + mean = float("-0.000285924") + std = float("0.0959809") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [768] + dtype = "float32" + min_val = float("-0.151873") + max_val = float("0.162472") + mean = float("0.000600261") + std = float("0.0242047") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [256, 768] + dtype = "float32" + min_val = float("-1.36585") + max_val = float("1.43055") + mean = float("7.37652e-06") + std = float("0.0990333") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [768] + dtype = "float32" + min_val = float("-0.0735198") + max_val = float("0.0842782") + mean = float("0.0010846") + std = float("0.0203189") + data = None + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [256, 768] + dtype = "float32" + min_val = 
float("-1.53265") + max_val = float("1.21911") + mean = float("0.00022341") + std = float("0.0901821") + data = None + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [768] + dtype = "float32" + min_val = float("-0.0555385") + max_val = float("0.063343") + mean = float("0.000162717") + std = float("0.0180918") + data = None + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [100, 256] + dtype = "float32" + min_val = float("-4.02943") + max_val = float("3.99959") + mean = float("0.00429286") + std = float("0.966819") + data = None + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.22778") + max_val = float("0.239259") + mean = float("3.67452e-05") + std = float("0.0539069") + data = None + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [768] + dtype = "float32" + min_val = float("-0.307561") + max_val = float("0.257047") + mean = float("-0.00338742") + std = float("0.0694692") + data = None + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [256, 768] + dtype = "float32" + min_val = float("-1.28518") + max_val = float("0.990493") + mean = float("0.00016438") + std = float("0.128939") + data = None + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [768] + dtype = "float32" + min_val = float("-0.405551") + max_val = float("0.414575") + mean = float("-0.00544822") + std = float("0.0841028") + data = None + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.527811") + max_val = float("0.557855") + mean = float("-0.000112733") + std = float("0.0637865") + data = None + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [768] + dtype = "float32" + min_val = float("-0.191098") + max_val = float("0.174426") + mean = float("-2.75892e-05") + std = float("0.0344122") + data = None + + +class Program_weight_tensor_data_19: + name = "data_19" + shape = [256, 768] + dtype = "float32" + min_val = float("-1.84868") + max_val = float("1.72467") + mean = float("0.00039185") + std = float("0.126066") + data = None + + +class Program_weight_tensor_data_20: + name = "data_20" + shape = [768] + dtype = "float32" + min_val = float("-0.368801") + max_val = float("0.462026") + mean = float("-0.00065409") + std = float("0.0948952") + data = None + + +class Program_weight_tensor_data_21: + name = "data_21" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.727664") + max_val = float("0.505205") + mean = float("6.57902e-05") + std = float("0.064879") + data = None + + +class Program_weight_tensor_data_22: + name = "data_22" + shape = [768] + dtype = "float32" + min_val = float("-0.139697") + max_val = float("0.212708") + mean = float("0.000286629") + std = float("0.0344799") + data = None + + +class Program_weight_tensor_data_23: + name = "data_23" + shape = [256, 768] + dtype = "float32" + min_val = float("-2.28813") + max_val = float("2.36439") + mean = float("-4.41011e-05") + std = float("0.126715") + data = None + + +class Program_weight_tensor_data_24: + name = "data_24" + shape = [768] + dtype = "float32" + min_val = float("-0.413832") + max_val = float("0.427045") + mean = float("-7.04009e-05") + std = float("0.0972169") + data = None + + +class Program_weight_tensor_data_25: + name = "data_25" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.469144") + max_val = float("0.686823") + mean = float("0.000205739") + std = 
float("0.0678089") + data = None + + +class Program_weight_tensor_data_26: + name = "data_26" + shape = [768] + dtype = "float32" + min_val = float("-0.229024") + max_val = float("0.201455") + mean = float("-0.000322507") + std = float("0.0411113") + data = None + + +class Program_weight_tensor_data_27: + name = "data_27" + shape = [256, 768] + dtype = "float32" + min_val = float("-1.69643") + max_val = float("1.49155") + mean = float("-2.62935e-05") + std = float("0.123823") + data = None + + +class Program_weight_tensor_data_28: + name = "data_28" + shape = [768] + dtype = "float32" + min_val = float("-0.461533") + max_val = float("0.43026") + mean = float("-0.00329753") + std = float("0.0931051") + data = None + + +class Program_weight_tensor_data_29: + name = "data_29" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.671008") + max_val = float("0.553923") + mean = float("0.000106955") + std = float("0.0692481") + data = None + + +class Program_weight_tensor_data_30: + name = "data_30" + shape = [768] + dtype = "float32" + min_val = float("-0.202222") + max_val = float("0.188967") + mean = float("-0.000664419") + std = float("0.0404861") + data = None + + +class Program_weight_tensor_data_31: + name = "data_31" + shape = [256, 768] + dtype = "float32" + min_val = float("-2.3972") + max_val = float("1.80686") + mean = float("0.000181416") + std = float("0.119292") + data = None + + +class Program_weight_tensor_data_32: + name = "data_32" + shape = [768] + dtype = "float32" + min_val = float("-0.34518") + max_val = float("0.498495") + mean = float("0.00220785") + std = float("0.0898271") + data = None + + +class Program_weight_tensor_data_33: + name = "data_33" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.408996") + max_val = float("0.470185") + mean = float("-2.81642e-05") + std = float("0.062295") + data = None + + +class Program_weight_tensor_data_34: + name = "data_34" + shape = [768] + dtype = "float32" + min_val = float("-0.318677") + max_val = float("0.28566") + mean = float("0.000167696") + std = float("0.0409036") + data = None + + +class Program_weight_tensor_data_35: + name = "data_35" + shape = [256, 768] + dtype = "float32" + min_val = float("-2.31921") + max_val = float("1.91952") + mean = float("0.000351658") + std = float("0.122176") + data = None + + +class Program_weight_tensor_data_36: + name = "data_36" + shape = [768] + dtype = "float32" + min_val = float("-0.448017") + max_val = float("0.554305") + mean = float("0.00193022") + std = float("0.0859855") + data = None + + +class Program_weight_tensor_data_37: + name = "data_37" + shape = [1, 3, 1199, 800] + dtype = "float32" + min_val = float("-1.91241") + max_val = float("1.9254") + mean = float("0.190818") + std = float("1.25268") + data = None + + +class Program_weight_tensor_data_38: + name = "data_38" + shape = [1, 2] + dtype = "float32" + data = [1199.0, 800.0] + + +class Program_weight_tensor_data_39: + name = "data_39" + shape = [1, 2] + dtype = "float32" + data = [2.9975, 2.99625] diff --git a/paddle_samples/PaddleX/DETR-R50/subgraph_15/model.py b/paddle_samples/PaddleX/DETR-R50/subgraph_15/model.py new file mode 100644 index 000000000..fd42d6df4 --- /dev/null +++ b/paddle_samples/PaddleX/DETR-R50/subgraph_15/model.py @@ -0,0 +1,6763 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + 
parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + 
parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + 
data_16, + data_17, + data_18, + data_19, + data_20, + data_21, + data_22, + data_23, + data_24, + data_25, + data_26, + data_27, + data_28, + data_29, + data_30, + data_31, + data_32, + data_33, + data_34, + data_35, + data_36, + data_37, + data_38, + data_39, + ): + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x3x-1x-1xf32, 64x3x7x7xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_37, parameter_420, [2, 2], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_420 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_419, + parameter_418, + parameter_417, + parameter_416, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_416, parameter_417, parameter_418, parameter_419 + + # pd_op.relu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + relu_0 = paddle._C_ops.relu(batch_norm__0) + del batch_norm__0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [3, 3] + + # pd_op.pool2d: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + relu_0, + full_int_array_0, + [2, 2], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_0, relu_0 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 64x64x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + pool2d_0, parameter_415, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_415 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_414, + parameter_413, + parameter_412, + parameter_411, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_411, parameter_412, parameter_413, parameter_414 + + # pd_op.relu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + relu_1 = paddle._C_ops.relu(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 64x64x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + relu_1, parameter_410, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_410, relu_1 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_409, + parameter_408, + parameter_407, + parameter_406, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_406, parameter_407, parameter_408, parameter_409 + + # pd_op.relu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + relu_2 = paddle._C_ops.relu(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x64x-1x-1xf32, 256x64x1x1xf32) + 
conv2d_3 = paddle._C_ops.conv2d( + relu_2, parameter_405, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_405, relu_2 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_404, + parameter_403, + parameter_402, + parameter_401, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_401, parameter_402, parameter_403, parameter_404 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x64x-1x-1xf32, 256x64x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + pool2d_0, parameter_400, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_400, pool2d_0 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_399, + parameter_398, + parameter_397, + parameter_396, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_396, parameter_397, parameter_398, parameter_399 + + # pd_op.add: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32, -1x256x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__18, batch_norm__24) + del batch_norm__18, batch_norm__24 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_3 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x256x-1x-1xf32, 64x256x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + relu_3, parameter_395, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_395 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_394, + parameter_393, + parameter_392, + parameter_391, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_391, parameter_392, parameter_393, parameter_394 + + # pd_op.relu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + relu_4 = paddle._C_ops.relu(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 64x64x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + relu_4, parameter_390, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_390, relu_4 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_389, + parameter_388, + parameter_387, + parameter_386, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + 
), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_386, parameter_387, parameter_388, parameter_389 + + # pd_op.relu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + relu_5 = paddle._C_ops.relu(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x64x-1x-1xf32, 256x64x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + relu_5, parameter_385, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_385, relu_5 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_384, + parameter_383, + parameter_382, + parameter_381, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_381, parameter_382, parameter_383, parameter_384 + + # pd_op.add: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32, -1x256x-1x-1xf32) + add_1 = paddle._C_ops.add(batch_norm__42, relu_3) + del batch_norm__42, relu_3 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_6 = paddle._C_ops.relu(add_1) + del add_1 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x256x-1x-1xf32, 64x256x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + relu_6, parameter_380, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_380 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_379, + parameter_378, + parameter_377, + parameter_376, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_376, parameter_377, parameter_378, parameter_379 + + # pd_op.relu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + relu_7 = paddle._C_ops.relu(batch_norm__48) + del batch_norm__48 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 64x64x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + relu_7, parameter_375, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_375, relu_7 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_374, + parameter_373, + parameter_372, + parameter_371, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_371, parameter_372, parameter_373, parameter_374 + + # pd_op.relu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + relu_8 = paddle._C_ops.relu(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x64x-1x-1xf32, 256x64x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + relu_8, parameter_370, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_370, relu_8 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_369, + parameter_368, + parameter_367, + parameter_366, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_366, parameter_367, parameter_368, parameter_369 + + # pd_op.add: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32, -1x256x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, relu_6) + del batch_norm__60, relu_6 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_9 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x256x-1x-1xf32, 128x256x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + relu_9, parameter_365, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_365 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_364, + parameter_363, + parameter_362, + parameter_361, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_361, parameter_362, parameter_363, parameter_364 + + # pd_op.relu: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + relu_10 = paddle._C_ops.relu(batch_norm__66) + del batch_norm__66 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32, 128x128x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + relu_10, parameter_360, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_360, relu_10 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_359, + parameter_358, + parameter_357, + parameter_356, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_356, parameter_357, parameter_358, parameter_359 + + # pd_op.relu: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + relu_11 = paddle._C_ops.relu(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x128x-1x-1xf32, 512x128x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_11, parameter_355, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_355, relu_11 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_354, + parameter_353, + parameter_352, + parameter_351, + True, + float("0.9"), + 
float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_351, parameter_352, parameter_353, parameter_354 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x256x-1x-1xf32, 512x256x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + relu_9, parameter_350, [2, 2], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_350, relu_9 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_349, + parameter_348, + parameter_347, + parameter_346, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_346, parameter_347, parameter_348, parameter_349 + + # pd_op.add: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32, -1x512x-1x-1xf32) + add_3 = paddle._C_ops.add(batch_norm__78, batch_norm__84) + del batch_norm__78, batch_norm__84 + + # pd_op.relu: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + relu_12 = paddle._C_ops.relu(add_3) + del add_3 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x512x-1x-1xf32, 128x512x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + relu_12, parameter_345, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_345 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_344, + parameter_343, + parameter_342, + parameter_341, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_341, parameter_342, parameter_343, parameter_344 + + # pd_op.relu: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + relu_13 = paddle._C_ops.relu(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32, 128x128x3x3xf32) + conv2d_16 = paddle._C_ops.conv2d( + relu_13, parameter_340, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_340, relu_13 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_339, + parameter_338, + parameter_337, + parameter_336, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_336, parameter_337, parameter_338, parameter_339 + + # pd_op.relu: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + relu_14 = paddle._C_ops.relu(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x128x-1x-1xf32, 512x128x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + relu_14, parameter_335, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_335, relu_14 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_334, + parameter_333, + parameter_332, + parameter_331, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_331, parameter_332, parameter_333, parameter_334 + + # pd_op.add: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32, -1x512x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__102, relu_12) + del batch_norm__102, relu_12 + + # pd_op.relu: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + relu_15 = paddle._C_ops.relu(add_4) + del add_4 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x512x-1x-1xf32, 128x512x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + relu_15, parameter_330, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_330 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_329, + parameter_328, + parameter_327, + parameter_326, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_326, parameter_327, parameter_328, parameter_329 + + # pd_op.relu: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + relu_16 = paddle._C_ops.relu(batch_norm__108) + del batch_norm__108 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32, 128x128x3x3xf32) + conv2d_19 = paddle._C_ops.conv2d( + relu_16, parameter_325, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_325, relu_16 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_324, + parameter_323, + parameter_322, + parameter_321, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_321, parameter_322, parameter_323, parameter_324 + + # pd_op.relu: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + relu_17 = paddle._C_ops.relu(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x128x-1x-1xf32, 512x128x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + relu_17, parameter_320, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_320, relu_17 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_319, + parameter_318, + parameter_317, + parameter_316, 
+ True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_316, parameter_317, parameter_318, parameter_319 + + # pd_op.add: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32, -1x512x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__120, relu_15) + del batch_norm__120, relu_15 + + # pd_op.relu: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + relu_18 = paddle._C_ops.relu(add_5) + del add_5 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x512x-1x-1xf32, 128x512x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + relu_18, parameter_315, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_315 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_314, + parameter_313, + parameter_312, + parameter_311, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_311, parameter_312, parameter_313, parameter_314 + + # pd_op.relu: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + relu_19 = paddle._C_ops.relu(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32, 128x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + relu_19, parameter_310, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_310, relu_19 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_309, + parameter_308, + parameter_307, + parameter_306, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_306, parameter_307, parameter_308, parameter_309 + + # pd_op.relu: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + relu_20 = paddle._C_ops.relu(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x128x-1x-1xf32, 512x128x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + relu_20, parameter_305, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_305, relu_20 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_304, + parameter_303, + parameter_302, + parameter_301, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_301, parameter_302, parameter_303, parameter_304 + + # pd_op.add: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32, -1x512x-1x-1xf32) + add_6 = paddle._C_ops.add(batch_norm__138, relu_18) + del batch_norm__138, relu_18 + + # 
pd_op.relu: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + relu_21 = paddle._C_ops.relu(add_6) + del add_6 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x512x-1x-1xf32, 256x512x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + relu_21, parameter_300, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_300 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_299, + parameter_298, + parameter_297, + parameter_296, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_296, parameter_297, parameter_298, parameter_299 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_22 = paddle._C_ops.relu(batch_norm__144) + del batch_norm__144 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + relu_22, parameter_295, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_295, relu_22 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_294, + parameter_293, + parameter_292, + parameter_291, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_291, parameter_292, parameter_293, parameter_294 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_23 = paddle._C_ops.relu(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_26 = paddle._C_ops.conv2d( + relu_23, parameter_290, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_290, relu_23 + + # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_289, + parameter_288, + parameter_287, + parameter_286, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_286, parameter_287, parameter_288, parameter_289 + + # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x512x-1x-1xf32, 1024x512x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + relu_21, parameter_285, [2, 2], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_285, relu_21 + + # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + 
parameter_284, + parameter_283, + parameter_282, + parameter_281, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_281, parameter_282, parameter_283, parameter_284 + + # pd_op.add: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32, -1x1024x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__156, batch_norm__162) + del batch_norm__156, batch_norm__162 + + # pd_op.relu: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) + relu_24 = paddle._C_ops.relu(add_7) + del add_7 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x1024x-1x-1xf32, 256x1024x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + relu_24, parameter_280, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_280 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_279, + parameter_278, + parameter_277, + parameter_276, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_276, parameter_277, parameter_278, parameter_279 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_25 = paddle._C_ops.relu(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + relu_25, parameter_275, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_275, relu_25 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_274, + parameter_273, + parameter_272, + parameter_271, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_271, parameter_272, parameter_273, parameter_274 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_26 = paddle._C_ops.relu(batch_norm__174) + del batch_norm__174 + + # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + relu_26, parameter_270, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_270, relu_26 + + # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_269, + parameter_268, + parameter_267, + parameter_266, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_266, parameter_267, parameter_268, parameter_269 + + # pd_op.add: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32, 
-1x1024x-1x-1xf32) + add_8 = paddle._C_ops.add(batch_norm__180, relu_24) + del batch_norm__180, relu_24 + + # pd_op.relu: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) + relu_27 = paddle._C_ops.relu(add_8) + del add_8 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x1024x-1x-1xf32, 256x1024x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + relu_27, parameter_265, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_265 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_264, + parameter_263, + parameter_262, + parameter_261, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_261, parameter_262, parameter_263, parameter_264 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_28 = paddle._C_ops.relu(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + relu_28, parameter_260, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_260, relu_28 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_259, + parameter_258, + parameter_257, + parameter_256, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_256, parameter_257, parameter_258, parameter_259 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_29 = paddle._C_ops.relu(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_33 = paddle._C_ops.conv2d( + relu_29, parameter_255, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_255, relu_29 + + # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_254, + parameter_253, + parameter_252, + parameter_251, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_251, parameter_252, parameter_253, parameter_254 + + # pd_op.add: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32, -1x1024x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__198, relu_27) + del batch_norm__198, relu_27 + + # pd_op.relu: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) + relu_30 = paddle._C_ops.relu(add_9) + del add_9 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x1024x-1x-1xf32, 256x1024x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + relu_30, parameter_250, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" 
+ ) + del parameter_250 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_249, + parameter_248, + parameter_247, + parameter_246, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_246, parameter_247, parameter_248, parameter_249 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_31 = paddle._C_ops.relu(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + relu_31, parameter_245, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_245, relu_31 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_244, + parameter_243, + parameter_242, + parameter_241, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_241, parameter_242, parameter_243, parameter_244 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_32 = paddle._C_ops.relu(batch_norm__210) + del batch_norm__210 + + # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_36 = paddle._C_ops.conv2d( + relu_32, parameter_240, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_240, relu_32 + + # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_239, + parameter_238, + parameter_237, + parameter_236, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_236, parameter_237, parameter_238, parameter_239 + + # pd_op.add: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32, -1x1024x-1x-1xf32) + add_10 = paddle._C_ops.add(batch_norm__216, relu_30) + del batch_norm__216, relu_30 + + # pd_op.relu: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) + relu_33 = paddle._C_ops.relu(add_10) + del add_10 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x1024x-1x-1xf32, 256x1024x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + relu_33, parameter_235, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_235 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_234, + parameter_233, + 
parameter_232, + parameter_231, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_231, parameter_232, parameter_233, parameter_234 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_34 = paddle._C_ops.relu(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_38 = paddle._C_ops.conv2d( + relu_34, parameter_230, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_230, relu_34 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_229, + parameter_228, + parameter_227, + parameter_226, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_38, parameter_226, parameter_227, parameter_228, parameter_229 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_35 = paddle._C_ops.relu(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + relu_35, parameter_225, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_225, relu_35 + + # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_224, + parameter_223, + parameter_222, + parameter_221, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_39, parameter_221, parameter_222, parameter_223, parameter_224 + + # pd_op.add: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32, -1x1024x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__234, relu_33) + del batch_norm__234, relu_33 + + # pd_op.relu: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) + relu_36 = paddle._C_ops.relu(add_11) + del add_11 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x1024x-1x-1xf32, 256x1024x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + relu_36, parameter_220, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_220 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_219, + parameter_218, + parameter_217, + parameter_216, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_216, parameter_217, parameter_218, parameter_219 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_37 = paddle._C_ops.relu(batch_norm__240) + del 
batch_norm__240 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_41 = paddle._C_ops.conv2d( + relu_37, parameter_215, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_215, relu_37 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_214, + parameter_213, + parameter_212, + parameter_211, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_211, parameter_212, parameter_213, parameter_214 + + # pd_op.relu: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + relu_38 = paddle._C_ops.relu(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + relu_38, parameter_210, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_210, relu_38 + + # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_209, + parameter_208, + parameter_207, + parameter_206, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_206, parameter_207, parameter_208, parameter_209 + + # pd_op.add: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32, -1x1024x-1x-1xf32) + add_12 = paddle._C_ops.add(batch_norm__252, relu_36) + del batch_norm__252, relu_36 + + # pd_op.relu: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) + relu_39 = paddle._C_ops.relu(add_12) + del add_12 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x1024x-1x-1xf32, 512x1024x1x1xf32) + conv2d_43 = paddle._C_ops.conv2d( + relu_39, parameter_205, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_205 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_204, + parameter_203, + parameter_202, + parameter_201, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_201, parameter_202, parameter_203, parameter_204 + + # pd_op.relu: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + relu_40 = paddle._C_ops.relu(batch_norm__258) + del batch_norm__258 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32, 512x512x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + relu_40, parameter_200, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_200, relu_40 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + 
batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_199, + parameter_198, + parameter_197, + parameter_196, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_196, parameter_197, parameter_198, parameter_199 + + # pd_op.relu: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + relu_41 = paddle._C_ops.relu(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (-1x2048x-1x-1xf32) <- (-1x512x-1x-1xf32, 2048x512x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + relu_41, parameter_195, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_195, relu_41 + + # pd_op.batch_norm_: (-1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (-1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_194, + parameter_193, + parameter_192, + parameter_191, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_191, parameter_192, parameter_193, parameter_194 + + # pd_op.conv2d: (-1x2048x-1x-1xf32) <- (-1x1024x-1x-1xf32, 2048x1024x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + relu_39, parameter_190, [2, 2], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_190, relu_39 + + # pd_op.batch_norm_: (-1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (-1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_189, + parameter_188, + parameter_187, + parameter_186, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_186, parameter_187, parameter_188, parameter_189 + + # pd_op.add: (-1x2048x-1x-1xf32) <- (-1x2048x-1x-1xf32, -1x2048x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__270, batch_norm__276) + del batch_norm__270, batch_norm__276 + + # pd_op.relu: (-1x2048x-1x-1xf32) <- (-1x2048x-1x-1xf32) + relu_42 = paddle._C_ops.relu(add_13) + del add_13 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x2048x-1x-1xf32, 512x2048x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + relu_42, parameter_185, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_185 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_184, + parameter_183, + parameter_182, + parameter_181, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_181, parameter_182, parameter_183, parameter_184 
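+
+ # Note: the conv2d / batch_norm / relu groups traced above appear to follow a
+ # ResNet-style bottleneck layout: a 1x1 conv reduces channels (e.g. 2048 -> 512),
+ # a 3x3 conv processes spatially, and a 1x1 conv expands back, each wrapped in
+ # batch_norm; the skip path is either the block input itself or, when stride or
+ # width changes, a separate 1x1 conv + batch_norm (e.g. conv2d_46), and each block
+ # ends with add(main, shortcut) followed by relu. The (lambda x, f: f(x)) wrapper
+ # around paddle._C_ops.batch_norm only normalizes its return value, padding a
+ # single-tensor result out to the 6-tuple that the generated unpacking expects.
+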
+ + # pd_op.relu: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + relu_43 = paddle._C_ops.relu(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32, 512x512x3x3xf32) + conv2d_48 = paddle._C_ops.conv2d( + relu_43, parameter_180, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_180, relu_43 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_179, + parameter_178, + parameter_177, + parameter_176, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_176, parameter_177, parameter_178, parameter_179 + + # pd_op.relu: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + relu_44 = paddle._C_ops.relu(batch_norm__288) + del batch_norm__288 + + # pd_op.conv2d: (-1x2048x-1x-1xf32) <- (-1x512x-1x-1xf32, 2048x512x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + relu_44, parameter_175, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_175, relu_44 + + # pd_op.batch_norm_: (-1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (-1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_174, + parameter_173, + parameter_172, + parameter_171, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_171, parameter_172, parameter_173, parameter_174 + + # pd_op.add: (-1x2048x-1x-1xf32) <- (-1x2048x-1x-1xf32, -1x2048x-1x-1xf32) + add_14 = paddle._C_ops.add(batch_norm__294, relu_42) + del batch_norm__294, relu_42 + + # pd_op.relu: (-1x2048x-1x-1xf32) <- (-1x2048x-1x-1xf32) + relu_45 = paddle._C_ops.relu(add_14) + del add_14 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x2048x-1x-1xf32, 512x2048x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + relu_45, parameter_170, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_170 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_169, + parameter_168, + parameter_167, + parameter_166, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_166, parameter_167, parameter_168, parameter_169 + + # pd_op.relu: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + relu_46 = paddle._C_ops.relu(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32, 512x512x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + relu_46, parameter_165, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_165, relu_46 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 
512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_164, + parameter_163, + parameter_162, + parameter_161, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_161, parameter_162, parameter_163, parameter_164 + + # pd_op.relu: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + relu_47 = paddle._C_ops.relu(batch_norm__306) + del batch_norm__306 + + # pd_op.conv2d: (-1x2048x-1x-1xf32) <- (-1x512x-1x-1xf32, 2048x512x1x1xf32) + conv2d_52 = paddle._C_ops.conv2d( + relu_47, parameter_160, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_160, relu_47 + + # pd_op.batch_norm_: (-1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (-1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_159, + parameter_158, + parameter_157, + parameter_156, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_156, parameter_157, parameter_158, parameter_159 + + # pd_op.add: (-1x2048x-1x-1xf32) <- (-1x2048x-1x-1xf32, -1x2048x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__312, relu_45) + del batch_norm__312, relu_45 + + # pd_op.relu: (-1x2048x-1x-1xf32) <- (-1x2048x-1x-1xf32) + relu_48 = paddle._C_ops.relu(add_15) + del add_15 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x2048x-1x-1xf32, 256x2048x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + relu_48, parameter_155, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_155, relu_48 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_154, full_int_array_1) + del full_int_array_1, parameter_154 + + # pd_op.add: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32, 1x256x1x1xf32) + add_16 = paddle._C_ops.add(conv2d_53, reshape_1) + del conv2d_53, reshape_1 + + # pd_op.shape64: (4xi64) <- (-1x256x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(add_16) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (-1x256x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(add_16) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_1 + + # pd_op.shape64: (4xi64) <- (-1x256x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(add_16) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, 
[0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_2 + + # pd_op.flatten: (-1x256x-1xf32) <- (-1x256x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(add_16, 2, 3) + del add_16 + + # pd_op.transpose: (-1x-1x256xf32) <- (-1x256x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [slice_0, slice_1, slice_2] + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x-1x-1xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_0, stack_0, paddle.float32 + ) + del stack_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.cumsum: (-1x-1x-1xf32) <- (-1x-1x-1xf32, 1xi32) + cumsum_0 = paddle._C_ops.cumsum(full_with_tensor_0, full_1, False, False, False) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.cumsum: (-1x-1x-1xf32) <- (-1x-1x-1xf32, 1xi32) + cumsum_1 = paddle._C_ops.cumsum(full_with_tensor_0, full_2, False, False, False) + del full_with_tensor_0 + + # pd_op.scale: (-1x-1x-1xf32) <- (-1x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cumsum_0, full_0, float("0"), True) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-1] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [2147483647] + + # pd_op.slice: (-1x-1x-1xf32) <- (-1x-1x-1xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + cumsum_0, [1], full_int_array_7, full_int_array_8, [1], [] + ) + del cumsum_0 + + # pd_op.scale: (-1x-1x-1xf32) <- (-1x-1x-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_3, full_0, float("1e-06"), True) + del slice_3 + + # pd_op.divide: (-1x-1x-1xf32) <- (-1x-1x-1xf32, -1x-1x-1xf32) + divide_0 = paddle._C_ops.divide(scale_0, scale_1) + del scale_0, scale_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("6.28319"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x-1xf32) <- (-1x-1x-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(divide_0, full_3, float("0"), True) + del divide_0 + + # pd_op.scale: (-1x-1x-1xf32) <- (-1x-1x-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(cumsum_1, full_0, float("0"), True) + + # pd_op.slice: (-1x-1x-1xf32) <- (-1x-1x-1xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + cumsum_1, [2], full_int_array_7, full_int_array_8, [1], [] + ) + del cumsum_1 + + # pd_op.scale: (-1x-1x-1xf32) <- (-1x-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(slice_4, full_0, float("1e-06"), True) + del slice_4 + + # pd_op.divide: (-1x-1x-1xf32) <- (-1x-1x-1xf32, -1x-1x-1xf32) + divide_1 = paddle._C_ops.divide(scale_3, scale_4) + del scale_3, scale_4 + + # pd_op.scale: (-1x-1x-1xf32) <- (-1x-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(divide_1, full_3, float("0"), True) + del divide_1, full_3 + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("128"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: 
(128xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_4, full_5, full_6, dtype="int64") + del full_4, full_5, full_6 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.floor_divide: (128xi64) <- (128xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(arange_0, full_7) + del arange_0, full_7 + + # pd_op.cast: (128xf32) <- (128xi64) + cast_0 = paddle._C_ops.cast(floor_divide_0, paddle.float32) + del floor_divide_0 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (128xf32) <- (128xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_0, full_8, float("0"), True) + del cast_0, full_8 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.0078125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (128xf32) <- (128xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_9, float("0"), True) + del full_9, scale_6 + + # pd_op.full: (128xf32) <- () + full_10 = paddle._C_ops.full( + [128], + float("10000"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.elementwise_pow: (128xf32) <- (128xf32, 128xf32) + elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_10, scale_7) + del full_10, scale_7 + + # pd_op.unsqueeze: (-1x-1x-1x1xf32) <- (-1x-1x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(scale_5, full_int_array_7) + del scale_5 + + # pd_op.divide: (-1x-1x-1x128xf32) <- (-1x-1x-1x1xf32, 128xf32) + divide_2 = paddle._C_ops.divide(unsqueeze_0, elementwise_pow_0) + del unsqueeze_0 + + # pd_op.unsqueeze: (-1x-1x-1x1xf32) <- (-1x-1x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(scale_2, full_int_array_7) + del scale_2 + + # pd_op.divide: (-1x-1x-1x128xf32) <- (-1x-1x-1x1xf32, 128xf32) + divide_3 = paddle._C_ops.divide(unsqueeze_1, elementwise_pow_0) + del elementwise_pow_0, unsqueeze_1 + + # pd_op.strided_slice: (-1x-1x-1x64xf32) <- (-1x-1x-1x128xf32, 1xi64, 1xi64, 1xi64) + strided_slice_0 = paddle._C_ops.strided_slice( + divide_2, [3], full_int_array_2, full_int_array_8, full_int_array_4 + ) + + # pd_op.sin: (-1x-1x-1x64xf32) <- (-1x-1x-1x64xf32) + sin_0 = paddle._C_ops.sin(strided_slice_0) + del strided_slice_0 + + # pd_op.strided_slice: (-1x-1x-1x64xf32) <- (-1x-1x-1x128xf32, 1xi64, 1xi64, 1xi64) + strided_slice_1 = paddle._C_ops.strided_slice( + divide_2, [3], full_int_array_3, full_int_array_8, full_int_array_4 + ) + del divide_2 + + # pd_op.cos: (-1x-1x-1x64xf32) <- (-1x-1x-1x64xf32) + cos_0 = paddle._C_ops.cos(strided_slice_1) + del strided_slice_1 + + # builtin.combine: ([-1x-1x-1x64xf32, -1x-1x-1x64xf32]) <- (-1x-1x-1x64xf32, -1x-1x-1x64xf32) + combine_1 = [sin_0, cos_0] + del cos_0, sin_0 + + # pd_op.stack: (-1x-1x-1x64x2xf32) <- ([-1x-1x-1x64xf32, -1x-1x-1x64xf32]) + stack_1 = paddle._C_ops.stack(combine_1, 4) + del combine_1 + + # pd_op.flatten: (-1x-1x-1x128xf32) <- (-1x-1x-1x64x2xf32) + flatten_1 = paddle._C_ops.flatten(stack_1, 3, 4) + del stack_1 + + # pd_op.strided_slice: (-1x-1x-1x64xf32) <- (-1x-1x-1x128xf32, 1xi64, 1xi64, 1xi64) + strided_slice_2 = paddle._C_ops.strided_slice( + divide_3, [3], full_int_array_2, full_int_array_8, full_int_array_4 + ) + + # pd_op.sin: (-1x-1x-1x64xf32) <- (-1x-1x-1x64xf32) + sin_1 = paddle._C_ops.sin(strided_slice_2) + del strided_slice_2 + + # pd_op.strided_slice: (-1x-1x-1x64xf32) <- (-1x-1x-1x128xf32, 1xi64, 1xi64, 1xi64) + strided_slice_3 = 
paddle._C_ops.strided_slice( + divide_3, [3], full_int_array_3, full_int_array_8, full_int_array_4 + ) + del divide_3 + + # pd_op.cos: (-1x-1x-1x64xf32) <- (-1x-1x-1x64xf32) + cos_1 = paddle._C_ops.cos(strided_slice_3) + del strided_slice_3 + + # builtin.combine: ([-1x-1x-1x64xf32, -1x-1x-1x64xf32]) <- (-1x-1x-1x64xf32, -1x-1x-1x64xf32) + combine_2 = [sin_1, cos_1] + del cos_1, sin_1 + + # pd_op.stack: (-1x-1x-1x64x2xf32) <- ([-1x-1x-1x64xf32, -1x-1x-1x64xf32]) + stack_2 = paddle._C_ops.stack(combine_2, 4) + del combine_2 + + # pd_op.flatten: (-1x-1x-1x128xf32) <- (-1x-1x-1x64x2xf32) + flatten_2 = paddle._C_ops.flatten(stack_2, 3, 4) + del stack_2 + + # pd_op.full: (1xi32) <- () + full_11 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x-1x128xf32, -1x-1x-1x128xf32]) <- (-1x-1x-1x128xf32, -1x-1x-1x128xf32) + combine_3 = [flatten_2, flatten_1] + del flatten_1, flatten_2 + + # pd_op.concat: (-1x-1x-1x256xf32) <- ([-1x-1x-1x128xf32, -1x-1x-1x128xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_3, full_11) + del combine_3, full_11 + + # pd_op.flatten: (-1x-1x256xf32) <- (-1x-1x-1x256xf32) + flatten_3 = paddle._C_ops.flatten(concat_0, 1, 2) + del concat_0 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_17 = paddle._C_ops.add(transpose_0, flatten_3) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [256] + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_0, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_1, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_0 = paddle._C_ops.matmul(add_17, slice_5, False, False) + del slice_5 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_18 = paddle._C_ops.add(matmul_0, slice_6) + del matmul_0, slice_6 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_10 = [0, 0, 8, 32] + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_18, full_int_array_10) + del add_18 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_1 = paddle._C_ops.transpose(reshape_2, [0, 2, 1, 3]) + del reshape_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_11 = [512] + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_0, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_1, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_1 = paddle._C_ops.matmul(add_17, slice_7, False, False) + del add_17, slice_7 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_19 = paddle._C_ops.add(matmul_1, slice_8) + del matmul_1, slice_8 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(add_19, full_int_array_10) + del add_19 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_2 = paddle._C_ops.transpose(reshape_3, [0, 2, 1, 3]) + del reshape_3 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_0, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_0 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 
1xi64) + slice_10 = paddle._C_ops.slice( + data_1, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_1 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_2 = paddle._C_ops.matmul(transpose_0, slice_9, False, False) + del slice_9 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_20 = paddle._C_ops.add(matmul_2, slice_10) + del matmul_2, slice_10 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_20, full_int_array_10) + del add_20 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_3 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x-1x32xf32) + matmul_3 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + del transpose_1, transpose_2 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(matmul_3, full_12, float("0"), True) + del matmul_3 + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_0 = paddle._C_ops.softmax(scale_8, -1) + del scale_8 + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_0 + + # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) + matmul_4 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + del dropout_0, transpose_3 + + # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) + transpose_4 = paddle._C_ops.transpose(matmul_4, [0, 2, 1, 3]) + del matmul_4 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_3 = paddle._C_ops.shape64(transpose_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_3 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_4 = paddle._C_ops.shape64(transpose_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_4 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_12 = [0, 0, 256] + + # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(transpose_4, full_int_array_12) + del transpose_4 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_5 = paddle._C_ops.matmul(reshape_5, parameter_153, False, False) + del parameter_153, reshape_5 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_21 = paddle._C_ops.add(matmul_5, parameter_152) + del matmul_5, parameter_152 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_21, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_21 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_22 = paddle._C_ops.add(transpose_0, dropout_2) + del dropout_2, transpose_0 + 
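+
+ # Note: the ops since the flatten/transpose of add_16 trace what looks like a
+ # DETR-style transformer encoder self-attention sublayer. The fused 256x768
+ # weight (data_0) and 768 bias (data_1) are sliced into Q/K/V projections; the
+ # sinusoidal position embedding flatten_3 (built from cumsum over a tensor of
+ # ones, normalized, scaled by 2*pi = 6.28319 and passed through sin/cos over
+ # 10000**(2i/128)) is added to queries and keys but not to values; heads are
+ # reshaped to 8 x 32, scores are scaled by 0.176777 ~= 1/sqrt(32), followed by
+ # softmax, dropout(p=0.1), the 256x256 output projection, another dropout, and
+ # the residual add above; layer_norm and the 256 -> 2048 -> 256 FFN come next.
+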
+ # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_22, parameter_151, parameter_150, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_22, parameter_150, parameter_151 + + # pd_op.matmul: (-1x-1x2048xf32) <- (-1x-1x256xf32, 256x2048xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_0, parameter_149, False, False) + del parameter_149 + + # pd_op.add: (-1x-1x2048xf32) <- (-1x-1x2048xf32, 2048xf32) + add_23 = paddle._C_ops.add(matmul_6, parameter_148) + del matmul_6, parameter_148 + + # pd_op.relu: (-1x-1x2048xf32) <- (-1x-1x2048xf32) + relu_49 = paddle._C_ops.relu(add_23) + del add_23 + + # pd_op.dropout: (-1x-1x2048xf32, -1x-1x2048xui8) <- (-1x-1x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_49, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_49 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x2048xf32, 2048x256xf32) + matmul_7 = paddle._C_ops.matmul(dropout_4, parameter_147, False, False) + del dropout_4, parameter_147 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_24 = paddle._C_ops.add(matmul_7, parameter_146) + del matmul_7, parameter_146 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_24, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_24 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_25 = paddle._C_ops.add(layer_norm_0, dropout_6) + del dropout_6, layer_norm_0 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_25, parameter_145, parameter_144, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_25, parameter_144, parameter_145 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_26 = paddle._C_ops.add(layer_norm_3, flatten_3) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_2, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_3, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_8 = paddle._C_ops.matmul(add_26, slice_13, False, False) + del slice_13 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_27 = paddle._C_ops.add(matmul_8, slice_14) + del matmul_8, slice_14 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_27, full_int_array_10) + del add_27 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_5 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_2, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + data_3, [0], full_int_array_9, 
full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_9 = paddle._C_ops.matmul(add_26, slice_15, False, False) + del add_26, slice_15 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_28 = paddle._C_ops.add(matmul_9, slice_16) + del matmul_9, slice_16 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(add_28, full_int_array_10) + del add_28 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_6 = paddle._C_ops.transpose(reshape_7, [0, 2, 1, 3]) + del reshape_7 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_2, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_2 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_3, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_3 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_3, slice_17, False, False) + del slice_17 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_29 = paddle._C_ops.add(matmul_10, slice_18) + del matmul_10, slice_18 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_29, full_int_array_10) + del add_29 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_7 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x-1x32xf32) + matmul_11 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + del transpose_5, transpose_6 + + # pd_op.scale: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(matmul_11, full_12, float("0"), True) + del matmul_11 + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_1 = paddle._C_ops.softmax(scale_9, -1) + del scale_9 + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_1 + + # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) + matmul_12 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + del dropout_8, transpose_7 + + # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) + transpose_8 = paddle._C_ops.transpose(matmul_12, [0, 2, 1, 3]) + del matmul_12 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_5 = paddle._C_ops.shape64(transpose_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_5 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_6 = paddle._C_ops.shape64(transpose_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_6 + + # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(transpose_8, full_int_array_12) + del transpose_8 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_13 = paddle._C_ops.matmul(reshape_9, parameter_143, False, False) + del parameter_143, reshape_9 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_30 = 
paddle._C_ops.add(matmul_13, parameter_142) + del matmul_13, parameter_142 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_30, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_30 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_31 = paddle._C_ops.add(layer_norm_3, dropout_10) + del dropout_10, layer_norm_3 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_31, parameter_141, parameter_140, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_31, parameter_140, parameter_141 + + # pd_op.matmul: (-1x-1x2048xf32) <- (-1x-1x256xf32, 256x2048xf32) + matmul_14 = paddle._C_ops.matmul(layer_norm_6, parameter_139, False, False) + del parameter_139 + + # pd_op.add: (-1x-1x2048xf32) <- (-1x-1x2048xf32, 2048xf32) + add_32 = paddle._C_ops.add(matmul_14, parameter_138) + del matmul_14, parameter_138 + + # pd_op.relu: (-1x-1x2048xf32) <- (-1x-1x2048xf32) + relu_50 = paddle._C_ops.relu(add_32) + del add_32 + + # pd_op.dropout: (-1x-1x2048xf32, -1x-1x2048xui8) <- (-1x-1x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_50, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_50 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x2048xf32, 2048x256xf32) + matmul_15 = paddle._C_ops.matmul(dropout_12, parameter_137, False, False) + del dropout_12, parameter_137 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_33 = paddle._C_ops.add(matmul_15, parameter_136) + del matmul_15, parameter_136 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_33, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_33 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_34 = paddle._C_ops.add(layer_norm_6, dropout_14) + del dropout_14, layer_norm_6 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_34, parameter_135, parameter_134, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_34, parameter_134, parameter_135 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_35 = paddle._C_ops.add(layer_norm_9, flatten_3) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_4, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_5, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_16 = paddle._C_ops.matmul(add_35, slice_21, False, False) + del slice_21 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_36 = paddle._C_ops.add(matmul_16, slice_22) + del matmul_16, slice_22 + + # 
pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_36, full_int_array_10) + del add_36 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_9 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + data_4, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_24 = paddle._C_ops.slice( + data_5, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_17 = paddle._C_ops.matmul(add_35, slice_23, False, False) + del add_35, slice_23 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_37 = paddle._C_ops.add(matmul_17, slice_24) + del matmul_17, slice_24 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(add_37, full_int_array_10) + del add_37 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_10 = paddle._C_ops.transpose(reshape_11, [0, 2, 1, 3]) + del reshape_11 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_25 = paddle._C_ops.slice( + data_4, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_4 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_26 = paddle._C_ops.slice( + data_5, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_5 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_18 = paddle._C_ops.matmul(layer_norm_9, slice_25, False, False) + del slice_25 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_38 = paddle._C_ops.add(matmul_18, slice_26) + del matmul_18, slice_26 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_38, full_int_array_10) + del add_38 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_11 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x-1x32xf32) + matmul_19 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + del transpose_10, transpose_9 + + # pd_op.scale: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(matmul_19, full_12, float("0"), True) + del matmul_19 + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_2 = paddle._C_ops.softmax(scale_10, -1) + del scale_10 + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_2 + + # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) + matmul_20 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + del dropout_16, transpose_11 + + # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) + transpose_12 = paddle._C_ops.transpose(matmul_20, [0, 2, 1, 3]) + del matmul_20 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_7 = paddle._C_ops.shape64(transpose_12) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_27 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_7 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_8 = paddle._C_ops.shape64(transpose_12) + 
+ # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_28 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_8 + + # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_12, full_int_array_12) + del transpose_12 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_21 = paddle._C_ops.matmul(reshape_13, parameter_133, False, False) + del parameter_133, reshape_13 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_39 = paddle._C_ops.add(matmul_21, parameter_132) + del matmul_21, parameter_132 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_39, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_39 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_40 = paddle._C_ops.add(layer_norm_9, dropout_18) + del dropout_18, layer_norm_9 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_40, parameter_131, parameter_130, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_40, parameter_130, parameter_131 + + # pd_op.matmul: (-1x-1x2048xf32) <- (-1x-1x256xf32, 256x2048xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_12, parameter_129, False, False) + del parameter_129 + + # pd_op.add: (-1x-1x2048xf32) <- (-1x-1x2048xf32, 2048xf32) + add_41 = paddle._C_ops.add(matmul_22, parameter_128) + del matmul_22, parameter_128 + + # pd_op.relu: (-1x-1x2048xf32) <- (-1x-1x2048xf32) + relu_51 = paddle._C_ops.relu(add_41) + del add_41 + + # pd_op.dropout: (-1x-1x2048xf32, -1x-1x2048xui8) <- (-1x-1x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_51, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_51 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x2048xf32, 2048x256xf32) + matmul_23 = paddle._C_ops.matmul(dropout_20, parameter_127, False, False) + del dropout_20, parameter_127 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_42 = paddle._C_ops.add(matmul_23, parameter_126) + del matmul_23, parameter_126 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_42, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_42 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_43 = paddle._C_ops.add(layer_norm_12, dropout_22) + del dropout_22, layer_norm_12 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_43, parameter_125, parameter_124, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_43, parameter_124, parameter_125 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_44 = paddle._C_ops.add(layer_norm_15, flatten_3) + + # pd_op.slice: (256x256xf32) <- 
(256x768xf32, 1xi64, 1xi64) + slice_29 = paddle._C_ops.slice( + data_6, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_30 = paddle._C_ops.slice( + data_7, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_24 = paddle._C_ops.matmul(add_44, slice_29, False, False) + del slice_29 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_45 = paddle._C_ops.add(matmul_24, slice_30) + del matmul_24, slice_30 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_45, full_int_array_10) + del add_45 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_13 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_31 = paddle._C_ops.slice( + data_6, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_32 = paddle._C_ops.slice( + data_7, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_25 = paddle._C_ops.matmul(add_44, slice_31, False, False) + del add_44, slice_31 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_46 = paddle._C_ops.add(matmul_25, slice_32) + del matmul_25, slice_32 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(add_46, full_int_array_10) + del add_46 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_14 = paddle._C_ops.transpose(reshape_15, [0, 2, 1, 3]) + del reshape_15 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_33 = paddle._C_ops.slice( + data_6, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_6 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_34 = paddle._C_ops.slice( + data_7, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_7 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_26 = paddle._C_ops.matmul(layer_norm_15, slice_33, False, False) + del slice_33 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_47 = paddle._C_ops.add(matmul_26, slice_34) + del matmul_26, slice_34 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(add_47, full_int_array_10) + del add_47 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_15 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) + del reshape_16 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x-1x32xf32) + matmul_27 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + del transpose_13, transpose_14 + + # pd_op.scale: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(matmul_27, full_12, float("0"), True) + del matmul_27 + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_3 = paddle._C_ops.softmax(scale_11, -1) + del scale_11 + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_3 + + # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) + matmul_28 = 
paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + del dropout_24, transpose_15 + + # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) + transpose_16 = paddle._C_ops.transpose(matmul_28, [0, 2, 1, 3]) + del matmul_28 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_9 = paddle._C_ops.shape64(transpose_16) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_35 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_9 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_10 = paddle._C_ops.shape64(transpose_16) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_36 = paddle._C_ops.slice( + shape64_10, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_10 + + # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) + reshape_17 = paddle._C_ops.reshape(transpose_16, full_int_array_12) + del transpose_16 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_29 = paddle._C_ops.matmul(reshape_17, parameter_123, False, False) + del parameter_123, reshape_17 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_48 = paddle._C_ops.add(matmul_29, parameter_122) + del matmul_29, parameter_122 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_48, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_48 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_49 = paddle._C_ops.add(layer_norm_15, dropout_26) + del dropout_26, layer_norm_15 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_49, parameter_121, parameter_120, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_49, parameter_120, parameter_121 + + # pd_op.matmul: (-1x-1x2048xf32) <- (-1x-1x256xf32, 256x2048xf32) + matmul_30 = paddle._C_ops.matmul(layer_norm_18, parameter_119, False, False) + del parameter_119 + + # pd_op.add: (-1x-1x2048xf32) <- (-1x-1x2048xf32, 2048xf32) + add_50 = paddle._C_ops.add(matmul_30, parameter_118) + del matmul_30, parameter_118 + + # pd_op.relu: (-1x-1x2048xf32) <- (-1x-1x2048xf32) + relu_52 = paddle._C_ops.relu(add_50) + del add_50 + + # pd_op.dropout: (-1x-1x2048xf32, -1x-1x2048xui8) <- (-1x-1x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_52, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_52 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x2048xf32, 2048x256xf32) + matmul_31 = paddle._C_ops.matmul(dropout_28, parameter_117, False, False) + del dropout_28, parameter_117 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_51 = paddle._C_ops.add(matmul_31, parameter_116) + del matmul_31, parameter_116 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_51, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_51 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_52 = 
paddle._C_ops.add(layer_norm_18, dropout_30) + del dropout_30, layer_norm_18 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_52, parameter_115, parameter_114, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_52, parameter_114, parameter_115 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_53 = paddle._C_ops.add(layer_norm_21, flatten_3) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_37 = paddle._C_ops.slice( + data_8, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_38 = paddle._C_ops.slice( + data_9, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_32 = paddle._C_ops.matmul(add_53, slice_37, False, False) + del slice_37 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_54 = paddle._C_ops.add(matmul_32, slice_38) + del matmul_32, slice_38 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(add_54, full_int_array_10) + del add_54 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_17 = paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) + del reshape_18 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_39 = paddle._C_ops.slice( + data_8, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_40 = paddle._C_ops.slice( + data_9, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_33 = paddle._C_ops.matmul(add_53, slice_39, False, False) + del add_53, slice_39 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_55 = paddle._C_ops.add(matmul_33, slice_40) + del matmul_33, slice_40 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(add_55, full_int_array_10) + del add_55 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_18 = paddle._C_ops.transpose(reshape_19, [0, 2, 1, 3]) + del reshape_19 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_41 = paddle._C_ops.slice( + data_8, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_8 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_42 = paddle._C_ops.slice( + data_9, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_9 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_34 = paddle._C_ops.matmul(layer_norm_21, slice_41, False, False) + del slice_41 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_56 = paddle._C_ops.add(matmul_34, slice_42) + del matmul_34, slice_42 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_56, full_int_array_10) + del add_56 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_19 = paddle._C_ops.transpose(reshape_20, [0, 2, 1, 3]) + del reshape_20 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x-1x32xf32) + matmul_35 = paddle._C_ops.matmul(transpose_17, transpose_18, False, True) + del transpose_17, transpose_18 + + # pd_op.scale: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32, 1xf32) + scale_12 = 
paddle._C_ops.scale(matmul_35, full_12, float("0"), True) + del matmul_35 + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_4 = paddle._C_ops.softmax(scale_12, -1) + del scale_12 + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_32, dropout_33 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_4, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_4 + + # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) + matmul_36 = paddle._C_ops.matmul(dropout_32, transpose_19, False, False) + del dropout_32, transpose_19 + + # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) + transpose_20 = paddle._C_ops.transpose(matmul_36, [0, 2, 1, 3]) + del matmul_36 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_11 = paddle._C_ops.shape64(transpose_20) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_43 = paddle._C_ops.slice( + shape64_11, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_11 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_12 = paddle._C_ops.shape64(transpose_20) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_44 = paddle._C_ops.slice( + shape64_12, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_12 + + # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(transpose_20, full_int_array_12) + del transpose_20 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_37 = paddle._C_ops.matmul(reshape_21, parameter_113, False, False) + del parameter_113, reshape_21 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_57 = paddle._C_ops.add(matmul_37, parameter_112) + del matmul_37, parameter_112 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_34, dropout_35 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_57, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_57 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_58 = paddle._C_ops.add(layer_norm_21, dropout_34) + del dropout_34, layer_norm_21 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_24, layer_norm_25, layer_norm_26 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_58, parameter_111, parameter_110, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_58, parameter_110, parameter_111 + + # pd_op.matmul: (-1x-1x2048xf32) <- (-1x-1x256xf32, 256x2048xf32) + matmul_38 = paddle._C_ops.matmul(layer_norm_24, parameter_109, False, False) + del parameter_109 + + # pd_op.add: (-1x-1x2048xf32) <- (-1x-1x2048xf32, 2048xf32) + add_59 = paddle._C_ops.add(matmul_38, parameter_108) + del matmul_38, parameter_108 + + # pd_op.relu: (-1x-1x2048xf32) <- (-1x-1x2048xf32) + relu_53 = paddle._C_ops.relu(add_59) + del add_59 + + # pd_op.dropout: (-1x-1x2048xf32, -1x-1x2048xui8) <- (-1x-1x2048xf32, None, 1xf32) + dropout_36, dropout_37 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_53, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_53 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x2048xf32, 2048x256xf32) + matmul_39 = paddle._C_ops.matmul(dropout_36, 
parameter_107, False, False) + del dropout_36, parameter_107 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_60 = paddle._C_ops.add(matmul_39, parameter_106) + del matmul_39, parameter_106 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_38, dropout_39 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_60, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_60 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_61 = paddle._C_ops.add(layer_norm_24, dropout_38) + del dropout_38, layer_norm_24 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_27, layer_norm_28, layer_norm_29 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_61, parameter_105, parameter_104, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_61, parameter_104, parameter_105 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_62 = paddle._C_ops.add(layer_norm_27, flatten_3) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_45 = paddle._C_ops.slice( + data_10, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_46 = paddle._C_ops.slice( + data_11, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_40 = paddle._C_ops.matmul(add_62, slice_45, False, False) + del slice_45 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_63 = paddle._C_ops.add(matmul_40, slice_46) + del matmul_40, slice_46 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_22 = paddle._C_ops.reshape(add_63, full_int_array_10) + del add_63 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_21 = paddle._C_ops.transpose(reshape_22, [0, 2, 1, 3]) + del reshape_22 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_47 = paddle._C_ops.slice( + data_10, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_48 = paddle._C_ops.slice( + data_11, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_41 = paddle._C_ops.matmul(add_62, slice_47, False, False) + del add_62, slice_47 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_64 = paddle._C_ops.add(matmul_41, slice_48) + del matmul_41, slice_48 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_23 = paddle._C_ops.reshape(add_64, full_int_array_10) + del add_64 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_22 = paddle._C_ops.transpose(reshape_23, [0, 2, 1, 3]) + del reshape_23 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_49 = paddle._C_ops.slice( + data_10, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_10 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_50 = paddle._C_ops.slice( + data_11, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_11 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_42 = paddle._C_ops.matmul(layer_norm_27, slice_49, False, False) + del slice_49 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_65 = 
paddle._C_ops.add(matmul_42, slice_50) + del matmul_42, slice_50 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_24 = paddle._C_ops.reshape(add_65, full_int_array_10) + del add_65 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_23 = paddle._C_ops.transpose(reshape_24, [0, 2, 1, 3]) + del reshape_24 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x-1x32xf32) + matmul_43 = paddle._C_ops.matmul(transpose_21, transpose_22, False, True) + del transpose_21, transpose_22 + + # pd_op.scale: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32, 1xf32) + scale_13 = paddle._C_ops.scale(matmul_43, full_12, float("0"), True) + del matmul_43 + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_5 = paddle._C_ops.softmax(scale_13, -1) + del scale_13 + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_40, dropout_41 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_5, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_5 + + # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) + matmul_44 = paddle._C_ops.matmul(dropout_40, transpose_23, False, False) + del dropout_40, transpose_23 + + # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) + transpose_24 = paddle._C_ops.transpose(matmul_44, [0, 2, 1, 3]) + del matmul_44 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_13 = paddle._C_ops.shape64(transpose_24) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_51 = paddle._C_ops.slice( + shape64_13, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_13 + + # pd_op.shape64: (4xi64) <- (-1x-1x8x32xf32) + shape64_14 = paddle._C_ops.shape64(transpose_24) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_52 = paddle._C_ops.slice( + shape64_14, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_14 + + # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) + reshape_25 = paddle._C_ops.reshape(transpose_24, full_int_array_12) + del transpose_24 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_45 = paddle._C_ops.matmul(reshape_25, parameter_103, False, False) + del parameter_103, reshape_25 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_66 = paddle._C_ops.add(matmul_45, parameter_102) + del matmul_45, parameter_102 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_42, dropout_43 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_66, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_66 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_67 = paddle._C_ops.add(layer_norm_27, dropout_42) + del dropout_42, layer_norm_27 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_30, layer_norm_31, layer_norm_32 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_67, parameter_101, parameter_100, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_67, parameter_100, parameter_101 + + # pd_op.matmul: (-1x-1x2048xf32) <- (-1x-1x256xf32, 256x2048xf32) + matmul_46 = paddle._C_ops.matmul(layer_norm_30, parameter_99, False, False) + del parameter_99 + + # pd_op.add: (-1x-1x2048xf32) <- (-1x-1x2048xf32, 2048xf32) + add_68 = 
paddle._C_ops.add(matmul_46, parameter_98) + del matmul_46, parameter_98 + + # pd_op.relu: (-1x-1x2048xf32) <- (-1x-1x2048xf32) + relu_54 = paddle._C_ops.relu(add_68) + del add_68 + + # pd_op.dropout: (-1x-1x2048xf32, -1x-1x2048xui8) <- (-1x-1x2048xf32, None, 1xf32) + dropout_44, dropout_45 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_54, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_54 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x2048xf32, 2048x256xf32) + matmul_47 = paddle._C_ops.matmul(dropout_44, parameter_97, False, False) + del dropout_44, parameter_97 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_69 = paddle._C_ops.add(matmul_47, parameter_96) + del matmul_47, parameter_96 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_46, dropout_47 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_69, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_69 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_70 = paddle._C_ops.add(layer_norm_30, dropout_46) + del dropout_46, layer_norm_30 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_33, layer_norm_34, layer_norm_35 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_70, parameter_95, parameter_94, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_70, parameter_94, parameter_95 + + # pd_op.unsqueeze: (1x100x256xf32) <- (100x256xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_12, full_int_array_2) + del data_12 + + # pd_op.full: (xi64) <- () + full_14 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_4 = [slice_0, full_14, full_14] + del full_14 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.tile: (-1x100x256xf32) <- (1x100x256xf32, 3xi64) + tile_1 = paddle._C_ops.tile(unsqueeze_2, stack_3) + del stack_3, unsqueeze_2 + + # pd_op.full: (1xf32) <- () + full_15 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (-1x100x256xf32) <- (-1x100x256xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + tile_1, full_15, paddle.float32, paddle.framework._current_expected_place() + ) + del full_15 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_71 = paddle._C_ops.add(full_like_0, tile_1) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_53 = paddle._C_ops.slice( + data_13, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_54 = paddle._C_ops.slice( + data_14, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_48 = paddle._C_ops.matmul(add_71, slice_53, False, False) + del slice_53 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_72 = paddle._C_ops.add(matmul_48, slice_54) + del matmul_48, slice_54 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_26 = paddle._C_ops.reshape(add_72, full_int_array_10) + del add_72 + + # pd_op.transpose: (-1x8x100x32xf32) <- 
(-1x100x8x32xf32) + transpose_25 = paddle._C_ops.transpose(reshape_26, [0, 2, 1, 3]) + del reshape_26 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_55 = paddle._C_ops.slice( + data_13, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_56 = paddle._C_ops.slice( + data_14, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_49 = paddle._C_ops.matmul(add_71, slice_55, False, False) + del add_71, slice_55 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_73 = paddle._C_ops.add(matmul_49, slice_56) + del matmul_49, slice_56 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_27 = paddle._C_ops.reshape(add_73, full_int_array_10) + del add_73 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_26 = paddle._C_ops.transpose(reshape_27, [0, 2, 1, 3]) + del reshape_27 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_57 = paddle._C_ops.slice( + data_13, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_13 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_58 = paddle._C_ops.slice( + data_14, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_14 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_50 = paddle._C_ops.matmul(full_like_0, slice_57, False, False) + del slice_57 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_74 = paddle._C_ops.add(matmul_50, slice_58) + del matmul_50, slice_58 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_28 = paddle._C_ops.reshape(add_74, full_int_array_10) + del add_74 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_27 = paddle._C_ops.transpose(reshape_28, [0, 2, 1, 3]) + del reshape_28 + + # pd_op.matmul: (-1x8x100x100xf32) <- (-1x8x100x32xf32, -1x8x100x32xf32) + matmul_51 = paddle._C_ops.matmul(transpose_25, transpose_26, False, True) + del transpose_25, transpose_26 + + # pd_op.scale: (-1x8x100x100xf32) <- (-1x8x100x100xf32, 1xf32) + scale_14 = paddle._C_ops.scale(matmul_51, full_12, float("0"), True) + del matmul_51 + + # pd_op.softmax: (-1x8x100x100xf32) <- (-1x8x100x100xf32) + softmax_6 = paddle._C_ops.softmax(scale_14, -1) + del scale_14 + + # pd_op.dropout: (-1x8x100x100xf32, -1x8x100x100xui8) <- (-1x8x100x100xf32, None, 1xf32) + dropout_48, dropout_49 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_6, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_6 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x100xf32, -1x8x100x32xf32) + matmul_52 = paddle._C_ops.matmul(dropout_48, transpose_27, False, False) + del dropout_48, transpose_27 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + transpose_28 = paddle._C_ops.transpose(matmul_52, [0, 2, 1, 3]) + del matmul_52 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_15 = paddle._C_ops.shape64(transpose_28) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_59 = paddle._C_ops.slice( + shape64_15, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_15 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_29 = paddle._C_ops.reshape(transpose_28, full_int_array_12) + del transpose_28 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + 
matmul_53 = paddle._C_ops.matmul(reshape_29, parameter_93, False, False) + del parameter_93, reshape_29 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_75 = paddle._C_ops.add(matmul_53, parameter_92) + del matmul_53, parameter_92 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_50, dropout_51 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_75, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_75 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_76 = paddle._C_ops.add(full_like_0, dropout_50) + del dropout_50, full_like_0 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_36, layer_norm_37, layer_norm_38 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_76, parameter_91, parameter_90, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_76, parameter_90, parameter_91 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_77 = paddle._C_ops.add(layer_norm_36, tile_1) + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_78 = paddle._C_ops.add(layer_norm_33, flatten_3) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_60 = paddle._C_ops.slice( + data_15, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_61 = paddle._C_ops.slice( + data_16, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_54 = paddle._C_ops.matmul(add_77, slice_60, False, False) + del add_77, slice_60 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_79 = paddle._C_ops.add(matmul_54, slice_61) + del matmul_54, slice_61 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_30 = paddle._C_ops.reshape(add_79, full_int_array_10) + del add_79 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_29 = paddle._C_ops.transpose(reshape_30, [0, 2, 1, 3]) + del reshape_30 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_62 = paddle._C_ops.slice( + data_15, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_63 = paddle._C_ops.slice( + data_16, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_55 = paddle._C_ops.matmul(add_78, slice_62, False, False) + del add_78, slice_62 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_80 = paddle._C_ops.add(matmul_55, slice_63) + del matmul_55, slice_63 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_31 = paddle._C_ops.reshape(add_80, full_int_array_10) + del add_80 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_30 = paddle._C_ops.transpose(reshape_31, [0, 2, 1, 3]) + del reshape_31 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_64 = paddle._C_ops.slice( + data_15, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_15 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_65 = paddle._C_ops.slice( + data_16, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_16 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 
256x256xf32) + matmul_56 = paddle._C_ops.matmul(layer_norm_33, slice_64, False, False) + del slice_64 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_81 = paddle._C_ops.add(matmul_56, slice_65) + del matmul_56, slice_65 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_32 = paddle._C_ops.reshape(add_81, full_int_array_10) + del add_81 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_31 = paddle._C_ops.transpose(reshape_32, [0, 2, 1, 3]) + del reshape_32 + + # pd_op.matmul: (-1x8x100x-1xf32) <- (-1x8x100x32xf32, -1x8x-1x32xf32) + matmul_57 = paddle._C_ops.matmul(transpose_29, transpose_30, False, True) + del transpose_29, transpose_30 + + # pd_op.scale: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(matmul_57, full_12, float("0"), True) + del matmul_57 + + # pd_op.softmax: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32) + softmax_7 = paddle._C_ops.softmax(scale_15, -1) + del scale_15 + + # pd_op.dropout: (-1x8x100x-1xf32, -1x8x100x-1xui8) <- (-1x8x100x-1xf32, None, 1xf32) + dropout_52, dropout_53 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_7, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_7 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x-1xf32, -1x8x-1x32xf32) + matmul_58 = paddle._C_ops.matmul(dropout_52, transpose_31, False, False) + del dropout_52, transpose_31 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + transpose_32 = paddle._C_ops.transpose(matmul_58, [0, 2, 1, 3]) + del matmul_58 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_16 = paddle._C_ops.shape64(transpose_32) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_66 = paddle._C_ops.slice( + shape64_16, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_16 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_33 = paddle._C_ops.reshape(transpose_32, full_int_array_12) + del transpose_32 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_59 = paddle._C_ops.matmul(reshape_33, parameter_89, False, False) + del parameter_89, reshape_33 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_82 = paddle._C_ops.add(matmul_59, parameter_88) + del matmul_59, parameter_88 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_54, dropout_55 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_82, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_82 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_83 = paddle._C_ops.add(layer_norm_36, dropout_54) + del dropout_54, layer_norm_36 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_39, layer_norm_40, layer_norm_41 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_83, parameter_87, parameter_86, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_83, parameter_86, parameter_87 + + # pd_op.matmul: (-1x100x2048xf32) <- (-1x100x256xf32, 256x2048xf32) + matmul_60 = paddle._C_ops.matmul(layer_norm_39, parameter_85, False, False) + del parameter_85 + + # pd_op.add: (-1x100x2048xf32) <- (-1x100x2048xf32, 2048xf32) + add_84 = paddle._C_ops.add(matmul_60, parameter_84) + del matmul_60, parameter_84 + + 
# pd_op.relu: (-1x100x2048xf32) <- (-1x100x2048xf32) + relu_55 = paddle._C_ops.relu(add_84) + del add_84 + + # pd_op.dropout: (-1x100x2048xf32, -1x100x2048xui8) <- (-1x100x2048xf32, None, 1xf32) + dropout_56, dropout_57 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_55, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_55 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x2048xf32, 2048x256xf32) + matmul_61 = paddle._C_ops.matmul(dropout_56, parameter_83, False, False) + del dropout_56, parameter_83 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_85 = paddle._C_ops.add(matmul_61, parameter_82) + del matmul_61, parameter_82 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_58, dropout_59 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_85, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_85 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_86 = paddle._C_ops.add(layer_norm_39, dropout_58) + del dropout_58, layer_norm_39 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_42, layer_norm_43, layer_norm_44 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_86, parameter_81, parameter_80, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_86, parameter_80, parameter_81 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_45, layer_norm_46, layer_norm_47 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + layer_norm_42, parameter_79, parameter_78, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_87 = paddle._C_ops.add(layer_norm_42, tile_1) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_67 = paddle._C_ops.slice( + data_17, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_68 = paddle._C_ops.slice( + data_18, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_62 = paddle._C_ops.matmul(add_87, slice_67, False, False) + del slice_67 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_88 = paddle._C_ops.add(matmul_62, slice_68) + del matmul_62, slice_68 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_34 = paddle._C_ops.reshape(add_88, full_int_array_10) + del add_88 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_33 = paddle._C_ops.transpose(reshape_34, [0, 2, 1, 3]) + del reshape_34 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_69 = paddle._C_ops.slice( + data_17, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_70 = paddle._C_ops.slice( + data_18, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_63 = paddle._C_ops.matmul(add_87, slice_69, False, False) + del add_87, slice_69 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_89 = paddle._C_ops.add(matmul_63, 
slice_70) + del matmul_63, slice_70 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_35 = paddle._C_ops.reshape(add_89, full_int_array_10) + del add_89 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_34 = paddle._C_ops.transpose(reshape_35, [0, 2, 1, 3]) + del reshape_35 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_71 = paddle._C_ops.slice( + data_17, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_17 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_72 = paddle._C_ops.slice( + data_18, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_18 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_64 = paddle._C_ops.matmul(layer_norm_42, slice_71, False, False) + del slice_71 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_90 = paddle._C_ops.add(matmul_64, slice_72) + del matmul_64, slice_72 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_36 = paddle._C_ops.reshape(add_90, full_int_array_10) + del add_90 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_35 = paddle._C_ops.transpose(reshape_36, [0, 2, 1, 3]) + del reshape_36 + + # pd_op.matmul: (-1x8x100x100xf32) <- (-1x8x100x32xf32, -1x8x100x32xf32) + matmul_65 = paddle._C_ops.matmul(transpose_33, transpose_34, False, True) + del transpose_33, transpose_34 + + # pd_op.scale: (-1x8x100x100xf32) <- (-1x8x100x100xf32, 1xf32) + scale_16 = paddle._C_ops.scale(matmul_65, full_12, float("0"), True) + del matmul_65 + + # pd_op.softmax: (-1x8x100x100xf32) <- (-1x8x100x100xf32) + softmax_8 = paddle._C_ops.softmax(scale_16, -1) + del scale_16 + + # pd_op.dropout: (-1x8x100x100xf32, -1x8x100x100xui8) <- (-1x8x100x100xf32, None, 1xf32) + dropout_60, dropout_61 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_8, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_8 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x100xf32, -1x8x100x32xf32) + matmul_66 = paddle._C_ops.matmul(dropout_60, transpose_35, False, False) + del dropout_60, transpose_35 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + transpose_36 = paddle._C_ops.transpose(matmul_66, [0, 2, 1, 3]) + del matmul_66 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_17 = paddle._C_ops.shape64(transpose_36) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_73 = paddle._C_ops.slice( + shape64_17, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_17 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_37 = paddle._C_ops.reshape(transpose_36, full_int_array_12) + del transpose_36 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_67 = paddle._C_ops.matmul(reshape_37, parameter_77, False, False) + del parameter_77, reshape_37 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_91 = paddle._C_ops.add(matmul_67, parameter_76) + del matmul_67, parameter_76 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_62, dropout_63 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_91, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_91 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_92 = paddle._C_ops.add(layer_norm_42, 
dropout_62) + del dropout_62, layer_norm_42 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_48, layer_norm_49, layer_norm_50 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_92, parameter_75, parameter_74, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_92, parameter_74, parameter_75 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_93 = paddle._C_ops.add(layer_norm_48, tile_1) + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_94 = paddle._C_ops.add(layer_norm_33, flatten_3) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_74 = paddle._C_ops.slice( + data_19, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_75 = paddle._C_ops.slice( + data_20, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_68 = paddle._C_ops.matmul(add_93, slice_74, False, False) + del add_93, slice_74 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_95 = paddle._C_ops.add(matmul_68, slice_75) + del matmul_68, slice_75 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_38 = paddle._C_ops.reshape(add_95, full_int_array_10) + del add_95 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_37 = paddle._C_ops.transpose(reshape_38, [0, 2, 1, 3]) + del reshape_38 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_76 = paddle._C_ops.slice( + data_19, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_77 = paddle._C_ops.slice( + data_20, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_69 = paddle._C_ops.matmul(add_94, slice_76, False, False) + del add_94, slice_76 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_96 = paddle._C_ops.add(matmul_69, slice_77) + del matmul_69, slice_77 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_39 = paddle._C_ops.reshape(add_96, full_int_array_10) + del add_96 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_38 = paddle._C_ops.transpose(reshape_39, [0, 2, 1, 3]) + del reshape_39 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_78 = paddle._C_ops.slice( + data_19, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_19 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_79 = paddle._C_ops.slice( + data_20, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_20 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_70 = paddle._C_ops.matmul(layer_norm_33, slice_78, False, False) + del slice_78 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_97 = paddle._C_ops.add(matmul_70, slice_79) + del matmul_70, slice_79 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_40 = paddle._C_ops.reshape(add_97, full_int_array_10) + del add_97 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_39 = paddle._C_ops.transpose(reshape_40, [0, 2, 1, 3]) + del reshape_40 + + # pd_op.matmul: (-1x8x100x-1xf32) <- (-1x8x100x32xf32, -1x8x-1x32xf32) + matmul_71 = paddle._C_ops.matmul(transpose_37, transpose_38, False, True) + del 
transpose_37, transpose_38 + + # pd_op.scale: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(matmul_71, full_12, float("0"), True) + del matmul_71 + + # pd_op.softmax: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32) + softmax_9 = paddle._C_ops.softmax(scale_17, -1) + del scale_17 + + # pd_op.dropout: (-1x8x100x-1xf32, -1x8x100x-1xui8) <- (-1x8x100x-1xf32, None, 1xf32) + dropout_64, dropout_65 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_9, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_9 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x-1xf32, -1x8x-1x32xf32) + matmul_72 = paddle._C_ops.matmul(dropout_64, transpose_39, False, False) + del dropout_64, transpose_39 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + transpose_40 = paddle._C_ops.transpose(matmul_72, [0, 2, 1, 3]) + del matmul_72 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_18 = paddle._C_ops.shape64(transpose_40) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_80 = paddle._C_ops.slice( + shape64_18, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_18 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_41 = paddle._C_ops.reshape(transpose_40, full_int_array_12) + del transpose_40 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_73 = paddle._C_ops.matmul(reshape_41, parameter_73, False, False) + del parameter_73, reshape_41 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_98 = paddle._C_ops.add(matmul_73, parameter_72) + del matmul_73, parameter_72 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_66, dropout_67 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_98, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_98 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_99 = paddle._C_ops.add(layer_norm_48, dropout_66) + del dropout_66, layer_norm_48 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_51, layer_norm_52, layer_norm_53 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_99, parameter_71, parameter_70, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_99, parameter_70, parameter_71 + + # pd_op.matmul: (-1x100x2048xf32) <- (-1x100x256xf32, 256x2048xf32) + matmul_74 = paddle._C_ops.matmul(layer_norm_51, parameter_69, False, False) + del parameter_69 + + # pd_op.add: (-1x100x2048xf32) <- (-1x100x2048xf32, 2048xf32) + add_100 = paddle._C_ops.add(matmul_74, parameter_68) + del matmul_74, parameter_68 + + # pd_op.relu: (-1x100x2048xf32) <- (-1x100x2048xf32) + relu_56 = paddle._C_ops.relu(add_100) + del add_100 + + # pd_op.dropout: (-1x100x2048xf32, -1x100x2048xui8) <- (-1x100x2048xf32, None, 1xf32) + dropout_68, dropout_69 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_56, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_56 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x2048xf32, 2048x256xf32) + matmul_75 = paddle._C_ops.matmul(dropout_68, parameter_67, False, False) + del dropout_68, parameter_67 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_101 = 
paddle._C_ops.add(matmul_75, parameter_66) + del matmul_75, parameter_66 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_70, dropout_71 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_101, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_101 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_102 = paddle._C_ops.add(layer_norm_51, dropout_70) + del dropout_70, layer_norm_51 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_54, layer_norm_55, layer_norm_56 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_102, parameter_65, parameter_64, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_102, parameter_64, parameter_65 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_57, layer_norm_58, layer_norm_59 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + layer_norm_54, parameter_79, parameter_78, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_103 = paddle._C_ops.add(layer_norm_54, tile_1) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_81 = paddle._C_ops.slice( + data_21, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_82 = paddle._C_ops.slice( + data_22, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_76 = paddle._C_ops.matmul(add_103, slice_81, False, False) + del slice_81 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_104 = paddle._C_ops.add(matmul_76, slice_82) + del matmul_76, slice_82 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_42 = paddle._C_ops.reshape(add_104, full_int_array_10) + del add_104 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_41 = paddle._C_ops.transpose(reshape_42, [0, 2, 1, 3]) + del reshape_42 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_83 = paddle._C_ops.slice( + data_21, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_84 = paddle._C_ops.slice( + data_22, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_77 = paddle._C_ops.matmul(add_103, slice_83, False, False) + del add_103, slice_83 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_105 = paddle._C_ops.add(matmul_77, slice_84) + del matmul_77, slice_84 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_43 = paddle._C_ops.reshape(add_105, full_int_array_10) + del add_105 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_42 = paddle._C_ops.transpose(reshape_43, [0, 2, 1, 3]) + del reshape_43 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_85 = paddle._C_ops.slice( + data_21, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_21 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_86 = paddle._C_ops.slice( + data_22, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del 
data_22 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_78 = paddle._C_ops.matmul(layer_norm_54, slice_85, False, False) + del slice_85 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_106 = paddle._C_ops.add(matmul_78, slice_86) + del matmul_78, slice_86 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_44 = paddle._C_ops.reshape(add_106, full_int_array_10) + del add_106 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_43 = paddle._C_ops.transpose(reshape_44, [0, 2, 1, 3]) + del reshape_44 + + # pd_op.matmul: (-1x8x100x100xf32) <- (-1x8x100x32xf32, -1x8x100x32xf32) + matmul_79 = paddle._C_ops.matmul(transpose_41, transpose_42, False, True) + del transpose_41, transpose_42 + + # pd_op.scale: (-1x8x100x100xf32) <- (-1x8x100x100xf32, 1xf32) + scale_18 = paddle._C_ops.scale(matmul_79, full_12, float("0"), True) + del matmul_79 + + # pd_op.softmax: (-1x8x100x100xf32) <- (-1x8x100x100xf32) + softmax_10 = paddle._C_ops.softmax(scale_18, -1) + del scale_18 + + # pd_op.dropout: (-1x8x100x100xf32, -1x8x100x100xui8) <- (-1x8x100x100xf32, None, 1xf32) + dropout_72, dropout_73 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_10, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_10 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x100xf32, -1x8x100x32xf32) + matmul_80 = paddle._C_ops.matmul(dropout_72, transpose_43, False, False) + del dropout_72, transpose_43 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + transpose_44 = paddle._C_ops.transpose(matmul_80, [0, 2, 1, 3]) + del matmul_80 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_19 = paddle._C_ops.shape64(transpose_44) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_87 = paddle._C_ops.slice( + shape64_19, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_19 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_45 = paddle._C_ops.reshape(transpose_44, full_int_array_12) + del transpose_44 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_81 = paddle._C_ops.matmul(reshape_45, parameter_63, False, False) + del parameter_63, reshape_45 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_107 = paddle._C_ops.add(matmul_81, parameter_62) + del matmul_81, parameter_62 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_74, dropout_75 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_107, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_107 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_108 = paddle._C_ops.add(layer_norm_54, dropout_74) + del dropout_74, layer_norm_54 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_60, layer_norm_61, layer_norm_62 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_108, parameter_61, parameter_60, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_108, parameter_60, parameter_61 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_109 = paddle._C_ops.add(layer_norm_60, tile_1) + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_110 = 
paddle._C_ops.add(layer_norm_33, flatten_3) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_88 = paddle._C_ops.slice( + data_23, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_89 = paddle._C_ops.slice( + data_24, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_82 = paddle._C_ops.matmul(add_109, slice_88, False, False) + del add_109, slice_88 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_111 = paddle._C_ops.add(matmul_82, slice_89) + del matmul_82, slice_89 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_46 = paddle._C_ops.reshape(add_111, full_int_array_10) + del add_111 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_45 = paddle._C_ops.transpose(reshape_46, [0, 2, 1, 3]) + del reshape_46 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_90 = paddle._C_ops.slice( + data_23, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_91 = paddle._C_ops.slice( + data_24, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_83 = paddle._C_ops.matmul(add_110, slice_90, False, False) + del add_110, slice_90 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_112 = paddle._C_ops.add(matmul_83, slice_91) + del matmul_83, slice_91 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_47 = paddle._C_ops.reshape(add_112, full_int_array_10) + del add_112 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_46 = paddle._C_ops.transpose(reshape_47, [0, 2, 1, 3]) + del reshape_47 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_92 = paddle._C_ops.slice( + data_23, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_23 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_93 = paddle._C_ops.slice( + data_24, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_24 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_84 = paddle._C_ops.matmul(layer_norm_33, slice_92, False, False) + del slice_92 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_113 = paddle._C_ops.add(matmul_84, slice_93) + del matmul_84, slice_93 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_48 = paddle._C_ops.reshape(add_113, full_int_array_10) + del add_113 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_47 = paddle._C_ops.transpose(reshape_48, [0, 2, 1, 3]) + del reshape_48 + + # pd_op.matmul: (-1x8x100x-1xf32) <- (-1x8x100x32xf32, -1x8x-1x32xf32) + matmul_85 = paddle._C_ops.matmul(transpose_45, transpose_46, False, True) + del transpose_45, transpose_46 + + # pd_op.scale: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(matmul_85, full_12, float("0"), True) + del matmul_85 + + # pd_op.softmax: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32) + softmax_11 = paddle._C_ops.softmax(scale_19, -1) + del scale_19 + + # pd_op.dropout: (-1x8x100x-1xf32, -1x8x100x-1xui8) <- (-1x8x100x-1xf32, None, 1xf32) + dropout_76, dropout_77 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_11, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + 
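# Note: dropout_76 above holds the cross-attention weights of the 100 object queries over the variable-length sequence layer_norm_33,
# which appears to act as the encoder memory (flatten_3 presumably supplying its positional embedding); the matmul below applies these weights to the value projection transpose_47.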
del softmax_11 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x-1xf32, -1x8x-1x32xf32) + matmul_86 = paddle._C_ops.matmul(dropout_76, transpose_47, False, False) + del dropout_76, transpose_47 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + transpose_48 = paddle._C_ops.transpose(matmul_86, [0, 2, 1, 3]) + del matmul_86 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_20 = paddle._C_ops.shape64(transpose_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_94 = paddle._C_ops.slice( + shape64_20, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_20 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_49 = paddle._C_ops.reshape(transpose_48, full_int_array_12) + del transpose_48 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_87 = paddle._C_ops.matmul(reshape_49, parameter_59, False, False) + del parameter_59, reshape_49 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_114 = paddle._C_ops.add(matmul_87, parameter_58) + del matmul_87, parameter_58 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_78, dropout_79 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_114, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_114 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_115 = paddle._C_ops.add(layer_norm_60, dropout_78) + del dropout_78, layer_norm_60 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_63, layer_norm_64, layer_norm_65 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_115, parameter_57, parameter_56, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_115, parameter_56, parameter_57 + + # pd_op.matmul: (-1x100x2048xf32) <- (-1x100x256xf32, 256x2048xf32) + matmul_88 = paddle._C_ops.matmul(layer_norm_63, parameter_55, False, False) + del parameter_55 + + # pd_op.add: (-1x100x2048xf32) <- (-1x100x2048xf32, 2048xf32) + add_116 = paddle._C_ops.add(matmul_88, parameter_54) + del matmul_88, parameter_54 + + # pd_op.relu: (-1x100x2048xf32) <- (-1x100x2048xf32) + relu_57 = paddle._C_ops.relu(add_116) + del add_116 + + # pd_op.dropout: (-1x100x2048xf32, -1x100x2048xui8) <- (-1x100x2048xf32, None, 1xf32) + dropout_80, dropout_81 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_57, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_57 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x2048xf32, 2048x256xf32) + matmul_89 = paddle._C_ops.matmul(dropout_80, parameter_53, False, False) + del dropout_80, parameter_53 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_117 = paddle._C_ops.add(matmul_89, parameter_52) + del matmul_89, parameter_52 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_82, dropout_83 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_117, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_117 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_118 = paddle._C_ops.add(layer_norm_63, dropout_82) + del dropout_82, layer_norm_63 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, 
-1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_66, layer_norm_67, layer_norm_68 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_118, parameter_51, parameter_50, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_118, parameter_50, parameter_51 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_69, layer_norm_70, layer_norm_71 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + layer_norm_66, parameter_79, parameter_78, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_119 = paddle._C_ops.add(layer_norm_66, tile_1) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_95 = paddle._C_ops.slice( + data_25, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_96 = paddle._C_ops.slice( + data_26, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_90 = paddle._C_ops.matmul(add_119, slice_95, False, False) + del slice_95 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_120 = paddle._C_ops.add(matmul_90, slice_96) + del matmul_90, slice_96 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_50 = paddle._C_ops.reshape(add_120, full_int_array_10) + del add_120 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_49 = paddle._C_ops.transpose(reshape_50, [0, 2, 1, 3]) + del reshape_50 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_97 = paddle._C_ops.slice( + data_25, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_98 = paddle._C_ops.slice( + data_26, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_91 = paddle._C_ops.matmul(add_119, slice_97, False, False) + del add_119, slice_97 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_121 = paddle._C_ops.add(matmul_91, slice_98) + del matmul_91, slice_98 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_51 = paddle._C_ops.reshape(add_121, full_int_array_10) + del add_121 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_50 = paddle._C_ops.transpose(reshape_51, [0, 2, 1, 3]) + del reshape_51 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_99 = paddle._C_ops.slice( + data_25, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_25 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_100 = paddle._C_ops.slice( + data_26, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_26 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_92 = paddle._C_ops.matmul(layer_norm_66, slice_99, False, False) + del slice_99 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_122 = paddle._C_ops.add(matmul_92, slice_100) + del matmul_92, slice_100 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_52 = paddle._C_ops.reshape(add_122, full_int_array_10) + del add_122 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_51 = paddle._C_ops.transpose(reshape_52, [0, 2, 1, 3]) + del reshape_52 + 
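# Note: the matmul / scale / softmax / dropout / matmul sequence that follows appears to be the scaled dot-product attention core of this decoder layer's self-attention among the 100 queries (8 heads of width 32);
# full_12 presumably carries the 1/sqrt(32) scaling factor and full_13 the dropout rate, while tile_1 appears to serve as the query positional embedding, added to the query and key projections but not to the values.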
+ # pd_op.matmul: (-1x8x100x100xf32) <- (-1x8x100x32xf32, -1x8x100x32xf32) + matmul_93 = paddle._C_ops.matmul(transpose_49, transpose_50, False, True) + del transpose_49, transpose_50 + + # pd_op.scale: (-1x8x100x100xf32) <- (-1x8x100x100xf32, 1xf32) + scale_20 = paddle._C_ops.scale(matmul_93, full_12, float("0"), True) + del matmul_93 + + # pd_op.softmax: (-1x8x100x100xf32) <- (-1x8x100x100xf32) + softmax_12 = paddle._C_ops.softmax(scale_20, -1) + del scale_20 + + # pd_op.dropout: (-1x8x100x100xf32, -1x8x100x100xui8) <- (-1x8x100x100xf32, None, 1xf32) + dropout_84, dropout_85 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_12, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_12 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x100xf32, -1x8x100x32xf32) + matmul_94 = paddle._C_ops.matmul(dropout_84, transpose_51, False, False) + del dropout_84, transpose_51 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + transpose_52 = paddle._C_ops.transpose(matmul_94, [0, 2, 1, 3]) + del matmul_94 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_21 = paddle._C_ops.shape64(transpose_52) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_101 = paddle._C_ops.slice( + shape64_21, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_21 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_53 = paddle._C_ops.reshape(transpose_52, full_int_array_12) + del transpose_52 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_95 = paddle._C_ops.matmul(reshape_53, parameter_49, False, False) + del parameter_49, reshape_53 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_123 = paddle._C_ops.add(matmul_95, parameter_48) + del matmul_95, parameter_48 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_86, dropout_87 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_123, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_123 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_124 = paddle._C_ops.add(layer_norm_66, dropout_86) + del dropout_86, layer_norm_66 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_72, layer_norm_73, layer_norm_74 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_124, parameter_47, parameter_46, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_124, parameter_46, parameter_47 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_125 = paddle._C_ops.add(layer_norm_72, tile_1) + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_126 = paddle._C_ops.add(layer_norm_33, flatten_3) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_102 = paddle._C_ops.slice( + data_27, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_103 = paddle._C_ops.slice( + data_28, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_96 = paddle._C_ops.matmul(add_125, slice_102, False, False) + del add_125, slice_102 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_127 = 
paddle._C_ops.add(matmul_96, slice_103) + del matmul_96, slice_103 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_54 = paddle._C_ops.reshape(add_127, full_int_array_10) + del add_127 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_53 = paddle._C_ops.transpose(reshape_54, [0, 2, 1, 3]) + del reshape_54 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_104 = paddle._C_ops.slice( + data_27, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_105 = paddle._C_ops.slice( + data_28, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_97 = paddle._C_ops.matmul(add_126, slice_104, False, False) + del add_126, slice_104 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_128 = paddle._C_ops.add(matmul_97, slice_105) + del matmul_97, slice_105 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_55 = paddle._C_ops.reshape(add_128, full_int_array_10) + del add_128 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_54 = paddle._C_ops.transpose(reshape_55, [0, 2, 1, 3]) + del reshape_55 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_106 = paddle._C_ops.slice( + data_27, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_27 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_107 = paddle._C_ops.slice( + data_28, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_28 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_98 = paddle._C_ops.matmul(layer_norm_33, slice_106, False, False) + del slice_106 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_129 = paddle._C_ops.add(matmul_98, slice_107) + del matmul_98, slice_107 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_56 = paddle._C_ops.reshape(add_129, full_int_array_10) + del add_129 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_55 = paddle._C_ops.transpose(reshape_56, [0, 2, 1, 3]) + del reshape_56 + + # pd_op.matmul: (-1x8x100x-1xf32) <- (-1x8x100x32xf32, -1x8x-1x32xf32) + matmul_99 = paddle._C_ops.matmul(transpose_53, transpose_54, False, True) + del transpose_53, transpose_54 + + # pd_op.scale: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32, 1xf32) + scale_21 = paddle._C_ops.scale(matmul_99, full_12, float("0"), True) + del matmul_99 + + # pd_op.softmax: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32) + softmax_13 = paddle._C_ops.softmax(scale_21, -1) + del scale_21 + + # pd_op.dropout: (-1x8x100x-1xf32, -1x8x100x-1xui8) <- (-1x8x100x-1xf32, None, 1xf32) + dropout_88, dropout_89 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_13, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_13 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x-1xf32, -1x8x-1x32xf32) + matmul_100 = paddle._C_ops.matmul(dropout_88, transpose_55, False, False) + del dropout_88, transpose_55 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + transpose_56 = paddle._C_ops.transpose(matmul_100, [0, 2, 1, 3]) + del matmul_100 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_22 = paddle._C_ops.shape64(transpose_56) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_108 = paddle._C_ops.slice( + shape64_22, [0], full_int_array_2, 
full_int_array_3, [1], [0] + ) + del shape64_22 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_57 = paddle._C_ops.reshape(transpose_56, full_int_array_12) + del transpose_56 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_101 = paddle._C_ops.matmul(reshape_57, parameter_45, False, False) + del parameter_45, reshape_57 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_130 = paddle._C_ops.add(matmul_101, parameter_44) + del matmul_101, parameter_44 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_90, dropout_91 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_130, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_130 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_131 = paddle._C_ops.add(layer_norm_72, dropout_90) + del dropout_90, layer_norm_72 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_75, layer_norm_76, layer_norm_77 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_131, parameter_43, parameter_42, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_131, parameter_42, parameter_43 + + # pd_op.matmul: (-1x100x2048xf32) <- (-1x100x256xf32, 256x2048xf32) + matmul_102 = paddle._C_ops.matmul(layer_norm_75, parameter_41, False, False) + del parameter_41 + + # pd_op.add: (-1x100x2048xf32) <- (-1x100x2048xf32, 2048xf32) + add_132 = paddle._C_ops.add(matmul_102, parameter_40) + del matmul_102, parameter_40 + + # pd_op.relu: (-1x100x2048xf32) <- (-1x100x2048xf32) + relu_58 = paddle._C_ops.relu(add_132) + del add_132 + + # pd_op.dropout: (-1x100x2048xf32, -1x100x2048xui8) <- (-1x100x2048xf32, None, 1xf32) + dropout_92, dropout_93 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_58, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_58 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x2048xf32, 2048x256xf32) + matmul_103 = paddle._C_ops.matmul(dropout_92, parameter_39, False, False) + del dropout_92, parameter_39 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_133 = paddle._C_ops.add(matmul_103, parameter_38) + del matmul_103, parameter_38 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_94, dropout_95 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_133, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_133 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_134 = paddle._C_ops.add(layer_norm_75, dropout_94) + del dropout_94, layer_norm_75 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_78, layer_norm_79, layer_norm_80 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_134, parameter_37, parameter_36, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_134, parameter_36, parameter_37 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_81, layer_norm_82, layer_norm_83 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + layer_norm_78, parameter_79, 
parameter_78, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_135 = paddle._C_ops.add(layer_norm_78, tile_1) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_109 = paddle._C_ops.slice( + data_29, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_110 = paddle._C_ops.slice( + data_30, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_104 = paddle._C_ops.matmul(add_135, slice_109, False, False) + del slice_109 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_136 = paddle._C_ops.add(matmul_104, slice_110) + del matmul_104, slice_110 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_58 = paddle._C_ops.reshape(add_136, full_int_array_10) + del add_136 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_57 = paddle._C_ops.transpose(reshape_58, [0, 2, 1, 3]) + del reshape_58 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_111 = paddle._C_ops.slice( + data_29, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_112 = paddle._C_ops.slice( + data_30, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_105 = paddle._C_ops.matmul(add_135, slice_111, False, False) + del add_135, slice_111 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_137 = paddle._C_ops.add(matmul_105, slice_112) + del matmul_105, slice_112 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_59 = paddle._C_ops.reshape(add_137, full_int_array_10) + del add_137 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_58 = paddle._C_ops.transpose(reshape_59, [0, 2, 1, 3]) + del reshape_59 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_113 = paddle._C_ops.slice( + data_29, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_29 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_114 = paddle._C_ops.slice( + data_30, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_30 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_106 = paddle._C_ops.matmul(layer_norm_78, slice_113, False, False) + del slice_113 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_138 = paddle._C_ops.add(matmul_106, slice_114) + del matmul_106, slice_114 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_60 = paddle._C_ops.reshape(add_138, full_int_array_10) + del add_138 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_59 = paddle._C_ops.transpose(reshape_60, [0, 2, 1, 3]) + del reshape_60 + + # pd_op.matmul: (-1x8x100x100xf32) <- (-1x8x100x32xf32, -1x8x100x32xf32) + matmul_107 = paddle._C_ops.matmul(transpose_57, transpose_58, False, True) + del transpose_57, transpose_58 + + # pd_op.scale: (-1x8x100x100xf32) <- (-1x8x100x100xf32, 1xf32) + scale_22 = paddle._C_ops.scale(matmul_107, full_12, float("0"), True) + del matmul_107 + + # pd_op.softmax: (-1x8x100x100xf32) <- (-1x8x100x100xf32) + softmax_14 = paddle._C_ops.softmax(scale_22, -1) + del scale_22 + + # pd_op.dropout: (-1x8x100x100xf32, -1x8x100x100xui8) <- 
(-1x8x100x100xf32, None, 1xf32) + dropout_96, dropout_97 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_14, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_14 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x100xf32, -1x8x100x32xf32) + matmul_108 = paddle._C_ops.matmul(dropout_96, transpose_59, False, False) + del dropout_96, transpose_59 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + transpose_60 = paddle._C_ops.transpose(matmul_108, [0, 2, 1, 3]) + del matmul_108 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_23 = paddle._C_ops.shape64(transpose_60) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_115 = paddle._C_ops.slice( + shape64_23, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_23 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_61 = paddle._C_ops.reshape(transpose_60, full_int_array_12) + del transpose_60 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_109 = paddle._C_ops.matmul(reshape_61, parameter_35, False, False) + del parameter_35, reshape_61 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_139 = paddle._C_ops.add(matmul_109, parameter_34) + del matmul_109, parameter_34 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_98, dropout_99 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_139, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_139 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_140 = paddle._C_ops.add(layer_norm_78, dropout_98) + del dropout_98, layer_norm_78 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_84, layer_norm_85, layer_norm_86 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_140, parameter_33, parameter_32, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_140, parameter_32, parameter_33 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_141 = paddle._C_ops.add(layer_norm_84, tile_1) + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_142 = paddle._C_ops.add(layer_norm_33, flatten_3) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_116 = paddle._C_ops.slice( + data_31, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_117 = paddle._C_ops.slice( + data_32, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_110 = paddle._C_ops.matmul(add_141, slice_116, False, False) + del add_141, slice_116 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_143 = paddle._C_ops.add(matmul_110, slice_117) + del matmul_110, slice_117 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_62 = paddle._C_ops.reshape(add_143, full_int_array_10) + del add_143 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_61 = paddle._C_ops.transpose(reshape_62, [0, 2, 1, 3]) + del reshape_62 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_118 = paddle._C_ops.slice( + data_31, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: 
(256xf32) <- (768xf32, 1xi64, 1xi64) + slice_119 = paddle._C_ops.slice( + data_32, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_111 = paddle._C_ops.matmul(add_142, slice_118, False, False) + del add_142, slice_118 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_144 = paddle._C_ops.add(matmul_111, slice_119) + del matmul_111, slice_119 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_63 = paddle._C_ops.reshape(add_144, full_int_array_10) + del add_144 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_62 = paddle._C_ops.transpose(reshape_63, [0, 2, 1, 3]) + del reshape_63 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_120 = paddle._C_ops.slice( + data_31, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_31 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_121 = paddle._C_ops.slice( + data_32, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_32 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_112 = paddle._C_ops.matmul(layer_norm_33, slice_120, False, False) + del slice_120 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_145 = paddle._C_ops.add(matmul_112, slice_121) + del matmul_112, slice_121 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_64 = paddle._C_ops.reshape(add_145, full_int_array_10) + del add_145 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_63 = paddle._C_ops.transpose(reshape_64, [0, 2, 1, 3]) + del reshape_64 + + # pd_op.matmul: (-1x8x100x-1xf32) <- (-1x8x100x32xf32, -1x8x-1x32xf32) + matmul_113 = paddle._C_ops.matmul(transpose_61, transpose_62, False, True) + del transpose_61, transpose_62 + + # pd_op.scale: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(matmul_113, full_12, float("0"), True) + del matmul_113 + + # pd_op.softmax: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32) + softmax_15 = paddle._C_ops.softmax(scale_23, -1) + del scale_23 + + # pd_op.dropout: (-1x8x100x-1xf32, -1x8x100x-1xui8) <- (-1x8x100x-1xf32, None, 1xf32) + dropout_100, dropout_101 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_15, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_15 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x-1xf32, -1x8x-1x32xf32) + matmul_114 = paddle._C_ops.matmul(dropout_100, transpose_63, False, False) + del dropout_100, transpose_63 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + transpose_64 = paddle._C_ops.transpose(matmul_114, [0, 2, 1, 3]) + del matmul_114 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_24 = paddle._C_ops.shape64(transpose_64) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_122 = paddle._C_ops.slice( + shape64_24, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_24 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_65 = paddle._C_ops.reshape(transpose_64, full_int_array_12) + del transpose_64 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_115 = paddle._C_ops.matmul(reshape_65, parameter_31, False, False) + del parameter_31, reshape_65 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_146 = paddle._C_ops.add(matmul_115, parameter_30) + del matmul_115, parameter_30 + + # 
pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_102, dropout_103 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_146, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_146 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_147 = paddle._C_ops.add(layer_norm_84, dropout_102) + del dropout_102, layer_norm_84 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_87, layer_norm_88, layer_norm_89 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_147, parameter_29, parameter_28, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_147, parameter_28, parameter_29 + + # pd_op.matmul: (-1x100x2048xf32) <- (-1x100x256xf32, 256x2048xf32) + matmul_116 = paddle._C_ops.matmul(layer_norm_87, parameter_27, False, False) + del parameter_27 + + # pd_op.add: (-1x100x2048xf32) <- (-1x100x2048xf32, 2048xf32) + add_148 = paddle._C_ops.add(matmul_116, parameter_26) + del matmul_116, parameter_26 + + # pd_op.relu: (-1x100x2048xf32) <- (-1x100x2048xf32) + relu_59 = paddle._C_ops.relu(add_148) + del add_148 + + # pd_op.dropout: (-1x100x2048xf32, -1x100x2048xui8) <- (-1x100x2048xf32, None, 1xf32) + dropout_104, dropout_105 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_59, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_59 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x2048xf32, 2048x256xf32) + matmul_117 = paddle._C_ops.matmul(dropout_104, parameter_25, False, False) + del dropout_104, parameter_25 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_149 = paddle._C_ops.add(matmul_117, parameter_24) + del matmul_117, parameter_24 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_106, dropout_107 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_149, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_149 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_150 = paddle._C_ops.add(layer_norm_87, dropout_106) + del dropout_106, layer_norm_87 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_90, layer_norm_91, layer_norm_92 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_150, parameter_23, parameter_22, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_150, parameter_22, parameter_23 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_93, layer_norm_94, layer_norm_95 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + layer_norm_90, parameter_79, parameter_78, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_151 = paddle._C_ops.add(layer_norm_90, tile_1) + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_123 = paddle._C_ops.slice( + data_33, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_124 = paddle._C_ops.slice( + data_34, [0], 
full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_118 = paddle._C_ops.matmul(add_151, slice_123, False, False) + del slice_123 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_152 = paddle._C_ops.add(matmul_118, slice_124) + del matmul_118, slice_124 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_66 = paddle._C_ops.reshape(add_152, full_int_array_10) + del add_152 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_65 = paddle._C_ops.transpose(reshape_66, [0, 2, 1, 3]) + del reshape_66 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_125 = paddle._C_ops.slice( + data_33, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_126 = paddle._C_ops.slice( + data_34, [0], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_119 = paddle._C_ops.matmul(add_151, slice_125, False, False) + del add_151, slice_125 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_153 = paddle._C_ops.add(matmul_119, slice_126) + del matmul_119, slice_126 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_67 = paddle._C_ops.reshape(add_153, full_int_array_10) + del add_153 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_66 = paddle._C_ops.transpose(reshape_67, [0, 2, 1, 3]) + del reshape_67 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_127 = paddle._C_ops.slice( + data_33, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_33 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_128 = paddle._C_ops.slice( + data_34, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_34 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_120 = paddle._C_ops.matmul(layer_norm_90, slice_127, False, False) + del slice_127 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_154 = paddle._C_ops.add(matmul_120, slice_128) + del matmul_120, slice_128 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_68 = paddle._C_ops.reshape(add_154, full_int_array_10) + del add_154 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_67 = paddle._C_ops.transpose(reshape_68, [0, 2, 1, 3]) + del reshape_68 + + # pd_op.matmul: (-1x8x100x100xf32) <- (-1x8x100x32xf32, -1x8x100x32xf32) + matmul_121 = paddle._C_ops.matmul(transpose_65, transpose_66, False, True) + del transpose_65, transpose_66 + + # pd_op.scale: (-1x8x100x100xf32) <- (-1x8x100x100xf32, 1xf32) + scale_24 = paddle._C_ops.scale(matmul_121, full_12, float("0"), True) + del matmul_121 + + # pd_op.softmax: (-1x8x100x100xf32) <- (-1x8x100x100xf32) + softmax_16 = paddle._C_ops.softmax(scale_24, -1) + del scale_24 + + # pd_op.dropout: (-1x8x100x100xf32, -1x8x100x100xui8) <- (-1x8x100x100xf32, None, 1xf32) + dropout_108, dropout_109 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_16, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_16 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x100xf32, -1x8x100x32xf32) + matmul_122 = paddle._C_ops.matmul(dropout_108, transpose_67, False, False) + del dropout_108, transpose_67 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + 
transpose_68 = paddle._C_ops.transpose(matmul_122, [0, 2, 1, 3]) + del matmul_122 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_25 = paddle._C_ops.shape64(transpose_68) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_129 = paddle._C_ops.slice( + shape64_25, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_25 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_69 = paddle._C_ops.reshape(transpose_68, full_int_array_12) + del transpose_68 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_123 = paddle._C_ops.matmul(reshape_69, parameter_21, False, False) + del parameter_21, reshape_69 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_155 = paddle._C_ops.add(matmul_123, parameter_20) + del matmul_123, parameter_20 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_110, dropout_111 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_155, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_155 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_156 = paddle._C_ops.add(layer_norm_90, dropout_110) + del dropout_110, layer_norm_90 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_96, layer_norm_97, layer_norm_98 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_156, parameter_19, parameter_18, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_156, parameter_18, parameter_19 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_157 = paddle._C_ops.add(layer_norm_96, tile_1) + del tile_1 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_158 = paddle._C_ops.add(layer_norm_33, flatten_3) + del flatten_3 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_130 = paddle._C_ops.slice( + data_35, [1], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_131 = paddle._C_ops.slice( + data_36, [0], full_int_array_2, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_124 = paddle._C_ops.matmul(add_157, slice_130, False, False) + del add_157, slice_130 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_159 = paddle._C_ops.add(matmul_124, slice_131) + del matmul_124, slice_131 + + # pd_op.reshape: (-1x100x8x32xf32) <- (-1x100x256xf32, 4xi64) + reshape_70 = paddle._C_ops.reshape(add_159, full_int_array_10) + del add_159 + + # pd_op.transpose: (-1x8x100x32xf32) <- (-1x100x8x32xf32) + transpose_69 = paddle._C_ops.transpose(reshape_70, [0, 2, 1, 3]) + del reshape_70 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_132 = paddle._C_ops.slice( + data_35, [1], full_int_array_9, full_int_array_11, [1], [] + ) + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_133 = paddle._C_ops.slice( + data_36, [0], full_int_array_9, full_int_array_11, [1], [] + ) + del full_int_array_9 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_125 = paddle._C_ops.matmul(add_158, slice_132, False, False) + del add_158, slice_132 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_160 = paddle._C_ops.add(matmul_125, slice_133) + del matmul_125, slice_133 + + # 
pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_71 = paddle._C_ops.reshape(add_160, full_int_array_10) + del add_160 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_70 = paddle._C_ops.transpose(reshape_71, [0, 2, 1, 3]) + del reshape_71 + + # pd_op.slice: (256x256xf32) <- (256x768xf32, 1xi64, 1xi64) + slice_134 = paddle._C_ops.slice( + data_35, [1], full_int_array_11, full_int_array_8, [1], [] + ) + del data_35 + + # pd_op.slice: (256xf32) <- (768xf32, 1xi64, 1xi64) + slice_135 = paddle._C_ops.slice( + data_36, [0], full_int_array_11, full_int_array_8, [1], [] + ) + del data_36, full_int_array_11 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_126 = paddle._C_ops.matmul(layer_norm_33, slice_134, False, False) + del slice_134 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_161 = paddle._C_ops.add(matmul_126, slice_135) + del matmul_126, slice_135 + + # pd_op.reshape: (-1x-1x8x32xf32) <- (-1x-1x256xf32, 4xi64) + reshape_72 = paddle._C_ops.reshape(add_161, full_int_array_10) + del add_161, full_int_array_10 + + # pd_op.transpose: (-1x8x-1x32xf32) <- (-1x-1x8x32xf32) + transpose_71 = paddle._C_ops.transpose(reshape_72, [0, 2, 1, 3]) + del reshape_72 + + # pd_op.matmul: (-1x8x100x-1xf32) <- (-1x8x100x32xf32, -1x8x-1x32xf32) + matmul_127 = paddle._C_ops.matmul(transpose_69, transpose_70, False, True) + del transpose_69, transpose_70 + + # pd_op.scale: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32, 1xf32) + scale_25 = paddle._C_ops.scale(matmul_127, full_12, float("0"), True) + del full_12, matmul_127 + + # pd_op.softmax: (-1x8x100x-1xf32) <- (-1x8x100x-1xf32) + softmax_17 = paddle._C_ops.softmax(scale_25, -1) + del scale_25 + + # pd_op.dropout: (-1x8x100x-1xf32, -1x8x100x-1xui8) <- (-1x8x100x-1xf32, None, 1xf32) + dropout_112, dropout_113 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_17, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_17 + + # pd_op.matmul: (-1x8x100x32xf32) <- (-1x8x100x-1xf32, -1x8x-1x32xf32) + matmul_128 = paddle._C_ops.matmul(dropout_112, transpose_71, False, False) + del dropout_112, transpose_71 + + # pd_op.transpose: (-1x100x8x32xf32) <- (-1x8x100x32xf32) + transpose_72 = paddle._C_ops.transpose(matmul_128, [0, 2, 1, 3]) + del matmul_128 + + # pd_op.shape64: (4xi64) <- (-1x100x8x32xf32) + shape64_26 = paddle._C_ops.shape64(transpose_72) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_136 = paddle._C_ops.slice( + shape64_26, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_26 + + # pd_op.reshape: (-1x100x256xf32) <- (-1x100x8x32xf32, 3xi64) + reshape_73 = paddle._C_ops.reshape(transpose_72, full_int_array_12) + del full_int_array_12, transpose_72 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x256xf32, 256x256xf32) + matmul_129 = paddle._C_ops.matmul(reshape_73, parameter_17, False, False) + del parameter_17, reshape_73 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_162 = paddle._C_ops.add(matmul_129, parameter_16) + del matmul_129, parameter_16 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_114, dropout_115 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_162, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_162 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_163 
= paddle._C_ops.add(layer_norm_96, dropout_114) + del dropout_114, layer_norm_96 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_99, layer_norm_100, layer_norm_101 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_163, parameter_15, parameter_14, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_163, parameter_14, parameter_15 + + # pd_op.matmul: (-1x100x2048xf32) <- (-1x100x256xf32, 256x2048xf32) + matmul_130 = paddle._C_ops.matmul(layer_norm_99, parameter_13, False, False) + del parameter_13 + + # pd_op.add: (-1x100x2048xf32) <- (-1x100x2048xf32, 2048xf32) + add_164 = paddle._C_ops.add(matmul_130, parameter_12) + del matmul_130, parameter_12 + + # pd_op.relu: (-1x100x2048xf32) <- (-1x100x2048xf32) + relu_60 = paddle._C_ops.relu(add_164) + del add_164 + + # pd_op.dropout: (-1x100x2048xf32, -1x100x2048xui8) <- (-1x100x2048xf32, None, 1xf32) + dropout_116, dropout_117 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_60, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_60 + + # pd_op.matmul: (-1x100x256xf32) <- (-1x100x2048xf32, 2048x256xf32) + matmul_131 = paddle._C_ops.matmul(dropout_116, parameter_11, False, False) + del dropout_116, parameter_11 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, 256xf32) + add_165 = paddle._C_ops.add(matmul_131, parameter_10) + del matmul_131, parameter_10 + + # pd_op.dropout: (-1x100x256xf32, -1x100x256xui8) <- (-1x100x256xf32, None, 1xf32) + dropout_118, dropout_119 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_165, None, full_13, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_165, full_13 + + # pd_op.add: (-1x100x256xf32) <- (-1x100x256xf32, -1x100x256xf32) + add_166 = paddle._C_ops.add(layer_norm_99, dropout_118) + del dropout_118, layer_norm_99 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_102, layer_norm_103, layer_norm_104 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_166, parameter_9, parameter_8, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_166, parameter_8, parameter_9 + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_105, layer_norm_106, layer_norm_107 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + layer_norm_102, parameter_79, parameter_78, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + + # pd_op.layer_norm: (-1x100x256xf32, -1x100xf32, -1x100xf32) <- (-1x100x256xf32, 256xf32, 256xf32) + layer_norm_108, layer_norm_109, layer_norm_110 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + layer_norm_102, parameter_79, parameter_78, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del layer_norm_102, parameter_78, parameter_79 + + # builtin.combine: ([-1x100x256xf32, -1x100x256xf32, -1x100x256xf32, -1x100x256xf32, -1x100x256xf32, -1x100x256xf32]) <- (-1x100x256xf32, -1x100x256xf32, -1x100x256xf32, -1x100x256xf32, -1x100x256xf32, -1x100x256xf32) + combine_5 = [ + layer_norm_45, + layer_norm_57, + layer_norm_69, + layer_norm_81, + layer_norm_93, + layer_norm_105, + ] + del ( + layer_norm_105, + 
layer_norm_45, + layer_norm_57, + layer_norm_69, + layer_norm_81, + layer_norm_93, + ) + + # pd_op.stack: (6x-1x100x256xf32) <- ([-1x100x256xf32, -1x100x256xf32, -1x100x256xf32, -1x100x256xf32, -1x100x256xf32, -1x100x256xf32]) + stack_4 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.transpose: (-1x256x-1xf32) <- (-1x-1x256xf32) + transpose_73 = paddle._C_ops.transpose(layer_norm_33, [0, 2, 1]) + del layer_norm_33 + + # pd_op.full: (xi64) <- () + full_16 = paddle._C_ops.full( + [], float("256"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_6 = [slice_0, full_16, slice_1, slice_2] + del full_16, slice_0, slice_1, slice_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.reshape: (-1x256x-1x-1xf32) <- (-1x256x-1xf32, 4xi64) + reshape_74 = paddle._C_ops.reshape(transpose_73, stack_5) + del stack_5, transpose_73 + + # pd_op.matmul: (6x-1x100x5xf32) <- (6x-1x100x256xf32, 256x5xf32) + matmul_132 = paddle._C_ops.matmul(stack_4, parameter_7, False, False) + del parameter_7 + + # pd_op.add: (6x-1x100x5xf32) <- (6x-1x100x5xf32, 5xf32) + add_167 = paddle._C_ops.add(matmul_132, parameter_6) + del matmul_132, parameter_6 + + # pd_op.matmul: (6x-1x100x256xf32) <- (6x-1x100x256xf32, 256x256xf32) + matmul_133 = paddle._C_ops.matmul(stack_4, parameter_5, False, False) + del parameter_5, stack_4 + + # pd_op.add: (6x-1x100x256xf32) <- (6x-1x100x256xf32, 256xf32) + add_168 = paddle._C_ops.add(matmul_133, parameter_4) + del matmul_133, parameter_4 + + # pd_op.relu: (6x-1x100x256xf32) <- (6x-1x100x256xf32) + relu_61 = paddle._C_ops.relu(add_168) + del add_168 + + # pd_op.matmul: (6x-1x100x256xf32) <- (6x-1x100x256xf32, 256x256xf32) + matmul_134 = paddle._C_ops.matmul(relu_61, parameter_3, False, False) + del parameter_3, relu_61 + + # pd_op.add: (6x-1x100x256xf32) <- (6x-1x100x256xf32, 256xf32) + add_169 = paddle._C_ops.add(matmul_134, parameter_2) + del matmul_134, parameter_2 + + # pd_op.relu: (6x-1x100x256xf32) <- (6x-1x100x256xf32) + relu_62 = paddle._C_ops.relu(add_169) + del add_169 + + # pd_op.matmul: (6x-1x100x4xf32) <- (6x-1x100x256xf32, 256x4xf32) + matmul_135 = paddle._C_ops.matmul(relu_62, parameter_1, False, False) + del parameter_1, relu_62 + + # pd_op.add: (6x-1x100x4xf32) <- (6x-1x100x4xf32, 4xf32) + add_170 = paddle._C_ops.add(matmul_135, parameter_0) + del matmul_135, parameter_0 + + # pd_op.sigmoid: (6x-1x100x4xf32) <- (6x-1x100x4xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_170) + del add_170 + + # pd_op.slice: (-1x100x4xf32) <- (6x-1x100x4xf32, 1xi64, 1xi64) + slice_137 = paddle._C_ops.slice( + sigmoid_0, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del sigmoid_0 + + # pd_op.slice: (-1x100x5xf32) <- (6x-1x100x5xf32, 1xi64, 1xi64) + slice_138 = paddle._C_ops.slice( + add_167, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del add_167 + + # pd_op.slice: (-1x3x-1x-1xf32) <- (-1x3x-1x-1xf32, 1xi64, 1xi64) + slice_139 = paddle._C_ops.slice( + data_37, [0], full_int_array_4, full_int_array_8, [1], [] + ) + del data_37, full_int_array_8 + + # pd_op.shape64: (4xi64) <- (-1x3x-1x-1xf32) + shape64_27 = paddle._C_ops.shape64(slice_139) + del slice_139 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_140 = paddle._C_ops.slice( + shape64_27, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_141 = paddle._C_ops.slice( + shape64_27, 
[0], full_int_array_4, full_int_array_5, [1], [0] + ) + del full_int_array_4 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_142 = paddle._C_ops.slice( + shape64_27, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, full_int_array_6, shape64_27 + + # pd_op.split_with_num: ([-1x100x2xf32, -1x100x2xf32]) <- (-1x100x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(slice_137, 2, full_2) + del full_2, slice_137 + + # builtin.split: (-1x100x2xf32, -1x100x2xf32) <- ([-1x100x2xf32, -1x100x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_17 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x100x2xf32) <- (-1x100x2xf32, 1xf32) + scale_26 = paddle._C_ops.scale(split_1, full_17, float("0"), True) + + # pd_op.subtract: (-1x100x2xf32) <- (-1x100x2xf32, -1x100x2xf32) + subtract_0 = paddle._C_ops.subtract(split_0, scale_26) + del scale_26 + + # pd_op.scale: (-1x100x2xf32) <- (-1x100x2xf32, 1xf32) + scale_27 = paddle._C_ops.scale(split_1, full_17, float("0"), True) + del full_17, split_1 + + # pd_op.add: (-1x100x2xf32) <- (-1x100x2xf32, -1x100x2xf32) + add_171 = paddle._C_ops.add(split_0, scale_27) + del scale_27, split_0 + + # pd_op.full: (1xi32) <- () + full_18 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x100x2xf32, -1x100x2xf32]) <- (-1x100x2xf32, -1x100x2xf32) + combine_7 = [subtract_0, add_171] + del add_171, subtract_0 + + # pd_op.concat: (-1x100x4xf32) <- ([-1x100x2xf32, -1x100x2xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_7, full_18) + del combine_7 + + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x2xf32) + divide_4 = paddle._C_ops.divide(data_38, data_39) + del data_38, data_39 + + # pd_op.scale: (-1x2xf32) <- (-1x2xf32, 1xf32) + scale_28 = paddle._C_ops.scale(divide_4, full_0, float("0.5"), True) + del divide_4, full_0 + + # pd_op.floor: (-1x2xf32) <- (-1x2xf32) + floor_0 = paddle._C_ops.floor(scale_28) + del scale_28 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32]) <- (-1x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(floor_0, 2, full_1) + del full_1 + + # builtin.split: (-1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32]) + ( + split_2, + split_3, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.flip: (-1x2xf32) <- (-1x2xf32) + flip_0 = paddle._C_ops.flip(floor_0, [1]) + del floor_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_13 = [1, 2] + + # pd_op.tile: (-1x4xf32) <- (-1x2xf32, 2xi64) + tile_2 = paddle._C_ops.tile(flip_0, full_int_array_13) + del flip_0, full_int_array_13 + + # pd_op.unsqueeze: (-1x1x4xf32) <- (-1x4xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(tile_2, full_int_array_3) + del tile_2 + + # pd_op.multiply: (-1x100x4xf32) <- (-1x100x4xf32, -1x1x4xf32) + multiply_0 = paddle._C_ops.multiply(concat_1, unsqueeze_3) + del concat_1, unsqueeze_3 + + # pd_op.softmax: (-1x100x5xf32) <- (-1x100x5xf32) + softmax_18 = paddle._C_ops.softmax(slice_138, -1) + del slice_138 + + # pd_op.slice: (-1x100x4xf32) <- (-1x100x5xf32, 1xi64, 1xi64) + slice_143 = paddle._C_ops.slice( + softmax_18, [2], full_int_array_2, full_int_array_7, [1], [] + ) + del softmax_18 + + # pd_op.max: (-1x100xf32) <- (-1x100x4xf32, 1xi64) + max_0 = paddle._C_ops.max(slice_143, full_int_array_7, False) + + # pd_op.full: (1xi64) <- () + full_19 = paddle._C_ops.full( + [1], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # 
pd_op.argmax: (-1x100xi64) <- (-1x100x4xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(slice_143, full_19, False, False, paddle.int64) + del full_19, slice_143 + + # pd_op.shape64: (2xi64) <- (-1x100xf32) + shape64_28 = paddle._C_ops.shape64(max_0) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_144 = paddle._C_ops.slice( + shape64_28, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_28 + + # pd_op.unsqueeze: (-1x100x1xi64) <- (-1x100xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(argmax_0, full_int_array_7) + del argmax_0 + + # pd_op.cast: (-1x100x1xf32) <- (-1x100x1xi64) + cast_1 = paddle._C_ops.cast(unsqueeze_4, paddle.float32) + del unsqueeze_4 + + # pd_op.unsqueeze: (-1x100x1xf32) <- (-1x100xf32, 1xi64) + unsqueeze_5 = paddle._C_ops.unsqueeze(max_0, full_int_array_7) + del full_int_array_7, max_0 + + # builtin.combine: ([-1x100x1xf32, -1x100x1xf32, -1x100x4xf32]) <- (-1x100x1xf32, -1x100x1xf32, -1x100x4xf32) + combine_8 = [cast_1, unsqueeze_5, multiply_0] + del cast_1, multiply_0, unsqueeze_5 + + # pd_op.concat: (-1x100x6xf32) <- ([-1x100x1xf32, -1x100x1xf32, -1x100x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_8, full_18) + del combine_8, full_18 + + # pd_op.full: (xi64) <- () + full_20 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xi64) <- (xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_20, + [], + paddle.int64, + [float("100")], + paddle.framework._current_expected_place(), + ) + del full_20 + + # pd_op.cast: (xi32) <- (xi64) + cast_2 = paddle._C_ops.cast(assign_value__0, paddle.int32) + del assign_value__0 + + # pd_op.shape64: (3xi64) <- (-1x100x6xf32) + shape64_29 = paddle._C_ops.shape64(concat_2) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_145 = paddle._C_ops.slice( + shape64_29, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_29 + + # builtin.combine: ([xi64]) <- (xi64) + combine_9 = [slice_145] + del slice_145 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_6 = paddle._C_ops.stack(combine_9, 0) + del combine_9 + + # pd_op.tile: (-1xi32) <- (xi32, 1xi64) + tile_0 = paddle._C_ops.tile(cast_2, stack_6) + del cast_2, stack_6 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_14 = [-1, 6] + + # pd_op.reshape: (-1x6xf32) <- (-1x100x6xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(concat_2, full_int_array_14) + del concat_2, full_int_array_14 + + return reshape_0, tile_0 diff --git a/paddle_samples/PaddleX/DETR-R50/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/DETR-R50/subgraph_15/weight_meta.py new file mode 100644 index 000000000..236d8a903 --- /dev/null +++ b/paddle_samples/PaddleX/DETR-R50/subgraph_15/weight_meta.py @@ -0,0 +1,4569 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [256, 4] + dtype = "float32" + min_val = float("-0.232141") + max_val = float("5.42856") + mean = float("0.0140943") + std = float("0.28553") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [256] + dtype = "float32" + min_val = float("-1.02332") + max_val = float("0.652791") + mean = float("0.0877175") + std = float("0.255533") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [256, 256] + dtype = 
"float32" + min_val = float("-3.90605") + max_val = float("1.23044") + mean = float("-0.0276389") + std = float("0.171635") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [256] + dtype = "float32" + min_val = float("-0.60445") + max_val = float("0.79862") + mean = float("0.0647772") + std = float("0.232089") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.683392") + max_val = float("1.05609") + mean = float("-0.000968782") + std = float("0.0861314") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [5] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [256, 5] + dtype = "float32" + min_val = float("-0.0635023") + max_val = float("0.0636244") + mean = float("-0.000271111") + std = float("0.0364667") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [256] + dtype = "float32" + min_val = float("-0.393691") + max_val = float("0.383097") + mean = float("-0.00068044") + std = float("0.133391") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [256] + dtype = "float32" + min_val = float("0.53428") + max_val = float("1.22945") + mean = float("1.01815") + std = float("0.128591") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [256] + dtype = "float32" + min_val = float("-0.0933033") + max_val = float("0.0438699") + mean = float("-0.000580406") + std = float("0.0122153") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [2048, 256] + dtype = "float32" + min_val = float("-0.68834") + max_val = float("0.485084") + mean = float("-0.000165497") + std = float("0.0427264") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [2048] + dtype = "float32" + min_val = float("-0.30303") + max_val = float("0.169284") + mean = float("-0.0662955") + std = float("0.0574202") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [256, 2048] + dtype = "float32" + min_val = float("-0.561525") + max_val = float("0.683178") + mean = float("-0.000252838") + std = float("0.0669215") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [256] + dtype = "float32" + min_val = float("-1.09396") + max_val = float("0.752301") + mean = float("0.0125658") + std = float("0.189185") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [256] + dtype = "float32" + min_val = float("0.790224") + max_val = float("1.64086") + mean = float("0.97153") + std = float("0.135004") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [256] + dtype = "float32" + min_val = float("-0.20775") + max_val = float("0.201086") + mean = float("-0.00242783") + std = float("0.0578811") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.508476") + max_val = float("0.321617") + mean = float("7.5497e-05") + std = float("0.056087") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [256] + dtype = "float32" + min_val = float("-0.775543") + max_val = 
float("0.538989") + mean = float("-0.00537864") + std = float("0.146321") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [256] + dtype = "float32" + min_val = float("1.37202") + max_val = float("2.03113") + mean = float("1.61081") + std = float("0.0952637") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [256] + dtype = "float32" + min_val = float("-0.0275508") + max_val = float("0.0454657") + mean = float("-0.000317247") + std = float("0.00737727") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.233445") + max_val = float("0.23805") + mean = float("4.11699e-05") + std = float("0.0180258") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [256] + dtype = "float32" + min_val = float("-0.436769") + max_val = float("0.460958") + mean = float("-0.00319134") + std = float("0.140208") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [256] + dtype = "float32" + min_val = float("0.50913") + max_val = float("1.29742") + mean = float("1.02677") + std = float("0.153935") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [256] + dtype = "float32" + min_val = float("-0.172754") + max_val = float("0.188077") + mean = float("0.000190143") + std = float("0.0274792") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [2048, 256] + dtype = "float32" + min_val = float("-0.750985") + max_val = float("0.638919") + mean = float("-0.000368411") + std = float("0.0502117") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [2048] + dtype = "float32" + min_val = float("-0.307434") + max_val = float("0.194336") + mean = float("-0.0726624") + std = float("0.0618399") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [256, 2048] + dtype = "float32" + min_val = float("-0.646148") + max_val = float("0.708896") + mean = float("-0.000220036") + std = float("0.0779736") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [256] + dtype = "float32" + min_val = float("-1.43076") + max_val = float("1.08705") + mean = float("0.0192745") + std = float("0.249858") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [256] + dtype = "float32" + min_val = float("0.655107") + max_val = float("1.80048") + mean = float("0.938696") + std = float("0.177489") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [256] + dtype = "float32" + min_val = float("-0.316718") + max_val = float("0.30222") + mean = float("-0.00202317") + std = float("0.0985308") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.502333") + max_val = float("0.477475") + mean = float("-5.21322e-07") + std = float("0.0680016") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [256] + dtype = "float32" + min_val = float("-0.994382") + max_val = float("0.901512") + mean = float("-0.0072871") + std = float("0.203343") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [256] + dtype = "float32" + min_val = 
float("1.46521") + max_val = float("2.11163") + mean = float("1.7097") + std = float("0.112835") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [256] + dtype = "float32" + min_val = float("-0.0537846") + max_val = float("0.075743") + mean = float("0.000593598") + std = float("0.0170944") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.203263") + max_val = float("0.399298") + mean = float("-1.89456e-05") + std = float("0.0229662") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [256] + dtype = "float32" + min_val = float("-0.678425") + max_val = float("0.498215") + mean = float("-0.0019277") + std = float("0.16312") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [256] + dtype = "float32" + min_val = float("0.560138") + max_val = float("1.30469") + mean = float("1.0505") + std = float("0.148494") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [256] + dtype = "float32" + min_val = float("-0.257242") + max_val = float("0.25366") + mean = float("-0.000239239") + std = float("0.0535658") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [2048, 256] + dtype = "float32" + min_val = float("-0.829895") + max_val = float("0.584612") + mean = float("-0.000386933") + std = float("0.0576597") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [2048] + dtype = "float32" + min_val = float("-0.345676") + max_val = float("0.204961") + mean = float("-0.0783853") + std = float("0.0610927") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [256, 2048] + dtype = "float32" + min_val = float("-0.598276") + max_val = float("0.735644") + mean = float("-0.000295977") + std = float("0.0840206") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [256] + dtype = "float32" + min_val = float("-1.47089") + max_val = float("1.33382") + mean = float("0.0193151") + std = float("0.322406") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [256] + dtype = "float32" + min_val = float("0.656791") + max_val = float("1.70373") + mean = float("0.925998") + std = float("0.166804") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [256] + dtype = "float32" + min_val = float("-0.495625") + max_val = float("0.470086") + mean = float("-0.0044425") + std = float("0.139665") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.480828") + max_val = float("0.496876") + mean = float("-0.000171827") + std = float("0.0864673") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [256] + dtype = "float32" + min_val = float("-0.885304") + max_val = float("0.973465") + mean = float("-0.0134531") + std = float("0.228207") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [256] + dtype = "float32" + min_val = float("1.08375") + max_val = float("2.07925") + mean = float("1.7154") + std = float("0.138312") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [256] + dtype = 
"float32" + min_val = float("-0.0900854") + max_val = float("0.105374") + mean = float("-0.00164161") + std = float("0.0251511") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.278191") + max_val = float("0.201305") + mean = float("-0.000144267") + std = float("0.0266549") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [256] + dtype = "float32" + min_val = float("-0.447865") + max_val = float("0.476604") + mean = float("-0.00348389") + std = float("0.179869") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [256] + dtype = "float32" + min_val = float("0.466545") + max_val = float("1.27896") + mean = float("1.04204") + std = float("0.121961") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [256] + dtype = "float32" + min_val = float("-0.326171") + max_val = float("0.348513") + mean = float("-0.000631303") + std = float("0.0797257") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [2048, 256] + dtype = "float32" + min_val = float("-0.50548") + max_val = float("0.688567") + mean = float("-7.53725e-05") + std = float("0.0587415") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [2048] + dtype = "float32" + min_val = float("-0.328285") + max_val = float("0.162543") + mean = float("-0.080739") + std = float("0.0540025") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [256, 2048] + dtype = "float32" + min_val = float("-0.651256") + max_val = float("0.679438") + mean = float("-0.000636991") + std = float("0.0799614") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [256] + dtype = "float32" + min_val = float("-1.6596") + max_val = float("1.53246") + mean = float("0.0183475") + std = float("0.340895") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [256] + dtype = "float32" + min_val = float("0.628425") + max_val = float("1.4845") + mean = float("0.924896") + std = float("0.136319") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [256] + dtype = "float32" + min_val = float("-0.581603") + max_val = float("0.534566") + mean = float("-0.00513157") + std = float("0.158646") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.50546") + max_val = float("0.574034") + mean = float("3.721e-05") + std = float("0.0996466") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [256] + dtype = "float32" + min_val = float("-0.912569") + max_val = float("0.737715") + mean = float("-0.0058969") + std = float("0.209269") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [256] + dtype = "float32" + min_val = float("0.940756") + max_val = float("2.13465") + mean = float("1.59443") + std = float("0.177381") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [256] + dtype = "float32" + min_val = float("-0.11128") + max_val = float("0.138091") + mean = float("0.001425") + std = float("0.04497") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = 
[256, 256] + dtype = "float32" + min_val = float("-0.220607") + max_val = float("0.243991") + mean = float("-4.16883e-07") + std = float("0.0314562") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [256] + dtype = "float32" + min_val = float("-0.638927") + max_val = float("0.528616") + mean = float("-0.00125148") + std = float("0.192554") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [256] + dtype = "float32" + min_val = float("0.39804") + max_val = float("1.16579") + mean = float("0.967825") + std = float("0.106905") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [256] + dtype = "float32" + min_val = float("-0.429495") + max_val = float("0.314154") + mean = float("-0.000948673") + std = float("0.101559") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [2048, 256] + dtype = "float32" + min_val = float("-0.604448") + max_val = float("0.935511") + mean = float("-0.000113771") + std = float("0.0553018") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [2048] + dtype = "float32" + min_val = float("-0.375567") + max_val = float("0.0995794") + mean = float("-0.0846395") + std = float("0.0483301") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [256, 2048] + dtype = "float32" + min_val = float("-0.539443") + max_val = float("0.623978") + mean = float("-0.000606409") + std = float("0.0732435") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [256] + dtype = "float32" + min_val = float("-2.1079") + max_val = float("1.75457") + mean = float("0.0191356") + std = float("0.417741") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [256] + dtype = "float32" + min_val = float("0.531337") + max_val = float("1.54415") + mean = float("0.894469") + std = float("0.141655") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [256] + dtype = "float32" + min_val = float("-0.544029") + max_val = float("0.619458") + mean = float("-0.0050162") + std = float("0.165886") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.584279") + max_val = float("0.622739") + mean = float("0.0004762") + std = float("0.116418") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [256] + dtype = "float32" + min_val = float("-0.961869") + max_val = float("0.481142") + mean = float("-0.00872789") + std = float("0.205884") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [256] + dtype = "float32" + min_val = float("0.477035") + max_val = float("2.10077") + mean = float("1.24848") + std = float("0.266047") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [256] + dtype = "float32" + min_val = float("-0.199806") + max_val = float("0.281282") + mean = float("-0.00229986") + std = float("0.0669608") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.40653") + max_val = float("0.247415") + mean = float("0.000125584") + std = float("0.0404502") + data = None + + +class Program_weight_tensor_parameter_78: + name 
= "parameter_78" + shape = [256] + dtype = "float32" + min_val = float("-0.497978") + max_val = float("0.402947") + mean = float("0.0206851") + std = float("0.147267") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [256] + dtype = "float32" + min_val = float("0.105589") + max_val = float("1.45906") + mean = float("0.81892") + std = float("0.225017") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [256] + dtype = "float32" + min_val = float("-1.56475") + max_val = float("0.878568") + mean = float("-0.00180572") + std = float("0.2132") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [256] + dtype = "float32" + min_val = float("0.154536") + max_val = float("1.44863") + mean = float("0.876691") + std = float("0.136083") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [256] + dtype = "float32" + min_val = float("-0.92395") + max_val = float("0.705618") + mean = float("-0.00522894") + std = float("0.158273") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [2048, 256] + dtype = "float32" + min_val = float("-0.958393") + max_val = float("1.08949") + mean = float("-4.94654e-05") + std = float("0.0572793") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [2048] + dtype = "float32" + min_val = float("-0.281729") + max_val = float("0.0586648") + mean = float("-0.0844367") + std = float("0.0490229") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [256, 2048] + dtype = "float32" + min_val = float("-1.17126") + max_val = float("1.17454") + mean = float("-0.00130519") + std = float("0.0736103") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [256] + dtype = "float32" + min_val = float("-2.54974") + max_val = float("1.71304") + mean = float("0.0258927") + std = float("0.522932") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [256] + dtype = "float32" + min_val = float("0.435372") + max_val = float("1.41702") + mean = float("0.837144") + std = float("0.181581") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [256] + dtype = "float32" + min_val = float("-0.572077") + max_val = float("0.424765") + mean = float("-0.00138101") + std = float("0.140215") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.973207") + max_val = float("0.692706") + mean = float("0.000186634") + std = float("0.135568") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [256] + dtype = "float32" + min_val = float("-1.12204") + max_val = float("0.453432") + mean = float("0.00521934") + std = float("0.14887") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [256] + dtype = "float32" + min_val = float("-0.0294916") + max_val = float("0.896161") + mean = float("0.331944") + std = float("0.23036") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [256] + dtype = "float32" + min_val = float("-1.59341") + max_val = float("2.12006") + mean = float("0.0126392") + std = float("0.219694") + data = None + + +class Program_weight_tensor_parameter_93: + name = 
"parameter_93" + shape = [256, 256] + dtype = "float32" + min_val = float("-2.14733") + max_val = float("2.13301") + mean = float("-0.000635372") + std = float("0.182762") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [256] + dtype = "float32" + min_val = float("-1.07068") + max_val = float("2.63335") + mean = float("-0.00876931") + std = float("0.238998") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [256] + dtype = "float32" + min_val = float("0.00101656") + max_val = float("1.0199") + mean = float("0.508818") + std = float("0.21538") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [256] + dtype = "float32" + min_val = float("-0.1891") + max_val = float("0.105869") + mean = float("0.00109283") + std = float("0.0193423") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [2048, 256] + dtype = "float32" + min_val = float("-1.09661") + max_val = float("0.784857") + mean = float("0.000129105") + std = float("0.0463137") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [2048] + dtype = "float32" + min_val = float("-0.182381") + max_val = float("0.0414338") + mean = float("-0.0673398") + std = float("0.0405996") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [256, 2048] + dtype = "float32" + min_val = float("-0.635605") + max_val = float("0.585233") + mean = float("-0.000198993") + std = float("0.0582874") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [256] + dtype = "float32" + min_val = float("-1.50339") + max_val = float("1.3285") + mean = float("-0.0157601") + std = float("0.331869") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [256] + dtype = "float32" + min_val = float("0.344983") + max_val = float("1.79401") + mean = float("0.760999") + std = float("0.274586") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [256] + dtype = "float32" + min_val = float("-0.225155") + max_val = float("0.177794") + mean = float("-0.000702957") + std = float("0.0395093") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.573776") + max_val = float("0.50882") + mean = float("0.000288043") + std = float("0.055402") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [256] + dtype = "float32" + min_val = float("-0.400267") + max_val = float("0.298573") + mean = float("-0.08312") + std = float("0.11065") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [256] + dtype = "float32" + min_val = float("-0.0100348") + max_val = float("1.43529") + mean = float("0.671001") + std = float("0.282497") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [256] + dtype = "float32" + min_val = float("-0.380922") + max_val = float("0.863187") + mean = float("0.000493848") + std = float("0.103033") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [2048, 256] + dtype = "float32" + min_val = float("-1.42505") + max_val = float("0.916444") + mean = float("-0.000865722") + std = float("0.0737075") + data = None + + +class 
Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [2048] + dtype = "float32" + min_val = float("-0.168884") + max_val = float("0.0625911") + mean = float("-0.0582119") + std = float("0.0408516") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [256, 2048] + dtype = "float32" + min_val = float("-0.672861") + max_val = float("0.594443") + mean = float("-0.00038788") + std = float("0.0803572") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [256] + dtype = "float32" + min_val = float("-2.06846") + max_val = float("1.80472") + mean = float("-0.0204048") + std = float("0.408607") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [256] + dtype = "float32" + min_val = float("0.0941364") + max_val = float("2.1266") + mean = float("0.746255") + std = float("0.281252") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [256] + dtype = "float32" + min_val = float("-0.727518") + max_val = float("0.461616") + mean = float("0.00804983") + std = float("0.101467") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.439936") + max_val = float("0.401663") + mean = float("7.82087e-05") + std = float("0.0721749") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [256] + dtype = "float32" + min_val = float("-0.754913") + max_val = float("0.349145") + mean = float("-0.0456962") + std = float("0.164497") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [256] + dtype = "float32" + min_val = float("-0.0039139") + max_val = float("1.1175") + mean = float("0.568497") + std = float("0.276212") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [256] + dtype = "float32" + min_val = float("-0.512874") + max_val = float("0.837815") + mean = float("-0.00459227") + std = float("0.169711") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [2048, 256] + dtype = "float32" + min_val = float("-1.47377") + max_val = float("0.689233") + mean = float("0.00124009") + std = float("0.0893337") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [2048] + dtype = "float32" + min_val = float("-0.157694") + max_val = float("0.0582328") + mean = float("-0.0517658") + std = float("0.0419887") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [256, 2048] + dtype = "float32" + min_val = float("-0.560955") + max_val = float("0.608842") + mean = float("0.00067971") + std = float("0.100222") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [256] + dtype = "float32" + min_val = float("-1.91382") + max_val = float("2.22828") + mean = float("-0.00305764") + std = float("0.516542") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [256] + dtype = "float32" + min_val = float("0.128166") + max_val = float("2.04548") + mean = float("0.712529") + std = float("0.278679") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [256] + dtype = "float32" + min_val = float("-0.807002") + max_val = float("0.790796") + mean = float("0.00544793") 
+ std = float("0.167046") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.588973") + max_val = float("0.688141") + mean = float("-0.000198103") + std = float("0.0767302") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [256] + dtype = "float32" + min_val = float("-1.01085") + max_val = float("0.982782") + mean = float("-0.0267732") + std = float("0.195944") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [256] + dtype = "float32" + min_val = float("0.00362406") + max_val = float("1.17116") + mean = float("0.537523") + std = float("0.280663") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [256] + dtype = "float32" + min_val = float("-0.58727") + max_val = float("0.620579") + mean = float("-0.00442793") + std = float("0.192926") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [2048, 256] + dtype = "float32" + min_val = float("-1.27467") + max_val = float("0.723867") + mean = float("0.00206403") + std = float("0.10485") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [2048] + dtype = "float32" + min_val = float("-0.174557") + max_val = float("0.0827612") + mean = float("-0.046646") + std = float("0.0408697") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [256, 2048] + dtype = "float32" + min_val = float("-0.689502") + max_val = float("0.647158") + mean = float("0.00030935") + std = float("0.113661") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [256] + dtype = "float32" + min_val = float("-1.7667") + max_val = float("2.11065") + mean = float("0.000981415") + std = float("0.544927") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [256] + dtype = "float32" + min_val = float("0.232356") + max_val = float("1.74983") + mean = float("0.755989") + std = float("0.246501") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [256] + dtype = "float32" + min_val = float("-0.706176") + max_val = float("0.569351") + mean = float("0.00265587") + std = float("0.166963") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.440651") + max_val = float("0.389195") + mean = float("-9.48045e-05") + std = float("0.0746954") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [256] + dtype = "float32" + min_val = float("-0.923763") + max_val = float("0.88494") + mean = float("-0.0378896") + std = float("0.217134") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [256] + dtype = "float32" + min_val = float("0.00584625") + max_val = float("1.25942") + mean = float("0.703808") + std = float("0.261936") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [256] + dtype = "float32" + min_val = float("-0.704222") + max_val = float("0.577684") + mean = float("0.00332775") + std = float("0.17267") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [2048, 256] + dtype = "float32" + min_val = float("-1.67739") + 
max_val = float("1.08925") + mean = float("0.00121976") + std = float("0.10437") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [2048] + dtype = "float32" + min_val = float("-0.157707") + max_val = float("0.0597051") + mean = float("-0.0479329") + std = float("0.0411897") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [256, 2048] + dtype = "float32" + min_val = float("-0.725007") + max_val = float("0.667406") + mean = float("0.000182182") + std = float("0.116384") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [256] + dtype = "float32" + min_val = float("-1.83846") + max_val = float("2.39136") + mean = float("-0.0225087") + std = float("0.493145") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [256] + dtype = "float32" + min_val = float("0.231657") + max_val = float("1.99666") + mean = float("0.789582") + std = float("0.237693") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [256] + dtype = "float32" + min_val = float("-0.725461") + max_val = float("0.947713") + mean = float("0.00529589") + std = float("0.180089") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.412899") + max_val = float("0.479099") + mean = float("-8.88891e-05") + std = float("0.0682201") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [256] + dtype = "float32" + min_val = float("-0.799912") + max_val = float("1.11176") + mean = float("-0.0540481") + std = float("0.315498") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [256] + dtype = "float32" + min_val = float("0.187897") + max_val = float("1.22121") + mean = float("0.925398") + std = float("0.195161") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [256] + dtype = "float32" + min_val = float("-0.462471") + max_val = float("0.848073") + mean = float("0.00598153") + std = float("0.171054") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [2048, 256] + dtype = "float32" + min_val = float("-1.50476") + max_val = float("0.829249") + mean = float("-0.00029805") + std = float("0.0972634") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [2048] + dtype = "float32" + min_val = float("-0.174853") + max_val = float("0.0940887") + mean = float("-0.0544859") + std = float("0.0445216") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [256, 2048] + dtype = "float32" + min_val = float("-0.665117") + max_val = float("0.60316") + mean = float("-0.000227223") + std = float("0.106722") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [256] + dtype = "float32" + min_val = float("-1.50188") + max_val = float("1.07115") + mean = float("-0.0116708") + std = float("0.337499") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [256] + dtype = "float32" + min_val = float("0.438111") + max_val = float("2.01903") + mean = float("0.901865") + std = float("0.211954") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [256] + 
dtype = "float32" + min_val = float("-0.799413") + max_val = float("0.650867") + mean = float("0.00416878") + std = float("0.181264") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.58488") + max_val = float("0.383021") + mean = float("0.000122757") + std = float("0.0783749") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [256] + dtype = "float32" + min_val = float("-0.344458") + max_val = float("0.226539") + mean = float("-0.00163663") + std = float("0.083705") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [256, 2048, 1, 1] + dtype = "float32" + min_val = float("-1.03448") + max_val = float("0.766663") + mean = float("-0.000129382") + std = float("0.118571") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [2048] + dtype = "float32" + min_val = float("-0.150527") + max_val = float("0.188528") + mean = float("0.0246261") + std = float("0.0255993") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [2048] + dtype = "float32" + min_val = float("0.112439") + max_val = float("1.32142") + mean = float("0.716479") + std = float("0.151574") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [2048] + dtype = "float32" + min_val = float("4.58735e-05") + max_val = float("0.00369584") + mean = float("0.000960031") + std = float("0.000362844") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [2048] + dtype = "float32" + min_val = float("-0.0298307") + max_val = float("0.0471954") + mean = float("-0.00565156") + std = float("0.00547144") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [2048, 512, 1, 1] + dtype = "float32" + min_val = float("-0.152963") + max_val = float("0.257291") + mean = float("-0.00175805") + std = float("0.0187826") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [512] + dtype = "float32" + min_val = float("-0.294133") + max_val = float("0.201477") + mean = float("-0.0877172") + std = float("0.0474851") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [512] + dtype = "float32" + min_val = float("0.134461") + max_val = float("0.329211") + mean = float("0.212124") + std = float("0.0239999") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [512] + dtype = "float32" + min_val = float("0.00774206") + max_val = float("0.0236867") + mean = float("0.0108919") + std = float("0.00156208") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [512] + dtype = "float32" + min_val = float("-0.14823") + max_val = float("-0.0189421") + mean = float("-0.0869487") + std = float("0.0160111") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.166515") + max_val = float("0.149378") + mean = float("-0.000822103") + std = float("0.0161888") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [512] + dtype = "float32" + min_val = float("-0.334933") + max_val = float("0.0811895") + mean = float("-0.172064") + std = float("0.0626698") + data = 
None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [512] + dtype = "float32" + min_val = float("0.112925") + max_val = float("0.486881") + mean = float("0.216875") + std = float("0.0288568") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [512] + dtype = "float32" + min_val = float("0.189456") + max_val = float("1.20989") + mean = float("0.274782") + std = float("0.0769211") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [512] + dtype = "float32" + min_val = float("-0.987977") + max_val = float("2.29887") + mean = float("-0.342218") + std = float("0.204948") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [512, 2048, 1, 1] + dtype = "float32" + min_val = float("-0.362694") + max_val = float("0.286455") + mean = float("0.000237189") + std = float("0.0219248") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [2048] + dtype = "float32" + min_val = float("-0.216137") + max_val = float("0.221407") + mean = float("-0.0680507") + std = float("0.0293753") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [2048] + dtype = "float32" + min_val = float("0.12979") + max_val = float("0.764242") + mean = float("0.358006") + std = float("0.0820777") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [2048] + dtype = "float32" + min_val = float("0.000190594") + max_val = float("0.00769928") + mean = float("0.000818215") + std = float("0.000341386") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [2048] + dtype = "float32" + min_val = float("-0.156159") + max_val = float("0.168641") + mean = float("-0.00157883") + std = float("0.0152213") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [2048, 512, 1, 1] + dtype = "float32" + min_val = float("-0.187735") + max_val = float("0.241332") + mean = float("-0.00190183") + std = float("0.0190855") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [512] + dtype = "float32" + min_val = float("-0.347939") + max_val = float("0.0808077") + mean = float("-0.120346") + std = float("0.0449829") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [512] + dtype = "float32" + min_val = float("0.147412") + max_val = float("0.305603") + mean = float("0.210427") + std = float("0.0189769") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [512] + dtype = "float32" + min_val = float("0.00820432") + max_val = float("0.0488806") + mean = float("0.0127073") + std = float("0.00352804") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [512] + dtype = "float32" + min_val = float("-0.379537") + max_val = float("0.449208") + mean = float("-0.0900743") + std = float("0.0496366") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.24297") + max_val = float("0.173966") + mean = float("-0.000917874") + std = float("0.0169111") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [512] + dtype = "float32" + min_val = float("-0.319972") + max_val = 
float("0.117418") + mean = float("-0.123687") + std = float("0.0522333") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [512] + dtype = "float32" + min_val = float("0.0942218") + max_val = float("0.291194") + mean = float("0.193996") + std = float("0.02197") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [512] + dtype = "float32" + min_val = float("0.0889617") + max_val = float("0.43583") + mean = float("0.148295") + std = float("0.0320646") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [512] + dtype = "float32" + min_val = float("-0.879044") + max_val = float("1.12529") + mean = float("-0.284224") + std = float("0.153635") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [512, 2048, 1, 1] + dtype = "float32" + min_val = float("-0.389516") + max_val = float("0.661501") + mean = float("-0.000936533") + std = float("0.0188508") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [2048] + dtype = "float32" + min_val = float("-0.14098") + max_val = float("0.205861") + mean = float("-0.0538996") + std = float("0.0277602") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [2048] + dtype = "float32" + min_val = float("0.111932") + max_val = float("0.898998") + mean = float("0.268466") + std = float("0.0711822") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [2048] + dtype = "float32" + min_val = float("0.000303141") + max_val = float("0.0243086") + mean = float("0.00167544") + std = float("0.0010297") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [2048] + dtype = "float32" + min_val = float("-0.0979072") + max_val = float("0.0991096") + mean = float("-0.00301773") + std = float("0.0173265") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [2048, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.365754") + max_val = float("0.687587") + mean = float("-0.000610378") + std = float("0.0159039") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [2048] + dtype = "float32" + min_val = float("-0.14098") + max_val = float("0.205861") + mean = float("-0.0538996") + std = float("0.0277602") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [2048] + dtype = "float32" + min_val = float("0.0344127") + max_val = float("0.639953") + mean = float("0.362204") + std = float("0.0661635") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [2048] + dtype = "float32" + min_val = float("0.000190504") + max_val = float("0.018823") + mean = float("0.00138726") + std = float("0.000604914") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [2048] + dtype = "float32" + min_val = float("-0.124299") + max_val = float("0.22565") + mean = float("-0.0111037") + std = float("0.0172798") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [2048, 512, 1, 1] + dtype = "float32" + min_val = float("-0.23301") + max_val = float("0.374155") + mean = float("-0.0015059") + std = float("0.0194246") + data = None + + +class Program_weight_tensor_parameter_196: + name = 
"parameter_196" + shape = [512] + dtype = "float32" + min_val = float("-0.187605") + max_val = float("0.16453") + mean = float("-0.0703182") + std = float("0.0431299") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [512] + dtype = "float32" + min_val = float("0.149369") + max_val = float("0.296008") + mean = float("0.195746") + std = float("0.0212932") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [512] + dtype = "float32" + min_val = float("0.00846331") + max_val = float("0.214247") + mean = float("0.0135238") + std = float("0.00921358") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [512] + dtype = "float32" + min_val = float("-0.145937") + max_val = float("0.31364") + mean = float("-0.059381") + std = float("0.0351479") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.355525") + max_val = float("0.410474") + mean = float("-0.00073188") + std = float("0.0170831") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [512] + dtype = "float32" + min_val = float("-0.350427") + max_val = float("0.126916") + mean = float("-0.17715") + std = float("0.0610143") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [512] + dtype = "float32" + min_val = float("0.106783") + max_val = float("0.294279") + mean = float("0.220861") + std = float("0.0240491") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [512] + dtype = "float32" + min_val = float("0.00811881") + max_val = float("0.0330579") + mean = float("0.0138603") + std = float("0.00283198") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [512] + dtype = "float32" + min_val = float("-0.208793") + max_val = float("0.181114") + mean = float("-0.0708247") + std = float("0.0392943") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [512, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.355415") + max_val = float("0.341373") + mean = float("-0.00133683") + std = float("0.0268514") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [1024] + dtype = "float32" + min_val = float("-0.37105") + max_val = float("0.146711") + mean = float("-0.11116") + std = float("0.0677375") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [1024] + dtype = "float32" + min_val = float("-0.0664701") + max_val = float("0.30088") + mean = float("0.106271") + std = float("0.0492594") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [1024] + dtype = "float32" + min_val = float("0.000188123") + max_val = float("0.0148109") + mean = float("0.00127457") + std = float("0.000957626") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [1024] + dtype = "float32" + min_val = float("-0.112751") + max_val = float("0.0799388") + mean = float("-0.0247645") + std = float("0.0210223") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.27484") + max_val = float("0.275748") + mean = float("-0.00307577") + std = 
float("0.0220223") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [256] + dtype = "float32" + min_val = float("-0.399598") + max_val = float("0.166281") + mean = float("-0.0891333") + std = float("0.064173") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [256] + dtype = "float32" + min_val = float("0.124327") + max_val = float("0.525773") + mean = float("0.193335") + std = float("0.0383771") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [256] + dtype = "float32" + min_val = float("0.00574077") + max_val = float("0.0565694") + mean = float("0.0118631") + std = float("0.00615368") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [256] + dtype = "float32" + min_val = float("-0.224049") + max_val = float("0.236745") + mean = float("-0.0575767") + std = float("0.0596759") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.231189") + max_val = float("0.224271") + mean = float("-0.00122248") + std = float("0.0191439") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [256] + dtype = "float32" + min_val = float("-0.314595") + max_val = float("0.136538") + mean = float("-0.107686") + std = float("0.0722921") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [256] + dtype = "float32" + min_val = float("0.0897327") + max_val = float("0.294624") + mean = float("0.184145") + std = float("0.031837") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [256] + dtype = "float32" + min_val = float("0.00800957") + max_val = float("0.0443686") + mean = float("0.0142626") + std = float("0.00449212") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [256] + dtype = "float32" + min_val = float("-0.183684") + max_val = float("0.182932") + mean = float("-0.0562674") + std = float("0.0516993") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.231134") + max_val = float("0.343793") + mean = float("-0.00111587") + std = float("0.0228593") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [1024] + dtype = "float32" + min_val = float("-0.316334") + max_val = float("0.128377") + mean = float("-0.0833158") + std = float("0.055064") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [1024] + dtype = "float32" + min_val = float("-0.0489748") + max_val = float("0.235271") + mean = float("0.0912102") + std = float("0.049008") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [1024] + dtype = "float32" + min_val = float("7.4467e-05") + max_val = float("0.00461952") + mean = float("0.000694832") + std = float("0.000459077") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [1024] + dtype = "float32" + min_val = float("-0.102153") + max_val = float("0.114211") + mean = float("-0.016671") + std = float("0.0161816") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = 
float("-0.223398") + max_val = float("0.297585") + mean = float("-0.00191571") + std = float("0.0214433") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [256] + dtype = "float32" + min_val = float("-0.330179") + max_val = float("0.209275") + mean = float("-0.0884143") + std = float("0.065203") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [256] + dtype = "float32" + min_val = float("0.0994349") + max_val = float("0.258814") + mean = float("0.177764") + std = float("0.0279457") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [256] + dtype = "float32" + min_val = float("0.00524969") + max_val = float("0.0268517") + mean = float("0.00904326") + std = float("0.00327082") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [256] + dtype = "float32" + min_val = float("-0.214608") + max_val = float("0.288934") + mean = float("-0.0606133") + std = float("0.0649088") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.198941") + max_val = float("0.224121") + mean = float("-0.0013227") + std = float("0.0189829") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [256] + dtype = "float32" + min_val = float("-0.324729") + max_val = float("0.124819") + mean = float("-0.0879191") + std = float("0.0728394") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [256] + dtype = "float32" + min_val = float("0.0949755") + max_val = float("0.269096") + mean = float("0.166833") + std = float("0.0309423") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [256] + dtype = "float32" + min_val = float("0.0083299") + max_val = float("0.0309425") + mean = float("0.0150742") + std = float("0.00411806") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [256] + dtype = "float32" + min_val = float("-0.241283") + max_val = float("0.114376") + mean = float("-0.0670993") + std = float("0.0595296") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.214577") + max_val = float("0.276275") + mean = float("-0.00121762") + std = float("0.021596") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [1024] + dtype = "float32" + min_val = float("-0.225338") + max_val = float("0.137865") + mean = float("-0.0628498") + std = float("0.0467866") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [1024] + dtype = "float32" + min_val = float("-0.0302217") + max_val = float("0.294567") + mean = float("0.0881126") + std = float("0.0465468") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [1024] + dtype = "float32" + min_val = float("3.12756e-11") + max_val = float("0.00738943") + mean = float("0.000745958") + std = float("0.000513986") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [1024] + dtype = "float32" + min_val = float("-0.0749911") + max_val = float("0.072959") + mean = float("-0.0130057") + std = float("0.0176837") + data = None + + +class 
Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.249728") + max_val = float("0.321745") + mean = float("-0.00162893") + std = float("0.0215891") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [256] + dtype = "float32" + min_val = float("-0.208466") + max_val = float("0.171709") + mean = float("-0.0665257") + std = float("0.0624121") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [256] + dtype = "float32" + min_val = float("0.0968268") + max_val = float("0.259417") + mean = float("0.17254") + std = float("0.0277603") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [256] + dtype = "float32" + min_val = float("0.00534852") + max_val = float("0.0459264") + mean = float("0.0100621") + std = float("0.00449181") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [256] + dtype = "float32" + min_val = float("-0.16278") + max_val = float("0.376944") + mean = float("-0.0503672") + std = float("0.0639342") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.17354") + max_val = float("0.293337") + mean = float("-0.00112971") + std = float("0.019075") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [256] + dtype = "float32" + min_val = float("-0.259591") + max_val = float("0.110611") + mean = float("-0.0783603") + std = float("0.0694604") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [256] + dtype = "float32" + min_val = float("0.0954529") + max_val = float("0.244835") + mean = float("0.16478") + std = float("0.0292143") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [256] + dtype = "float32" + min_val = float("0.00863567") + max_val = float("0.0474885") + mean = float("0.016353") + std = float("0.00455709") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [256] + dtype = "float32" + min_val = float("-0.262994") + max_val = float("0.182295") + mean = float("-0.0598833") + std = float("0.0624309") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.22814") + max_val = float("0.267239") + mean = float("-0.000984469") + std = float("0.021033") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [1024] + dtype = "float32" + min_val = float("-0.262797") + max_val = float("0.174403") + mean = float("-0.057934") + std = float("0.0466456") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [1024] + dtype = "float32" + min_val = float("-0.0342223") + max_val = float("0.246383") + mean = float("0.0897479") + std = float("0.0416275") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [1024] + dtype = "float32" + min_val = float("7.41147e-11") + max_val = float("0.00606549") + mean = float("0.000971128") + std = float("0.000613432") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [1024] + dtype = "float32" + min_val = float("-0.114956") + max_val = 
float("0.0647468") + mean = float("-0.00977189") + std = float("0.0208888") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.398915") + max_val = float("0.339388") + mean = float("-0.00105051") + std = float("0.0226211") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [256] + dtype = "float32" + min_val = float("-0.220337") + max_val = float("0.218964") + mean = float("-0.0691") + std = float("0.0720895") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [256] + dtype = "float32" + min_val = float("0.0969687") + max_val = float("0.318569") + mean = float("0.184019") + std = float("0.0315848") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [256] + dtype = "float32" + min_val = float("0.00717385") + max_val = float("0.0574594") + mean = float("0.0153799") + std = float("0.00680699") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [256] + dtype = "float32" + min_val = float("-0.330301") + max_val = float("0.343243") + mean = float("-0.0574789") + std = float("0.0707058") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.243632") + max_val = float("0.239646") + mean = float("-0.00107878") + std = float("0.0192886") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [256] + dtype = "float32" + min_val = float("-0.231781") + max_val = float("0.118692") + mean = float("-0.0460137") + std = float("0.0604592") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [256] + dtype = "float32" + min_val = float("0.0980921") + max_val = float("0.236746") + mean = float("0.155482") + std = float("0.0269225") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [256] + dtype = "float32" + min_val = float("0.00813574") + max_val = float("0.0498284") + mean = float("0.0182897") + std = float("0.00592212") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [256] + dtype = "float32" + min_val = float("-0.219824") + max_val = float("0.21104") + mean = float("-0.0504437") + std = float("0.0667762") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.224133") + max_val = float("0.27467") + mean = float("-0.000951004") + std = float("0.0197382") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [1024] + dtype = "float32" + min_val = float("-0.214824") + max_val = float("0.149485") + mean = float("-0.0535878") + std = float("0.053554") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [1024] + dtype = "float32" + min_val = float("-0.0148128") + max_val = float("0.411195") + mean = float("0.102254") + std = float("0.046109") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [1024] + dtype = "float32" + min_val = float("2.18064e-11") + max_val = float("0.0117738") + mean = float("0.00142576") + std = float("0.00113339") + data = None + + +class Program_weight_tensor_parameter_269: + name = 
"parameter_269" + shape = [1024] + dtype = "float32" + min_val = float("-0.154879") + max_val = float("0.148762") + mean = float("-0.0124112") + std = float("0.0283412") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.516658") + max_val = float("0.471191") + mean = float("-0.00122065") + std = float("0.0238276") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [256] + dtype = "float32" + min_val = float("-0.384578") + max_val = float("0.196868") + mean = float("-0.0545921") + std = float("0.0771969") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [256] + dtype = "float32" + min_val = float("0.101888") + max_val = float("0.419177") + mean = float("0.186474") + std = float("0.03952") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [256] + dtype = "float32" + min_val = float("0.0092756") + max_val = float("0.0703266") + mean = float("0.0223368") + std = float("0.00941892") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [256] + dtype = "float32" + min_val = float("-0.897851") + max_val = float("0.574582") + mean = float("-0.0316883") + std = float("0.135088") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.244514") + max_val = float("0.265582") + mean = float("-0.000582699") + std = float("0.0192182") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [256] + dtype = "float32" + min_val = float("-0.154269") + max_val = float("0.174846") + mean = float("-0.0135944") + std = float("0.057572") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [256] + dtype = "float32" + min_val = float("0.0954599") + max_val = float("0.303545") + mean = float("0.157145") + std = float("0.0283408") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [256] + dtype = "float32" + min_val = float("0.00849376") + max_val = float("0.0744244") + mean = float("0.0217353") + std = float("0.00825733") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [256] + dtype = "float32" + min_val = float("-0.884052") + max_val = float("0.558478") + mean = float("-0.0462692") + std = float("0.13055") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.220034") + max_val = float("0.31305") + mean = float("-0.00143785") + std = float("0.0189875") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [1024] + dtype = "float32" + min_val = float("-0.123197") + max_val = float("0.162764") + mean = float("-0.0100117") + std = float("0.0356646") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [1024] + dtype = "float32" + min_val = float("-0.0803065") + max_val = float("0.297987") + mean = float("0.109479") + std = float("0.0558552") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [1024] + dtype = "float32" + min_val = float("2.12582e-14") + max_val = float("0.0415676") + mean = float("0.00852993") + std = 
float("0.00636468") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [1024] + dtype = "float32" + min_val = float("-0.313221") + max_val = float("0.26957") + mean = float("0.00201026") + std = float("0.0666961") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [1024, 512, 1, 1] + dtype = "float32" + min_val = float("-0.33807") + max_val = float("0.330514") + mean = float("-0.000173678") + std = float("0.0195764") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [1024] + dtype = "float32" + min_val = float("-0.123197") + max_val = float("0.162764") + mean = float("-0.0100117") + std = float("0.0356646") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [1024] + dtype = "float32" + min_val = float("-0.00173043") + max_val = float("0.34635") + mean = float("0.137926") + std = float("0.0618488") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [1024] + dtype = "float32" + min_val = float("1.97795e-14") + max_val = float("0.0148342") + mean = float("0.00334332") + std = float("0.00210923") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [1024] + dtype = "float32" + min_val = float("-0.321187") + max_val = float("0.248685") + mean = float("-0.0121783") + std = float("0.0542128") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.306746") + max_val = float("0.346255") + mean = float("-0.000746115") + std = float("0.0264304") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [256] + dtype = "float32" + min_val = float("-0.177472") + max_val = float("0.268404") + mean = float("0.0332534") + std = float("0.0815877") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [256] + dtype = "float32" + min_val = float("0.127064") + max_val = float("0.322623") + mean = float("0.181835") + std = float("0.0291336") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [256] + dtype = "float32" + min_val = float("0.0155781") + max_val = float("0.099729") + mean = float("0.0317055") + std = float("0.0145424") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [256] + dtype = "float32" + min_val = float("-0.280572") + max_val = float("0.257584") + mean = float("-0.0411857") + std = float("0.0793484") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.222291") + max_val = float("0.202243") + mean = float("-0.000828644") + std = float("0.0211677") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [256] + dtype = "float32" + min_val = float("-0.379022") + max_val = float("0.113554") + mean = float("-0.12801") + std = float("0.0836729") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [256] + dtype = "float32" + min_val = float("0.144939") + max_val = float("0.315938") + mean = float("0.233647") + std = float("0.0331057") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [256] + dtype = "float32" + min_val = 
float("0.0157358") + max_val = float("0.084335") + mean = float("0.030021") + std = float("0.0097328") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [256] + dtype = "float32" + min_val = float("-0.391091") + max_val = float("0.277721") + mean = float("-0.0841709") + std = float("0.0968033") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [256, 512, 1, 1] + dtype = "float32" + min_val = float("-0.384681") + max_val = float("0.351507") + mean = float("-0.00200178") + std = float("0.0337197") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [512] + dtype = "float32" + min_val = float("-0.212799") + max_val = float("0.123047") + mean = float("-0.0537054") + std = float("0.0671427") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [512] + dtype = "float32" + min_val = float("-0.0505383") + max_val = float("0.316638") + mean = float("0.0857567") + std = float("0.066906") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [512] + dtype = "float32" + min_val = float("5.65548e-05") + max_val = float("0.00477888") + mean = float("0.000992431") + std = float("0.000751775") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [512] + dtype = "float32" + min_val = float("-0.142087") + max_val = float("0.0860772") + mean = float("-0.00395994") + std = float("0.026336") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.289345") + max_val = float("0.300975") + mean = float("-0.00269945") + std = float("0.0272173") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [128] + dtype = "float32" + min_val = float("-0.218001") + max_val = float("0.277785") + mean = float("-0.0622226") + std = float("0.0777359") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [128] + dtype = "float32" + min_val = float("0.12126") + max_val = float("0.259108") + mean = float("0.202974") + std = float("0.0289768") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [128] + dtype = "float32" + min_val = float("0.00803345") + max_val = float("0.0448862") + mean = float("0.0153688") + std = float("0.00453606") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [128] + dtype = "float32" + min_val = float("-0.202342") + max_val = float("0.464715") + mean = float("-0.0443255") + std = float("0.0721449") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.196379") + max_val = float("0.270822") + mean = float("-0.000914539") + std = float("0.0257217") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [128] + dtype = "float32" + min_val = float("-0.234752") + max_val = float("0.111152") + mean = float("-0.0271686") + std = float("0.0682536") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [128] + dtype = "float32" + min_val = float("0.119073") + max_val = float("0.239107") + mean = float("0.170345") + std = float("0.0241035") + data = None + + +class 
Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [128] + dtype = "float32" + min_val = float("0.00940873") + max_val = float("0.0423662") + mean = float("0.024084") + std = float("0.00648166") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [128] + dtype = "float32" + min_val = float("-0.277258") + max_val = float("0.154955") + mean = float("-0.0558077") + std = float("0.0944891") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-0.22204") + max_val = float("0.302197") + mean = float("-0.00153641") + std = float("0.0276082") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [512] + dtype = "float32" + min_val = float("-0.269283") + max_val = float("0.165785") + mean = float("-0.0554003") + std = float("0.0577406") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [512] + dtype = "float32" + min_val = float("-0.064757") + max_val = float("0.328984") + mean = float("0.101395") + std = float("0.0609696") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [512] + dtype = "float32" + min_val = float("0.00010981") + max_val = float("0.00865738") + mean = float("0.00142261") + std = float("0.00111325") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [512] + dtype = "float32" + min_val = float("-0.0958375") + max_val = float("0.104051") + mean = float("-0.000987899") + std = float("0.0280524") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.275244") + max_val = float("0.360033") + mean = float("-0.00111632") + std = float("0.0287799") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [128] + dtype = "float32" + min_val = float("-0.172696") + max_val = float("0.280668") + mean = float("-0.00563719") + std = float("0.0843656") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [128] + dtype = "float32" + min_val = float("0.108401") + max_val = float("0.253884") + mean = float("0.182666") + std = float("0.0319054") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [128] + dtype = "float32" + min_val = float("0.00716233") + max_val = float("0.0525756") + mean = float("0.0214706") + std = float("0.00820575") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [128] + dtype = "float32" + min_val = float("-0.345887") + max_val = float("0.247573") + mean = float("-0.0131534") + std = float("0.115415") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.229856") + max_val = float("0.273187") + mean = float("-0.000494359") + std = float("0.024656") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [128] + dtype = "float32" + min_val = float("-0.20215") + max_val = float("0.268189") + mean = float("0.00325904") + std = float("0.0728937") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [128] + dtype = "float32" + min_val = float("0.106804") + max_val = 
float("0.240495") + mean = float("0.171034") + std = float("0.0317785") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [128] + dtype = "float32" + min_val = float("0.00988462") + max_val = float("0.111599") + mean = float("0.0284943") + std = float("0.0144854") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [128] + dtype = "float32" + min_val = float("-0.342912") + max_val = float("0.321763") + mean = float("-0.0347685") + std = float("0.101924") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-0.253549") + max_val = float("0.200036") + mean = float("-0.00152054") + std = float("0.0260746") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [512] + dtype = "float32" + min_val = float("-0.23853") + max_val = float("0.155874") + mean = float("-0.0190411") + std = float("0.0546756") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [512] + dtype = "float32" + min_val = float("-0.0710341") + max_val = float("0.391184") + mean = float("0.0764015") + std = float("0.0977321") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [512] + dtype = "float32" + min_val = float("6.23026e-13") + max_val = float("0.0143552") + mean = float("0.00162119") + std = float("0.00243167") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [512] + dtype = "float32" + min_val = float("-0.204336") + max_val = float("0.124056") + mean = float("-0.0113854") + std = float("0.030948") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.312581") + max_val = float("0.31736") + mean = float("-0.00165604") + std = float("0.0250843") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [128] + dtype = "float32" + min_val = float("-0.18261") + max_val = float("0.197773") + mean = float("0.00456132") + std = float("0.0809956") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [128] + dtype = "float32" + min_val = float("0.0797577") + max_val = float("0.286888") + mean = float("0.163693") + std = float("0.035144") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [128] + dtype = "float32" + min_val = float("0.00281523") + max_val = float("0.0713853") + mean = float("0.0216071") + std = float("0.0144345") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [128] + dtype = "float32" + min_val = float("-0.67043") + max_val = float("0.479344") + mean = float("0.0179736") + std = float("0.178814") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.244004") + max_val = float("0.310831") + mean = float("-0.000155751") + std = float("0.0227858") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [128] + dtype = "float32" + min_val = float("-0.186854") + max_val = float("0.385645") + mean = float("0.0425543") + std = float("0.101889") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + 
shape = [128] + dtype = "float32" + min_val = float("0.0506852") + max_val = float("0.218929") + mean = float("0.116802") + std = float("0.0293754") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [128] + dtype = "float32" + min_val = float("0.00210365") + max_val = float("0.424652") + mean = float("0.0454897") + std = float("0.055058") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [128] + dtype = "float32" + min_val = float("-0.322818") + max_val = float("0.306999") + mean = float("-0.00124681") + std = float("0.102821") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-0.201681") + max_val = float("0.275286") + mean = float("-0.00146772") + std = float("0.0205848") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [512] + dtype = "float32" + min_val = float("-0.171269") + max_val = float("0.198351") + mean = float("0.0160549") + std = float("0.0483947") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [512] + dtype = "float32" + min_val = float("-0.0194952") + max_val = float("0.373477") + mean = float("0.126143") + std = float("0.0892147") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [512] + dtype = "float32" + min_val = float("1.17813e-15") + max_val = float("0.10769") + mean = float("0.0126099") + std = float("0.0152547") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [512] + dtype = "float32" + min_val = float("-0.498065") + max_val = float("0.336252") + mean = float("-0.0223367") + std = float("0.0939334") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [512, 256, 1, 1] + dtype = "float32" + min_val = float("-0.344297") + max_val = float("0.575926") + mean = float("-0.000605165") + std = float("0.0251005") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [512] + dtype = "float32" + min_val = float("-0.171269") + max_val = float("0.198351") + mean = float("0.0160549") + std = float("0.0483947") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [512] + dtype = "float32" + min_val = float("-0.0357003") + max_val = float("0.328334") + mean = float("0.10477") + std = float("0.088463") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [512] + dtype = "float32" + min_val = float("6.21035e-16") + max_val = float("0.0155421") + mean = float("0.00236627") + std = float("0.00229989") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [512] + dtype = "float32" + min_val = float("-0.288731") + max_val = float("0.179648") + mean = float("-0.000663535") + std = float("0.0451221") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.377839") + max_val = float("0.398991") + mean = float("-0.000435927") + std = float("0.030224") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [128] + dtype = "float32" + min_val = float("-0.316317") + max_val = float("0.258669") + mean = float("0.0185988") + std = float("0.0919956") + data = 
None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [128] + dtype = "float32" + min_val = float("0.144936") + max_val = float("0.290925") + mean = float("0.201593") + std = float("0.0320721") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [128] + dtype = "float32" + min_val = float("0.0130321") + max_val = float("0.0873744") + mean = float("0.0300229") + std = float("0.0134817") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [128] + dtype = "float32" + min_val = float("-0.267559") + max_val = float("0.439399") + mean = float("-0.049912") + std = float("0.0897533") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.31475") + max_val = float("0.155191") + mean = float("-0.000901963") + std = float("0.0253696") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [128] + dtype = "float32" + min_val = float("-0.303742") + max_val = float("0.116234") + mean = float("-0.0685898") + std = float("0.0934722") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [128] + dtype = "float32" + min_val = float("0.107714") + max_val = float("0.351428") + mean = float("0.205884") + std = float("0.0380995") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [128] + dtype = "float32" + min_val = float("0.00342099") + max_val = float("0.156317") + mean = float("0.0248288") + std = float("0.0175854") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [128] + dtype = "float32" + min_val = float("-0.400608") + max_val = float("0.362051") + mean = float("-0.0733421") + std = float("0.132905") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [128, 256, 1, 1] + dtype = "float32" + min_val = float("-0.290996") + max_val = float("0.352579") + mean = float("-0.00199918") + std = float("0.0375048") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [256] + dtype = "float32" + min_val = float("-0.168494") + max_val = float("0.197048") + mean = float("-0.0115231") + std = float("0.0583548") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [256] + dtype = "float32" + min_val = float("-0.0979541") + max_val = float("0.375747") + mean = float("0.0743772") + std = float("0.0877783") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [256] + dtype = "float32" + min_val = float("5.57425e-05") + max_val = float("0.0060912") + mean = float("0.00103065") + std = float("0.00110517") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [256] + dtype = "float32" + min_val = float("-0.124398") + max_val = float("0.0629273") + mean = float("-0.00875287") + std = float("0.0249098") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.216931") + max_val = float("0.275356") + mean = float("-0.00255118") + std = float("0.0311081") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") 
+ data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [64, 64, 3, 3] + dtype = "float32" + min_val = float("-0.225166") + max_val = float("0.285775") + mean = float("-0.00085429") + std = float("0.031798") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [64, 256, 1, 1] + dtype = "float32" + min_val = float("-0.192207") + max_val = float("0.157948") + mean = float("0.000121389") + std = float("0.0301013") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [256] + dtype = "float32" + min_val = float("-0.148446") + max_val = float("0.156131") + mean = float("0.000795069") + std = float("0.0497265") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [256] + dtype = "float32" + min_val = float("-0.113751") + max_val = float("0.2753") + mean = float("0.077119") + std = float("0.068062") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [256] + dtype = "float32" + min_val = float("7.12508e-15") + max_val = float("0.00637797") + mean = float("0.0013944") + std = float("0.00123247") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [256] + dtype = "float32" + min_val = float("-0.104892") + max_val = float("0.117978") + mean = float("0.000947246") + std = float("0.0325347") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.294842") + max_val = float("0.285534") + mean = float("-0.000188581") + std = float("0.0325632") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [64] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [64, 64, 3, 3] + dtype = "float32" + min_val = float("-0.403756") + max_val = float("0.520484") + mean = float("4.93498e-05") + std = float("0.028831") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [64, 256, 1, 1] + dtype = "float32" + min_val = float("-0.201867") + max_val = float("0.261951") + mean = float("0.00134667") + std = float("0.0306459") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [256] + dtype = "float32" + min_val = float("-0.307605") + max_val = float("0.216679") + mean = float("0.0298959") + std = float("0.069153") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [256] + dtype = "float32" + min_val = float("-0.0421802") + max_val = float("0.456393") + mean = float("0.210286") + std = float("0.11374") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [256] + dtype = "float32" + min_val = float("4.74149e-14") + max_val = float("0.356434") + mean = float("0.0389892") + std = float("0.0418573") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [256] + dtype = "float32" + min_val = float("-1.03327") + max_val = float("1.63666") + mean = float("-0.108345") + std = float("0.292635") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.744987") + max_val = float("0.988602") + mean = float("-0.00341787") + std = float("0.0575443") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [256] + dtype = "float32" + min_val = float("-0.307605") + max_val = float("0.216679") + mean = float("0.0298959") + std = float("0.069153") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [256] + dtype = "float32" + min_val = float("-0.112846") + max_val = float("0.38913") + mean = float("0.126908") + std = float("0.102665") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [256] + dtype = "float32" + min_val = float("1.2631e-14") + max_val = float("0.0355659") + mean = float("0.00633698") + std = float("0.00679561") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [256] + dtype = "float32" + min_val = float("-0.223497") + max_val = float("0.238473") + mean = float("0.00154235") + std = float("0.0576803") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.36369") + max_val = 
float("0.393922") + mean = float("0.000417388") + std = float("0.0352915") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [64, 64, 3, 3] + dtype = "float32" + min_val = float("-0.468206") + max_val = float("0.443407") + mean = float("0.00079491") + std = float("0.02917") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [64, 64, 1, 1] + dtype = "float32" + min_val = float("-0.727158") + max_val = float("0.389601") + mean = float("-0.00439352") + std = float("0.0708728") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [64, 3, 7, 7] + dtype = "float32" + min_val = float("-0.782291") + max_val = float("0.781985") + mean = float("-0.000495427") + std = float("0.123302") + data = None diff --git a/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..decee0441 --- /dev/null +++ b/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +2108f4cdfbd0ca6eada410b98918f2eedb61e1eab24fd004e8823a4ece0aec2e \ No newline at end of file diff --git a/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/graph_net.json b/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/graph_net.json new file mode 100644 index 000000000..d2a8b796c --- /dev/null +++ 
b/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "MaskRCNN-ResNet50-FPN", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/input_meta.py b/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/input_meta.py new file mode 100644 index 000000000..4fcde3054 --- /dev/null +++ b/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/input_meta.py @@ -0,0 +1,61 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [5, 6] + dtype = "float32" + data = [ + 0.0, + 0.0921853, + 283.449, + 160.655, + 725.424, + 800.0, + 0.0, + 0.060502, + 159.214, + 0.0, + 486.816, + 800.0, + 1.0, + 0.100104, + 285.323, + 161.253, + 724.656, + 800.0, + 1.0, + 0.0639813, + 0.0356445, + 250.236, + 809.064, + 800.0, + 1.0, + 0.0629455, + 160.543, + 0.0, + 486.277, + 800.0, + ] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "int32" + data = [5] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [5, 28, 28] + dtype = "float32" + min_val = float("2.88148e-08") + max_val = float("1.0") + mean = float("0.733777") + std = float("0.38065") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [5, 2] + dtype = "float32" + data = [800.0, 901.0, 800.0, 901.0, 800.0, 901.0, 800.0, 901.0, 800.0, 901.0] diff --git a/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/model.py b/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/model.py new file mode 100644 index 000000000..e69de29bb diff --git a/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/MaskRCNN-ResNet50-FPN/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt index 37247bcfc..93e46fba7 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt @@ -1 +1 @@ -2c064b6a13f4854594bea413eae10aa074794f5d944de3549e53a005e636993a \ No newline at end of file +2c064b6a13f4854594bea413eae10aa074794f5d944de3549e53a005e636993a diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..5eb2d9c53 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +685994ba5055506ab45515e898051e995b4ce706e875ee9dbc275d706629a5e1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_net.json new file mode 100644 index 000000000..69905996a --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/input_meta.py new file mode 100644 index 000000000..8c7a3f029 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/input_meta.py @@ -0,0 +1,70 @@ 
+class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [10285] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 3, 10285] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000356506") + std = float("0.018878") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 3, 1] + dtype = "int32" + data = [1, 5, 6] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 10285] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00106952") + std = float("0.032686") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 3, 4] + dtype = "float32" + data = [ + 93.0648, + 61.4989, + 115.856, + 74.446, + 89.8993, + 77.6828, + 690.072, + 652.211, + 263.367, + 658.685, + 515.971, + 672.441, + ] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 10285, 4] + dtype = "float32" + min_val = float("-242.744") + max_val = float("874.992") + mean = float("352.037") + std = float("207.783") + data = None diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/model.py new file mode 100644 index 000000000..4271229a2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/model.py @@ -0,0 +1,317 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x-1xi64) <- (1x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_2, dtype="int32") + del full_1, full_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(unsqueeze_0, cast_0) + del cast_0, unsqueeze_0 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (1x-1xi64) <- (1x-1xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (-1xi64) <- (1x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, 
paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_4, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (1x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x-1xb) <- (1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_5) + del data_4, full_5 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x-1xi32) <- (1x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (1x-1xi32) <- (1x-1xb, 1x-1xi32, 1x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_1) + del full_int_array_1 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_4, data_1, full_7] + del data_1, full_4, full_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (1x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x-1x12xf32) <- (1x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (11xi64) <- () + full_9 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (1x-1x11xf32) <- (1x-1x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.unsqueeze: (1x-1x1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_5, full_int_array_2) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.unsqueeze: (1x1x-1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = 
paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_2, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_2, [1], [] + ) + del full_int_array_4 + + # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del full_int_array_2, full_int_array_5, unsqueeze_2 + + # pd_op.maximum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (1x-1x-1xf32) <- (1x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (1x-1x1xf32) <- (1x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (1x1x-1xf32) <- (1x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (1x-1x-1xf32) <- (1x-1x1xf32, 1x1x-1xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_12, float("1e-09"), True) + del full_12, subtract_3 + + # pd_op.divide: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(divide_0, data_2) + del data_2, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-2] + + # pd_op.max: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) + max_0 = 
paddle._C_ops.max(multiply_2, full_int_array_6, False) + del full_int_array_6, multiply_2 + + # pd_op.unsqueeze: (1x-1x1xf32) <- (1x-1xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (1x-1x11xf32) <- (1x-1x11xf32, 1x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..c430479dc --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +a6a0eda7bf5832ef9b476d69381efe654735d7437a657ef81f8292ac3434a5ca \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_net.json new file mode 100644 index 000000000..69905996a --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/input_meta.py new file mode 100644 index 000000000..3cc05a789 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/input_meta.py @@ -0,0 +1,50 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 1, 8500] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00105882") + std = float("0.0325223") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 1, 1] + dtype = "int32" + data = [1] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 8500] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00105882") + std = float("0.0325223") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 1, 4] + dtype = "float32" + data = [0.0, 0.0, 565.437, 640.0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 8500, 4] + dtype = "float32" + min_val = float("-89.0917") + max_val = float("831.367") + mean = float("319.721") + std = float("189.73") + data = None diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/model.py new file mode 100644 index 000000000..ddf39ee9a --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/model.py @@ -0,0 +1,298 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x8500xi64) <- (1x-1x8500xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + 
full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_2, dtype="int32") + del full_1, full_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(unsqueeze_0, cast_0) + del cast_0, unsqueeze_0 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (1x8500xi64) <- (1x8500xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (8500xi64) <- (1x8500xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (8500xi32) <- (-1xi32, 8500xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [1, 8500] + + # pd_op.reshape: (1x8500xi32) <- (8500xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x8500xb) <- (1x8500xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_4) + del data_3, full_4 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x8500xi32) <- (1x8500xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_5, paddle.int32, paddle.framework._current_expected_place() + ) + del full_5 + + # pd_op.where: (1x8500xi32) <- (1x8500xb, 1x8500xi32, 1x8500xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (8500x4xf32) <- (-1x4xf32, 8500xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [1, 8500, 4] + + # pd_op.reshape: (1x8500x4xf32) <- (8500x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x8500x12xf32) <- (1x8500xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (11xi64) <- () + full_7 = paddle._C_ops.full( + [11], float("0"), paddle.int64, 
paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (1x8500x11xf32) <- (1x8500x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (1x-1x1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_4, full_int_array_4) + del data_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (1x1x8500x4xf32) <- (1x8500x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_5, full_int_array_5) + del data_5, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (1x1x8500x2xf32) <- (1x1x8500x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (1x1x8500x2xf32) <- (1x1x8500x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (1x-1x8500x2xf32) <- (1x-1x1x2xf32, 1x1x8500x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (1x-1x8500x2xf32) <- (1x-1x1x2xf32, 1x1x8500x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (1x-1x8500x2xf32) <- (1x-1x8500x2xf32, 1x-1x8500x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1x-1x8500x2xf32) <- (1x-1x8500x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_8, full_9) + del subtract_0 + + # pd_op.prod: (1x-1x8500xf32) <- (1x-1x8500x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_8, full_9) + del subtract_1 + + # pd_op.prod: (1x-1x1xf32) <- (1x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (1x1x8500x2xf32) <- (1x1x8500x2xf32, 1x1x8500x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x1x8500x2xf32) <- (1x1x8500x2xf32, 1xf32, 1xf32) 
+ clip_2 = paddle._C_ops.clip(subtract_2, full_8, full_9) + del full_8, full_9, subtract_2 + + # pd_op.prod: (1x1x8500xf32) <- (1x1x8500x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (1x-1x8500xf32) <- (1x-1x1xf32, 1x1x8500xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (1x-1x8500xf32) <- (1x-1x8500xf32, 1x-1x8500xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x8500xf32) <- (1x-1x8500xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_10, float("1e-09"), True) + del full_10, subtract_3 + + # pd_op.divide: (1x-1x8500xf32) <- (1x-1x8500xf32, 1x-1x8500xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.multiply: (1x-1x8500xf32) <- (1x-1x8500xf32, 1x-1x8500xf32) + multiply_2 = paddle._C_ops.multiply(divide_0, data_1) + del data_1, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (1x8500xf32) <- (1x-1x8500xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_8, False) + del full_int_array_8, multiply_2 + + # pd_op.unsqueeze: (1x8500x1xf32) <- (1x8500xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (1x8500x11xf32) <- (1x8500x11xf32, 1x8500x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..5eb2d9c53 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +685994ba5055506ab45515e898051e995b4ce706e875ee9dbc275d706629a5e1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/graph_net.json new file mode 100644 index 000000000..40c50f2b8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/input_meta.py new file mode 100644 index 000000000..50df25092 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/input_meta.py @@ -0,0 +1,70 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [4789] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 3, 4789] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00139208") + std = float("0.0372846") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 
3, 1] + dtype = "int32" + data = [7, 1, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 4789] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00417624") + std = float("0.0644887") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 3, 4] + dtype = "float32" + data = [ + 180.0, + 33.1129, + 313.548, + 39.9544, + 51.4839, + 43.7856, + 238.839, + 447.161, + 254.323, + 43.512, + 441.677, + 447.434, + ] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 4789, 4] + dtype = "float32" + min_val = float("-279.283") + max_val = float("866.826") + mean = float("240.181") + std = float("144.015") + data = None diff --git a/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/model.py b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/model.py new file mode 100644 index 000000000..4271229a2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/model.py @@ -0,0 +1,317 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x-1xi64) <- (1x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_2, dtype="int32") + del full_1, full_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(unsqueeze_0, cast_0) + del cast_0, unsqueeze_0 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (1x-1xi64) <- (1x-1xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (-1xi64) <- (1x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_4, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (1x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # 
pd_op.greater_than: (1x-1xb) <- (1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_5) + del data_4, full_5 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x-1xi32) <- (1x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (1x-1xi32) <- (1x-1xb, 1x-1xi32, 1x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_1) + del full_int_array_1 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_4, data_1, full_7] + del data_1, full_4, full_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (1x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x-1x12xf32) <- (1x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (11xi64) <- () + full_9 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (1x-1x11xf32) <- (1x-1x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.unsqueeze: (1x-1x1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_5, full_int_array_2) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.unsqueeze: (1x1x-1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_2, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_2, [1], 
[] + ) + del full_int_array_4 + + # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del full_int_array_2, full_int_array_5, unsqueeze_2 + + # pd_op.maximum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (1x-1x-1xf32) <- (1x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (1x-1x1xf32) <- (1x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (1x1x-1xf32) <- (1x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (1x-1x-1xf32) <- (1x-1x1xf32, 1x1x-1xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_12, float("1e-09"), True) + del full_12, subtract_3 + + # pd_op.divide: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(divide_0, data_2) + del data_2, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-2] + + # pd_op.max: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, False) + del full_int_array_6, multiply_2 + + # pd_op.unsqueeze: (1x-1x1xf32) <- (1x-1xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (1x-1x11xf32) <- (1x-1x11xf32, 1x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git 
a/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..1d816d92f --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +2f4d8648deebaac6731292091a8c9094bd1807a7d86960c2817b21ab5ae32777 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/graph_net.json new file mode 100644 index 000000000..40c50f2b8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/input_meta.py new file mode 100644 index 000000000..f5567e23a --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/input_meta.py @@ -0,0 +1,43 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 2, 4165] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00156062") + std = float("0.0394739") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 2, 1] + dtype = "int32" + data = [3, 4] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 4165] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00312125") + std = float("0.0557809") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 2, 4] + dtype = "float32" + data = [75.6364, 0.0, 448.0, 402.379, 133.172, 411.503, 448.0, 432.489] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 4165, 4] + dtype = "float32" + min_val = float("-158.959") + max_val = float("548.106") + mean = float("223.931") + std = float("134.941") + data = None diff --git a/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/model.py b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/model.py new file mode 100644 index 000000000..6c5040153 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/model.py @@ -0,0 +1,299 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x4165xi64) <- (1x2x4165xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_2, dtype="int32") + del full_1, full_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_0 = 
paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_3, float("0"), True) + del full_3, unsqueeze_0 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x4165xi64) <- (1x4165xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (2xi32) <- (1x2x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (4165xi64) <- (1x4165xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (4165xi32) <- (2xi32, 4165xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [1, 4165] + + # pd_op.reshape: (1x4165xi32) <- (4165xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x4165xb) <- (1x4165xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_5) + del data_2, full_5 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x4165xi32) <- (1x4165xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (1x4165xi32) <- (1x4165xb, 1x4165xi32, 1x4165xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (2x4xf32) <- (1x2x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (4165x4xf32) <- (2x4xf32, 4165xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [1, 4165, 4] + + # pd_op.reshape: (1x4165x4xf32) <- (4165x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x4165x12xf32) <- (1x4165xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_7, where_0.dtype), full_7 + ) + del full_7 + + # pd_op.full: (11xi64) <- () + full_8 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (1x4165x11xf32) <- (1x4165x12xf32, 11xi64) + index_select_0 = 
paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (1x2x1x4xf32) <- (1x2x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (1x1x4165x4xf32) <- (1x4165x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (1x2x1x2xf32) <- (1x2x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (1x2x1x2xf32) <- (1x2x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (1x1x4165x2xf32) <- (1x1x4165x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (1x1x4165x2xf32) <- (1x1x4165x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (1x2x4165x2xf32) <- (1x2x1x2xf32, 1x1x4165x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (1x2x4165x2xf32) <- (1x2x1x2xf32, 1x1x4165x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (1x2x4165x2xf32) <- (1x2x4165x2xf32, 1x2x4165x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1x2x4165x2xf32) <- (1x2x4165x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_9, full_10) + del subtract_0 + + # pd_op.prod: (1x2x4165xf32) <- (1x2x4165x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (1x2x1x2xf32) <- (1x2x1x2xf32, 1x2x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (1x2x1x2xf32) <- (1x2x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_9, full_10) + del subtract_1 + + # pd_op.prod: (1x2x1xf32) <- (1x2x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (1x1x4165x2xf32) <- (1x1x4165x2xf32, 1x1x4165x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x1x4165x2xf32) <- (1x1x4165x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_9, full_10) + del full_10, full_9, subtract_2 + + # pd_op.prod: (1x1x4165xf32) <- (1x1x4165x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (1x2x4165xf32) <- (1x2x1xf32, 1x1x4165xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (1x2x4165xf32) <- (1x2x4165xf32, 1x2x4165xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: 
(1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x2x4165xf32) <- (1x2x4165xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_11, float("1e-09"), True) + del full_11, subtract_3 + + # pd_op.divide: (1x2x4165xf32) <- (1x2x4165xf32, 1x2x4165xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (1x2x4165xf32) <- (1x2x4165xf32, 1x2x4165xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (1x4165xf32) <- (1x2x4165xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (1x4165x1xf32) <- (1x4165xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (1x4165x11xf32) <- (1x4165x11xf32, 1x4165x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-S/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..f3ee1e92c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +ccef89c785301336e92928a91f66ad752e4ce87933446e15f2082e86ac6642d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/input_meta.py new file mode 100644 index 000000000..38dba74a4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/input_meta.py @@ -0,0 +1,121 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 29, 8400] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000933908") + std = float("0.0305456") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 29, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 8400] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0270833") + std = float("0.162326") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 29, 4] + 
dtype = "float32" + max_val = float("640.0") + mean = float("225.221") + std = float("222.396") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 29, 8400] + dtype = "float32" + max_val = float("0.332125") + mean = float("6.7312e-05") + std = float("0.00215112") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 29, 8400] + dtype = "float32" + max_val = float("0.941182") + mean = float("0.00754317") + std = float("0.0517711") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/model.py new file mode 100644 index 000000000..08f9efbb7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/model.py @@ -0,0 +1,175 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x8400xi64) <- (2x29x8400xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("29"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x8400xi64) <- (2x8400xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (58xi32) <- (2x29x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (16800xi64) <- (2x8400xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (16800xi32) <- (58xi32, 16800xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 8400] + + # pd_op.reshape: (2x8400xi32) <- (16800xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x8400xb) <- (2x8400xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x8400xi32) <- (2x8400xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x8400xi32) <- (2x8400xb, 2x8400xi32, 2x8400xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (58x4xf32) <- (2x29x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (16800x4xf32) <- (58x4xf32, 16800xi64, 1xi32) + gather_1 = 
paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 8400, 4] + + # pd_op.reshape: (2x8400x4xf32) <- (16800x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x8400x2xf32) <- (2x8400xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (1xi64) <- () + full_6 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x8400x1xf32) <- (2x8400x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x8400xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x29x1xf32) <- (2x29x8400xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x8400xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x29x1xf32) <- (2x29x8400xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.scale: (2x29x1xf32) <- (2x29x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_4, float("1e-09"), True) + del full_4, max_0 + + # pd_op.divide: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x8400xf32) <- (2x29x8400xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x8400x1xf32) <- (2x8400xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x8400x1xf32) <- (2x8400x1xf32, 2x8400x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..fa10de35c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +dd4756aee481284a2f105818ca2768c725dcd96b0e5b32c9865f9e346e77ab73 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_net.json 
b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/input_meta.py new file mode 100644 index 000000000..71f1bf11a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/input_meta.py @@ -0,0 +1,105 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 2100] + dtype = "float32" + max_val = float("3.0") + mean = float("0.0607143") + std = float("0.274954") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 21, 2100] + dtype = "float32" + max_val = float("0.912588") + mean = float("0.02087") + std = float("0.0876462") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 21, 2100] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00289116") + std = float("0.0536917") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 21, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 21, 4] + dtype = "float32" + max_val = float("320.0") + mean = float("97.1572") + std = float("87.9478") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 21, 2100] + dtype = "float32" + max_val = float("0.241469") + mean = float("0.000188227") + std = float("0.00363226") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/model.py new file mode 100644 index 000000000..23048d518 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/model.py @@ -0,0 +1,223 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x2100xf32) <- (2x2100xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x2100xb) <- (2x1x2100xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 21, 1] + + # pd_op.tile: (2x21x2100xb) <- (2x1x2100xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x2100xi64) <- (2x21x2100xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) 
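+ # Note: with full_1 holding axis -2, this argmax yields a (2x2100) index map giving, for each anchor column, which of the 21 rows of data_1 scores highest (presumably the best-matching padded ground-truth box).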
+ + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("21"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x2100x21xf32) <- (2x2100xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x21x2100xf32) <- (2x2100x21xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x21x2100xf32) <- (2x21x2100xb, 2x21x2100xf32, 2x21x2100xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x2100xf32) <- (2x21x2100xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x2100xi64) <- (2x21x2100xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("21"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x2100xi64) <- (2x2100xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (42xi32) <- (2x21x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (4200xi64) <- (2x2100xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (4200xi32) <- (42xi32, 4200xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 2100] + + # pd_op.reshape: (2x2100xi32) <- (4200xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x2100xb) <- (2x2100xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x2100xi32) <- (2x2100xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x2100xi32) <- (2x2100xb, 2x2100xi32, 2x2100xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (42x4xf32) <- (2x21x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (4200x4xf32) <- (42x4xf32, 4200xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 2100, 4] + + # pd_op.reshape: (2x2100x4xf32) <- (4200x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + 
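+ # Note: the flatten/gather/reshape sequence above indexes the 42 flattened (2x21x4) boxes in data_5 with the 4200 per-anchor assignments in flatten_1, then restores the (2x2100x4) layout, so reshape_0 holds the box assigned to every anchor.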
+ # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x2100x2xf32) <- (2x2100xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (1xi64) <- () + full_8 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x2100x1xf32) <- (2x2100x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x21x1xf32) <- (2x21x2100xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x21x1xf32) <- (2x21x2100xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 + + # pd_op.scale: (2x21x1xf32) <- (2x21x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 + + # pd_op.divide: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x2100xf32) <- (2x21x2100xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x2100x1xf32) <- (2x2100xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x2100x1xf32) <- (2x2100x1xf32, 2x2100x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..a62e3c346 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +b3076122b18bff71174d70b804480bc7272686491c85cf6a5e3c4b3eba6b39ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/input_meta.py new file mode 100644 index 000000000..b47fda69a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/input_meta.py @@ -0,0 +1,62 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 12096] + dtype = "float32" + max_val = float("2.0") + mean = float("0.00223214") + std = float("0.0480607") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 11, 12096] + dtype = "float32" + max_val = float("0.940076") + mean = float("0.000804856") + std = float("0.0222414") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 11, 12096] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000202922") + std = float("0.0142436") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 11, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 11, 4] + dtype = "float32" + max_val = float("629.571") + mean = float("192.521") + std = float("244.622") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 11, 12096] + dtype = "float32" + max_val = float("0.00694391") + mean = float("1.95634e-06") + std = float("9.01196e-05") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/model.py new file mode 100644 index 000000000..41382ee7e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/model.py @@ -0,0 +1,229 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x12096xf32) <- (2x12096xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x12096xb) <- (2x1x12096xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 11, 1] + + # pd_op.tile: (2x11x12096xb) <- (2x1x12096xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x12096xi64) <- (2x11x12096xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x12096x11xf32) <- (2x12096xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x11x12096xf32) <- (2x12096x11xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x11x12096xf32) <- 
(2x11x12096xb, 2x11x12096xf32, 2x11x12096xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x12096xf32) <- (2x11x12096xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x12096xi64) <- (2x11x12096xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x12096xi64) <- (2x12096xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (22xi32) <- (2x11x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (24192xi64) <- (2x12096xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (24192xi32) <- (22xi32, 24192xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 12096] + + # pd_op.reshape: (2x12096xi32) <- (24192xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x12096xb) <- (2x12096xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x12096xi32) <- (2x12096xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (2x12096xi32) <- (2x12096xb, 2x12096xi32, 2x12096xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (22x4xf32) <- (2x11x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (24192x4xf32) <- (22x4xf32, 24192xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 12096, 4] + + # pd_op.reshape: (2x12096x4xf32) <- (24192x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x12096x5xf32) <- (2x12096xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (4xi64) <- () + full_8 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + 
# pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x12096x4xf32) <- (2x12096x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x12096xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x11x1xf32) <- (2x11x12096xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x12096xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x11x1xf32) <- (2x11x12096xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x11x1xf32) <- (2x11x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x12096xf32) <- (2x11x12096xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x12096x1xf32) <- (2x12096xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x12096x4xf32) <- (2x12096x4xf32, 2x12096x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..065083791 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +753a38369a6c1cbad2faa292d21d2c22dc88ca45239db771fc9c111eed147698 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/input_meta.py new file mode 100644 index 000000000..c0dc17231 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/input_meta.py @@ -0,0 +1,69 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [7581] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 7581] + dtype = "float32" + max_val = float("2.0") + mean = float("0.00995911") + std = float("0.10127") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 11, 7581] + dtype = "float32" + max_val = float("0.971142") + mean = float("0.00550478") + std = float("0.0532604") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 11, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000905374") + std = float("0.0300758") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 11, 1] + dtype = "int32" + data = [0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 11, 4] + dtype = "float32" + max_val = float("608.0") + mean = float("218.815") + std = float("214.701") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 11, 7581] + dtype = "float32" + max_val = float("0.00885437") + mean = float("1.14459e-05") + std = float("0.000226499") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/model.py new file mode 100644 index 000000000..d6c3a388b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/model.py @@ -0,0 +1,248 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_1, full_int_array_0) + del data_1, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 11, 1] + + # pd_op.tile: (2x11x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x11x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x11xf32) <- (2x-1xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x11x-1xf32) <- (2x-1x11xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x11x-1xf32) <- (2x11x-1xb, 2x11x-1xf32, 2x11x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_3) + del data_3, tile_0, transpose_0 + + # 
pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x-1xf32) <- (2x11x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x-1xi64) <- (2x11x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_4, full_3, float("0"), True) + del data_4, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (22xi32) <- (2x11x1xi32) + flatten_0 = paddle._C_ops.flatten(data_5, 0, 2) + del data_5 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (22xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_5, data_0] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_6) + del full_6, sum_0 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [-1, 4] + + # pd_op.reshape: (22x4xf32) <- (2x11x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.gather: (-1x4xf32) <- (22x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full: (xi64) <- () + full_8 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_5, data_0, full_8] + del data_0, full_5, full_8 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x5xf32) <- 
(2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_9, where_1.dtype), full_9 + ) + del full_9 + + # pd_op.full: (4xi64) <- () + full_10 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_10, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_7, where_0) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.max: (2x11x1xf32) <- (2x11x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_4, True) + + # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_2, where_0) + del data_2, where_0 + + # pd_op.max: (2x11x1xf32) <- (2x11x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_4, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x11x1xf32) <- (2x11x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_11, float("1e-09"), True) + del full_11, max_0 + + # pd_op.divide: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x-1xf32) <- (2x11x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_4) + del full_int_array_4, max_2 + + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..353f8f884 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +08a098f21657c7b30d6d76353b09026c37c3d8117fe79f45a73221c13072771d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/input_meta.py new file mode 100644 index 000000000..685cf9586 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/input_meta.py @@ -0,0 +1,63 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 11, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00186012") + std = float("0.043089") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 11, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 3, 0, 0, 0, 0, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0204613") + std = float("0.141572") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 11, 4] + dtype = "float32" + min_val = float("60.4005") + max_val = float("512.0") + mean = float("223.716") + std = float("117.023") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 11, 5376] + dtype = "float32" + max_val = float("0.00924298") + mean = float("2.15111e-05") + std = float("0.000307684") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 11, 5376] + dtype = "float32" + max_val = float("0.986966") + mean = float("0.00984969") + std = float("0.0727765") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/model.py new file mode 100644 index 000000000..e2e902cde --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/model.py @@ -0,0 +1,181 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x5376xi64) <- (2x11x5376xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x5376xi64) <- (2x5376xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (22xi32) <- (2x11x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (10752xi64) <- (2x5376xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (10752xi32) <- (22xi32, 10752xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 5376] + + # pd_op.reshape: (2x5376xi32) <- (10752xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: 
(xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x5376xb) <- (2x5376xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x5376xi32) <- (2x5376xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x5376xi32) <- (2x5376xb, 2x5376xi32, 2x5376xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (22x4xf32) <- (2x11x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (10752x4xf32) <- (22x4xf32, 10752xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 5376, 4] + + # pd_op.reshape: (2x5376x4xf32) <- (10752x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x5376x5xf32) <- (2x5376xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (4xi64) <- () + full_6 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x5376x4xf32) <- (2x5376x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x5376xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x11x1xf32) <- (2x11x5376xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x5376xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x11x1xf32) <- (2x11x5376xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x11x1xf32) <- (2x11x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x5376xf32) <- (2x11x5376xf32, 1xi64) + max_2 = 
paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x5376x1xf32) <- (2x5376xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x5376x4xf32) <- (2x5376x4xf32, 2x5376x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..316b84702 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +8ac2a7e06c1b12c8919f3d3ffe45bfc4bc8d60d57ba8f6b1886ede9e5a15eebb \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/input_meta.py new file mode 100644 index 000000000..a4b29d39d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/input_meta.py @@ -0,0 +1,63 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 25, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000336682") + std = float("0.0183458") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 25, 1] + dtype = "int32" + data = [4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 7, 7, 7, 7, 7, 7, 4, 4, 4, 4, 4, 4, 4, 4] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00841704") + std = float("0.0913575") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 25, 5] + dtype = "float32" + min_val = float("0.666691") + max_val = float("891.995") + mean = float("262.9") + std = float("311.619") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 25, 21504] + dtype = "float32" + max_val = float("9.76424e-05") + mean = float("5.83661e-10") + std = float("1.62072e-07") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 25, 21504] + dtype = "float32" + max_val = float("0.450419") + mean = float("6.86159e-05") + std = float("0.00270931") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/model.py new file mode 100644 index 000000000..f6c3c14ea --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/model.py @@ -0,0 +1,197 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + 
def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x21504xi64) <- (1x25x21504xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("25"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x21504xi64) <- (1x21504xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (25xi32) <- (1x25x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (21504xi64) <- (1x21504xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (21504xi32) <- (25xi32, 21504xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 21504] + + # pd_op.reshape: (1x21504xi32) <- (21504xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x21504xb) <- (1x21504xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("15"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x21504xi32) <- (1x21504xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (1x21504xi32) <- (1x21504xb, 1x21504xi32, 1x21504xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 5] + + # pd_op.reshape: (25x5xf32) <- (1x25x5xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (21504x5xf32) <- (25x5xf32, 21504xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [1, 21504, 5] + + # pd_op.reshape: (1x21504x5xf32) <- (21504x5xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x21504x16xf32) <- (1x21504xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (15xi64) <- () + full_6 = paddle._C_ops.full( + [15], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (15xi64) <- (15xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [15], + paddle.int64, + [ + 
float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + float("11"), + float("12"), + float("13"), + float("14"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (1x21504x15xf32) <- (1x21504x16xf32, 15xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x21504xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (1x25x1xf32) <- (1x25x21504xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x21504xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (1x25x1xf32) <- (1x25x21504xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x25x1xf32) <- (1x25x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (1x21504xf32) <- (1x25x21504xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (1x21504x1xf32) <- (1x21504xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..195bb0704 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +627740428144c5c24fe1381cb2b9944a4e5062c1b9c67ff2cf42745c5a541450 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/input_meta.py new file mode 100644 index 000000000..a51e55d48 --- /dev/null 
+++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/input_meta.py @@ -0,0 +1,104 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [6] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [6] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 21504] + dtype = "float32" + max_val = float("2.0") + mean = float("0.00199963") + std = float("0.053223") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 6, 21504] + dtype = "float32" + max_val = float("0.533057") + mean = float("8.40499e-05") + std = float("0.00395285") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 6, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000333271") + std = float("0.0182527") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 6, 1] + dtype = "int32" + data = [2, 2, 3, 10, 3, 10] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1, 6, 5] + dtype = "float32" + data = [ + 653.0, + 372.0, + 26.2372, + 7.18399, + 0.896055, + 647.0, + 376.0, + 24.577, + 7.20924, + 0.96007, + 1017.5, + 514.0, + 43.1335, + 24.7487, + 0.785398, + 1021.0, + 516.5, + 24.7487, + 16.2635, + 0.785398, + 247.234, + 69.7757, + 27.5441, + 45.4497, + 0.866302, + 247.013, + 68.5477, + 10.5418, + 22.6716, + 0.847817, + ] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1, 6, 21504] + dtype = "float32" + max_val = float("0.000219842") + mean = float("2.68748e-09") + std = float("6.40427e-07") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/model.py new file mode 100644 index 000000000..a62adcdd8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/model.py @@ -0,0 +1,251 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (1x1x21504xf32) <- (1x21504xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_2, full_int_array_0) + del data_2, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x1x21504xb) <- (1x1x21504xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + del full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (1x-1x21504xb) <- (1x1x21504xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x21504xi64) <- (1x-1x21504xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_3, full_2, False, False, paddle.int64) + + # pd_op.one_hot: 
(1x21504x-1xf32) <- (1x21504xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(data_1, argmax_0.dtype), data_1 + ) + del argmax_0, data_1 + + # pd_op.transpose: (1x-1x21504xf32) <- (1x21504x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (1x-1x21504xf32) <- (1x-1x21504xb, 1x-1x21504xf32, 1x-1x21504xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_4) + del data_4, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-2] + + # pd_op.sum: (1x21504xf32) <- (1x-1x21504xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) + + # pd_op.argmax: (1x21504xi64) <- (1x-1x21504xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_5, cast_0) + del cast_0, data_5 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (1x21504xi64) <- (1x21504xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 + + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_6, 0, 2) + del data_6 + + # pd_op.flatten: (21504xi64) <- (1x21504xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (21504xi32) <- (-1xi32, 21504xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 21504] + + # pd_op.reshape: (1x21504xi32) <- (21504xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_2) + del full_int_array_2, gather_0 + + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x21504xb) <- (1x21504xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_4) + del full_4, sum_0 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("15"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x21504xi32) <- (1x21504xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_5, paddle.int32, paddle.framework._current_expected_place() + ) + del full_5 + + # pd_op.where: (1x21504xi32) <- (1x21504xb, 1x21504xi32, 1x21504xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [-1, 5] + + # pd_op.reshape: (-1x5xf32) <- (1x-1x5xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_7, full_int_array_3) + del data_7, full_int_array_3 + + # pd_op.gather: (21504x5xf32) <- (-1x5xf32, 21504xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 21504, 5] + + # pd_op.reshape: (1x21504x5xf32) <- (21504x5xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_4) + del full_int_array_4, gather_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("16"), paddle.int32, paddle.core.CPUPlace() + ) + + # 
pd_op.one_hot: (1x21504x16xf32) <- (1x21504xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_6, where_1.dtype), full_6 + ) + del full_6 + + # pd_op.full: (15xi64) <- () + full_7 = paddle._C_ops.full( + [15], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (15xi64) <- (15xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [15], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + float("11"), + float("12"), + float("13"), + float("14"), + ], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (1x21504x15xf32) <- (1x21504x16xf32, 15xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x21504xf32) + multiply_2 = paddle._C_ops.multiply(data_8, where_0) + del data_8 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.max: (1x-1x1xf32) <- (1x-1x21504xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_5, True) + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x21504xf32) + multiply_3 = paddle._C_ops.multiply(data_3, where_0) + del data_3, where_0 + + # pd_op.max: (1x-1x1xf32) <- (1x-1x21504xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_5, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x1xf32) <- (1x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) + del full_8, max_0 + + # pd_op.divide: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (1x21504xf32) <- (1x-1x21504xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) + del full_int_array_1, multiply_4 + + # pd_op.unsqueeze: (1x21504x1xf32) <- (1x21504xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_5) + del full_int_array_5, max_2 + + # pd_op.multiply: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..2a648a0fc --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +9ce4bf27641cec424c76266f8f3dde955f3ed3b34c19bebede2db6063aa5fff6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/input_meta.py new file mode 100644 index 000000000..81be1c2ff --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/input_meta.py @@ -0,0 +1,82 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 3, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000604539") + std = float("0.0245799") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 3, 1] + dtype = "int32" + data = [1, 1, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00181362") + std = float("0.0425479") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 3, 5] + dtype = "float32" + data = [ + 248.448, + 614.298, + 137.72, + 133.965, + 1.06359, + 602.824, + 672.606, + 103.848, + 95.0988, + 1.06909, + 498.844, + 472.744, + 142.145, + 125.935, + 1.05772, + ] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 3, 21504] + dtype = "float32" + max_val = float("4.25288e-07") + mean = float("7.54294e-12") + std = float("1.67909e-09") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1, 3, 21504] + dtype = "float32" + max_val = float("0.207679") + mean = float("0.00012728") + std = float("0.00194434") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/model.py new file mode 100644 index 000000000..6f65f2340 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/model.py @@ -0,0 +1,196 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x21504xi64) <- (1x-1x21504xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_2, cast_0) + del cast_0, data_2 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (1x21504xi64) <- (1x21504xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (21504xi64) <- (1x21504xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (21504xi32) <- (-1xi32, 21504xi64, 1xi32) + gather_0 = 
paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 21504] + + # pd_op.reshape: (1x21504xi32) <- (21504xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_2 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x21504xb) <- (1x21504xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_2) + del data_4, full_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("15"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x21504xi32) <- (1x21504xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_3, paddle.int32, paddle.framework._current_expected_place() + ) + del full_3 + + # pd_op.where: (1x21504xi32) <- (1x21504xb, 1x21504xi32, 1x21504xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 5] + + # pd_op.reshape: (-1x5xf32) <- (1x-1x5xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_1) + del data_5, full_int_array_1 + + # pd_op.gather: (21504x5xf32) <- (-1x5xf32, 21504xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [1, 21504, 5] + + # pd_op.reshape: (1x21504x5xf32) <- (21504x5xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("16"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x21504x16xf32) <- (1x21504xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_4, where_0.dtype), full_4 + ) + del full_4 + + # pd_op.full: (15xi64) <- () + full_5 = paddle._C_ops.full( + [15], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (15xi64) <- (15xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_5, + [15], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + float("11"), + float("12"), + float("13"), + float("14"), + ], + paddle.framework._current_expected_place(), + ) + del full_5 + + # pd_op.index_select: (1x21504x15xf32) <- (1x21504x16xf32, 15xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x21504xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_1) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (1x-1x1xf32) <- (1x-1x21504xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x21504xf32) + multiply_3 = paddle._C_ops.multiply(data_7, data_1) + del data_1, data_7 + + # pd_op.max: (1x-1x1xf32) <- (1x-1x21504xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x1xf32) <- (1x-1x1xf32, 1xf32) 
+ scale_0 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 + + # pd_op.divide: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (1x21504xf32) <- (1x-1x21504xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_4, False) + del full_int_array_4, multiply_4 + + # pd_op.unsqueeze: (1x21504x1xf32) <- (1x21504xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..ade792b54 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +3836fac6cea1c912765cb5cd7f72d4d7a8d10c3d7f1d4c26a9c90da8fa16edf0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/input_meta.py new file mode 100644 index 000000000..f37a0b7a4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/input_meta.py @@ -0,0 +1,65 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 21504] + dtype = "float32" + max_val = float("2.0") + mean = float("0.0815197") + std = float("0.278349") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 155, 21504] + dtype = "float32" + max_val = float("0.637167") + mean = float("0.000108427") + std = float("0.00408889") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 155, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000525934") + std = float("0.0229272") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 155, 1] + dtype = "int32" + min_val = 6 + max_val = 12 + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 155, 5] + dtype = "float32" + min_val = float("0.231091") + max_val = float("1021.82") + mean = float("266.123") + std = float("330.341") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 155, 21504] + dtype = "float32" + max_val = 
float("0.00115321") + mean = float("2.20005e-09") + std = float("7.35257e-07") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/model.py new file mode 100644 index 000000000..b64476001 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/model.py @@ -0,0 +1,245 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (1x1x21504xf32) <- (1x21504xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x1x21504xb) <- (1x1x21504xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 155, 1] + + # pd_op.tile: (1x155x21504xb) <- (1x1x21504xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x21504xi64) <- (1x155x21504xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("155"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x21504x155xf32) <- (1x21504xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (1x155x21504xf32) <- (1x21504x155xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (1x155x21504xf32) <- (1x155x21504xb, 1x155x21504xf32, 1x155x21504xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (1x21504xf32) <- (1x155x21504xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (1x21504xi64) <- (1x155x21504xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("155"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x21504xi64) <- (1x21504xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (155xi32) <- (1x155x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (21504xi64) <- (1x21504xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (21504xi32) <- (155xi32, 21504xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # 
pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [1, 21504] + + # pd_op.reshape: (1x21504xi32) <- (21504xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x21504xb) <- (1x21504xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("15"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x21504xi32) <- (1x21504xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (1x21504xi32) <- (1x21504xb, 1x21504xi32, 1x21504xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 5] + + # pd_op.reshape: (155x5xf32) <- (1x155x5xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (21504x5xf32) <- (155x5xf32, 21504xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [1, 21504, 5] + + # pd_op.reshape: (1x21504x5xf32) <- (21504x5xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x21504x16xf32) <- (1x21504xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (15xi64) <- () + full_8 = paddle._C_ops.full( + [15], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (15xi64) <- (15xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [15], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + float("11"), + float("12"), + float("13"), + float("14"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (1x21504x15xf32) <- (1x21504x16xf32, 15xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (1x155x21504xf32) <- (1x155x21504xf32, 1x155x21504xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (1x155x1xf32) <- (1x155x21504xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (1x155x21504xf32) <- (1x155x21504xf32, 1x155x21504xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (1x155x1xf32) <- (1x155x21504xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x155x1xf32) <- (1x155x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, 
float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (1x155x21504xf32) <- (1x155x21504xf32, 1x155x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (1x155x21504xf32) <- (1x155x21504xf32, 1x155x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (1x21504xf32) <- (1x155x21504xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (1x21504x1xf32) <- (1x21504xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..9e010ed0a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +7079a2746f95c95103bab62fbcdce91e663d70fd55597de2100a0b654fbdd66c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/input_meta.py new file mode 100644 index 000000000..ebedcc374 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/input_meta.py @@ -0,0 +1,93 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 5376] + dtype = "float32" + max_val = float("3.0") + mean = float("0.0205543") + std = float("0.179507") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 15, 5376] + dtype = "float32" + max_val = float("0.964353") + mean = float("0.0217275") + std = float("0.0736307") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 15, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00137029") + std = float("0.036992") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 15, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 15, 4] + dtype = "float32" + max_val = float("512.0") + mean = float("126.218") + std = float("182.758") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 15, 5376] + dtype = "float32" + 
max_val = float("0.455376") + mean = float("7.96332e-05") + std = float("0.0033028") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/model.py new file mode 100644 index 000000000..d3950802b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/model.py @@ -0,0 +1,223 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x5376xf32) <- (2x5376xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x5376xb) <- (2x1x5376xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 15, 1] + + # pd_op.tile: (2x15x5376xb) <- (2x1x5376xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x5376xi64) <- (2x15x5376xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("15"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x5376x15xf32) <- (2x5376xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x15x5376xf32) <- (2x5376x15xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x15x5376xf32) <- (2x15x5376xb, 2x15x5376xf32, 2x15x5376xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x5376xf32) <- (2x15x5376xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x5376xi64) <- (2x15x5376xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("15"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x5376xi64) <- (2x5376xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (30xi32) <- (2x15x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (10752xi64) <- (2x5376xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (10752xi32) <- (30xi32, 10752xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () 
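+ # The gather above picks, for each of the 2x5376 anchors, the label entry in data_4 selected by its (batch-offset) argmax index; the reshape below restores the (batch, num_anchors) = (2, 5376) layout.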
+ full_int_array_3 = [2, 5376] + + # pd_op.reshape: (2x5376xi32) <- (10752xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x5376xb) <- (2x5376xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x5376xi32) <- (2x5376xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x5376xi32) <- (2x5376xb, 2x5376xi32, 2x5376xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (30x4xf32) <- (2x15x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (10752x4xf32) <- (30x4xf32, 10752xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 5376, 4] + + # pd_op.reshape: (2x5376x4xf32) <- (10752x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x5376x2xf32) <- (2x5376xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (1xi64) <- () + full_8 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x5376x1xf32) <- (2x5376x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x5376xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x15x1xf32) <- (2x15x5376xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x5376xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x15x1xf32) <- (2x15x5376xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 + + # pd_op.scale: (2x15x1xf32) <- (2x15x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 + + # pd_op.divide: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x5376xf32) <- (2x15x5376xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, 
full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x5376x1xf32) <- (2x5376xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x5376x1xf32) <- (2x5376x1xf32, 2x5376x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..a21679610 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +9950c1f0a716c3b900cccedec1f4dab335765398c1e5ddc0809a2c904750e9f1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/input_meta.py new file mode 100644 index 000000000..9b27ac5ab --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/input_meta.py @@ -0,0 +1,108 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 6, 12096] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000895613") + std = float("0.0299134") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 6, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 12096] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00537368") + std = float("0.0731081") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 6, 4] + dtype = "float32" + data = [ + 359.226, + 0.0, + 740.129, + 768.0, + 148.645, + 0.0, + 219.871, + 102.165, + 337.548, + 0.0, + 414.968, + 123.303, + 0.0, + 0.0, + 52.6452, + 112.734, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 296.727, + 100.905, + 506.182, + 605.431, + 645.818, + 117.723, + 750.545, + 369.985, + 546.909, + 128.934, + 610.909, + 280.292, + 436.364, + 123.328, + 523.636, + 330.745, + 209.455, + 140.146, + 354.909, + 493.314, + 157.091, + 112.117, + 238.545, + 308.321, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 6, 12096] + dtype = "float32" + max_val = float("0.42711") + mean = float("7.96203e-05") + std = float("0.00288483") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 6, 12096] + dtype = "float32" + max_val = float("0.933696") + mean = float("0.0107169") + std = float("0.0520399") + data = None diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/model.py new file mode 100644 index 000000000..a58ceab85 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/model.py @@ -0,0 +1,175 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x12096xi64) <- (2x6x12096xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("6"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x12096xi64) <- (2x12096xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (12xi32) <- (2x6x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (24192xi64) <- (2x12096xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (24192xi32) <- (12xi32, 24192xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 12096] + + # pd_op.reshape: (2x12096xi32) <- (24192xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x12096xb) <- (2x12096xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x12096xi32) <- (2x12096xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x12096xi32) <- (2x12096xb, 2x12096xi32, 2x12096xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (12x4xf32) <- (2x6x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (24192x4xf32) <- (12x4xf32, 24192xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 12096, 4] + + # pd_op.reshape: (2x12096x4xf32) <- (24192x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x12096x2xf32) <- (2x12096xi32, 1xi32) + one_hot_0 = 
paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (1xi64) <- () + full_6 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x12096x1xf32) <- (2x12096x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x6x12096xf32) <- (2x6x12096xf32, 2x6x12096xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x6x1xf32) <- (2x6x12096xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x6x12096xf32) <- (2x6x12096xf32, 2x6x12096xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x6x1xf32) <- (2x6x12096xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.scale: (2x6x1xf32) <- (2x6x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_4, float("1e-09"), True) + del full_4, max_0 + + # pd_op.divide: (2x6x12096xf32) <- (2x6x12096xf32, 2x6x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x6x12096xf32) <- (2x6x12096xf32, 2x6x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x12096xf32) <- (2x6x12096xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x12096x1xf32) <- (2x12096xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x12096x1xf32) <- (2x12096x1xf32, 2x12096x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..82cfbe272 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +1cf576ba841b87c90f808dd83fc76cf6b31385742f2a6f80953fa0a3f614b06c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/input_meta.py 
new file mode 100644 index 000000000..d01cfc936 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/input_meta.py @@ -0,0 +1,111 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [17] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [6804] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 17, 6804] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000989902") + std = float("0.0314471") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 17, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 6804] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0168283") + std = float("0.128628") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 17, 4] + dtype = "float32" + max_val = float("576.0") + mean = float("141.197") + std = float("163.352") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 17, 6804] + dtype = "float32" + max_val = float("0.438622") + mean = float("6.76947e-05") + std = float("0.00266896") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 17, 6804] + dtype = "float32" + max_val = float("0.949961") + mean = float("0.00868228") + std = float("0.0526563") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/model.py new file mode 100644 index 000000000..2c208d4ad --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/model.py @@ -0,0 +1,195 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # 
builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (1xi64) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_4, float("1e-09"), True) + del full_4, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- 
(2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..d3368bec1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +b31d9174479d0938255cd2ec58334899ecf03916288acd4eafc0c43a6b55388d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/input_meta.py new file mode 100644 index 000000000..268c9fefb --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/input_meta.py @@ -0,0 +1,124 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [20] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [8400] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [20] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 8400] + dtype = "float32" + max_val = float("3.0") + mean = float("0.0289881") + std = float("0.183368") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 20, 8400] + dtype = "float32" + max_val = float("0.911359") + mean = float("0.0080156") + std = float("0.0534019") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 20, 8400] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0014494") + std = float("0.0380434") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 20, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 20, 4] + dtype = "float32" + max_val = float("640.0") + mean = float("258.282") + std = float("180.734") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [2, 20, 8400] + dtype = "float32" + max_val = float("0.331258") + mean = float("9.44418e-05") + std = float("0.00284092") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/model.py new file mode 100644 index 000000000..0eefa8f8c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/model.py @@ -0,0 +1,258 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_0) + del data_3, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + del full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_4, full_2, False, False, paddle.int64) + + # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(data_2, argmax_0.dtype), data_2 + ) + del argmax_0, data_2 + + # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_5) + del data_5, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-2] + + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_6, cast_0) + del cast_0, data_6 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = 
paddle._C_ops.flatten(data_7, 0, 2) + del data_7 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_4, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_8, full_int_array_2) + del data_8, full_int_array_2 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_4, data_1, full_7] + del data_1, full_4, full_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_8, where_1.dtype), full_8 + ) + del full_8 + + # pd_op.full: (1xi64) <- () + full_9 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_9, where_0) + del data_9 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x-1x1xf32) <- 
(2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_4, where_0) + del data_4, where_0 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) + del multiply_3 + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) + del full_int_array_1, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..248541aad --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +c94cf7aa14030cc58b016c1f88c7cd23c6ff7ce7be2dce9f0f95c5469f2ac412 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/input_meta.py new file mode 100644 index 000000000..12c56f973 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/input_meta.py @@ -0,0 +1,76 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [5376] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 12, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000612289") + std = float("0.0247369") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 12, 1] + dtype = "int32" + data = [0, 3, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 5376] + dtype = "float32" + max_val 
= float("1.0") + mean = float("0.00734747") + std = float("0.0854019") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 12, 4] + dtype = "float32" + max_val = float("326.78") + mean = float("160.052") + std = float("110.95") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 12, 5376] + dtype = "float32" + max_val = float("0.00716361") + mean = float("2.14567e-06") + std = float("8.59189e-05") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 12, 5376] + dtype = "float32" + max_val = float("0.945922") + mean = float("0.00158376") + std = float("0.0267929") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py new file mode 100644 index 000000000..4ae94d4a6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py @@ -0,0 +1,201 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + 
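+ # Anchors where data_5 <= 0 take the fill value 4 here; since the later one_hot uses depth 5 and index_select keeps only columns 0-3, those anchors end up with an all-zero one-hot row.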
del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (4xi64) <- () + full_7 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) + del full_8, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..4d04a175d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +6f0f40cbf909627fa867337174f532d3e179ced4784c7fc2c9cb00ae6193ac2e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/input_meta.py new file mode 100644 index 000000000..aa6620489 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/input_meta.py @@ -0,0 +1,59 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 1, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00171481") + std = float("0.0413748") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 1, 1] + dtype = "int32" + data = [0, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00171481") + std = float("0.0413748") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 1, 4] + dtype = "float32" + data = [376.443, 61.9806, 517.447, 398.447, 562.465, 468.683, 608.0, 608.0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 1, 7581] + dtype = "float32" + max_val = float("0.00652957") + mean = float("2.09634e-05") + std = float("0.000282651") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 1, 7581] + dtype = "float32" + max_val = float("0.931424") + mean = float("0.0255599") + std = float("0.091854") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/model.py new file mode 100644 index 000000000..80304c15d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/model.py @@ -0,0 +1,176 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x7581xi64) <- (2x1x7581xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + 
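+ # data_1 is the per-image batch index ([0, 1]); multiplying it by 1 (the number of GT entries per image in this subgraph) and adding it to argmax_0 converts per-image argmax results into indices into the flattened GT list used by the gathers below.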
scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x7581xi64) <- (2x7581xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (2xi32) <- (2x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (15162xi64) <- (2x7581xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (15162xi32) <- (2xi32, 15162xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 7581] + + # pd_op.reshape: (2x7581xi32) <- (15162xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x7581xb) <- (2x7581xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x7581xi32) <- (2x7581xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x7581xi32) <- (2x7581xb, 2x7581xi32, 2x7581xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (2x4xf32) <- (2x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (15162x4xf32) <- (2x4xf32, 15162xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 7581, 4] + + # pd_op.reshape: (2x7581x4xf32) <- (15162x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x7581x5xf32) <- (2x7581xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (4xi64) <- () + full_6 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x7581x4xf32) <- (2x7581x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x1x1xf32) <- (2x1x7581xf32, 1xi64) + max_0 = 
paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x1x1xf32) <- (2x1x7581xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.scale: (2x1x1xf32) <- (2x1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_1, float("1e-09"), True) + del full_1, max_0 + + # pd_op.divide: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x7581xf32) <- (2x1x7581xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x7581x1xf32) <- (2x7581xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x7581x4xf32) <- (2x7581x4xf32, 2x7581x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..e5ec97328 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +d8e2807e0c261d57e00c887aba4b333ffd83562a1bf230d5a26bacf0379fad87 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/input_meta.py new file mode 100644 index 000000000..3217a39a7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/input_meta.py @@ -0,0 +1,91 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3549] + dtype = "float32" + max_val = float("2.0") + mean = float("0.0250775") + std = float("0.157259") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 14, 3549] + dtype = "float32" + max_val = float("0.973582") + mean = float("0.00986958") + std = float("0.0654711") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 14, 3549] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00179125") + std = float("0.0422852") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name 
= "data_4" + shape = [2, 14, 1] + dtype = "int32" + data = [ + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 3, + 0, + 0, + 0, + 0, + 3, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 3, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 14, 4] + dtype = "float32" + max_val = float("384.824") + mean = float("133.114") + std = float("96.9844") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 14, 3549] + dtype = "float32" + max_val = float("0.00888292") + mean = float("1.5455e-05") + std = float("0.000268339") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/model.py new file mode 100644 index 000000000..2432102f1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/model.py @@ -0,0 +1,229 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x3549xf32) <- (2x3549xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x3549xb) <- (2x1x3549xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 14, 1] + + # pd_op.tile: (2x14x3549xb) <- (2x1x3549xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x3549xi64) <- (2x14x3549xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("14"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x3549x14xf32) <- (2x3549xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x14x3549xf32) <- (2x3549x14xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x14x3549xf32) <- (2x14x3549xb, 2x14x3549xf32, 2x14x3549xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x3549xf32) <- (2x14x3549xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x3549xi64) <- (2x14x3549xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("14"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x3549xi64) <- (2x3549xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # 
pd_op.flatten: (28xi32) <- (2x14x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (7098xi64) <- (2x3549xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (7098xi32) <- (28xi32, 7098xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 3549] + + # pd_op.reshape: (2x3549xi32) <- (7098xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x3549xb) <- (2x3549xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x3549xi32) <- (2x3549xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (2x3549xi32) <- (2x3549xb, 2x3549xi32, 2x3549xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (28x4xf32) <- (2x14x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (7098x4xf32) <- (28x4xf32, 7098xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 3549, 4] + + # pd_op.reshape: (2x3549x4xf32) <- (7098x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x3549x5xf32) <- (2x3549xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (4xi64) <- () + full_8 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x3549x4xf32) <- (2x3549x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x3549xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x14x1xf32) <- (2x14x3549xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x3549xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x14x1xf32) <- (2x14x3549xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) 
+ del multiply_2 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x14x1xf32) <- (2x14x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x3549xf32) <- (2x14x3549xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x3549x1xf32) <- (2x3549xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x3549x4xf32) <- (2x3549x4xf32, 2x3549x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..18426c718 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +c4af36497f7852167288dc3ac1e4b55956d1b6c42ca46e70cd27bb1ccc05b8bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/input_meta.py new file mode 100644 index 000000000..5a54b3b62 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/input_meta.py @@ -0,0 +1,83 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [10164] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 10164] + dtype = "float32" + max_val = float("2.0") + mean = float("0.00491932") + std = float("0.0706648") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 12, 10164] + dtype = "float32" + max_val = float("0.964484") + mean = float("0.00145547") + std = float("0.0263451") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 12, 10164] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000409944") + std = float("0.0202429") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 1] + dtype = 
"int32" + data = [0, 1] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 12, 1] + dtype = "int32" + data = [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 12, 4] + dtype = "float32" + max_val = float("658.644") + mean = float("251.921") + std = float("253.674") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [2, 12, 10164] + dtype = "float32" + max_val = float("0.00990145") + mean = float("2.84369e-06") + std = float("0.000124311") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/model.py new file mode 100644 index 000000000..d3b764f89 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/model.py @@ -0,0 +1,264 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_0) + del data_3, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + del full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_4, full_2, False, False, paddle.int64) + + # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(data_2, argmax_0.dtype), data_2 + ) + del argmax_0, data_2 + + # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_5) + del data_5, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-2] + + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_6, cast_0) + del cast_0, data_6 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: 
(2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_7, 0, 2) + del data_7 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_4, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_8, full_int_array_2) + del data_8, full_int_array_2 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_4, data_1, full_7] + del data_1, full_4, full_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_8, where_1.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x-1x-1xf32) 
<- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_9, where_0) + del data_9 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_4, where_0) + del data_4, where_0 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_10, float("1e-09"), True) + del full_10, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) + del full_int_array_1, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..95c7fa710 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +0ca9776f46c68e9c47d78262b0e7a676b5b201ebb813b00dd6cdd710477953d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/input_meta.py new file mode 100644 index 000000000..7fd3d63e3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/input_meta.py @@ -0,0 +1,76 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 4116] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00127551") + std = float("0.0356915") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 4116] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00127551") + std = 
float("0.0356915") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 35.3684, + 311.273, + 300.632, + 442.182, + 78.6168, + 241.764, + 110.41, + 266.46, + 195.413, + 193.28, + 236.373, + 224.0, + 130.415, + 227.85, + 158.792, + 248.688, + 360.901, + 338.022, + 374.268, + 347.726, + 304.951, + 76.6956, + 336.0, + 109.565, + 295.171, + 70.9146, + 350.609, + 111.437, + 18.5379, + 0.0, + 293.517, + 337.836, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 4116, 4] + dtype = "float32" + min_val = float("-317.929") + max_val = float("740.992") + mean = float("224.215") + std = float("149.051") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/model.py new file mode 100644 index 000000000..0619d9191 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/model.py @@ -0,0 +1,287 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x4116xi64) <- (8x1x4116xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("8"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (8x4116xi64) <- (8x4116xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (8xi32) <- (8x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (32928xi64) <- (8x4116xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (32928xi32) <- (8xi32, 32928xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [8, 4116] + + # pd_op.reshape: (8x4116xi32) <- (32928xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x4116xb) <- (8x4116xf32, xf32) + 
greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x4116xi32) <- (8x4116xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (8x4116xi32) <- (8x4116xb, 8x4116xi32, 8x4116xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (32928x4xf32) <- (8x4xf32, 32928xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [8, 4116, 4] + + # pd_op.reshape: (8x4116x4xf32) <- (32928x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x4116x5xf32) <- (8x4116xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (8x4116x4xf32) <- (8x4116x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (8x1x4116x4xf32) <- (8x4116x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (8x1x4116x2xf32) <- (8x1x4116x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (8x1x4116x2xf32) <- (8x1x4116x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (8x1x4116x2xf32) <- (8x1x1x2xf32, 8x1x4116x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x1x4116x2xf32) 
<- (8x1x1x2xf32, 8x1x4116x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 8x1x4116x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (8x1x4116xf32) <- (8x1x4116x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (8x1x1x2xf32) <- (8x1x1x2xf32, 8x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x1x1x2xf32) <- (8x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (8x1x1xf32) <- (8x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 8x1x4116x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (8x1x4116xf32) <- (8x1x4116x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (8x1x4116xf32) <- (8x1x1xf32, 8x1x4116xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.scale: (8x1x4116xf32) <- (8x1x4116xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) + del full_4, subtract_3 + + # pd_op.divide: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (8x4116xf32) <- (8x1x4116xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (8x4116x1xf32) <- (8x4116xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..0eb7b765d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt @@ 
-0,0 +1 @@ +87f1c9d15791927678923354dcfc589bd225484f29e377f70153217772641dce \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/input_meta.py new file mode 100644 index 000000000..880246b79 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/input_meta.py @@ -0,0 +1,76 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 10164] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000676407") + std = float("0.025999") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 10164] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000676407") + std = float("0.025999") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 174.715, + 140.8, + 405.956, + 457.6, + 375.985, + 345.193, + 411.639, + 372.906, + 317.49, + 292.0, + 450.008, + 388.0, + 287.439, + 452.211, + 340.211, + 490.947, + 352.0, + 296.267, + 584.17, + 384.267, + 222.933, + 194.723, + 332.444, + 275.609, + 80.8974, + 117.694, + 116.531, + 143.688, + 124.847, + 201.813, + 433.498, + 633.6, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 10164, 4] + dtype = "float32" + min_val = float("-271.994") + max_val = float("993.136") + mean = float("352.517") + std = float("213.87") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/model.py new file mode 100644 index 000000000..b0b1964b8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/model.py @@ -0,0 +1,287 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x10164xi64) <- (8x1x10164xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("8"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], 
float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (8x10164xi64) <- (8x10164xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (8xi32) <- (8x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (81312xi64) <- (8x10164xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (81312xi32) <- (8xi32, 81312xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [8, 10164] + + # pd_op.reshape: (8x10164xi32) <- (81312xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x10164xb) <- (8x10164xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x10164xi32) <- (8x10164xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (8x10164xi32) <- (8x10164xb, 8x10164xi32, 8x10164xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (81312x4xf32) <- (8x4xf32, 81312xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [8, 10164, 4] + + # pd_op.reshape: (8x10164x4xf32) <- (81312x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x10164x5xf32) <- (8x10164xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (8x10164x4xf32) <- (8x10164x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = 
paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (8x1x10164x4xf32) <- (8x10164x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (8x1x10164x2xf32) <- (8x1x10164x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (8x1x10164x2xf32) <- (8x1x10164x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (8x1x10164x2xf32) <- (8x1x1x2xf32, 8x1x10164x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x1x10164x2xf32) <- (8x1x1x2xf32, 8x1x10164x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 8x1x10164x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (8x1x10164xf32) <- (8x1x10164x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (8x1x1x2xf32) <- (8x1x1x2xf32, 8x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x1x1x2xf32) <- (8x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (8x1x1xf32) <- (8x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 8x1x10164x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (8x1x10164xf32) <- (8x1x10164x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (8x1x10164xf32) <- (8x1x1xf32, 8x1x10164xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.scale: (8x1x10164xf32) <- (8x1x10164xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) + del full_4, subtract_3 + + # pd_op.divide: (8x1x10164xf32) <- (8x1x10164xf32, 
8x1x10164xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (8x10164xf32) <- (8x1x10164xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (8x10164x1xf32) <- (8x10164xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..46e554be1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +48c8fed96995a8800e769458c3459c34a78e41ed051e581cdf8bf11943aac4cd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/input_meta.py new file mode 100644 index 000000000..f404aeedd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/input_meta.py @@ -0,0 +1,76 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000659544") + std = float("0.0256731") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000659544") + std = float("0.0256731") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 293.867, + 183.92, + 385.067, + 252.32, + 128.25, + 149.839, + 389.5, + 314.085, + 271.573, + 249.28, + 368.853, + 322.24, + 146.891, + 220.195, + 206.924, + 270.587, + 354.97, + 368.42, + 411.401, + 400.364, + 319.774, + 182.615, + 359.925, + 211.618, + 115.589, + 133.082, + 201.125, + 227.021, + 402.595, + 243.692, + 451.344, + 319.18, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 7581, 4] + dtype = "float32" + min_val = float("-306.461") + max_val = float("915.943") + mean = float("304.98") + std = float("188.516") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/model.py 
b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/model.py new file mode 100644 index 000000000..a6512b0d9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/model.py @@ -0,0 +1,287 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x7581xi64) <- (8x1x7581xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("8"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (8x7581xi64) <- (8x7581xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (8xi32) <- (8x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (60648xi64) <- (8x7581xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (60648xi32) <- (8xi32, 60648xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [8, 7581] + + # pd_op.reshape: (8x7581xi32) <- (60648xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x7581xb) <- (8x7581xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x7581xi32) <- (8x7581xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (8x7581xi32) <- (8x7581xb, 8x7581xi32, 8x7581xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del 
full_int_array_2 + + # pd_op.gather: (60648x4xf32) <- (8x4xf32, 60648xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [8, 7581, 4] + + # pd_op.reshape: (8x7581x4xf32) <- (60648x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x7581x5xf32) <- (8x7581xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (8x7581x4xf32) <- (8x7581x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (8x1x7581x4xf32) <- (8x7581x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (8x1x7581x2xf32) <- (8x1x7581x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (8x1x7581x2xf32) <- (8x1x7581x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (8x1x7581x2xf32) <- (8x1x1x2xf32, 8x1x7581x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x1x7581x2xf32) <- (8x1x1x2xf32, 8x1x7581x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x1x7581x2xf32) <- (8x1x7581x2xf32, 8x1x7581x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x1x7581x2xf32) <- (8x1x7581x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (8x1x7581xf32) <- (8x1x7581x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, 
False) + del clip_0 + + # pd_op.subtract: (8x1x1x2xf32) <- (8x1x1x2xf32, 8x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x1x1x2xf32) <- (8x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (8x1x1xf32) <- (8x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (8x1x7581x2xf32) <- (8x1x7581x2xf32, 8x1x7581x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (8x1x7581x2xf32) <- (8x1x7581x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (8x1x7581xf32) <- (8x1x7581x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (8x1x7581xf32) <- (8x1x1xf32, 8x1x7581xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x1x7581xf32) <- (8x1x7581xf32, 8x1x7581xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.scale: (8x1x7581xf32) <- (8x1x7581xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) + del full_4, subtract_3 + + # pd_op.divide: (8x1x7581xf32) <- (8x1x7581xf32, 8x1x7581xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (8x1x7581xf32) <- (8x1x7581xf32, 8x1x7581xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (8x7581xf32) <- (8x1x7581xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (8x7581x1xf32) <- (8x7581xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (8x7581x4xf32) <- (8x7581x4xf32, 8x7581x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..dbf341ae3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +4d9c4fdb6f89872cff5a1d7b782191d3432f8c3de1e21cadd795d8358285930d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/input_meta.py new file mode 100644 index 000000000..25d19e1c4 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/input_meta.py @@ -0,0 +1,60 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [6069] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 3, 6069] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000508046") + std = float("0.0225342") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 3, 1] + dtype = "int32" + data = [0, 0, 0, 3, 0, 0, 0, 1, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 6069] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00152414") + std = float("0.0390105") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 3, 4] + dtype = "float32" + max_val = float("509.089") + mean = float("160.185") + std = float("176.435") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 6069, 4] + dtype = "float32" + min_val = float("-272.115") + max_val = float("831.985") + mean = float("272.715") + std = float("171.769") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/model.py new file mode 100644 index 000000000..77efae021 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/model.py @@ -0,0 +1,310 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x-1xi64) <- (8x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("8"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (8x1xi32) <- (8x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(unsqueeze_0, cast_0) + del cast_0, unsqueeze_0 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (8x-1xi64) <- (8x-1xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (8x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (-1xi64) <- (8x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: 
(-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_5, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (8x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x-1xb) <- (8x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_6) + del data_4, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x-1xi32) <- (8x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (8x-1xi32) <- (8x-1xb, 8x-1xi32, 8x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (8x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_1) + del full_int_array_1 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full: (xi64) <- () + full_8 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_5, data_1, full_8] + del data_1, full_5, full_8 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (8x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x-1x5xf32) <- (8x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_9, where_0.dtype), full_9 + ) + del full_9 + + # pd_op.full: (4xi64) <- () + full_10 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_10, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.index_select: (8x-1x4xf32) <- (8x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.unsqueeze: (8x-1x1x4xf32) <- (8x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_5, full_int_array_2) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.unsqueeze: (8x1x-1x4xf32) <- (8x-1x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] 
+ + # pd_op.slice: (8x-1x1x2xf32) <- (8x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_2, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (8x-1x1x2xf32) <- (8x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (8x1x-1x2xf32) <- (8x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_2, [1], [] + ) + del full_int_array_4 + + # pd_op.slice: (8x1x-1x2xf32) <- (8x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del full_int_array_2, full_int_array_5, unsqueeze_2 + + # pd_op.maximum: (8x-1x-1x2xf32) <- (8x-1x1x2xf32, 8x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x-1x-1x2xf32) <- (8x-1x1x2xf32, 8x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x-1x-1x2xf32) <- (8x-1x-1x2xf32, 8x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x-1x-1x2xf32) <- (8x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_11, full_12) + del subtract_0 + + # pd_op.prod: (8x-1x-1xf32) <- (8x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (8x-1x1x2xf32) <- (8x-1x1x2xf32, 8x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x-1x1x2xf32) <- (8x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_11, full_12) + del subtract_1 + + # pd_op.prod: (8x-1x1xf32) <- (8x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (8x1x-1x2xf32) <- (8x1x-1x2xf32, 8x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (8x1x-1x2xf32) <- (8x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_11, full_12) + del full_11, full_12, subtract_2 + + # pd_op.prod: (8x1x-1xf32) <- (8x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (8x-1x-1xf32) <- (8x-1x1xf32, 8x1x-1xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x-1x-1xf32) <- (8x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_13, float("1e-09"), True) + del full_13, subtract_3 + + # pd_op.divide: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(divide_0, data_2) + del data_2, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = 
[-2] + + # pd_op.max: (8x-1xf32) <- (8x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, False) + del full_int_array_6, multiply_2 + + # pd_op.unsqueeze: (8x-1x1xf32) <- (8x-1xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (8x-1x4xf32) <- (8x-1x4xf32, 8x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..50b8d4414 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +f2afb21e4c0af848680385179cbb2911fce606dca85ef807e450e02d57c2c505 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/graph_net.json new file mode 100644 index 000000000..22864afee --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-X", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/input_meta.py new file mode 100644 index 000000000..3f82f1414 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/input_meta.py @@ -0,0 +1,60 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 1, 9261] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000674873") + std = float("0.0259696") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 9261] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000674873") + std = float("0.0259696") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 1, 4] + dtype = "float32" + data = [ + 218.164, + 418.526, + 278.766, + 506.947, + 290.441, + 270.261, + 447.051, + 478.435, + 214.154, + 69.12, + 391.385, + 253.44, + 56.0, + 245.189, + 140.903, + 291.604, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 9261, 4] + dtype = "float32" + min_val = float("-292.918") + max_val = float("973.067") + mean = float("336.426") + std = float("208.189") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/model.py new file mode 100644 index 000000000..19ea9191e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/model.py @@ -0,0 +1,287 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x9261xi64) <- 
(4x1x9261xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("4"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (4xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (4x1xi32) <- (4xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x1xi32) <- (4x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 + + # pd_op.cast: (4x1xi64) <- (4x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (4x9261xi64) <- (4x9261xi64, 4x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (4xi32) <- (4x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (37044xi64) <- (4x9261xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (37044xi32) <- (4xi32, 37044xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [4, 9261] + + # pd_op.reshape: (4x9261xi32) <- (37044xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (4x9261xb) <- (4x9261xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (4x9261xi32) <- (4x9261xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (4x9261xi32) <- (4x9261xb, 4x9261xi32, 4x9261xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (4x4xf32) <- (4x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (37044x4xf32) <- (4x4xf32, 37044xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [4, 9261, 4] + + # pd_op.reshape: (4x9261x4xf32) <- (37044x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, 
paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (4x9261x5xf32) <- (4x9261xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (4x9261x4xf32) <- (4x9261x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (4x1x1x4xf32) <- (4x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (4x1x9261x4xf32) <- (4x9261x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (4x1x1x2xf32) <- (4x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (4x1x1x2xf32) <- (4x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (4x1x9261x2xf32) <- (4x1x9261x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (4x1x9261x2xf32) <- (4x1x9261x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (4x1x9261x2xf32) <- (4x1x1x2xf32, 4x1x9261x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (4x1x9261x2xf32) <- (4x1x1x2xf32, 4x1x9261x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (4x1x9261x2xf32) <- (4x1x9261x2xf32, 4x1x9261x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (4x1x9261x2xf32) <- (4x1x9261x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (4x1x9261xf32) <- (4x1x9261x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (4x1x1x2xf32) <- (4x1x1x2xf32, 4x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (4x1x1x2xf32) <- (4x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (4x1x1xf32) <- (4x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (4x1x9261x2xf32) <- (4x1x9261x2xf32, 
4x1x9261x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (4x1x9261x2xf32) <- (4x1x9261x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (4x1x9261xf32) <- (4x1x9261x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (4x1x9261xf32) <- (4x1x1xf32, 4x1x9261xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (4x1x9261xf32) <- (4x1x9261xf32, 4x1x9261xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.scale: (4x1x9261xf32) <- (4x1x9261xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) + del full_4, subtract_3 + + # pd_op.divide: (4x1x9261xf32) <- (4x1x9261xf32, 4x1x9261xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (4x1x9261xf32) <- (4x1x9261xf32, 4x1x9261xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (4x9261xf32) <- (4x1x9261xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (4x9261x1xf32) <- (4x9261xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (4x9261x4xf32) <- (4x9261x4xf32, 4x9261x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-X/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..f33996b4a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +331d31c12329e180f8072f92c095e5fa3ed0d1dbf984c2e334eff6b5b3862c64 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/input_meta.py new file mode 100644 index 000000000..8361f35e8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/input_meta.py @@ -0,0 +1,84 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00124008") + std = float("0.0351929") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 3, 1] + dtype = "int32" 
+ data = [4, 3, 3, 3, 1, 0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00372024") + std = float("0.0608802") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 3, 4] + dtype = "float32" + data = [ + 270.791, + 234.887, + 332.231, + 356.289, + 38.6844, + 240.165, + 99.3659, + 448.66, + 476.35, + 311.423, + 512.0, + 504.082, + 2.03175, + 161.292, + 9.34603, + 181.971, + 39.619, + 61.622, + 40.8381, + 69.0662, + 0.0, + 0.0, + 0.0, + 0.0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 3, 5376] + dtype = "float32" + max_val = float("0.00886879") + mean = float("8.72339e-07") + std = float("6.13309e-05") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 3, 5376] + dtype = "float32" + max_val = float("0.712006") + mean = float("0.00169782") + std = float("0.0189379") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/model.py new file mode 100644 index 000000000..c7c749882 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/model.py @@ -0,0 +1,192 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x5376xi64) <- (2x3x5376xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x5376xi64) <- (2x5376xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (6xi32) <- (2x3x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (10752xi64) <- (2x5376xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (10752xi32) <- (6xi32, 10752xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 5376] + + # pd_op.reshape: (2x5376xi32) <- (10752xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x5376xb) <- (2x5376xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x5376xi32) <- (2x5376xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: 
(2x5376xi32) <- (2x5376xb, 2x5376xi32, 2x5376xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (6x4xf32) <- (2x3x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (10752x4xf32) <- (6x4xf32, 10752xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 5376, 4] + + # pd_op.reshape: (2x5376x4xf32) <- (10752x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x5376x11xf32) <- (2x5376xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (10xi64) <- () + full_6 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x5376x10xf32) <- (2x5376x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x5376xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x3x1xf32) <- (2x3x5376xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x5376xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x3x1xf32) <- (2x3x5376xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x3x1xf32) <- (2x3x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x5376xf32) <- (2x3x5376xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x5376x1xf32) <- (2x5376xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x5376x10xf32) <- (2x5376x10xf32, 2x5376x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git 
a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..d7d509a2f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +2474d5c0140e3ca8b342671eaefb285e6ee8c6c96b775962f701e40c7ee40211 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/input_meta.py new file mode 100644 index 000000000..f4ab533fd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/input_meta.py @@ -0,0 +1,64 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3549] + dtype = "float32" + max_val = float("26.0") + mean = float("0.0874894") + std = float("0.880638") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 49, 3549] + dtype = "float32" + max_val = float("0.980323") + mean = float("0.000678688") + std = float("0.0200074") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 49, 3549] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0017855") + std = float("0.0422174") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 49, 1] + dtype = "int32" + min_val = 0 + max_val = 8 + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 49, 4] + dtype = "float32" + max_val = float("408.482") + mean = float("110.196") + std = float("133.414") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 49, 3549] + dtype = "float32" + max_val = float("0.795764") + mean = float("4.47944e-05") + std = float("0.00452556") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/model.py new file mode 100644 index 000000000..2cc272861 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/model.py @@ -0,0 +1,244 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x3549xf32) <- (2x3549xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: 
(2x1x3549xb) <- (2x1x3549xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 49, 1] + + # pd_op.tile: (2x49x3549xb) <- (2x1x3549xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_1 = paddle._C_ops.multiply(data_1, data_2) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x3549xi64) <- (2x49x3549xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_1, False, False, paddle.int64) + del multiply_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("49"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x3549x49xf32) <- (2x3549xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x49x3549xf32) <- (2x3549x49xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x49x3549xf32) <- (2x49x3549xb, 2x49x3549xf32, 2x49x3549xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x3549xf32) <- (2x49x3549xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x3549xi64) <- (2x49x3549xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("49"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x3549xi64) <- (2x3549xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (98xi32) <- (2x49x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (7098xi64) <- (2x3549xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (7098xi32) <- (98xi32, 7098xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 3549] + + # pd_op.reshape: (2x3549xi32) <- (7098xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x3549xb) <- (2x3549xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x3549xi32) <- (2x3549xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: 
(2x3549xi32) <- (2x3549xb, 2x3549xi32, 2x3549xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (98x4xf32) <- (2x49x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (7098x4xf32) <- (98x4xf32, 7098xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 3549, 4] + + # pd_op.reshape: (2x3549x4xf32) <- (7098x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x3549x11xf32) <- (2x3549xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (10xi64) <- () + full_8 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x3549x10xf32) <- (2x3549x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_2 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x49x1xf32) <- (2x49x3549xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_3 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x49x1xf32) <- (2x49x3549xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_6, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x49x1xf32) <- (2x49x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_1) + del multiply_2, scale_1 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x3549xf32) <- (2x49x3549xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x3549x1xf32) <- (2x3549xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/weight_meta.py 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..aee9da3ec --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +d70c95cb38f1691238207ff482ddeba1213eb8b254421a15420ab67275df40f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/input_meta.py new file mode 100644 index 000000000..0b2b2b7c3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/input_meta.py @@ -0,0 +1,89 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 13, 6069] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000741473") + std = float("0.0272199") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 13, 1] + dtype = "int32" + data = [ + 3, + 3, + 3, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3, + 3, + 3, + 3, + 0, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + ] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 6069] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00963915") + std = float("0.0977048") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 13, 4] + dtype = "float32" + max_val = float("544.0") + mean = float("152.282") + std = float("162.227") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 13, 6069] + dtype = "float32" + max_val = float("0.0674893") + mean = float("2.9226e-06") + std = float("0.000269731") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 13, 6069] + dtype = "float32" + max_val = float("0.902031") + mean = float("0.000925564") + std = float("0.0166036") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/model.py new file mode 100644 index 000000000..0c1739521 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/model.py @@ -0,0 +1,192 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x6069xi64) <- (2x13x6069xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("13"), 
paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x6069xi64) <- (2x6069xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (26xi32) <- (2x13x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (12138xi64) <- (2x6069xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (12138xi32) <- (26xi32, 12138xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 6069] + + # pd_op.reshape: (2x6069xi32) <- (12138xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x6069xb) <- (2x6069xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x6069xi32) <- (2x6069xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x6069xi32) <- (2x6069xb, 2x6069xi32, 2x6069xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (26x4xf32) <- (2x13x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (12138x4xf32) <- (26x4xf32, 12138xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 6069, 4] + + # pd_op.reshape: (2x6069x4xf32) <- (12138x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x6069x11xf32) <- (2x6069xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (10xi64) <- () + full_6 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x6069x10xf32) <- (2x6069x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x13x6069xf32) <- (2x13x6069xf32, 
2x13x6069xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x13x1xf32) <- (2x13x6069xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x13x6069xf32) <- (2x13x6069xf32, 2x13x6069xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x13x1xf32) <- (2x13x6069xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x13x1xf32) <- (2x13x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (2x13x6069xf32) <- (2x13x6069xf32, 2x13x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x13x6069xf32) <- (2x13x6069xf32, 2x13x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x6069xf32) <- (2x13x6069xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x6069x1xf32) <- (2x6069xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x6069x10xf32) <- (2x6069x10xf32, 2x6069x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..510fac8ff --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +355f494a39c48b6df4cbaa2fd5b171f78e285d9d215a0a583ad8f6f31d6c607c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/input_meta.py new file mode 100644 index 000000000..aa17b1f21 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/input_meta.py @@ -0,0 +1,64 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 9261] + dtype = "float32" + max_val = float("10.0") + mean = float("0.0390347") + std = float("0.295082") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 38, 9261] + dtype = "float32" + max_val = float("0.970134") + mean = float("0.00102475") + std = 
float("0.0250592") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 38, 9261] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00102723") + std = float("0.0320339") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 38, 1] + dtype = "int32" + min_val = 0 + max_val = 9 + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 38, 4] + dtype = "float32" + max_val = float("654.24") + mean = float("257.813") + std = float("167.378") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 38, 9261] + dtype = "float32" + max_val = float("0.653712") + mean = float("9.28356e-05") + std = float("0.00547156") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/model.py new file mode 100644 index 000000000..7af677b88 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/model.py @@ -0,0 +1,244 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x9261xf32) <- (2x9261xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x9261xb) <- (2x1x9261xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 38, 1] + + # pd_op.tile: (2x38x9261xb) <- (2x1x9261xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + multiply_1 = paddle._C_ops.multiply(data_1, data_2) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x9261xi64) <- (2x38x9261xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_1, False, False, paddle.int64) + del multiply_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("38"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x9261x38xf32) <- (2x9261xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x38x9261xf32) <- (2x9261x38xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x38x9261xf32) <- (2x38x9261xb, 2x38x9261xf32, 2x38x9261xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x9261xf32) <- (2x38x9261xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x9261xi64) <- (2x38x9261xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], 
float("38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x9261xi64) <- (2x9261xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (76xi32) <- (2x38x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (18522xi64) <- (2x9261xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (18522xi32) <- (76xi32, 18522xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 9261] + + # pd_op.reshape: (2x9261xi32) <- (18522xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x9261xb) <- (2x9261xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x9261xi32) <- (2x9261xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (2x9261xi32) <- (2x9261xb, 2x9261xi32, 2x9261xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (76x4xf32) <- (2x38x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (18522x4xf32) <- (76x4xf32, 18522xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 9261, 4] + + # pd_op.reshape: (2x9261x4xf32) <- (18522x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x9261x11xf32) <- (2x9261xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (10xi64) <- () + full_8 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x9261x10xf32) <- (2x9261x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x38x9261xf32) <- 
(2x38x9261xf32, 2x38x9261xf32) + multiply_2 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x38x1xf32) <- (2x38x9261xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + multiply_3 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x38x1xf32) <- (2x38x9261xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_6, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x38x1xf32) <- (2x38x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_1) + del multiply_2, scale_1 + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x9261xf32) <- (2x38x9261xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x9261x1xf32) <- (2x9261xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x9261x10xf32) <- (2x9261x10xf32, 2x9261x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..bfa76b594 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +fa6eb9b0757bd8e932b3ff14c3ba9c809889599d2cfbb8e5c4dc486ee670a158 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/input_meta.py new file mode 100644 index 000000000..b1ee32a68 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/input_meta.py @@ -0,0 +1,138 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [8] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [4725] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 8, 4725] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00149471") + std = float("0.0386326") + data = None + + +class 
Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 8, 1] + dtype = "int32" + data = [3, 4, 8, 0, 0, 0, 0, 0, 3, 3, 3, 8, 8, 0, 5, 5] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 4725] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0119577") + std = float("0.108695") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 8, 4] + dtype = "float32" + data = [ + 10.4673, + 88.2581, + 29.9065, + 109.935, + 86.729, + 34.0645, + 116.636, + 55.7419, + 231.776, + 374.71, + 480.0, + 453.677, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 24.5839, + 49.1873, + 36.2612, + 100.071, + 50.3969, + 84.8057, + 63.3035, + 147.562, + 25.8131, + 408.763, + 46.0947, + 480.0, + 65.1472, + 242.544, + 89.1165, + 480.0, + 296.85, + 351.095, + 326.351, + 478.304, + 280.256, + 120.424, + 283.944, + 150.954, + 251.985, + 61.0601, + 275.954, + 169.611, + 236.62, + 0.0, + 263.047, + 79.7173, + ] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 8, 4725] + dtype = "float32" + max_val = float("0.049982") + mean = float("1.7069e-06") + std = float("0.00021642") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 8, 4725] + dtype = "float32" + max_val = float("0.886292") + mean = float("0.0026412") + std = float("0.0230532") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/model.py new file mode 100644 index 000000000..4307988eb --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/model.py @@ -0,0 +1,212 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = 
paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x11xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (10xi64) <- () + full_7 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (2x-1x10xf32) <- (2x-1x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- 
(2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) + del full_8, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (2x-1x10xf32) <- (2x-1x10xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..182442b9d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +2485a2b54fab16a901a7ed38b4f7e03b3c5d5ee99316309319a1704e31fd9041 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/input_meta.py new file mode 100644 index 000000000..a9e8301f3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/input_meta.py @@ -0,0 +1,78 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [60] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [4725] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 4725] + dtype = "float32" + max_val = float("24.0") + mean = float("0.0858201") + std = float("0.756726") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 60, 4725] + dtype = "float32" + max_val = float("0.93016") + mean = float("0.000487774") + std = float("0.0162952") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 60, 4725] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00143033") + std = float("0.0377927") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 60, 1] + dtype = "int32" + min_val = 0 + max_val = 9 + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + 
shape = [2, 60, 4] + dtype = "float32" + max_val = float("478.782") + mean = float("81.1713") + std = float("81.8496") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 60, 4725] + dtype = "float32" + max_val = float("0.237328") + mean = float("1.24907e-05") + std = float("0.00104253") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/model.py new file mode 100644 index 000000000..90c3991aa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/model.py @@ -0,0 +1,290 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_2, full_int_array_0) + del data_2 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + del full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_3, data_4) + + # pd_op.shape64: (3xi64) <- (2x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(multiply_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del full_int_array_1, full_int_array_2, shape64_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_2, False, False, paddle.int64) + del multiply_1 + + # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(slice_0, argmax_0.dtype), slice_0 + ) + del argmax_0, slice_0 + + # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_4) + del data_4, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_3, None, False) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_1 = 
paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_2 = paddle._C_ops.multiply(data_5, cast_0) + del cast_0, data_5 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_2, paddle.int64) + del multiply_2 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_6, 0, 2) + del data_6 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_4, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_7, full_int_array_4) + del data_7, full_int_array_4 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_4, data_1, full_7] + del data_1, full_4, full_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x11xf32) <- (2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_8, where_1.dtype), full_8 + ) + del full_8 + + # pd_op.full: (10xi64) <- () + full_9 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # 
pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (2x-1x10xf32) <- (2x-1x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, where_0) + del data_8 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_3, full_int_array_5, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, where_0) + del data_3, where_0 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_4, full_int_array_5, True) + del multiply_4 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_10, float("1e-09"), True) + del full_10, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_3, scale_0) + del multiply_3, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_5 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_5, full_int_array_3, False) + del full_int_array_3, multiply_5 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_5) + del full_int_array_5, max_2 + + # pd_op.multiply: (2x-1x10xf32) <- (2x-1x10xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..951232222 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +7f6edfb359b9bae12be4cef48e9822a33ba3dfa983349265b87f2f8e000679cb \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/input_meta.py 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/input_meta.py new file mode 100644 index 000000000..56cfb4b20 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/input_meta.py @@ -0,0 +1,127 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [49] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [48384] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 48384] + dtype = "float32" + max_val = float("8.0") + mean = float("0.00859788") + std = float("0.140155") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 49, 48384] + dtype = "float32" + max_val = float("0.949472") + mean = float("8.67853e-05") + std = float("0.0068947") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 49, 48384] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000175467") + std = float("0.0132452") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 49, 1] + dtype = "int32" + data = [ + 0, + 0, + 3, + 8, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 4, + 4, + 3, + 3, + 3, + 4, + 4, + 8, + 3, + 3, + 3, + 3, + 8, + 8, + 8, + 8, + 8, + 8, + 4, + 8, + 8, + 8, + 8, + 8, + 5, + 0, + 8, + 8, + 8, + 8, + 8, + 3, + 3, + 8, + 8, + 0, + 4, + ] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1, 49, 4] + dtype = "float32" + min_val = float("830.542") + max_val = float("1214.81") + mean = float("996.371") + std = float("76.2156") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1, 49, 48384] + dtype = "float32" + max_val = float("0.514231") + mean = float("4.88935e-06") + std = float("0.000998645") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/model.py new file mode 100644 index 000000000..54f7a46c8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/model.py @@ -0,0 +1,284 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (1x1x-1xf32) <- (1x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_2, full_int_array_0) + del data_2 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x1x-1xb) <- (1x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (1x-1x-1xb) <- (1x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_3, 
data_4) + + # pd_op.shape64: (3xi64) <- (1x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(multiply_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del full_int_array_1, full_int_array_2, shape64_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x-1xi64) <- (1x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_2, False, False, paddle.int64) + del multiply_1 + + # pd_op.one_hot: (1x-1x-1xf32) <- (1x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(slice_0, argmax_0.dtype), slice_0 + ) + del argmax_0, slice_0 + + # pd_op.transpose: (1x-1x-1xf32) <- (1x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (1x-1x-1xf32) <- (1x-1x-1xb, 1x-1x-1xf32, 1x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_4) + del data_4, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.sum: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_3, None, False) + + # pd_op.argmax: (1x-1xi64) <- (1x-1x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) + multiply_2 = paddle._C_ops.multiply(data_5, cast_0) + del cast_0, data_5 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_2, paddle.int64) + del multiply_2 + + # pd_op.add: (1x-1xi64) <- (1x-1xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 + + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_6, 0, 2) + del data_6 + + # pd_op.flatten: (-1xi64) <- (1x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_1, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (1x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 + + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x-1xb) <- (1x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_4) + del full_4, sum_0 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x-1xi32) <- (1x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_5, paddle.int32, paddle.framework._current_expected_place() + ) + del 
full_5 + + # pd_op.where: (1x-1xi32) <- (1x-1xb, 1x-1xi32, 1x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_7, full_int_array_4) + del data_7, full_int_array_4 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_1, data_1, full_6] + del data_1, full_1, full_6 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (1x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x-1x11xf32) <- (1x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (10xi64) <- () + full_8 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (1x-1x10xf32) <- (1x-1x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, where_0) + del data_8 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.max: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_3, full_int_array_5, True) + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, where_0) + del data_3, where_0 + + # pd_op.max: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_4, full_int_array_5, True) + del multiply_4 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x1xf32) <- (1x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_3, scale_0) + del multiply_3, scale_0 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_5 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_5, full_int_array_3, False) + del full_int_array_3, multiply_5 + + # pd_op.unsqueeze: (1x-1x1xf32) <- (1x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_5) + del full_int_array_5, max_2 + + # pd_op.multiply: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x1xf32) + multiply_0 
= paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..1ba099c96 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +a1e871dca6015fd870e153211f3cd48512ab629d616889d648d8f93c88df3e51 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/input_meta.py new file mode 100644 index 000000000..0488f0946 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/input_meta.py @@ -0,0 +1,108 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 12, 27216] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000453165") + std = float("0.0212828") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 12, 1] + dtype = "int32" + data = [4, 4, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 27216] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00543798") + std = float("0.0735419") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 12, 4] + dtype = "float32" + data = [ + 810.02, + 1015.02, + 826.828, + 1104.59, + 803.556, + 862.244, + 821.01, + 939.512, + 685.253, + 848.195, + 696.242, + 913.171, + 707.232, + 783.219, + 720.162, + 865.756, + 705.293, + 614.634, + 718.869, + 688.39, + 622.545, + 934.244, + 636.768, + 1037.85, + 625.778, + 567.219, + 640.646, + 632.195, + 605.091, + 567.219, + 617.374, + 632.195, + 538.505, + 763.902, + 548.849, + 864.0, + 536.566, + 567.219, + 550.788, + 621.659, + 513.939, + 978.146, + 529.455, + 1074.73, + 789.98, + 570.732, + 806.788, + 637.463, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 12, 27216] + dtype = "float32" + max_val = float("0.365907") + mean = float("5.42289e-05") + std = float("0.00308233") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 12, 27216] + dtype = "float32" + max_val = float("0.95733") + mean = float("0.00162842") + std = float("0.0295307") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/model.py 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/model.py new file mode 100644 index 000000000..8a1f2862b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/model.py @@ -0,0 +1,192 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x27216xi64) <- (1x12x27216xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("12"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x27216xi64) <- (1x27216xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (12xi32) <- (1x12x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (27216xi64) <- (1x27216xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (27216xi32) <- (12xi32, 27216xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 27216] + + # pd_op.reshape: (1x27216xi32) <- (27216xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x27216xb) <- (1x27216xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x27216xi32) <- (1x27216xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (1x27216xi32) <- (1x27216xb, 1x27216xi32, 1x27216xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (12x4xf32) <- (1x12x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (27216x4xf32) <- (12x4xf32, 27216xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [1, 27216, 4] + + # pd_op.reshape: (1x27216x4xf32) <- (27216x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x27216x11xf32) <- (1x27216xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % 
paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (10xi64) <- () + full_6 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (1x27216x10xf32) <- (1x27216x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x27216xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (1x12x1xf32) <- (1x12x27216xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x27216xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (1x12x1xf32) <- (1x12x27216xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x12x1xf32) <- (1x12x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (1x27216xf32) <- (1x12x27216xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (1x27216x1xf32) <- (1x27216xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (1x27216x10xf32) <- (1x27216x10xf32, 1x27216x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..7248f3b80 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +80c6a3012fae16e53b556d8b6ef2a40e2378ccb66ef0a81269f362d7dab93afe \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/input_meta.py new file mode 100644 index 000000000..0bbda7212 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/input_meta.py @@ -0,0 +1,102 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 24276] + dtype = "float32" + max_val = float("2.0") + mean = float("0.0175482") + std = float("0.141858") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 38, 24276] + dtype = "float32" + max_val = float("0.982337") + mean = float("0.000792632") + std = float("0.022185") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 38, 24276] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000461795") + std = float("0.0214844") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 38, 1] + dtype = "int32" + data = [ + 3, + 3, + 9, + 1, + 0, + 0, + 0, + 0, + 3, + 3, + 3, + 3, + 3, + 0, + 0, + 0, + 8, + 3, + 3, + 3, + 0, + 0, + 3, + 3, + 3, + 3, + 3, + 5, + 3, + 3, + 3, + 3, + 3, + 0, + 3, + 3, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 38, 4] + dtype = "float32" + min_val = float("354.773") + max_val = float("1051.0") + mean = float("652.35") + std = float("193.013") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 38, 24276] + dtype = "float32" + max_val = float("0.73484") + mean = float("8.98923e-05") + std = float("0.00618669") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/model.py new file mode 100644 index 000000000..88cd8833b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/model.py @@ -0,0 +1,244 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (1x1x24276xf32) <- (1x24276xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x1x24276xb) <- (1x1x24276xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 38, 1] + + # pd_op.tile: (1x38x24276xb) <- (1x1x24276xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_1 = paddle._C_ops.multiply(data_1, data_2) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: 
(1x24276xi64) <- (1x38x24276xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_1, False, False, paddle.int64) + del multiply_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("38"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x24276x38xf32) <- (1x24276xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (1x38x24276xf32) <- (1x24276x38xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (1x38x24276xf32) <- (1x38x24276xb, 1x38x24276xf32, 1x38x24276xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (1x24276xf32) <- (1x38x24276xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (1x24276xi64) <- (1x38x24276xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x24276xi64) <- (1x24276xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (38xi32) <- (1x38x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (24276xi64) <- (1x24276xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (24276xi32) <- (38xi32, 24276xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [1, 24276] + + # pd_op.reshape: (1x24276xi32) <- (24276xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x24276xb) <- (1x24276xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x24276xi32) <- (1x24276xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (1x24276xi32) <- (1x24276xb, 1x24276xi32, 1x24276xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (38x4xf32) <- (1x38x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (24276x4xf32) <- (38x4xf32, 24276xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + 
full_int_array_5 = [1, 24276, 4] + + # pd_op.reshape: (1x24276x4xf32) <- (24276x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x24276x11xf32) <- (1x24276xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (10xi64) <- () + full_8 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (1x24276x10xf32) <- (1x24276x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_2 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (1x38x1xf32) <- (1x38x24276xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_3 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (1x38x1xf32) <- (1x38x24276xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_6, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x38x1xf32) <- (1x38x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_1) + del multiply_2, scale_1 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (1x24276xf32) <- (1x38x24276xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (1x24276x1xf32) <- (1x24276xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (1x24276x10xf32) <- (1x24276x10xf32, 1x24276x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-L/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..4c345c63f --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ 
+bd0abfe08096791721c2c668713ec90c170582dfecc4344b29d40745a0863466 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PicoDet-L/subgraph_11/graph_net.json new file mode 100644 index 000000000..0289448c6 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_11/input_meta.py new file mode 100644 index 000000000..a6d1ff5ef --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_11/input_meta.py @@ -0,0 +1,124 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 2, 6885] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0011801") + std = float("0.0343323") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1] + dtype = "int32" + data = [0, 1, 2, 3, 4, 5, 6, 7] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 2, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 3, 2, 0, 0, 3, 0, 1, 0, 0, 0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 6885] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0023602") + std = float("0.0485246") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 2, 4] + dtype = "float32" + data = [ + 0.0, + 4.88136, + 325.403, + 453.966, + 0.0, + 0.0, + 0.0, + 0.0, + 278.4, + 174.24, + 364.8, + 239.04, + 0.0, + 0.0, + 0.0, + 0.0, + 224.262, + 34.7586, + 361.18, + 178.759, + 0.0, + 0.0, + 0.0, + 0.0, + 163.817, + 0.0, + 576.0, + 147.172, + 63.4128, + 223.295, + 470.312, + 576.0, + 338.824, + 192.0, + 545.882, + 446.512, + 0.0, + 0.0, + 0.0, + 0.0, + 49.7778, + 0.0, + 576.0, + 576.0, + 0.0, + 0.0, + 0.0, + 0.0, + 261.12, + 262.08, + 314.88, + 316.8, + 301.44, + 154.08, + 393.6, + 226.08, + 124.541, + 197.013, + 289.112, + 308.303, + 0.0, + 0.0, + 0.0, + 0.0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 2, 6885] + dtype = "float32" + max_val = float("0.777919") + mean = float("0.000448094") + std = float("0.0116274") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 2, 6885] + dtype = "float32" + max_val = float("0.987404") + mean = float("0.0142404") + std = float("0.0746938") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_11/model.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_11/model.py new file mode 100644 index 000000000..33d9e50ce --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_11/model.py @@ -0,0 +1,181 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x6885xi64) <- (8x2x6885xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (8x1xi64) <- 
(8x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (8x6885xi64) <- (8x6885xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (16xi32) <- (8x2x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (55080xi64) <- (8x6885xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (55080xi32) <- (16xi32, 55080xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [8, 6885] + + # pd_op.reshape: (8x6885xi32) <- (55080xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x6885xb) <- (8x6885xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x6885xi32) <- (8x6885xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (8x6885xi32) <- (8x6885xb, 8x6885xi32, 8x6885xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (16x4xf32) <- (8x2x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (55080x4xf32) <- (16x4xf32, 55080xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [8, 6885, 4] + + # pd_op.reshape: (8x6885x4xf32) <- (55080x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x6885x5xf32) <- (8x6885xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (4xi64) <- () + full_6 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (8x6885x4xf32) <- (8x6885x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (8x2x6885xf32) <- (8x2x6885xf32, 8x2x6885xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (8x2x1xf32) <- (8x2x6885xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (8x2x6885xf32) <- (8x2x6885xf32, 
8x2x6885xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (8x2x1xf32) <- (8x2x6885xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x2x1xf32) <- (8x2x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (8x2x6885xf32) <- (8x2x6885xf32, 8x2x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (8x2x6885xf32) <- (8x2x6885xf32, 8x2x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (8x6885xf32) <- (8x2x6885xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (8x6885x1xf32) <- (8x6885xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (8x6885x4xf32) <- (8x6885x4xf32, 8x6885x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-L/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..e991aeda4 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +392d38c1a6cc6695a35df5e3a49c43e5c9d08126f6c877f14cb1fa970481a478 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PicoDet-L/subgraph_16/graph_net.json new file mode 100644 index 000000000..0289448c6 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_16/input_meta.py new file mode 100644 index 000000000..b314369a7 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_16/input_meta.py @@ -0,0 +1,110 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [5] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 5, 6885] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000613653") + std = float("0.0247644") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 1] + dtype = "int32" + data = [0, 1, 2, 3, 4, 5, 6, 7] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 5, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 2, + 2, + 2, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 6885] + dtype = 
"float32" + max_val = float("1.0") + mean = float("0.00306826") + std = float("0.0553069") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 5, 4] + dtype = "float32" + max_val = float("533.416") + mean = float("83.8826") + std = float("133.551") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 5, 6885] + dtype = "float32" + max_val = float("0.588084") + mean = float("0.00014068") + std = float("0.00627399") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [8, 5, 6885] + dtype = "float32" + max_val = float("0.967009") + mean = float("0.00451001") + std = float("0.0454716") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_16/model.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_16/model.py new file mode 100644 index 000000000..87a5de8ca --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_16/model.py @@ -0,0 +1,180 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x6885xi64) <- (8x-1x6885xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (8x1xi32) <- (8x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_2, cast_0) + del cast_0, data_2 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (8x6885xi64) <- (8x6885xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (8x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (55080xi64) <- (8x6885xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (55080xi32) <- (-1xi32, 55080xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [8, 6885] + + # pd_op.reshape: (8x6885xi32) <- (55080xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_2 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x6885xb) <- (8x6885xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_2) + del data_4, full_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x6885xi32) <- (8x6885xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_3, paddle.int32, paddle.framework._current_expected_place() + ) + del full_3 + + # pd_op.where: (8x6885xi32) <- (8x6885xb, 8x6885xi32, 8x6885xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (8x-1x4xf32, 2xi64) + reshape_2 = 
paddle._C_ops.reshape(data_5, full_int_array_1) + del data_5, full_int_array_1 + + # pd_op.gather: (55080x4xf32) <- (-1x4xf32, 55080xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [8, 6885, 4] + + # pd_op.reshape: (8x6885x4xf32) <- (55080x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x6885x5xf32) <- (8x6885xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_4, where_0.dtype), full_4 + ) + del full_4 + + # pd_op.full: (4xi64) <- () + full_5 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_5, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_5 + + # pd_op.index_select: (8x6885x4xf32) <- (8x6885x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (8x-1x6885xf32) <- (8x-1x6885xf32, 8x-1x6885xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_1) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (8x-1x1xf32) <- (8x-1x6885xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + + # pd_op.multiply: (8x-1x6885xf32) <- (8x-1x6885xf32, 8x-1x6885xf32) + multiply_3 = paddle._C_ops.multiply(data_7, data_1) + del data_1, data_7 + + # pd_op.max: (8x-1x1xf32) <- (8x-1x6885xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x-1x1xf32) <- (8x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 + + # pd_op.divide: (8x-1x6885xf32) <- (8x-1x6885xf32, 8x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (8x-1x6885xf32) <- (8x-1x6885xf32, 8x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (8x6885xf32) <- (8x-1x6885xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_4, False) + del full_int_array_4, multiply_4 + + # pd_op.unsqueeze: (8x6885x1xf32) <- (8x6885xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (8x6885x4xf32) <- (8x6885x4xf32, 8x6885x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_16/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_16/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-L/subgraph_2/graph_hash.txt new file mode 100644 index 
000000000..dbf341ae3 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +4d9c4fdb6f89872cff5a1d7b782191d3432f8c3de1e21cadd795d8358285930d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PicoDet-L/subgraph_2/graph_net.json new file mode 100644 index 000000000..0289448c6 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_2/input_meta.py new file mode 100644 index 000000000..c9b6be879 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_2/input_meta.py @@ -0,0 +1,60 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [8500] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 3, 8500] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000465686") + std = float("0.0215747") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 3, 1] + dtype = "int32" + data = [0, 0, 0, 3, 0, 0, 0, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 8500] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00139706") + std = float("0.0373511") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 3, 4] + dtype = "float32" + max_val = float("633.934") + mean = float("164.638") + std = float("196.882") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 8500, 4] + dtype = "float32" + min_val = float("-223.325") + max_val = float("830.335") + mean = float("319.957") + std = float("190.395") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_2/model.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_2/model.py new file mode 100644 index 000000000..77efae021 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_2/model.py @@ -0,0 +1,310 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x-1xi64) <- (8x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("8"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.cast: (xi32) <- 
(xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (8x1xi32) <- (8x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(unsqueeze_0, cast_0) + del cast_0, unsqueeze_0 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (8x-1xi64) <- (8x-1xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (8x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (-1xi64) <- (8x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_5, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (8x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x-1xb) <- (8x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_6) + del data_4, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x-1xi32) <- (8x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (8x-1xi32) <- (8x-1xb, 8x-1xi32, 8x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (8x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_1) + del full_int_array_1 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full: (xi64) <- () + full_8 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_5, data_1, full_8] + del data_1, full_5, full_8 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (8x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x-1x5xf32) <- (8x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_9, where_0.dtype), full_9 + ) + del full_9 + + # pd_op.full: (4xi64) <- () + full_10 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_10, + [4], + 
paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.index_select: (8x-1x4xf32) <- (8x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.unsqueeze: (8x-1x1x4xf32) <- (8x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_5, full_int_array_2) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.unsqueeze: (8x1x-1x4xf32) <- (8x-1x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (8x-1x1x2xf32) <- (8x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_2, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (8x-1x1x2xf32) <- (8x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (8x1x-1x2xf32) <- (8x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_2, [1], [] + ) + del full_int_array_4 + + # pd_op.slice: (8x1x-1x2xf32) <- (8x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del full_int_array_2, full_int_array_5, unsqueeze_2 + + # pd_op.maximum: (8x-1x-1x2xf32) <- (8x-1x1x2xf32, 8x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x-1x-1x2xf32) <- (8x-1x1x2xf32, 8x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x-1x-1x2xf32) <- (8x-1x-1x2xf32, 8x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x-1x-1x2xf32) <- (8x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_11, full_12) + del subtract_0 + + # pd_op.prod: (8x-1x-1xf32) <- (8x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (8x-1x1x2xf32) <- (8x-1x1x2xf32, 8x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x-1x1x2xf32) <- (8x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_11, full_12) + del subtract_1 + + # pd_op.prod: (8x-1x1xf32) <- (8x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (8x1x-1x2xf32) <- (8x1x-1x2xf32, 8x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (8x1x-1x2xf32) <- (8x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_11, full_12) + del full_11, full_12, subtract_2 + + # pd_op.prod: (8x1x-1xf32) <- (8x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (8x-1x-1xf32) <- (8x-1x1xf32, 8x1x-1xf32) + add_1 = 
paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x-1x-1xf32) <- (8x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_13, float("1e-09"), True) + del full_13, subtract_3 + + # pd_op.divide: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(divide_0, data_2) + del data_2, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-2] + + # pd_op.max: (8x-1xf32) <- (8x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, False) + del full_int_array_6, multiply_2 + + # pd_op.unsqueeze: (8x-1x1xf32) <- (8x-1xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (8x-1x4xf32) <- (8x-1x4xf32, 8x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-L/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..517d0e686 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +4656273f72ddbd5493ac9d477fd5328c8560cca46d45edaba54d71ad8ddcd1ac \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PicoDet-L/subgraph_3/graph_net.json new file mode 100644 index 000000000..0289448c6 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_3/input_meta.py new file mode 100644 index 000000000..5a20fd12d --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_3/input_meta.py @@ -0,0 +1,106 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [10285] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 1, 10285] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00126398") + std = float("0.03553") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1] + dtype = "int32" + data = [0, 1, 2, 3, 4, 5, 6, 7] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 3] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 10285] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00126398") + std = float("0.03553") + data = None + + +class 
Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 166.595, + 0.0, + 623.389, + 284.952, + 372.895, + 37.6471, + 636.493, + 342.588, + 158.282, + 112.64, + 396.886, + 340.267, + 251.093, + 265.76, + 328.533, + 318.56, + 413.013, + 271.04, + 478.72, + 318.56, + 513.239, + 272.647, + 704.0, + 529.017, + 44.763, + 386.623, + 240.092, + 652.066, + 195.896, + 244.364, + 377.507, + 704.0, + ] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [8, 1, 10285] + dtype = "float32" + max_val = float("0.6122") + mean = float("0.000390712") + std = float("0.0102456") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [8, 1, 10285] + dtype = "float32" + max_val = float("0.986067") + mean = float("0.0161051") + std = float("0.0780758") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_3/model.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_3/model.py new file mode 100644 index 000000000..d8e8f9012 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_3/model.py @@ -0,0 +1,201 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x-1xi64) <- (8x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (8x1xi32) <- (8x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (8x-1xi64) <- (8x-1xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (8x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (8x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (8x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x-1xb) <- (8x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x-1xi32) <- (8x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # 
pd_op.where: (8x-1xi32) <- (8x-1xb, 8x-1xi32, 8x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (8x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (8x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x-1x5xf32) <- (8x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (4xi64) <- () + full_7 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (8x-1x4xf32) <- (8x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (8x-1x1xf32) <- (8x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (8x-1x1xf32) <- (8x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x-1x1xf32) <- (8x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) + del full_8, max_0 + + # pd_op.divide: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (8x-1xf32) <- (8x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (8x-1x1xf32) <- (8x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (8x-1x4xf32) <- (8x-1x4xf32, 8x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + 
del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-L/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..b450fdb93 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +5982df887dba459df47e75e8c046785e5c1800abe6a28a12e43209ca9aa7a5e3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PicoDet-L/subgraph_5/graph_net.json new file mode 100644 index 000000000..0289448c6 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_5/input_meta.py new file mode 100644 index 000000000..19d67c7a8 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_5/input_meta.py @@ -0,0 +1,76 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 6885] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000962237") + std = float("0.031005") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 6885] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000962237") + std = float("0.031005") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 266.779, + 175.628, + 357.726, + 242.605, + 445.091, + 156.279, + 576.0, + 283.535, + 299.016, + 185.492, + 450.098, + 341.695, + 316.8, + 290.88, + 407.04, + 357.12, + 180.245, + 234.105, + 240.98, + 284.632, + 337.92, + 221.76, + 391.68, + 260.64, + 378.24, + 92.16, + 449.28, + 144.0, + 286.56, + 187.685, + 414.72, + 386.157, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 6885, 4] + dtype = "float32" + min_val = float("-239.923") + max_val = float("882.894") + mean = float("287.886") + std = float("172.171") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_5/model.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_5/model.py new file mode 100644 index 000000000..d66ee3660 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_5/model.py @@ -0,0 +1,287 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x6885xi64) <- (8x1x6885xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("8"), paddle.float64, paddle.core.CPUPlace() + ) + + # 
pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (8x6885xi64) <- (8x6885xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (8xi32) <- (8x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (55080xi64) <- (8x6885xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (55080xi32) <- (8xi32, 55080xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [8, 6885] + + # pd_op.reshape: (8x6885xi32) <- (55080xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x6885xb) <- (8x6885xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x6885xi32) <- (8x6885xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (8x6885xi32) <- (8x6885xb, 8x6885xi32, 8x6885xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (55080x4xf32) <- (8x4xf32, 55080xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [8, 6885, 4] + + # pd_op.reshape: (8x6885x4xf32) <- (55080x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x6885x5xf32) <- (8x6885xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + 
assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (8x6885x4xf32) <- (8x6885x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (8x1x6885x4xf32) <- (8x6885x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (8x1x6885x2xf32) <- (8x1x6885x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (8x1x6885x2xf32) <- (8x1x6885x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (8x1x6885x2xf32) <- (8x1x1x2xf32, 8x1x6885x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x1x6885x2xf32) <- (8x1x1x2xf32, 8x1x6885x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x1x6885x2xf32) <- (8x1x6885x2xf32, 8x1x6885x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x1x6885x2xf32) <- (8x1x6885x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (8x1x6885xf32) <- (8x1x6885x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (8x1x1x2xf32) <- (8x1x1x2xf32, 8x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x1x1x2xf32) <- (8x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (8x1x1xf32) <- (8x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (8x1x6885x2xf32) <- (8x1x6885x2xf32, 8x1x6885x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (8x1x6885x2xf32) <- (8x1x6885x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (8x1x6885xf32) <- (8x1x6885x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del 
clip_2 + + # pd_op.add: (8x1x6885xf32) <- (8x1x1xf32, 8x1x6885xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x1x6885xf32) <- (8x1x6885xf32, 8x1x6885xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.scale: (8x1x6885xf32) <- (8x1x6885xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) + del full_4, subtract_3 + + # pd_op.divide: (8x1x6885xf32) <- (8x1x6885xf32, 8x1x6885xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (8x1x6885xf32) <- (8x1x6885xf32, 8x1x6885xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (8x6885xf32) <- (8x1x6885xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (8x6885x1xf32) <- (8x6885xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (8x6885x4xf32) <- (8x6885x4xf32, 8x6885x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-L/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PicoDet-L/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..1506f4669 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +6800a71f53fed7dd455b89d3b81bc6e4d8e28ef70742aab574ddf0a3a0383698 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/graph_net.json new file mode 100644 index 000000000..58ad43584 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-L_layout_17cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/input_meta.py new file mode 100644 index 000000000..2f8a09379 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/input_meta.py @@ -0,0 +1,60 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 2, 6885] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000980392") + std = float("0.0312959") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 2, 1] + dtype = "int32" + data = [1, 3, 1, 0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 6885] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00196078") + std = float("0.0442373") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 2, 4] + dtype = "float32" + data = [ + 11.9415, + 23.662, + 576.0, + 369.803, + 65.3268, + 381.296, + 576.0, + 562.479, + 142.432, + 0.0, + 576.0, + 576.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] + + +class 
Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 6885, 4] + dtype = "float32" + min_val = float("-262.522") + max_val = float("851.804") + mean = float("288.022") + std = float("173.427") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/model.py b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/model.py new file mode 100644 index 000000000..77261c5a4 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/model.py @@ -0,0 +1,304 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x6885xi64) <- (2x2x6885xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del full_4, unsqueeze_0 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x6885xi64) <- (2x6885xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (4xi32) <- (2x2x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (13770xi64) <- (2x6885xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (13770xi32) <- (4xi32, 13770xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [2, 6885] + + # pd_op.reshape: (2x6885xi32) <- (13770xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x6885xb) <- (2x6885xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x6885xi32) <- (2x6885xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (2x6885xi32) <- (2x6885xb, 2x6885xi32, 
2x6885xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (4x4xf32) <- (2x2x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (13770x4xf32) <- (4x4xf32, 13770xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [2, 6885, 4] + + # pd_op.reshape: (2x6885x4xf32) <- (13770x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x6885x12xf32) <- (2x6885xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (11xi64) <- () + full_9 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (2x6885x11xf32) <- (2x6885x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (2x2x1x4xf32) <- (2x2x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (2x1x6885x4xf32) <- (2x6885x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (2x2x1x2xf32) <- (2x2x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (2x2x1x2xf32) <- (2x2x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (2x1x6885x2xf32) <- (2x1x6885x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (2x1x6885x2xf32) <- (2x1x6885x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (2x2x6885x2xf32) <- (2x2x1x2xf32, 2x1x6885x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x2x6885x2xf32) <- (2x2x1x2xf32, 2x1x6885x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x2x6885x2xf32) <- (2x2x6885x2xf32, 2x2x6885x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), 
paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x2x6885x2xf32) <- (2x2x6885x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (2x2x6885xf32) <- (2x2x6885x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (2x2x1x2xf32) <- (2x2x1x2xf32, 2x2x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x2x1x2xf32) <- (2x2x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (2x2x1xf32) <- (2x2x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (2x1x6885x2xf32) <- (2x1x6885x2xf32, 2x1x6885x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x6885x2xf32) <- (2x1x6885x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (2x1x6885xf32) <- (2x1x6885x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (2x2x6885xf32) <- (2x2x1xf32, 2x1x6885xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x2x6885xf32) <- (2x2x6885xf32, 2x2x6885xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x2x6885xf32) <- (2x2x6885xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_12, float("1e-09"), True) + del full_12, subtract_3 + + # pd_op.divide: (2x2x6885xf32) <- (2x2x6885xf32, 2x2x6885xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (2x2x6885xf32) <- (2x2x6885xf32, 2x2x6885xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (2x6885xf32) <- (2x2x6885xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (2x6885x1xf32) <- (2x6885xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (2x6885x11xf32) <- (2x6885x11xf32, 2x6885x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..3b6e54601 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +ee84750c594875033121f50e3dd7eee8a439e3dcc8ba23c93389435958b78604 \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/graph_net.json new file mode 100644 index 000000000..58ad43584 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-L_layout_17cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/input_meta.py new file mode 100644 index 000000000..7fa497152 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/input_meta.py @@ -0,0 +1,76 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 2, 10285] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00126398") + std = float("0.03553") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 2, 1] + dtype = "int32" + data = [9, 9, 1, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 10285] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00252795") + std = float("0.0502152") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 2, 4] + dtype = "float32" + data = [ + 354.973, + 0.0, + 640.973, + 704.0, + 42.8108, + 0.0, + 329.405, + 704.0, + 1.7733, + 0.0, + 704.0, + 248.143, + 37.2393, + 263.477, + 704.0, + 677.513, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 2, 10285] + dtype = "float32" + max_val = float("0.10948") + mean = float("2.71672e-05") + std = float("0.00109742") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 2, 10285] + dtype = "float32" + max_val = float("0.889254") + mean = float("0.0103674") + std = float("0.0383997") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/model.py b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/model.py new file mode 100644 index 000000000..17fb9782c --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/model.py @@ -0,0 +1,193 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x10285xi64) <- (2x2x10285xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x10285xi64) <- (2x10285xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (4xi32) <- (2x2x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (20570xi64) <- (2x10285xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) 
<- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (20570xi32) <- (4xi32, 20570xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 10285] + + # pd_op.reshape: (2x10285xi32) <- (20570xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x10285xb) <- (2x10285xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x10285xi32) <- (2x10285xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x10285xi32) <- (2x10285xb, 2x10285xi32, 2x10285xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (4x4xf32) <- (2x2x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (20570x4xf32) <- (4x4xf32, 20570xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 10285, 4] + + # pd_op.reshape: (2x10285x4xf32) <- (20570x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x10285x12xf32) <- (2x10285xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (11xi64) <- () + full_6 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x10285x11xf32) <- (2x10285x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x2x10285xf32) <- (2x2x10285xf32, 2x2x10285xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x2x1xf32) <- (2x2x10285xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x2x10285xf32) <- (2x2x10285xf32, 2x2x10285xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x2x1xf32) <- (2x2x10285xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), 
paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x2x1xf32) <- (2x2x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (2x2x10285xf32) <- (2x2x10285xf32, 2x2x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x2x10285xf32) <- (2x2x10285xf32, 2x2x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x10285xf32) <- (2x2x10285xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x10285x1xf32) <- (2x10285xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x10285x11xf32) <- (2x10285x11xf32, 2x10285x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..e79481ab1 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +44732eda5516c1dbdba1da64b1c8d19b3556c611027f3c536a4d9ef899ee4de4 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/graph_net.json new file mode 100644 index 000000000..58ad43584 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-L_layout_17cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/input_meta.py new file mode 100644 index 000000000..63c8dafd3 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/input_meta.py @@ -0,0 +1,83 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [7681] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 2, 7681] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00169249") + std = float("0.041105") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 2, 1] + dtype = "int32" + data = [2, 1, 9, 9] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 7681] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00338498") + std = float("0.058082") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 2, 4] + dtype = "float32" + data = [ + 513.321, + 194.41, + 608.0, + 204.224, + 0.0, + 0.0, + 459.707, + 580.895, + 0.0, + 0.0, + 285.886, + 422.244, + 321.326, + 0.0, + 608.0, + 
378.537, + ] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 2, 7681] + dtype = "float32" + max_val = float("0.377518") + mean = float("0.000103938") + std = float("0.00525657") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 2, 7681] + dtype = "float32" + max_val = float("0.926143") + mean = float("0.00979722") + std = float("0.0417195") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/model.py b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/model.py new file mode 100644 index 000000000..9916f7739 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/model.py @@ -0,0 +1,212 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x2x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_2, full_1, float("0"), True) + del data_2, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (4xi32) <- (2x2x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (4xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_3, data_0] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_4) + del data_4, full_4 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_5, paddle.int32, paddle.framework._current_expected_place() + ) + del full_5 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (4x4xf32) <- (2x2x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_0) + del data_5, 
full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (4x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_3, data_0, full_6] + del data_0, full_3, full_6 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x12xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_7, where_0.dtype), full_7 + ) + del full_7 + + # pd_op.full: (11xi64) <- () + full_8 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x-1x11xf32) <- (2x-1x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x2x-1xf32) <- (2x2x-1xf32, 2x2x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_6, data_1) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (2x2x1xf32) <- (2x2x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_1, True) + + # pd_op.multiply: (2x2x-1xf32) <- (2x2x-1xf32, 2x2x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_1) + del data_1, data_7 + + # pd_op.max: (2x2x1xf32) <- (2x2x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x2x1xf32) <- (2x2x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (2x2x-1xf32) <- (2x2x-1xf32, 2x2x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x2x-1xf32) <- (2x2x-1xf32, 2x2x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (2x-1xf32) <- (2x2x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (2x-1x11xf32) <- (2x-1x11xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/weight_meta.py new file mode 100644 index 
000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_17cls/subgraph_4/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..9464efa21 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +a0ad38986043bd89f888b565dc1588ec28fd59bf05e09970854560c26cce8005 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/graph_net.json new file mode 100644 index 000000000..b338c3f7f --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-L_layout_3cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/input_meta.py new file mode 100644 index 000000000..a2a41c256 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/input_meta.py @@ -0,0 +1,76 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 2, 6885] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00188816") + std = float("0.043412") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 2, 1] + dtype = "int32" + data = [9, 9, 4, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 6885] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00377632") + std = float("0.0613357") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 2, 4] + dtype = "float32" + data = [ + 0.0, + 0.0, + 279.403, + 522.831, + 310.209, + 0.0, + 576.0, + 523.873, + 220.975, + 483.792, + 412.625, + 506.545, + 84.8291, + 116.158, + 540.393, + 471.817, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 2, 6885] + dtype = "float32" + max_val = float("0.16577") + mean = float("0.000142783") + std = float("0.00366755") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 2, 6885] + dtype = "float32" + max_val = float("0.964858") + mean = float("0.0140214") + std = float("0.058883") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/model.py b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/model.py new file mode 100644 index 000000000..3cfba575b --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/model.py @@ -0,0 +1,193 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x6885xi64) <- (2x2x6885xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = 
paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x6885xi64) <- (2x6885xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (4xi32) <- (2x2x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (13770xi64) <- (2x6885xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (13770xi32) <- (4xi32, 13770xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 6885] + + # pd_op.reshape: (2x6885xi32) <- (13770xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x6885xb) <- (2x6885xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x6885xi32) <- (2x6885xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x6885xi32) <- (2x6885xb, 2x6885xi32, 2x6885xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (4x4xf32) <- (2x2x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (13770x4xf32) <- (4x4xf32, 13770xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 6885, 4] + + # pd_op.reshape: (2x6885x4xf32) <- (13770x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x6885x12xf32) <- (2x6885xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (11xi64) <- () + full_6 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x6885x11xf32) <- (2x6885x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x2x6885xf32) <- (2x2x6885xf32, 2x2x6885xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # 
pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x2x1xf32) <- (2x2x6885xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x2x6885xf32) <- (2x2x6885xf32, 2x2x6885xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x2x1xf32) <- (2x2x6885xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x2x1xf32) <- (2x2x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (2x2x6885xf32) <- (2x2x6885xf32, 2x2x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x2x6885xf32) <- (2x2x6885xf32, 2x2x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x6885xf32) <- (2x2x6885xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x6885x1xf32) <- (2x6885xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x6885x11xf32) <- (2x6885x11xf32, 2x6885x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..de5ccfd38 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +9074042afa6046d350379210743eb7d269dc2a895adeb68fea22fc27ca323fef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/graph_net.json new file mode 100644 index 000000000..b338c3f7f --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-L_layout_3cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/input_meta.py new file mode 100644 index 000000000..32964c0af --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/input_meta.py @@ -0,0 +1,68 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 7681] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000998134") + std = float("0.0315775") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 3, 1] + dtype = "int32" + data = [3, 4, 1, 7, 1, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 7681] + dtype = "float32" + max_val = float("1.0") + mean = 
float("0.0029944") + std = float("0.0546391") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 3, 4] + dtype = "float32" + data = [ + 10.7339, + 0.0, + 608.0, + 217.749, + 78.971, + 227.175, + 608.0, + 248.856, + 14.5675, + 277.135, + 608.0, + 608.0, + 210.839, + 41.943, + 380.0, + 50.6089, + 305.471, + 55.4618, + 542.787, + 566.404, + 48.5419, + 55.1152, + 285.858, + 566.75, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 7681, 4] + dtype = "float32" + min_val = float("-260.955") + max_val = float("882.53") + mean = float("304.175") + std = float("182.317") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/model.py b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/model.py new file mode 100644 index 000000000..c951cbc46 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/model.py @@ -0,0 +1,304 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x7681xi64) <- (2x3x7681xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("3"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del full_4, unsqueeze_0 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x7681xi64) <- (2x7681xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (6xi32) <- (2x3x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (15362xi64) <- (2x7681xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (15362xi32) <- (6xi32, 15362xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [2, 7681] + + # pd_op.reshape: (2x7681xi32) <- (15362xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x7681xb) <- (2x7681xf32, xf32) + greater_than_0 = 
paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x7681xi32) <- (2x7681xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (2x7681xi32) <- (2x7681xb, 2x7681xi32, 2x7681xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (6x4xf32) <- (2x3x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (15362x4xf32) <- (6x4xf32, 15362xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [2, 7681, 4] + + # pd_op.reshape: (2x7681x4xf32) <- (15362x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x7681x12xf32) <- (2x7681xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (11xi64) <- () + full_9 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (2x7681x11xf32) <- (2x7681x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (2x3x1x4xf32) <- (2x3x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (2x1x7681x4xf32) <- (2x7681x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (2x3x1x2xf32) <- (2x3x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (2x3x1x2xf32) <- (2x3x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (2x1x7681x2xf32) <- (2x1x7681x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (2x1x7681x2xf32) <- (2x1x7681x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (2x3x7681x2xf32) <- (2x3x1x2xf32, 
2x1x7681x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x3x7681x2xf32) <- (2x3x1x2xf32, 2x1x7681x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x3x7681x2xf32) <- (2x3x7681x2xf32, 2x3x7681x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x3x7681x2xf32) <- (2x3x7681x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (2x3x7681xf32) <- (2x3x7681x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (2x3x1x2xf32) <- (2x3x1x2xf32, 2x3x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x3x1x2xf32) <- (2x3x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (2x3x1xf32) <- (2x3x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (2x1x7681x2xf32) <- (2x1x7681x2xf32, 2x1x7681x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x7681x2xf32) <- (2x1x7681x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (2x1x7681xf32) <- (2x1x7681x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (2x3x7681xf32) <- (2x3x1xf32, 2x1x7681xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x3x7681xf32) <- (2x3x7681xf32, 2x3x7681xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x3x7681xf32) <- (2x3x7681xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_12, float("1e-09"), True) + del full_12, subtract_3 + + # pd_op.divide: (2x3x7681xf32) <- (2x3x7681xf32, 2x3x7681xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (2x3x7681xf32) <- (2x3x7681xf32, 2x3x7681xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (2x7681xf32) <- (2x3x7681xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (2x7681x1xf32) <- (2x7681xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (2x7681x11xf32) <- (2x7681x11xf32, 2x7681x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-L_layout_3cls/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git 
a/paddle_samples/PaddleX/PicoDet-M/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-M/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..025146623 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +49aff689868d297e6420c3fd421407855974af46c87ec315df858c88909a81d0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PicoDet-M/subgraph_11/graph_net.json new file mode 100644 index 000000000..49bcf79aa --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_11/input_meta.py new file mode 100644 index 000000000..078c6efc1 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_11/input_meta.py @@ -0,0 +1,124 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 4, 2577] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00252231") + std = float("0.0501593") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 1] + dtype = "int32" + data = [0, 1, 2, 3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 4, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 2, 2] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 2577] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0100893") + std = float("0.0999373") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 4, 4] + dtype = "float32" + data = [ + 156.053, + 85.36, + 266.347, + 166.32, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 170.133, + 106.48, + 222.933, + 146.08, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 96.2612, + 37.9401, + 178.155, + 164.407, + 97.698, + 231.856, + 181.029, + 352.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 77.8894, + 0.0, + 230.672, + 109.0, + 238.162, + 156.0, + 352.0, + 269.0, + 143.796, + 139.0, + 259.132, + 336.0, + 53.9234, + 219.0, + 128.817, + 352.0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [4, 4, 2577] + dtype = "float32" + max_val = float("0.0649346") + mean = float("1.50054e-05") + std = float("0.000519193") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [4, 4, 2577] + dtype = "float32" + max_val = float("0.950413") + mean = float("0.0116738") + std = float("0.0583998") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_11/model.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_11/model.py new file mode 100644 index 000000000..9c1b49d11 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_11/model.py @@ -0,0 +1,176 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x2577xi64) <- (4x4x2577xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = 
paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x1xi32) <- (4x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1 + + # pd_op.cast: (4x1xi64) <- (4x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (4x2577xi64) <- (4x2577xi64, 4x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (16xi32) <- (4x4x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (10308xi64) <- (4x2577xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (10308xi32) <- (16xi32, 10308xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [4, 2577] + + # pd_op.reshape: (4x2577xi32) <- (10308xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (4x2577xb) <- (4x2577xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full_like: (4x2577xi32) <- (4x2577xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_1, paddle.int32, paddle.framework._current_expected_place() + ) + del full_1 + + # pd_op.where: (4x2577xi32) <- (4x2577xb, 4x2577xi32, 4x2577xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (16x4xf32) <- (4x4x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (10308x4xf32) <- (16x4xf32, 10308xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [4, 2577, 4] + + # pd_op.reshape: (4x2577x4xf32) <- (10308x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (4x2577x5xf32) <- (4x2577xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_4, where_0.dtype), full_4 + ) + del full_4 + + # pd_op.full: (4xi64) <- () + full_5 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_5, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_5 + + # pd_op.index_select: (4x2577x4xf32) <- (4x2577x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (4x4x2577xf32) <- (4x4x2577xf32, 4x4x2577xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (4x4x1xf32) <- (4x4x2577xf32, 1xi64) + max_0 
= paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (4x4x2577xf32) <- (4x4x2577xf32, 4x4x2577xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (4x4x1xf32) <- (4x4x2577xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x4x1xf32) <- (4x4x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 + + # pd_op.divide: (4x4x2577xf32) <- (4x4x2577xf32, 4x4x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (4x4x2577xf32) <- (4x4x2577xf32, 4x4x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (4x2577xf32) <- (4x4x2577xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (4x2577x1xf32) <- (4x2577xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (4x2577x4xf32) <- (4x2577x4xf32, 4x2577x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-M/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..7427c266c --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +0f0435e48afd74cc4b53c79cc5e4c028bb47e1433f41e2a6ed19f47836aa669c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PicoDet-M/subgraph_15/graph_net.json new file mode 100644 index 000000000..49bcf79aa --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_15/input_meta.py new file mode 100644 index 000000000..faab4eba6 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_15/input_meta.py @@ -0,0 +1,60 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 1, 4789] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00114847") + std = float("0.0338695") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 4789] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00114847") + std = float("0.0338695") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 1, 4] + dtype = "float32" + data = [ + 186.353, + 203.429, + 440.471, + 306.286, + 0.0, + 188.852, + 192.99, + 413.115, + 207.273, + 225.306, + 
469.091, + 382.041, + 268.531, + 107.586, + 426.294, + 297.931, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 4789, 4] + dtype = "float32" + min_val = float("-81.8591") + max_val = float("712.118") + mean = float("240.104") + std = float("144.853") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_15/model.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_15/model.py new file mode 100644 index 000000000..2dbd499de --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_15/model.py @@ -0,0 +1,287 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x4789xi64) <- (4x1x4789xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("4"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (4xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (4x1xi32) <- (4xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x1xi32) <- (4x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 + + # pd_op.cast: (4x1xi64) <- (4x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (4x4789xi64) <- (4x4789xi64, 4x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (4xi32) <- (4x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (19156xi64) <- (4x4789xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (19156xi32) <- (4xi32, 19156xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [4, 4789] + + # pd_op.reshape: (4x4789xi32) <- (19156xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (4x4789xb) <- (4x4789xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (4x4789xi32) <- (4x4789xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (4x4789xi32) 
<- (4x4789xb, 4x4789xi32, 4x4789xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (4x4xf32) <- (4x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (19156x4xf32) <- (4x4xf32, 19156xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [4, 4789, 4] + + # pd_op.reshape: (4x4789x4xf32) <- (19156x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (4x4789x5xf32) <- (4x4789xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (4x4789x4xf32) <- (4x4789x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (4x1x1x4xf32) <- (4x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (4x1x4789x4xf32) <- (4x4789x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (4x1x1x2xf32) <- (4x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (4x1x1x2xf32) <- (4x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (4x1x4789x2xf32) <- (4x1x4789x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (4x1x4789x2xf32) <- (4x1x4789x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (4x1x4789x2xf32) <- (4x1x1x2xf32, 4x1x4789x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (4x1x4789x2xf32) <- (4x1x1x2xf32, 4x1x4789x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (4x1x4789x2xf32) <- (4x1x4789x2xf32, 4x1x4789x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = 
paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (4x1x4789x2xf32) <- (4x1x4789x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (4x1x4789xf32) <- (4x1x4789x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (4x1x1x2xf32) <- (4x1x1x2xf32, 4x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (4x1x1x2xf32) <- (4x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (4x1x1xf32) <- (4x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (4x1x4789x2xf32) <- (4x1x4789x2xf32, 4x1x4789x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (4x1x4789x2xf32) <- (4x1x4789x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (4x1x4789xf32) <- (4x1x4789x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (4x1x4789xf32) <- (4x1x1xf32, 4x1x4789xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (4x1x4789xf32) <- (4x1x4789xf32, 4x1x4789xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.scale: (4x1x4789xf32) <- (4x1x4789xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) + del full_4, subtract_3 + + # pd_op.divide: (4x1x4789xf32) <- (4x1x4789xf32, 4x1x4789xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (4x1x4789xf32) <- (4x1x4789xf32, 4x1x4789xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (4x4789xf32) <- (4x1x4789xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (4x4789x1xf32) <- (4x4789xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (4x4789x4xf32) <- (4x4789x4xf32, 4x4789x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_15/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_15/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-M/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..90d4682cc --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +2180983e439cf5d3537fc12c5820d6dc5fd684feaa2d49af4f35ef93184eee77 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PicoDet-M/subgraph_16/graph_net.json new file mode 100644 index 000000000..49bcf79aa --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-M", + 
"num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_16/input_meta.py new file mode 100644 index 000000000..677fd254e --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_16/input_meta.py @@ -0,0 +1,106 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [3598] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 3, 3598] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00108857") + std = float("0.0329755") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 3, 1] + dtype = "int32" + data = [0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 3598] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0032657") + std = float("0.0570529") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [4, 3, 4] + dtype = "float32" + data = [ + 21.7043, + 178.894, + 104.904, + 241.365, + 135.652, + 245.625, + 262.261, + 345.01, + 0.0, + 345.01, + 65.113, + 408.901, + 15.2195, + 104.0, + 88.7805, + 195.394, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 268.8, + 73.0382, + 349.867, + 193.71, + 266.667, + 200.061, + 349.867, + 317.557, + 0.0, + 0.0, + 0.0, + 0.0, + 4.83721, + 0.0, + 249.116, + 289.836, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [4, 3598, 4] + dtype = "float32" + min_val = float("-121.722") + max_val = float("725.016") + mean = float("208.044") + std = float("126.754") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_16/model.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_16/model.py new file mode 100644 index 000000000..37984eedf --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_16/model.py @@ -0,0 +1,305 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x-1xi64) <- (4x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("4"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (4xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (4x1xi32) <- (4xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (4x1xi32) <- (4x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(unsqueeze_0, cast_0) + del cast_0, unsqueeze_0 + + # 
pd_op.cast: (4x1xi64) <- (4x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (4x-1xi64) <- (4x-1xi64, 4x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (4x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (-1xi64) <- (4x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_5, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (4x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (4x-1xb) <- (4x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_6) + del data_4, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (4x-1xi32) <- (4x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (4x-1xi32) <- (4x-1xb, 4x-1xi32, 4x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (4x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_1) + del full_int_array_1 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_5, data_1, full_5] + del data_1, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (4x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (4x-1x5xf32) <- (4x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (4x-1x4xf32) <- (4x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + 
full_int_array_2 = [2] + + # pd_op.unsqueeze: (4x-1x1x4xf32) <- (4x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_5, full_int_array_2) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.unsqueeze: (4x1x-1x4xf32) <- (4x-1x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (4x-1x1x2xf32) <- (4x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_2, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (4x-1x1x2xf32) <- (4x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (4x1x-1x2xf32) <- (4x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_2, [1], [] + ) + del full_int_array_4 + + # pd_op.slice: (4x1x-1x2xf32) <- (4x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del full_int_array_2, full_int_array_5, unsqueeze_2 + + # pd_op.maximum: (4x-1x-1x2xf32) <- (4x-1x1x2xf32, 4x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (4x-1x-1x2xf32) <- (4x-1x1x2xf32, 4x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (4x-1x-1x2xf32) <- (4x-1x-1x2xf32, 4x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (4x-1x-1x2xf32) <- (4x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (4x-1x-1xf32) <- (4x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (4x-1x1x2xf32) <- (4x-1x1x2xf32, 4x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (4x-1x1x2xf32) <- (4x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (4x-1x1xf32) <- (4x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (4x1x-1x2xf32) <- (4x1x-1x2xf32, 4x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (4x1x-1x2xf32) <- (4x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (4x1x-1xf32) <- (4x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (4x-1x-1xf32) <- (4x-1x1xf32, 4x1x-1xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (4x-1x-1xf32) <- (4x-1x-1xf32, 4x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x-1x-1xf32) <- (4x-1x-1xf32, 
1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_12, float("1e-09"), True) + del full_12, subtract_3 + + # pd_op.divide: (4x-1x-1xf32) <- (4x-1x-1xf32, 4x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.multiply: (4x-1x-1xf32) <- (4x-1x-1xf32, 4x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(divide_0, data_2) + del data_2, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-2] + + # pd_op.max: (4x-1xf32) <- (4x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, False) + del full_int_array_6, multiply_2 + + # pd_op.unsqueeze: (4x-1x1xf32) <- (4x-1xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (4x-1x4xf32) <- (4x-1x4xf32, 4x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_16/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_16/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_18/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-M/subgraph_18/graph_hash.txt new file mode 100644 index 000000000..448b535d6 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_18/graph_hash.txt @@ -0,0 +1 @@ +7836695756b3239a3069dc67218ea1619ca2b36a2e32fbda393e66313950cd5c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_18/graph_net.json b/paddle_samples/PaddleX/PicoDet-M/subgraph_18/graph_net.json new file mode 100644 index 000000000..49bcf79aa --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_18/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_18/input_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_18/input_meta.py new file mode 100644 index 000000000..a1c273b69 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_18/input_meta.py @@ -0,0 +1,67 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [2577] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 1, 2577] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00310438") + std = float("0.0556305") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 1, 1] + dtype = "int32" + data = [0, 0, 0, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 2577] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00310438") + std = float("0.0556305") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 1, 4] + dtype = "float32" + data = [ + 205.333, + 129.36, + 241.707, + 155.76, + 234.667, + 7.77901, + 273.778, + 60.2873, + 231.147, + 56.32, + 274.56, + 88.0, + 71.68, + 3.41748, + 185.6, + 317.825, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [4, 2577, 4] + dtype = "float32" + min_val = float("-137.475") + max_val = float("691.133") + mean = float("176.065") + std = float("108.672") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_18/model.py 
b/paddle_samples/PaddleX/PicoDet-M/subgraph_18/model.py new file mode 100644 index 000000000..6a4423cdb --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_18/model.py @@ -0,0 +1,301 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x-1xi64) <- (4x1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("4"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (4xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (4x1xi32) <- (4xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x1xi32) <- (4x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 + + # pd_op.cast: (4x1xi64) <- (4x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (4x-1xi64) <- (4x-1xi64, 4x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (4xi32) <- (4x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (-1xi64) <- (4x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (4xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_6, data_0] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (4x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (4x-1xb) <- (4x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_7) + del data_3, full_7 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (4x-1xi32) <- (4x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_8, paddle.int32, paddle.framework._current_expected_place() + ) + del full_8 + + # pd_op.where: (4x-1xi32) <- (4x-1xb, 4x-1xi32, 4x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # 
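NOTE: where_0 keeps the gathered label wherever the score in data_3 is positive and otherwise falls back to the constant 4 from full_like_0; with the depth-5 one_hot and the index_select over columns 0-3 further down, those fallback rows become all zeros (presumably the no-match/background case). + #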
pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (4x4xf32) <- (4x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del full_int_array_1 + + # pd_op.gather: (-1x4xf32) <- (4x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_6, data_0, full_6] + del data_0, full_6 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (4x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (4x-1x5xf32) <- (4x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_9, where_0.dtype), full_9 + ) + del full_9 + + # pd_op.full: (4xi64) <- () + full_10 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_10, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.index_select: (4x-1x4xf32) <- (4x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.unsqueeze: (4x1x1x4xf32) <- (4x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_4, full_int_array_2) + del data_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.unsqueeze: (4x1x-1x4xf32) <- (4x-1x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_5, full_int_array_3) + del data_5, full_int_array_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (4x1x1x2xf32) <- (4x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_2, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (4x1x1x2xf32) <- (4x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (4x1x-1x2xf32) <- (4x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_2, [1], [] + ) + del full_int_array_4 + + # pd_op.slice: (4x1x-1x2xf32) <- (4x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del full_int_array_2, full_int_array_5, unsqueeze_2 + + # pd_op.maximum: (4x1x-1x2xf32) <- (4x1x1x2xf32, 4x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (4x1x-1x2xf32) <- (4x1x1x2xf32, 4x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (4x1x-1x2xf32) <- (4x1x-1x2xf32, 4x1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, 
paddle.core.CPUPlace() + ) + + # pd_op.clip: (4x1x-1x2xf32) <- (4x1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_11, full_12) + del subtract_0 + + # pd_op.prod: (4x1x-1xf32) <- (4x1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (4x1x1x2xf32) <- (4x1x1x2xf32, 4x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (4x1x1x2xf32) <- (4x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_11, full_12) + del subtract_1 + + # pd_op.prod: (4x1x1xf32) <- (4x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (4x1x-1x2xf32) <- (4x1x-1x2xf32, 4x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (4x1x-1x2xf32) <- (4x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_11, full_12) + del full_11, full_12, subtract_2 + + # pd_op.prod: (4x1x-1xf32) <- (4x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (4x1x-1xf32) <- (4x1x1xf32, 4x1x-1xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (4x1x-1xf32) <- (4x1x-1xf32, 4x1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.scale: (4x1x-1xf32) <- (4x1x-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) + del full_4, subtract_3 + + # pd_op.divide: (4x1x-1xf32) <- (4x1x-1xf32, 4x1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (4x1x-1xf32) <- (4x1x-1xf32, 4x1x-1xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_1) + del data_1, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-2] + + # pd_op.max: (4x-1xf32) <- (4x1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, False) + del full_int_array_6, multiply_1 + + # pd_op.unsqueeze: (4x-1x1xf32) <- (4x-1xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (4x-1x4xf32) <- (4x-1x4xf32, 4x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_18/weight_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_18/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_18/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-M/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..5d483b5c1 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +b1a5abe03b221430f1b6985501081f92cb6042bc91ee480ee7254388d72af580 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PicoDet-M/subgraph_2/graph_net.json new file mode 100644 index 000000000..49bcf79aa --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PicoDet-M/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_2/input_meta.py new file mode 100644 index 000000000..e476645bc --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_2/input_meta.py @@ -0,0 +1,108 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 3598] + dtype = "float32" + max_val = float("2.0") + mean = float("0.00458588") + std = float("0.0695901") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 3, 3598] + dtype = "float32" + max_val = float("0.614456") + mean = float("0.00261096") + std = float("0.0233938") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 3, 3598] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00152863") + std = float("0.0390678") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 1] + dtype = "int32" + data = [0, 1, 2, 3] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 3, 1] + dtype = "int32" + data = [0, 0, 0, 0, 1, 2, 0, 0, 0, 3, 0, 0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [4, 3, 4] + dtype = "float32" + data = [ + 205.227, + 204.88, + 221.867, + 217.36, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 216.32, + 196.56, + 248.213, + 220.48, + 368.853, + 243.36, + 404.907, + 271.44, + 348.053, + 246.48, + 373.013, + 295.36, + 117.921, + 183.424, + 232.567, + 266.144, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 113.707, + 307.84, + 134.507, + 326.56, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [4, 3, 3598] + dtype = "float32" + max_val = float("0.00377802") + mean = float("5.22152e-07") + std = float("3.12199e-05") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_2/model.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_2/model.py new file mode 100644 index 000000000..7bca48ac9 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_2/model.py @@ -0,0 +1,229 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (4x1x3598xf32) <- (4x3598xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (4x1x3598xb) <- (4x1x3598xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 3, 1] + + # pd_op.tile: (4x3x3598xb) <- (4x1x3598xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x3598xi64) <- (4x3x3598xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (4x3598x3xf32) <- (4x3598xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % 
paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (4x3x3598xf32) <- (4x3598x3xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (4x3x3598xf32) <- (4x3x3598xb, 4x3x3598xf32, 4x3x3598xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (4x3598xf32) <- (4x3x3598xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (4x3598xi64) <- (4x3x3598xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("3"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x1xi32) <- (4x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (4x1xi64) <- (4x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (4x3598xi64) <- (4x3598xi64, 4x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (12xi32) <- (4x3x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (14392xi64) <- (4x3598xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (14392xi32) <- (12xi32, 14392xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [4, 3598] + + # pd_op.reshape: (4x3598xi32) <- (14392xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (4x3598xb) <- (4x3598xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (4x3598xi32) <- (4x3598xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (4x3598xi32) <- (4x3598xb, 4x3598xi32, 4x3598xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (12x4xf32) <- (4x3x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (14392x4xf32) <- (12x4xf32, 14392xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [4, 3598, 4] + + # pd_op.reshape: (4x3598x4xf32) <- (14392x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (4x3598x5xf32) <- (4x3598xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + 
where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (4xi64) <- () + full_8 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (4x3598x4xf32) <- (4x3598x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (4x3x3598xf32) <- (4x3x3598xf32, 4x3x3598xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (4x3x1xf32) <- (4x3x3598xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (4x3x3598xf32) <- (4x3x3598xf32, 4x3x3598xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (4x3x1xf32) <- (4x3x3598xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x3x1xf32) <- (4x3x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (4x3x3598xf32) <- (4x3x3598xf32, 4x3x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (4x3x3598xf32) <- (4x3x3598xf32, 4x3x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (4x3598xf32) <- (4x3x3598xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (4x3598x1xf32) <- (4x3598xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (4x3598x4xf32) <- (4x3598x4xf32, 4x3598x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-M/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..8e901dc0d --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +0e23809bb012ad66c6885fee22da9486ad043d7043ddbd1653712c1a45d78ec2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PicoDet-M/subgraph_9/graph_net.json new file mode 100644 index 000000000..49bcf79aa --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_9/input_meta.py new file mode 100644 index 
000000000..c352bf627 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_9/input_meta.py @@ -0,0 +1,106 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [2] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [3060] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 2, 3060] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00265523") + std = float("0.0514605") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 1] + dtype = "int32" + data = [0, 1, 2, 3] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 2, 1] + dtype = "int32" + data = [0, 0, 3, 0, 1, 0, 0, 0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [4, 3060] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00531046") + std = float("0.0726791") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [4, 2, 4] + dtype = "float32" + data = [ + 258.098, + 153.098, + 384.0, + 296.157, + 0.0, + 0.0, + 0.0, + 0.0, + 10.1386, + 0.0, + 311.762, + 384.0, + 0.0, + 0.0, + 0.0, + 0.0, + 170.375, + 156.8, + 207.072, + 197.333, + 197.898, + 76.8, + 260.805, + 130.133, + 121.6, + 141.12, + 216.32, + 212.16, + 0.0, + 0.0, + 0.0, + 0.0, + ] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [4, 2, 3060] + dtype = "float32" + max_val = float("0.150296") + mean = float("4.95832e-05") + std = float("0.00157955") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [4, 2, 3060] + dtype = "float32" + max_val = float("0.890974") + mean = float("0.0138066") + std = float("0.0622531") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_9/model.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_9/model.py new file mode 100644 index 000000000..3c1c24ef7 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_9/model.py @@ -0,0 +1,196 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x-1xi64) <- (4x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (4x1xi32) <- (4x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (4x1xi64) <- (4x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (4x-1xi64) <- (4x-1xi64, 4x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (4x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (4x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # 
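NOTE: full_2 holds the scalar 4, used both as the leading batch dimension here and as the trailing box-coordinate dimension further down; stacking it with data_1 (the dynamic prior count) builds the shape tensor for the reshape below. + #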
builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (4x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (4x-1xb) <- (4x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (4x-1xi32) <- (4x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (4x-1xi32) <- (4x-1xb, 4x-1xi32, 4x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (4x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_2] + del data_1, full_2 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (4x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (4x-1x5xf32) <- (4x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (4xi64) <- () + full_6 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (4x-1x4xf32) <- (4x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (4x-1x-1xf32) <- (4x-1x-1xf32, 4x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (4x-1x1xf32) <- (4x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (4x-1x-1xf32) <- (4x-1x-1xf32, 4x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (4x-1x1xf32) <- (4x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x-1x1xf32) <- (4x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + 
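# NOTE: scale_0 adds a 1e-09 offset to the per-row maximum max_0, so the divide below, which normalizes multiply_2 by that maximum, never divides by zero. +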
+ # pd_op.divide: (4x-1x-1xf32) <- (4x-1x-1xf32, 4x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (4x-1x-1xf32) <- (4x-1x-1xf32, 4x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (4x-1xf32) <- (4x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (4x-1x1xf32) <- (4x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (4x-1x4xf32) <- (4x-1x4xf32, 4x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-M/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PicoDet-M/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-M/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-S/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..f88daec9a --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +ca17bbdf32a8bfa2355df425f39c7b6d2066e2070bbc093007f834ec3d6c3e5d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PicoDet-S/subgraph_11/graph_net.json new file mode 100644 index 000000000..39267b2fc --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_11/input_meta.py new file mode 100644 index 000000000..d6e2d36fd --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_11/input_meta.py @@ -0,0 +1,93 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [2] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [1726] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 2, 1726] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00219076") + std = float("0.0467542") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [16, 2, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 3, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 1, + 2, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [16, 1726] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00438152") + std = float("0.0660479") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [16, 2, 4] + dtype = "float32" + max_val = float("288.0") + mean = float("74.7247") + std = float("89.2829") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [16, 1726, 4] + dtype = "float32" + min_val = float("-162.356") + max_val = float("543.8") + mean = float("144.504") + std = float("90.7991") + data = None diff --git 
a/paddle_samples/PaddleX/PicoDet-S/subgraph_11/model.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_11/model.py new file mode 100644 index 000000000..43801b2b2 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_11/model.py @@ -0,0 +1,310 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (16x-1xi64) <- (16x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("16"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (16xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (16x1xi32) <- (16xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (16x1xi32) <- (16x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(unsqueeze_0, cast_0) + del cast_0, unsqueeze_0 + + # pd_op.cast: (16x1xi64) <- (16x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (16x-1xi64) <- (16x-1xi64, 16x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (16x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (-1xi64) <- (16x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("16"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_5, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (16x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (16x-1xb) <- (16x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_6) + del data_4, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (16x-1xi32) <- (16x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (16x-1xi32) <- (16x-1xb, 16x-1xi32, 16x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, 
full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (16x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_1) + del full_int_array_1 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full: (xi64) <- () + full_8 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_5, data_1, full_8] + del data_1, full_5, full_8 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (16x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (16x-1x5xf32) <- (16x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_9, where_0.dtype), full_9 + ) + del full_9 + + # pd_op.full: (4xi64) <- () + full_10 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_10, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.index_select: (16x-1x4xf32) <- (16x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.unsqueeze: (16x-1x1x4xf32) <- (16x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_5, full_int_array_2) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.unsqueeze: (16x1x-1x4xf32) <- (16x-1x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (16x-1x1x2xf32) <- (16x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_2, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (16x-1x1x2xf32) <- (16x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (16x1x-1x2xf32) <- (16x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_2, [1], [] + ) + del full_int_array_4 + + # pd_op.slice: (16x1x-1x2xf32) <- (16x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del full_int_array_2, full_int_array_5, unsqueeze_2 + + # pd_op.maximum: (16x-1x-1x2xf32) <- (16x-1x1x2xf32, 16x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (16x-1x-1x2xf32) <- (16x-1x1x2xf32, 16x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (16x-1x-1x2xf32) <- (16x-1x-1x2xf32, 16x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # 
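NOTE: maximum_0 and minimum_0 are the overlap corners of the two box sets, so subtract_0 is the (possibly negative) overlap width/height; the clip and prod below turn it into a non-negative intersection area for the IoU-style ratio computed further down. + #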
pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (16x-1x-1x2xf32) <- (16x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_11, full_12) + del subtract_0 + + # pd_op.prod: (16x-1x-1xf32) <- (16x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (16x-1x1x2xf32) <- (16x-1x1x2xf32, 16x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (16x-1x1x2xf32) <- (16x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_11, full_12) + del subtract_1 + + # pd_op.prod: (16x-1x1xf32) <- (16x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (16x1x-1x2xf32) <- (16x1x-1x2xf32, 16x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (16x1x-1x2xf32) <- (16x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_11, full_12) + del full_11, full_12, subtract_2 + + # pd_op.prod: (16x1x-1xf32) <- (16x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (16x-1x-1xf32) <- (16x-1x1xf32, 16x1x-1xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (16x-1x-1xf32) <- (16x-1x-1xf32, 16x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16x-1x-1xf32) <- (16x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_13, float("1e-09"), True) + del full_13, subtract_3 + + # pd_op.divide: (16x-1x-1xf32) <- (16x-1x-1xf32, 16x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.multiply: (16x-1x-1xf32) <- (16x-1x-1xf32, 16x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(divide_0, data_2) + del data_2, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-2] + + # pd_op.max: (16x-1xf32) <- (16x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, False) + del full_int_array_6, multiply_2 + + # pd_op.unsqueeze: (16x-1x1xf32) <- (16x-1xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (16x-1x4xf32) <- (16x-1x4xf32, 16x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_20/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-S/subgraph_20/graph_hash.txt new file mode 100644 index 000000000..9eca13106 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_20/graph_hash.txt @@ -0,0 +1 @@ +8cba0fb42218d1f0ec6fe1dbc4fb2bc4ee7d0eb9e2ad22c2f4aa66e7aba637a5 \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PicoDet-S/subgraph_20/graph_net.json b/paddle_samples/PaddleX/PicoDet-S/subgraph_20/graph_net.json new file mode 100644 index 000000000..39267b2fc --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_20/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_20/input_meta.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_20/input_meta.py new file mode 100644 index 000000000..e5017d095 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_20/input_meta.py @@ -0,0 +1,64 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 5, 2577] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00149884") + std = float("0.0386858") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 1] + dtype = "int32" + data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 5, 1] + dtype = "int32" + min_val = 0 + max_val = 3 + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [16, 2577] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00749418") + std = float("0.0862439") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [16, 5, 4] + dtype = "float32" + max_val = float("352.0") + mean = float("51.4072") + std = float("88.1411") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [16, 5, 2577] + dtype = "float32" + max_val = float("0.732494") + mean = float("0.000262283") + std = float("0.00762656") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [16, 5, 2577] + dtype = "float32" + max_val = float("0.988568") + mean = float("0.0108881") + std = float("0.0698942") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_20/model.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_20/model.py new file mode 100644 index 000000000..030586044 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_20/model.py @@ -0,0 +1,181 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (16x2577xi64) <- (16x5x2577xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16x1xi32) <- (16x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (16x1xi64) <- (16x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (16x2577xi64) <- (16x2577xi64, 16x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (80xi32) <- (16x5x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (41232xi64) <- (16x2577xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: 
(41232xi32) <- (80xi32, 41232xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [16, 2577] + + # pd_op.reshape: (16x2577xi32) <- (41232xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (16x2577xb) <- (16x2577xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (16x2577xi32) <- (16x2577xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (16x2577xi32) <- (16x2577xb, 16x2577xi32, 16x2577xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (80x4xf32) <- (16x5x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (41232x4xf32) <- (80x4xf32, 41232xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [16, 2577, 4] + + # pd_op.reshape: (16x2577x4xf32) <- (41232x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (16x2577x5xf32) <- (16x2577xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (4xi64) <- () + full_6 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (16x2577x4xf32) <- (16x2577x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (16x5x2577xf32) <- (16x5x2577xf32, 16x5x2577xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (16x5x1xf32) <- (16x5x2577xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (16x5x2577xf32) <- (16x5x2577xf32, 16x5x2577xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (16x5x1xf32) <- (16x5x2577xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16x5x1xf32) <- (16x5x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (16x5x2577xf32) <- 
(16x5x2577xf32, 16x5x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (16x5x2577xf32) <- (16x5x2577xf32, 16x5x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (16x2577xf32) <- (16x5x2577xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (16x2577x1xf32) <- (16x2577xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (16x2577x4xf32) <- (16x2577x4xf32, 16x2577x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_20/weight_meta.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_20/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_20/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-S/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..c61d50d59 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +8589c931b6eeb270fc60381d1847011e1cd499c62d059a177bd4c5e1b7e2cd2e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PicoDet-S/subgraph_5/graph_net.json new file mode 100644 index 000000000..39267b2fc --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_5/input_meta.py new file mode 100644 index 000000000..fc900176c --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_5/input_meta.py @@ -0,0 +1,58 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 3060] + dtype = "float32" + max_val = float("2.0") + mean = float("0.00379902") + std = float("0.0621795") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 5, 3060] + dtype = "float32" + max_val = float("0.749538") + mean = float("0.00420797") + std = float("0.0278533") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 5, 3060] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000759804") + std = float("0.0275541") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [16, 5, 1] + dtype = "int32" + min_val = 0 + max_val = 3 + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [16, 5, 4] + dtype = "float32" + max_val = float("384.0") + mean = float("57.555") + std = float("95.4551") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [16, 3060, 4] + dtype = "float32" + min_val = float("-138.88") + max_val = float("556.733") + mean = float("192.303") + std = float("116.769") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_5/model.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_5/model.py new file mode 100644 index 000000000..9b740317d --- /dev/null +++ 
b/paddle_samples/PaddleX/PicoDet-S/subgraph_5/model.py @@ -0,0 +1,341 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (16x1x3060xf32) <- (16x3060xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (16x1x3060xb) <- (16x1x3060xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.cast: (16x1x3060xi32) <- (16x1x3060xb) + cast_0 = paddle._C_ops.cast(greater_than_0, paddle.int32) + del greater_than_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 5, 1] + + # pd_op.tile: (16x5x3060xi32) <- (16x1x3060xi32, 3xi64) + tile_0 = paddle._C_ops.tile(cast_0, full_int_array_1) + del cast_0, full_int_array_1 + + # pd_op.cast: (16x5x3060xb) <- (16x5x3060xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (16x3060xi64) <- (16x5x3060xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + del data_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (16x3060x5xf32) <- (16x3060xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0 + + # pd_op.transpose: (16x5x3060xf32) <- (16x3060x5xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (16x5x3060xf32) <- (16x5x3060xb, 16x5x3060xf32, 16x5x3060xf32) + where_0 = paddle._C_ops.where(cast_1, transpose_0, data_2) + del cast_1, data_2, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (16x3060xf32) <- (16x5x3060xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (16x3060xi64) <- (16x5x3060xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("16"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (16xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") + del full_3, full_4, full_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.unsqueeze: (16x1xi32) <- (16xi32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(arange_0, full_int_array_3) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16x1xi32) <- (16x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_1, full_6, float("0"), True) + del full_6, unsqueeze_1 + + # pd_op.cast: (16x1xi64) <- (16x1xi32) + cast_2 = paddle._C_ops.cast(scale_0, paddle.int64) + del 
scale_0 + + # pd_op.add: (16x3060xi64) <- (16x3060xi64, 16x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_2) + del argmax_1, cast_2 + + # pd_op.flatten: (80xi32) <- (16x5x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (48960xi64) <- (16x3060xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (48960xi32) <- (80xi32, 48960xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_7) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [16, 3060] + + # pd_op.reshape: (16x3060xi32) <- (48960xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_4) + del full_int_array_4, gather_0 + + # pd_op.full: (xf32) <- () + full_8 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (16x3060xb) <- (16x3060xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_8) + del full_8, sum_0 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (16x3060xi32) <- (16x3060xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_9, paddle.int32, paddle.framework._current_expected_place() + ) + del full_9 + + # pd_op.where: (16x3060xi32) <- (16x3060xb, 16x3060xi32, 16x3060xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [-1, 4] + + # pd_op.reshape: (80x4xf32) <- (16x5x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_5) + del full_int_array_5 + + # pd_op.gather: (48960x4xf32) <- (80x4xf32, 48960xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_7) + del flatten_1, full_7, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [16, 3060, 4] + + # pd_op.reshape: (16x3060x4xf32) <- (48960x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_6) + del full_int_array_6, gather_1 + + # pd_op.one_hot: (16x3060x5xf32) <- (16x3060xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_2, where_1.dtype), full_2 + ) + del full_2 + + # pd_op.full: (4xi64) <- () + full_10 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_10, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.index_select: (16x3060x4xf32) <- (16x3060x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2] + + # pd_op.unsqueeze: (16x5x1x4xf32) <- (16x5x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_7) + del data_4 + + # pd_op.unsqueeze: (16x1x3060x4xf32) <- (16x3060x4xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5, full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [0] + + # pd_op.slice: (16x5x1x2xf32) <- (16x5x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_2, [3], 
full_int_array_8, full_int_array_7, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [2147483647] + + # pd_op.slice: (16x5x1x2xf32) <- (16x5x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_7, full_int_array_9, [1], [] + ) + del unsqueeze_2 + + # pd_op.slice: (16x1x3060x2xf32) <- (16x1x3060x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_3, [3], full_int_array_8, full_int_array_7, [1], [] + ) + del full_int_array_8 + + # pd_op.slice: (16x1x3060x2xf32) <- (16x1x3060x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_3, [3], full_int_array_7, full_int_array_9, [1], [] + ) + del full_int_array_7, full_int_array_9, unsqueeze_3 + + # pd_op.maximum: (16x5x3060x2xf32) <- (16x5x1x2xf32, 16x1x3060x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (16x5x3060x2xf32) <- (16x5x1x2xf32, 16x1x3060x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (16x5x3060x2xf32) <- (16x5x3060x2xf32, 16x5x3060x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (16x5x3060x2xf32) <- (16x5x3060x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_11, full_12) + del subtract_0 + + # pd_op.prod: (16x5x3060xf32) <- (16x5x3060x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_3, False, False) + del clip_0 + + # pd_op.subtract: (16x5x1x2xf32) <- (16x5x1x2xf32, 16x5x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (16x5x1x2xf32) <- (16x5x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_11, full_12) + del subtract_1 + + # pd_op.prod: (16x5x1xf32) <- (16x5x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_3, False, False) + del clip_1 + + # pd_op.subtract: (16x1x3060x2xf32) <- (16x1x3060x2xf32, 16x1x3060x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (16x1x3060x2xf32) <- (16x1x3060x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_11, full_12) + del full_11, full_12, subtract_2 + + # pd_op.prod: (16x1x3060xf32) <- (16x1x3060x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_3, False, False) + del clip_2 + + # pd_op.add: (16x5x3060xf32) <- (16x5x1xf32, 16x1x3060xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (16x5x3060xf32) <- (16x5x3060xf32, 16x5x3060xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16x5x3060xf32) <- (16x5x3060xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_13, float("1e-09"), True) + del full_13, subtract_3 + + # pd_op.divide: (16x5x3060xf32) <- (16x5x3060xf32, 16x5x3060xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (16x5x3060xf32) <- (16x5x3060xf32, 16x5x3060xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, where_0) + del divide_0, where_0 + + # pd_op.max: (16x3060xf32) <- (16x5x3060xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, 
full_int_array_2, False) + del full_int_array_2, multiply_1 + + # pd_op.unsqueeze: (16x3060x1xf32) <- (16x3060xf32, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(max_0, full_int_array_3) + del full_int_array_3, max_0 + + # pd_op.multiply: (16x3060x4xf32) <- (16x3060x4xf32, 16x3060x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_4) + del index_select_0, unsqueeze_4, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-S/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..b3be3c380 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +5b2626a3cbbcc1056e9e802ff57c85d99ae970558e7e41076fe5d00319c714c5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PicoDet-S/subgraph_8/graph_net.json new file mode 100644 index 000000000..39267b2fc --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_8/input_meta.py new file mode 100644 index 000000000..107144967 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_8/input_meta.py @@ -0,0 +1,109 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [2] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [2125] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 2, 2125] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00320588") + std = float("0.0565297") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [16, 1] + dtype = "int32" + data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [16, 2, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3, + 0, + 2, + 2, + 0, + 1, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [16, 2125] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00641177") + std = float("0.0798164") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [16, 2, 4] + dtype = "float32" + max_val = float("320.0") + mean = float("94.9651") + std = float("99.4517") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [16, 2, 2125] + dtype = "float32" + max_val = float("0.773153") + mean = float("0.000889096") + std = float("0.0159838") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [16, 2, 2125] + dtype = "float32" + max_val = float("0.98029") + mean = float("0.0243078") + std = float("0.102425") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_8/model.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_8/model.py new file mode 100644 index 
000000000..6179b974c --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_8/model.py @@ -0,0 +1,201 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (16x-1xi64) <- (16x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (16x1xi32) <- (16x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (16x1xi64) <- (16x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (16x-1xi64) <- (16x-1xi64, 16x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (16x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (16x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("16"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (16x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (16x-1xb) <- (16x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (16x-1xi32) <- (16x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (16x-1xi32) <- (16x-1xb, 16x-1xi32, 16x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (16x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (16x-1x4xf32) <- (-1x4xf32, 
3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (16x-1x5xf32) <- (16x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (4xi64) <- () + full_7 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (16x-1x4xf32) <- (16x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (16x-1x-1xf32) <- (16x-1x-1xf32, 16x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (16x-1x1xf32) <- (16x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (16x-1x-1xf32) <- (16x-1x-1xf32, 16x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (16x-1x1xf32) <- (16x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16x-1x1xf32) <- (16x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) + del full_8, max_0 + + # pd_op.divide: (16x-1x-1xf32) <- (16x-1x-1xf32, 16x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (16x-1x-1xf32) <- (16x-1x-1xf32, 16x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (16x-1xf32) <- (16x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (16x-1x1xf32) <- (16x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (16x-1x4xf32) <- (16x-1x4xf32, 16x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-S/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PicoDet-S/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..899b6d51f --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +950848fd44819a5d5eb9f692606fde0c9710c058d0952432e20c5c4b27fad259 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/graph_net.json new file mode 100644 
index 000000000..6f8ebf1bc --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-S_layout_17cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/input_meta.py new file mode 100644 index 000000000..7d968a339 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/input_meta.py @@ -0,0 +1,62 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 12, 6150] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00138889") + std = float("0.0372419") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 12, 1] + dtype = "int32" + data = [7, 7, 7, 9, 9, 0, 0, 0, 0, 0, 0, 0, 7, 7, 1, 1, 1, 1, 1, 4, 3, 6, 5, 2] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 6150] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0166667") + std = float("0.128019") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 12, 4] + dtype = "float32" + max_val = float("544.0") + mean = float("144.454") + std = float("178.425") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 12, 6150] + dtype = "float32" + max_val = float("0.254265") + mean = float("2.03166e-05") + std = float("0.00151754") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 12, 6150] + dtype = "float32" + max_val = float("0.93727") + mean = float("0.00561772") + std = float("0.0371427") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/model.py b/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/model.py new file mode 100644 index 000000000..e22401d47 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/model.py @@ -0,0 +1,193 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x6150xi64) <- (2x12x6150xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("12"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x6150xi64) <- (2x6150xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (24xi32) <- (2x12x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (12300xi64) <- (2x6150xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (12300xi32) <- (24xi32, 12300xi64, 1xi32) + gather_0 = 
paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 6150] + + # pd_op.reshape: (2x6150xi32) <- (12300xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x6150xb) <- (2x6150xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x6150xi32) <- (2x6150xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x6150xi32) <- (2x6150xb, 2x6150xi32, 2x6150xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (24x4xf32) <- (2x12x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (12300x4xf32) <- (24x4xf32, 12300xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 6150, 4] + + # pd_op.reshape: (2x6150x4xf32) <- (12300x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x6150x12xf32) <- (2x6150xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (11xi64) <- () + full_6 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x6150x11xf32) <- (2x6150x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x12x6150xf32) <- (2x12x6150xf32, 2x12x6150xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x12x1xf32) <- (2x12x6150xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x12x6150xf32) <- (2x12x6150xf32, 2x12x6150xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x12x1xf32) <- (2x12x6150xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x12x1xf32) <- (2x12x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, 
max_0 + + # pd_op.divide: (2x12x6150xf32) <- (2x12x6150xf32, 2x12x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x12x6150xf32) <- (2x12x6150xf32, 2x12x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x6150xf32) <- (2x12x6150xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x6150x1xf32) <- (2x6150xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x6150x11xf32) <- (2x6150x11xf32, 2x6150x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_17cls/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..9b9afae02 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +521390a9260589d601a1caec13a059858fb12f40b5e557fe9fcd4428f16e7c37 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/graph_net.json new file mode 100644 index 000000000..d183290da --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-S_layout_3cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/input_meta.py new file mode 100644 index 000000000..c5efae135 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/input_meta.py @@ -0,0 +1,98 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [5] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [3598] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 5, 3598] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00113952") + std = float("0.0337376") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 5, 1] + dtype = "int32" + data = [2, 3, 3, 4, 4, 1, 5, 5, 8, 6] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 3598] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00569761") + std = float("0.0752672") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 5, 4] + dtype = "float32" + data = [ + 102.432, + 315.509, + 185.353, + 327.298, + 95.464, + 0.0, + 386.734, + 78.0351, + 57.139, + 104.421, + 413.213, + 280.14, + 166.539, + 88.1404, + 312.871, + 97.1228, + 154.693, + 290.807, + 324.717, + 299.789, + 352.929, + 31.5439, + 365.006, + 35.3387, + 48.6452, + 36.2873, + 366.684, + 204.68, + 45.6258, + 212.269, 
+ 370.71, + 384.219, + 53.6774, + 388.488, + 60.3871, + 392.046, + 140.903, + 206.577, + 274.761, + 210.609, + ] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 3598, 4] + dtype = "float32" + min_val = float("-182.974") + max_val = float("673.896") + mean = float("208.0") + std = float("128.278") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/model.py b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/model.py new file mode 100644 index 000000000..ddd8b6f2a --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/model.py @@ -0,0 +1,322 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(unsqueeze_0, cast_0) + del cast_0, unsqueeze_0 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_5, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_6) + del data_4, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), 
paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_1) + del full_int_array_1 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full: (xi64) <- () + full_8 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_5, data_1, full_8] + del data_1, full_5, full_8 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x12xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_9, where_0.dtype), full_9 + ) + del full_9 + + # pd_op.full: (11xi64) <- () + full_10 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_10, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.index_select: (2x-1x11xf32) <- (2x-1x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.unsqueeze: (2x-1x1x4xf32) <- (2x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_5, full_int_array_2) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.unsqueeze: (2x1x-1x4xf32) <- (2x-1x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_2, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_2, [1], [] + ) + del full_int_array_4 + + # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del 
full_int_array_2, full_int_array_5, unsqueeze_2 + + # pd_op.maximum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 2x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_11, full_12) + del subtract_0 + + # pd_op.prod: (2x-1x-1xf32) <- (2x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 2x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_11, full_12) + del subtract_1 + + # pd_op.prod: (2x-1x1xf32) <- (2x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 2x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_11, full_12) + del full_11, full_12, subtract_2 + + # pd_op.prod: (2x1x-1xf32) <- (2x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (2x-1x-1xf32) <- (2x-1x1xf32, 2x1x-1xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x-1xf32) <- (2x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_13, float("1e-09"), True) + del full_13, subtract_3 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(divide_0, data_2) + del data_2, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-2] + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, False) + del full_int_array_6, multiply_2 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (2x-1x11xf32) <- (2x-1x11xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ 
b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..fff74fa0e --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +24b2c1824ef8422782ea275ad739cb7319d20d5ed36e616d8e76bdab3d63ee8c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/graph_net.json new file mode 100644 index 000000000..d183290da --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-S_layout_3cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/input_meta.py new file mode 100644 index 000000000..2ba1bb255 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/input_meta.py @@ -0,0 +1,84 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 6150] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00140921") + std = float("0.037513") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 3, 1] + dtype = "int32" + data = [9, 0, 0, 1, 9, 2] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 6150] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00422764") + std = float("0.0648827") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 3, 4] + dtype = "float32" + data = [ + 93.4351, + 0.0, + 544.0, + 508.211, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 27.4365, + 136.0, + 484.397, + 380.8, + 13.2452, + 419.333, + 484.397, + 498.667, + 87.04, + 77.0667, + 425.739, + 111.067, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 3, 6150] + dtype = "float32" + max_val = float("0.233549") + mean = float("6.68287e-05") + std = float("0.00302899") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 3, 6150] + dtype = "float32" + max_val = float("0.962486") + mean = float("0.0113819") + std = float("0.0480583") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/model.py b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/model.py new file mode 100644 index 000000000..fb11c0b10 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/model.py @@ -0,0 +1,193 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x6150xi64) <- (2x3x6150xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + 
scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x6150xi64) <- (2x6150xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (6xi32) <- (2x3x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (12300xi64) <- (2x6150xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (12300xi32) <- (6xi32, 12300xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 6150] + + # pd_op.reshape: (2x6150xi32) <- (12300xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x6150xb) <- (2x6150xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x6150xi32) <- (2x6150xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x6150xi32) <- (2x6150xb, 2x6150xi32, 2x6150xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (6x4xf32) <- (2x3x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (12300x4xf32) <- (6x4xf32, 12300xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 6150, 4] + + # pd_op.reshape: (2x6150x4xf32) <- (12300x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x6150x12xf32) <- (2x6150xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (11xi64) <- () + full_6 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x6150x11xf32) <- (2x6150x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x3x6150xf32) <- (2x3x6150xf32, 2x3x6150xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # 
pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x3x1xf32) <- (2x3x6150xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x3x6150xf32) <- (2x3x6150xf32, 2x3x6150xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x3x1xf32) <- (2x3x6150xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x3x1xf32) <- (2x3x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (2x3x6150xf32) <- (2x3x6150xf32, 2x3x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x3x6150xf32) <- (2x3x6150xf32, 2x3x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x6150xf32) <- (2x3x6150xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x6150x1xf32) <- (2x6150xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x6150x11xf32) <- (2x6150x11xf32, 2x6150x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_4/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..85660ae0f --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +80d1ed592dc09b73b7a9134c4346bbbaed2fbcb8708cd41f31709305c34d10bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/graph_net.json new file mode 100644 index 000000000..d183290da --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-S_layout_3cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/input_meta.py new file mode 100644 index 000000000..b5e2b11a6 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/input_meta.py @@ -0,0 +1,105 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [14] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [4165] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 14, 4165] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0012605") + std = float("0.0354812") + data = None + + +class Program_weight_tensor_data_3: + name = 
"data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 14, 1] + dtype = "int32" + data = [ + 7, + 7, + 7, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 3, + 4, + 3, + 4, + 9, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 4165] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0176471") + std = float("0.131665") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 14, 4] + dtype = "float32" + max_val = float("448.0") + mean = float("93.5693") + std = float("126.644") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 14, 4165] + dtype = "float32" + max_val = float("0.531705") + mean = float("5.99419e-05") + std = float("0.00347373") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 14, 4165] + dtype = "float32" + max_val = float("0.960982") + mean = float("0.00570039") + std = float("0.0413357") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/model.py b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/model.py new file mode 100644 index 000000000..e0a9a439b --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/model.py @@ -0,0 +1,213 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 + + # pd_op.full: (1xf32) <- 
() + full_4 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x12xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (11xi64) <- () + full_7 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (2x-1x11xf32) <- (2x-1x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) + del full_8, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: 
(2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (2x-1x11xf32) <- (2x-1x11xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-S_layout_3cls/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..982eccac3 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +78b029970cfa0e2c00235d9ecf66b3eb54de8aa8982e1e99dc3988b9040aba8b \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/graph_net.json new file mode 100644 index 000000000..a2baeb082 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-XS", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/input_meta.py new file mode 100644 index 000000000..61c3720a2 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/input_meta.py @@ -0,0 +1,86 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 4789] + dtype = "float32" + max_val = float("2.0") + mean = float("0.00245354") + std = float("0.0515396") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 2, 4789] + dtype = "float32" + max_val = float("0.676182") + mean = float("0.00801628") + std = float("0.0390109") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 2, 4789] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00122677") + std = float("0.0350038") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 2, 1] + dtype = "int32" + data = [0, 0, 0, 3, 0, 0, 2, 2] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 2, 4] + dtype = "float32" + data = [ + 322.56, + 145.946, + 426.24, + 233.514, + 0.0, + 0.0, + 0.0, + 0.0, + 216.0, + 243.6, + 352.0, + 345.6, + 208.0, + 152.4, + 332.8, + 244.8, + 300.632, + 91.9149, + 401.684, + 228.085, + 0.0, + 0.0, + 0.0, + 0.0, + 235.2, + 38.1457, + 280.8, + 146.225, + 278.4, + 47.6821, + 314.4, + 149.404, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [4, 4789, 4] + dtype = "float32" + min_val = float("-149.166") + max_val = float("680.308") + mean = float("240.959") + std = float("143.532") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/model.py b/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/model.py new file mode 100644 index 000000000..b77ec50ea --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/model.py @@ -0,0 +1,346 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + 
def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (4x1x4789xf32) <- (4x4789xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (4x1x4789xb) <- (4x1x4789xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.cast: (4x1x4789xi32) <- (4x1x4789xb) + cast_0 = paddle._C_ops.cast(greater_than_0, paddle.int32) + del greater_than_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 2, 1] + + # pd_op.tile: (4x2x4789xi32) <- (4x1x4789xi32, 3xi64) + tile_0 = paddle._C_ops.tile(cast_0, full_int_array_1) + del cast_0, full_int_array_1 + + # pd_op.cast: (4x2x4789xb) <- (4x2x4789xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x4789xi64) <- (4x2x4789xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + del data_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (4x4789x2xf32) <- (4x4789xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (4x2x4789xf32) <- (4x4789x2xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (4x2x4789xf32) <- (4x2x4789xb, 4x2x4789xf32, 4x2x4789xf32) + where_0 = paddle._C_ops.where(cast_1, transpose_0, data_2) + del cast_1, data_2, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (4x4789xf32) <- (4x2x4789xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (4x4789xi64) <- (4x2x4789xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (4xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") + del full_3, full_4, full_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.unsqueeze: (4x1xi32) <- (4xi32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(arange_0, full_int_array_3) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x1xi32) <- (4x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_1, full_6, float("0"), True) + del full_6, unsqueeze_1 + + # pd_op.cast: (4x1xi64) <- (4x1xi32) + cast_2 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (4x4789xi64) <- (4x4789xi64, 4x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_2) + del argmax_1, cast_2 + + # pd_op.flatten: 
(8xi32) <- (4x2x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (19156xi64) <- (4x4789xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (19156xi32) <- (8xi32, 19156xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_7) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [4, 4789] + + # pd_op.reshape: (4x4789xi32) <- (19156xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_4) + del full_int_array_4, gather_0 + + # pd_op.full: (xf32) <- () + full_8 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (4x4789xb) <- (4x4789xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_8) + del full_8, sum_0 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (4x4789xi32) <- (4x4789xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_9, paddle.int32, paddle.framework._current_expected_place() + ) + del full_9 + + # pd_op.where: (4x4789xi32) <- (4x4789xb, 4x4789xi32, 4x4789xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (4x2x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_5) + del full_int_array_5 + + # pd_op.gather: (19156x4xf32) <- (8x4xf32, 19156xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_7) + del flatten_1, full_7, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [4, 4789, 4] + + # pd_op.reshape: (4x4789x4xf32) <- (19156x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_6) + del full_int_array_6, gather_1 + + # pd_op.full: (1xi32) <- () + full_10 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (4x4789x5xf32) <- (4x4789xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_10, where_1.dtype), full_10 + ) + del full_10 + + # pd_op.full: (4xi64) <- () + full_11 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_11, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_11 + + # pd_op.index_select: (4x4789x4xf32) <- (4x4789x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2] + + # pd_op.unsqueeze: (4x2x1x4xf32) <- (4x2x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_7) + del data_4 + + # pd_op.unsqueeze: (4x1x4789x4xf32) <- (4x4789x4xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5, full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [0] + + # pd_op.slice: (4x2x1x2xf32) <- (4x2x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_8, full_int_array_7, [1], [] + ) + + # 
pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [2147483647] + + # pd_op.slice: (4x2x1x2xf32) <- (4x2x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_7, full_int_array_9, [1], [] + ) + del unsqueeze_2 + + # pd_op.slice: (4x1x4789x2xf32) <- (4x1x4789x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_3, [3], full_int_array_8, full_int_array_7, [1], [] + ) + del full_int_array_8 + + # pd_op.slice: (4x1x4789x2xf32) <- (4x1x4789x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_3, [3], full_int_array_7, full_int_array_9, [1], [] + ) + del full_int_array_7, full_int_array_9, unsqueeze_3 + + # pd_op.maximum: (4x2x4789x2xf32) <- (4x2x1x2xf32, 4x1x4789x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (4x2x4789x2xf32) <- (4x2x1x2xf32, 4x1x4789x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (4x2x4789x2xf32) <- (4x2x4789x2xf32, 4x2x4789x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (4x2x4789x2xf32) <- (4x2x4789x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_12, full_13) + del subtract_0 + + # pd_op.prod: (4x2x4789xf32) <- (4x2x4789x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_3, False, False) + del clip_0 + + # pd_op.subtract: (4x2x1x2xf32) <- (4x2x1x2xf32, 4x2x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (4x2x1x2xf32) <- (4x2x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_12, full_13) + del subtract_1 + + # pd_op.prod: (4x2x1xf32) <- (4x2x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_3, False, False) + del clip_1 + + # pd_op.subtract: (4x1x4789x2xf32) <- (4x1x4789x2xf32, 4x1x4789x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (4x1x4789x2xf32) <- (4x1x4789x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_12, full_13) + del full_12, full_13, subtract_2 + + # pd_op.prod: (4x1x4789xf32) <- (4x1x4789x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_3, False, False) + del clip_2 + + # pd_op.add: (4x2x4789xf32) <- (4x2x1xf32, 4x1x4789xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (4x2x4789xf32) <- (4x2x4789xf32, 4x2x4789xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_14 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x2x4789xf32) <- (4x2x4789xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_14, float("1e-09"), True) + del full_14, subtract_3 + + # pd_op.divide: (4x2x4789xf32) <- (4x2x4789xf32, 4x2x4789xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (4x2x4789xf32) <- (4x2x4789xf32, 4x2x4789xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, where_0) + del divide_0, where_0 + + # pd_op.max: (4x4789xf32) <- (4x2x4789xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_2, False) + del full_int_array_2, multiply_1 + + # pd_op.unsqueeze: (4x4789x1xf32) <- 
(4x4789xf32, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(max_0, full_int_array_3) + del full_int_array_3, max_0 + + # pd_op.multiply: (4x4789x4xf32) <- (4x4789x4xf32, 4x4789x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_4) + del index_select_0, unsqueeze_4, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-XS/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..1a41c7c5f --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +6b9ebbb355cbefd223744cf4f7bb67f89cb7044a20ba22a21b372d3327ff6482 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/graph_net.json new file mode 100644 index 000000000..a2baeb082 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PicoDet-XS", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/input_meta.py new file mode 100644 index 000000000..2c47ebc80 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/input_meta.py @@ -0,0 +1,92 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 2, 2577] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00315289") + std = float("0.056062") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 1] + dtype = "int32" + data = [0, 1, 2, 3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 2, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 2, 0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 2577] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00630578") + std = float("0.0791582") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 2, 4] + dtype = "float32" + data = [ + 30.976, + 7.65217, + 295.68, + 352.0, + 0.0, + 0.0, + 0.0, + 0.0, + 125.035, + 195.959, + 186.193, + 277.608, + 0.0, + 0.0, + 0.0, + 0.0, + 130.087, + 56.0, + 275.478, + 176.0, + 132.638, + 240.0, + 280.58, + 352.0, + 8.43114, + 0.0, + 170.731, + 352.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [4, 2, 2577] + dtype = "float32" + max_val = float("0.0613762") + mean = float("1.93609e-05") + std = float("0.000727902") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [4, 2, 2577] + dtype = "float32" + max_val = float("0.814194") + mean = float("0.0170456") + std = float("0.0574271") + data = None diff --git a/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/model.py b/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/model.py new file mode 100644 index 000000000..86c6a10c4 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/model.py @@ -0,0 +1,181 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, 
data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x2577xi64) <- (4x2x2577xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x1xi32) <- (4x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (4x1xi64) <- (4x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (4x2577xi64) <- (4x2577xi64, 4x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (8xi32) <- (4x2x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (10308xi64) <- (4x2577xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (10308xi32) <- (8xi32, 10308xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [4, 2577] + + # pd_op.reshape: (4x2577xi32) <- (10308xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (4x2577xb) <- (4x2577xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (4x2577xi32) <- (4x2577xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (4x2577xi32) <- (4x2577xb, 4x2577xi32, 4x2577xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (4x2x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (10308x4xf32) <- (8x4xf32, 10308xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [4, 2577, 4] + + # pd_op.reshape: (4x2577x4xf32) <- (10308x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (4x2577x5xf32) <- (4x2577xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (4xi64) <- () + full_6 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + 
del full_6 + + # pd_op.index_select: (4x2577x4xf32) <- (4x2577x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (4x2x2577xf32) <- (4x2x2577xf32, 4x2x2577xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (4x2x1xf32) <- (4x2x2577xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (4x2x2577xf32) <- (4x2x2577xf32, 4x2x2577xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (4x2x1xf32) <- (4x2x2577xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x2x1xf32) <- (4x2x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (4x2x2577xf32) <- (4x2x2577xf32, 4x2x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (4x2x2577xf32) <- (4x2x2577xf32, 4x2x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (4x2577xf32) <- (4x2x2577xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (4x2577x1xf32) <- (4x2577xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (4x2577x4xf32) <- (4x2577x4xf32, 4x2577x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PicoDet-XS/subgraph_5/weight_meta.py @@ -0,0 +1 @@ +