{
    "key_": "nndeploy.dag.Graph",
    "name_": "Classification_ResNet",
    "developer_": "Always",
    "source_": "https://github.com/KaimingHe/deep-residual-networks",
    "desc_": "Image classification workflow based on ResNet network",
    "device_type_": "kDeviceTypeCodeCpu:0",
    "is_dynamic_input_": false,
    "inputs_": [],
    "is_dynamic_output_": false,
    "outputs_": [],
    "is_graph_": true,
    "parallel_type_": "kParallelTypeNone",
    "is_inner_": false,
    "node_type_": "Intermediate",
    "is_time_profile_": false,
    "is_debug_": false,
    "is_external_stream_": false,
    "is_graph_node_share_stream_": true,
    "queue_max_size_": 16,
    "is_loop_max_flag_": true,
    "loop_count_": -1,
    "image_url_": [
        "template[http,modelscope]@https://template.cn/template.jpg"
    ],
    "video_url_": [
        "template[http,modelscope]@https://template.cn/template.mp4"
    ],
    "audio_url_": [
        "template[http,modelscope]@https://template.cn/template.mp3"
    ],
    "model_url_": [
        "modelscope@nndeploy/nndeploy:classification/resnet50-v1-7.slim.onnx"
    ],
    "other_url_": [
        "template[http,modelscope]@https://template.cn/template.txt"
    ],
    "node_repository_": [
        {
            "key_": "nndeploy::preprocess::CvtResizeNormTrans",
            "name_": "CvtResizeNormTrans_22",
            "desc_": "cv::Mat to device::Tensor[cvtcolor->resize->normalize->transpose]",
            "device_type_": "kDeviceTypeCodeCpu:0",
            "is_dynamic_input_": false,
            "inputs_": [
                {
                    "desc_": "input_0",
                    "type_": "ndarray",
                    "name_": "OpenCvImageDecode_27@output_0"
                }
            ],
            "is_dynamic_output_": false,
            "outputs_": [
                {
                    "desc_": "output_0",
                    "type_": "Tensor",
                    "name_": "CvtResizeNormTrans_22@output_0"
                }
            ],
            "node_type_": "Intermediate",
            "param_": {
                "src_pixel_type_": "kPixelTypeBGR",
                "dst_pixel_type_": "kPixelTypeRGB",
                "interp_type_": "kInterpTypeLinear",
                "h_": 224,
                "w_": 224,
                "data_type_": "kDataTypeCodeFp32",
                "data_format_": "kDataFormatNCHW",
                "normalize_": true,
                "scale_": [
                    0.003921568859368563,
                    0.003921568859368563,
                    0.003921568859368563,
                    0.003921568859368563
                ],
                "mean_": [
                    0.485,
                    0.456,
                    0.406,
                    0.5
                ],
                "std_": [
                    0.229,
                    0.224,
                    0.225,
                    1
                ]
            },
            "node_repository_": [],
            "size": {
                "width": 200,
                "height": 80
            }
        },
        {
            "key_": "nndeploy::infer::Infer",
            "name_": "Infer_23",
            "desc_": "Universal Inference Node - Enables cross-platform model deployment with multiple inference backends while maintaining native performance",
            "device_type_": "kDeviceTypeCodeCpu:0",
            "is_dynamic_input_": true,
            "inputs_": [
                {
                    "desc_": "input_0",
                    "type_": "Tensor",
                    "name_": "CvtResizeNormTrans_22@output_0"
                }
            ],
            "is_dynamic_output_": true,
            "outputs_": [
                {
                    "desc_": "output_0",
                    "type_": "Tensor",
                    "name_": "Infer_23@output_0"
                }
            ],
            "node_type_": "Intermediate",
            "type_": "kInferenceTypeOnnxRuntime",
            "param_": {
                "model_type_": "kModelTypeOnnx",
                "is_path_": true,
                "model_value_": [
                    "resources/models/classification/resnet50-v1-7.slim.onnx"
                ],
                "external_model_data_": [
                    ""
                ],
                "device_type_": "kDeviceTypeCodeCpu:0",
                "num_thread_": 8,
                "gpu_tune_kernel_": 1,
                "input_num_": 1,
                "input_name_": [
                    ""
                ],
                "input_shape_": [
                    [
                        -1,
                        -1,
                        -1,
                        -1
                    ]
                ],
                "output_num_": 1,
                "output_name_": [
                    ""
                ],
                "encrypt_type_": "kEncryptTypeNone",
                "license_": "",
                "share_memory_mode_": "kShareMemoryTypeNoShare",
                "precision_type_": "kPrecisionTypeFp32",
                "power_type_": "kPowerTypeNormal",
                "is_dynamic_shape_": false,
                "min_shape_": {
                    "input_0": [
                        -1,
                        -1,
                        -1,
                        -1
                    ]
                },
                "opt_shape_": {
                    "input_0": [
                        -1,
                        -1,
                        -1,
                        -1
                    ]
                },
                "max_shape_": {
                    "input_0": [
                        -1,
                        -1,
                        -1,
                        -1
                    ]
                },
                "parallel_type_": "kParallelTypeNone",
                "worker_num_": 1
            },
            "node_repository_": [],
            "size": {
                "width": 200,
                "height": 80
            }
        },
        {
            "key_": "nndeploy::classification::ClassificationPostProcess",
            "name_": "ClassificationPostProcess_24",
            "desc_": "Classification postprocess[device::Tensor->ClassificationResult]",
            "device_type_": "kDeviceTypeCodeCpu:0",
            "is_dynamic_input_": false,
            "inputs_": [
                {
                    "desc_": "input_0",
                    "type_": "Tensor",
                    "name_": "Infer_23@output_0"
                }
            ],
            "is_dynamic_output_": false,
            "outputs_": [
                {
                    "desc_": "output_0",
                    "type_": "ClassificationResult",
                    "name_": "ClassificationPostProcess_24@output_0"
                }
            ],
            "node_type_": "Intermediate",
            "param_": {
                "topk": 1,
                "is_softmax": true,
                "version": -1
            },
            "node_repository_": [],
            "size": {
                "width": 200,
                "height": 80
            }
        },
        {
            "key_": "nndeploy::classification::DrawLable",
            "name_": "DrawLable_25",
            "desc_": "Draw classification labels on input cv::Mat image based on classification results[cv::Mat->cv::Mat]",
            "device_type_": "kDeviceTypeCodeCpu:0",
            "is_dynamic_input_": false,
            "inputs_": [
                {
                    "desc_": "input_0",
                    "type_": "ndarray",
                    "name_": "OpenCvImageDecode_27@output_0"
                },
                {
                    "desc_": "input_1",
                    "type_": "ClassificationResult",
                    "name_": "ClassificationPostProcess_24@output_0"
                }
            ],
            "is_dynamic_output_": false,
            "outputs_": [
                {
                    "desc_": "output_0",
                    "type_": "ndarray",
                    "name_": "DrawLable_25@output_0"
                }
            ],
            "node_type_": "Intermediate",
            "node_repository_": [],
            "size": {
                "width": 200,
                "height": 80
            }
        },
        {
            "key_": "nndeploy::codec::OpenCvImageDecode",
            "name_": "OpenCvImageDecode_27",
            "developer_": "",
            "source_": "",
            "desc_": "Decode image using OpenCV, from image path to cv::Mat, default color space is BGR",
            "device_type_": "kDeviceTypeCodeCpu:0",
            "version_": "1.0.0",
            "required_params_": [
                "path_"
            ],
            "ui_params_": [],
            "is_dynamic_input_": false,
            "inputs_": [],
            "is_dynamic_output_": false,
            "outputs_": [
                {
                    "type_": "ndarray",
                    "desc_": "output_0",
                    "name_": "OpenCvImageDecode_27@output_0"
                }
            ],
            "node_type_": "Input",
            "io_type_": "Image",
            "path_": "resources/template/nndeploy-workflow/classification/input.resnet.png",
            "size": {
                "width": 200,
                "height": 80
            },
            "node_repository_": []
        },
        {
            "key_": "nndeploy::codec::OpenCvImageEncode",
            "name_": "OpenCvImageEncode_28",
            "developer_": "",
            "source_": "",
            "desc_": "Encode image using OpenCV, from cv::Mat to image file, supports common image formats",
            "device_type_": "kDeviceTypeCodeCpu:0",
            "version_": "1.0.0",
            "required_params_": [
                "path_"
            ],
            "ui_params_": [],
            "is_dynamic_input_": false,
            "inputs_": [
                {
                    "type_": "ndarray",
                    "desc_": "input_0",
                    "name_": "DrawLable_25@output_0"
                }
            ],
            "is_dynamic_output_": false,
            "outputs_": [],
            "node_type_": "Output",
            "io_type_": "Image",
            "path_": "resources/images/result.resnet.jpg",
            "size": {
                "width": 200,
                "height": 80
            },
            "node_repository_": []
        }
    ],
    "nndeploy_ui_layout": {
        "layout": {
            "CvtResizeNormTrans_22": {
                "x": 400,
                "y": 60
            },
            "Infer_23": {
                "x": 700,
                "y": 60
            },
            "ClassificationPostProcess_24": {
                "x": 1000,
                "y": 60
            },
            "DrawLable_25": {
                "x": 1300,
                "y": 0
            },
            "OpenCvImageDecode_27": {
                "x": 110.25706940874026,
                "y": -189.32647814910027
            },
            "OpenCvImageEncode_28": {
                "x": 1569.383033419023,
                "y": 161.31619537275066
            }
        },
        "groups": []
    }
}