@@ -1073,12 +1073,59 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
 	return ERR_PTR(ret);
 }
 
+static int
+k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
+					  struct k3_udma_glue_rx_channel_cfg *cfg,
+					  struct device *dev)
+{
+	int ret, i;
+
+	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+							  rx_chn->common.psdata_size,
+							  rx_chn->common.swdata_size);
+
+	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+				     sizeof(*rx_chn->flows), GFP_KERNEL);
+	if (!rx_chn->flows)
+		return -ENOMEM;
+
+	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
+		     rx_chn->common.src_thread, rx_chn->flow_id_base);
+	ret = device_register(&rx_chn->common.chan_dev);
+	if (ret) {
+		dev_err(dev, "Channel Device registration failed %d\n", ret);
+		put_device(&rx_chn->common.chan_dev);
+		rx_chn->common.chan_dev.parent = NULL;
+		return ret;
+	}
+
+	if (xudma_is_pktdma(rx_chn->common.udmax)) {
+		/* prepare the channel device as coherent */
+		rx_chn->common.chan_dev.dma_coherent = true;
+		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+					     DMA_BIT_MASK(48));
+	}
+
+	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < rx_chn->flow_num; i++)
+		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+	k3_udma_glue_dump_rx_chn(rx_chn);
+
+	return 0;
+}
+
 static struct k3_udma_glue_rx_channel *
 k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
 				   struct k3_udma_glue_rx_channel_cfg *cfg)
 {
 	struct k3_udma_glue_rx_channel *rx_chn;
-	int ret, i;
+	int ret;
 
 	if (cfg->flow_id_num <= 0 ||
 	    cfg->flow_id_use_rxchan_id ||
@@ -1109,51 +1156,63 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
 	if (ret)
 		goto err;
 
-	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
-							  rx_chn->common.psdata_size,
-							  rx_chn->common.swdata_size);
-
-	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
-				     sizeof(*rx_chn->flows), GFP_KERNEL);
-	if (!rx_chn->flows) {
-		ret = -ENOMEM;
+	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+	if (ret)
 		goto err;
-	}
 
-	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
-	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
-	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
-		     rx_chn->common.src_thread, rx_chn->flow_id_base);
-	ret = device_register(&rx_chn->common.chan_dev);
-	if (ret) {
-		dev_err(dev, "Channel Device registration failed %d\n", ret);
-		put_device(&rx_chn->common.chan_dev);
-		rx_chn->common.chan_dev.parent = NULL;
-		goto err;
-	}
+	return rx_chn;
 
-	if (xudma_is_pktdma(rx_chn->common.udmax)) {
-		/* prepare the channel device as coherent */
-		rx_chn->common.chan_dev.dma_coherent = true;
-		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
-					     DMA_BIT_MASK(48));
-	}
+err:
+	k3_udma_glue_release_rx_chn(rx_chn);
+	return ERR_PTR(ret);
+}
 
-	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
+						 struct k3_udma_glue_rx_channel_cfg *cfg,
+						 struct device_node *udmax_np, u32 thread_id)
+{
+	struct k3_udma_glue_rx_channel *rx_chn;
+	int ret;
+
+	if (cfg->flow_id_num <= 0 ||
+	    cfg->flow_id_use_rxchan_id ||
+	    cfg->def_flow_cfg ||
+	    cfg->flow_id_base < 0)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Remote RX channel is under control of Remote CPU core, so
+	 * Linux can only request and manipulate by dedicated RX flows
+	 */
+
+	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+	if (!rx_chn)
+		return ERR_PTR(-ENOMEM);
+
+	rx_chn->common.dev = dev;
+	rx_chn->common.swdata_size = cfg->swdata_size;
+	rx_chn->remote = true;
+	rx_chn->udma_rchan_id = -1;
+	rx_chn->flow_num = cfg->flow_id_num;
+	rx_chn->flow_id_base = cfg->flow_id_base;
+	rx_chn->psil_paired = false;
+
+	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
 	if (ret)
 		goto err;
 
-	for (i = 0; i < rx_chn->flow_num; i++)
-		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
-
-	k3_udma_glue_dump_rx_chn(rx_chn);
+	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+	if (ret)
+		goto err;
 
 	return rx_chn;
 
 err:
 	k3_udma_glue_release_rx_chn(rx_chn);
 	return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);
 
 struct k3_udma_glue_rx_channel *
 k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
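For context, a minimal caller sketch of the new entry point. Everything in it beyond the exported glue API is an assumption for illustration: the compatible string, the cfg values, and the 0x4100 PSI-L thread ID are placeholders, not values from this commit. The point of the new function is that it takes the DMA controller's device_node and a PSI-L thread ID directly, instead of resolving a named "dmas" entry on the caller's own node the way k3_udma_glue_request_remote_rx_chn() does.

/*
 * Hypothetical usage sketch -- not part of the commit. The compatible
 * string, cfg values and the 0x4100 PSI-L thread ID are assumptions.
 */
#include <linux/err.h>
#include <linux/of.h>
#include <linux/dma/k3-udma-glue.h>

static struct k3_udma_glue_rx_channel *
my_request_remote_rx(struct device *dev)
{
	struct k3_udma_glue_rx_channel_cfg cfg = { };
	struct k3_udma_glue_rx_channel *rx_chn;
	struct device_node *udmax_np;

	/*
	 * Find the UDMA controller node directly; no "dmas" phandle on
	 * our own node is required for this path.
	 */
	udmax_np = of_find_compatible_node(NULL, NULL,
					   "ti,am654-navss-main-udmap");
	if (!udmax_np)
		return ERR_PTR(-ENODEV);

	cfg.swdata_size = 16;		/* per-descriptor SW data, bytes */
	cfg.flow_id_base = 0x100;	/* must be >= 0 on the remote path */
	cfg.flow_id_num = 1;		/* dedicated RX flows to reserve */

	rx_chn = k3_udma_glue_request_remote_rx_chn_for_thread_id(dev, &cfg,
								  udmax_np,
								  0x4100);
	of_node_put(udmax_np);
	return rx_chn;
}

As with the existing remote-channel path, each reserved flow would still be configured individually afterwards (for example via k3_udma_glue_rx_flow_init()) before descriptors are queued.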