2525/* RX mailbox client buffer max length */
2626#define MBOX_CLIENT_BUF_MAX (IPI_BUF_LEN_MAX + \
2727 sizeof(struct zynqmp_ipi_message))
28+
29+ #define RSC_TBL_XLNX_MAGIC ((uint32_t)'x' << 24 | (uint32_t)'a' << 16 | \
30+ (uint32_t)'m' << 8 | (uint32_t)'p')
31+
2832/*
2933 * settings for RPU cluster mode which
3034 * reflects possible values of xlnx,cluster-mode dt-property
@@ -73,6 +77,26 @@ struct mbox_info {
7377 struct mbox_chan * rx_chan ;
7478};
7579
/**
 * struct rsc_tbl_data - handshake record published by the remote firmware
 *
 * Platform specific data structure used to sync resource table address.
 * This is a shared-memory ABI with the remote processor: it is important
 * to maintain the order and size of each field exactly as on the remote
 * side, hence the fixed-width types and __packed.
 *
 * @version: version of this data structure layout
 * @magic_num: 32-bit magic number (expected to be RSC_TBL_XLNX_MAGIC)
 * @comp_magic_num: bitwise complement of @magic_num, used as a cross-check
 * @rsc_tbl_size: resource table size in bytes
 * @rsc_tbl: physical address of the resource table in shared memory
 */
struct rsc_tbl_data {
	const int version;
	const u32 magic_num;
	const u32 comp_magic_num;
	const u32 rsc_tbl_size;
	const uintptr_t rsc_tbl;
} __packed;
99+
76100/*
77101 * Hardcoded TCM bank values. This will stay in driver to maintain backward
78102 * compatibility with device-tree that does not have TCM information.
@@ -95,20 +119,24 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
/**
 * struct zynqmp_r5_core - per-core state for one RPU (R5) instance
 *
 * @rsc_tbl_va: resource table virtual address (ioremap'd shared memory;
 *              NULL when no preloaded firmware/table was discovered)
 * @dev: device of RPU instance
 * @np: device node of RPU instance
 * @tcm_bank_count: number of TCM banks accessible to this RPU
 * @tcm_banks: array of each TCM bank data
 * @rproc: rproc handle
 * @rsc_tbl_size: resource table size retrieved from remote
 * @pm_domain_id: RPU CPU power domain id
 * @ipi: pointer to mailbox information
 */
struct zynqmp_r5_core {
	void __iomem *rsc_tbl_va;
	struct device *dev;
	struct device_node *np;
	int tcm_bank_count;
	struct mem_bank_data **tcm_banks;
	struct rproc *rproc;
	u32 rsc_tbl_size;
	u32 pm_domain_id;
	struct mbox_info *ipi;
};
@@ -557,6 +585,14 @@ static int add_tcm_banks(struct rproc *rproc)
557585 dev_dbg (dev , "TCM carveout %s addr=%llx, da=0x%x, size=0x%lx" ,
558586 bank_name , bank_addr , da , bank_size );
559587
588+ /*
589+ * In DETACHED state firmware is already running so no need to
590+ * request add TCM registers. However, request TCM PD node to let
591+ * platform management firmware know that TCM is in use.
592+ */
593+ if (rproc -> state == RPROC_DETACHED )
594+ continue ;
595+
560596 rproc_mem = rproc_mem_entry_init (dev , NULL , bank_addr ,
561597 bank_size , da ,
562598 tcm_mem_map , tcm_mem_unmap ,
@@ -662,6 +698,107 @@ static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
662698 return 0 ;
663699}
664700
701+ static struct resource_table * zynqmp_r5_get_loaded_rsc_table (struct rproc * rproc ,
702+ size_t * size )
703+ {
704+ struct zynqmp_r5_core * r5_core ;
705+
706+ r5_core = rproc -> priv ;
707+
708+ * size = r5_core -> rsc_tbl_size ;
709+
710+ return (struct resource_table * )r5_core -> rsc_tbl_va ;
711+ }
712+
713+ static int zynqmp_r5_get_rsc_table_va (struct zynqmp_r5_core * r5_core )
714+ {
715+ struct resource_table * rsc_tbl_addr ;
716+ struct device * dev = r5_core -> dev ;
717+ struct rsc_tbl_data * rsc_data_va ;
718+ struct resource res_mem ;
719+ struct device_node * np ;
720+ int ret ;
721+
722+ /*
723+ * It is expected from remote processor firmware to provide resource
724+ * table address via struct rsc_tbl_data data structure.
725+ * Start address of first entry under "memory-region" property list
726+ * contains that data structure which holds resource table address, size
727+ * and some magic number to validate correct resource table entry.
728+ */
729+ np = of_parse_phandle (r5_core -> np , "memory-region" , 0 );
730+ if (!np ) {
731+ dev_err (dev , "failed to get memory region dev node\n" );
732+ return - EINVAL ;
733+ }
734+
735+ ret = of_address_to_resource (np , 0 , & res_mem );
736+ of_node_put (np );
737+ if (ret ) {
738+ dev_err (dev , "failed to get memory-region resource addr\n" );
739+ return - EINVAL ;
740+ }
741+
742+ rsc_data_va = (struct rsc_tbl_data * )ioremap_wc (res_mem .start ,
743+ sizeof (struct rsc_tbl_data ));
744+ if (!rsc_data_va ) {
745+ dev_err (dev , "failed to map resource table data address\n" );
746+ return - EIO ;
747+ }
748+
749+ /*
750+ * If RSC_TBL_XLNX_MAGIC number and its complement isn't found then
751+ * do not consider resource table address valid and don't attach
752+ */
753+ if (rsc_data_va -> magic_num != RSC_TBL_XLNX_MAGIC ||
754+ rsc_data_va -> comp_magic_num != ~RSC_TBL_XLNX_MAGIC ) {
755+ dev_dbg (dev , "invalid magic number, won't attach\n" );
756+ return - EINVAL ;
757+ }
758+
759+ r5_core -> rsc_tbl_va = ioremap_wc (rsc_data_va -> rsc_tbl ,
760+ rsc_data_va -> rsc_tbl_size );
761+ if (!r5_core -> rsc_tbl_va ) {
762+ dev_err (dev , "failed to get resource table va\n" );
763+ return - EINVAL ;
764+ }
765+
766+ rsc_tbl_addr = (struct resource_table * )r5_core -> rsc_tbl_va ;
767+
768+ /*
769+ * As of now resource table version 1 is expected. Don't fail to attach
770+ * but warn users about it.
771+ */
772+ if (rsc_tbl_addr -> ver != 1 )
773+ dev_warn (dev , "unexpected resource table version %d\n" ,
774+ rsc_tbl_addr -> ver );
775+
776+ r5_core -> rsc_tbl_size = rsc_data_va -> rsc_tbl_size ;
777+
778+ iounmap ((void __iomem * )rsc_data_va );
779+
780+ return 0 ;
781+ }
782+
/**
 * zynqmp_r5_attach() - attach to firmware that is already running
 * @rproc: rproc instance to attach
 *
 * The firmware was loaded by an external agent (debugger or another
 * processor), so no hardware setup is required here; just log the event.
 *
 * Return: 0 always.
 */
static int zynqmp_r5_attach(struct rproc *rproc)
{
	dev_dbg(&rproc->dev, "rproc %d attached\n", rproc->index);

	return 0;
}
789+
/**
 * zynqmp_r5_detach() - detach from a running remote without stopping it
 * @rproc: rproc instance to detach
 *
 * Generate a last notification to the remote after the core framework has
 * cleared the virtio flag. The remote can avoid polling on the virtio reset
 * flag if a kick is generated during detach by the host, checking the flag
 * on the kick interrupt instead.
 *
 * Return: 0 always.
 */
static int zynqmp_r5_detach(struct rproc *rproc)
{
	zynqmp_r5_rproc_kick(rproc, 0);

	return 0;
}
801+
665802static const struct rproc_ops zynqmp_r5_rproc_ops = {
666803 .prepare = zynqmp_r5_rproc_prepare ,
667804 .unprepare = zynqmp_r5_rproc_unprepare ,
@@ -673,6 +810,9 @@ static const struct rproc_ops zynqmp_r5_rproc_ops = {
673810 .sanity_check = rproc_elf_sanity_check ,
674811 .get_boot_addr = rproc_elf_get_boot_addr ,
675812 .kick = zynqmp_r5_rproc_kick ,
813+ .get_loaded_rsc_table = zynqmp_r5_get_loaded_rsc_table ,
814+ .attach = zynqmp_r5_attach ,
815+ .detach = zynqmp_r5_detach ,
676816};
677817
678818/**
@@ -723,6 +863,16 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
723863 goto free_rproc ;
724864 }
725865
866+ /*
867+ * If firmware is already available in the memory then move rproc state
868+ * to DETACHED. Firmware can be preloaded via debugger or by any other
869+ * agent (processors) in the system.
870+ * If firmware isn't available in the memory and resource table isn't
871+ * found, then rproc state remains OFFLINE.
872+ */
873+ if (!zynqmp_r5_get_rsc_table_va (r5_core ))
874+ r5_rproc -> state = RPROC_DETACHED ;
875+
726876 r5_core -> rproc = r5_rproc ;
727877 return r5_core ;
728878
@@ -1134,6 +1284,7 @@ static void zynqmp_r5_cluster_exit(void *data)
11341284 for (i = 0 ; i < cluster -> core_count ; i ++ ) {
11351285 r5_core = cluster -> r5_cores [i ];
11361286 zynqmp_r5_free_mbox (r5_core -> ipi );
1287+ iounmap (r5_core -> rsc_tbl_va );
11371288 of_reserved_mem_device_release (r5_core -> dev );
11381289 put_device (r5_core -> dev );
11391290 rproc_del (r5_core -> rproc );
0 commit comments