@@ -91,26 +91,26 @@ enum panthor_fw_binary_entry_type {
 #define CSF_FW_BINARY_ENTRY_UPDATE BIT(30)
 #define CSF_FW_BINARY_ENTRY_OPTIONAL BIT(31)
 
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_RD BIT(0)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_WR BIT(1)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_EX BIT(2)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_NONE (0 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED (1 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_UNCACHED_COHERENT (2 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED_COHERENT (3 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK GENMASK(4, 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_PROT BIT(5)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED BIT(30)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO BIT(31)
-
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS \
-	(CSF_FW_BINARY_IFACE_ENTRY_RD_RD | \
-	 CSF_FW_BINARY_IFACE_ENTRY_RD_WR | \
-	 CSF_FW_BINARY_IFACE_ENTRY_RD_EX | \
-	 CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK | \
-	 CSF_FW_BINARY_IFACE_ENTRY_RD_PROT | \
-	 CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED | \
-	 CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD BIT(0)
+#define CSF_FW_BINARY_IFACE_ENTRY_WR BIT(1)
+#define CSF_FW_BINARY_IFACE_ENTRY_EX BIT(2)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_NONE (0 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED (1 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_UNCACHED_COHERENT (2 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED_COHERENT (3 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK GENMASK(4, 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_PROT BIT(5)
+#define CSF_FW_BINARY_IFACE_ENTRY_SHARED BIT(30)
+#define CSF_FW_BINARY_IFACE_ENTRY_ZERO BIT(31)
+
+#define CSF_FW_BINARY_IFACE_ENTRY_SUPPORTED_FLAGS \
+	(CSF_FW_BINARY_IFACE_ENTRY_RD | \
+	 CSF_FW_BINARY_IFACE_ENTRY_WR | \
+	 CSF_FW_BINARY_IFACE_ENTRY_EX | \
+	 CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK | \
+	 CSF_FW_BINARY_IFACE_ENTRY_PROT | \
+	 CSF_FW_BINARY_IFACE_ENTRY_SHARED | \
+	 CSF_FW_BINARY_IFACE_ENTRY_ZERO)
 
 /**
  * struct panthor_fw_binary_section_entry_hdr - Describes a section of FW binary
@@ -413,15 +413,15 @@ static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
 	int ret;
 
 	if (!section->data.size &&
-	    !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO))
+	    !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_ZERO))
 		return;
 
 	ret = panthor_kernel_bo_vmap(section->mem);
 	if (drm_WARN_ON(&ptdev->base, ret))
 		return;
 
 	memcpy(section->mem->kmap, section->data.buf, section->data.size);
-	if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO) {
+	if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_ZERO) {
 		memset(section->mem->kmap + section->data.size, 0,
 		       panthor_kernel_bo_size(section->mem) - section->data.size);
 	}
@@ -535,20 +535,20 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
 		return -EINVAL;
 	}
 
-	if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS) {
+	if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_SUPPORTED_FLAGS) {
 		drm_err(&ptdev->base, "Firmware contains interface with unsupported flags (0x%x)\n",
 			hdr.flags);
 		return -EINVAL;
 	}
 
-	if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_PROT) {
+	if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_PROT) {
 		drm_warn(&ptdev->base,
 			 "Firmware protected mode entry not be supported, ignoring");
 		return 0;
 	}
 
 	if (hdr.va.start == CSF_MCU_SHARED_REGION_START &&
-	    !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED)) {
+	    !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_SHARED)) {
 		drm_err(&ptdev->base,
 			"Interface at 0x%llx must be shared", CSF_MCU_SHARED_REGION_START);
 		return -EINVAL;
@@ -587,26 +587,26 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
 
 	section_size = hdr.va.end - hdr.va.start;
 	if (section_size) {
-		u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK;
+		u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK;
 		struct panthor_gem_object *bo;
 		u32 vm_map_flags = 0;
 		struct sg_table *sgt;
 		u64 va = hdr.va.start;
 
-		if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+		if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_WR))
 			vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_READONLY;
 
-		if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_EX))
+		if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_EX))
 			vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC;
 
-		/* TODO: CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_*_COHERENT are mapped to
+		/* TODO: CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_*_COHERENT are mapped to
 		 * non-cacheable for now. We might want to introduce a new
 		 * IOMMU_xxx flag (or abuse IOMMU_MMIO, which maps to device
 		 * memory and is currently not used by our driver) for
 		 * AS_MEMATTR_AARCH64_SHARED memory, so we can take benefit
 		 * of IO-coherent systems.
 		 */
-		if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED)
+		if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED)
 			vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED;
 
 		section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
@@ -619,7 +619,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
 		if (drm_WARN_ON(&ptdev->base, section->mem->va_node.start != hdr.va.start))
 			return -EINVAL;
 
-		if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED) {
+		if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_SHARED) {
 			ret = panthor_kernel_bo_vmap(section->mem);
 			if (ret)
 				return ret;
@@ -689,7 +689,7 @@ panthor_reload_fw_sections(struct panthor_device *ptdev, bool full_reload)
 	list_for_each_entry(section, &ptdev->fw->sections, node) {
 		struct sg_table *sgt;
 
-		if (!full_reload && !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+		if (!full_reload && !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_WR))
 			continue;
 
 		panthor_fw_init_section_mem(ptdev, section);