Commit 0a8e82b

ref(linker): split lma and vma of data sec for non unified plats

Signed-off-by: Daniel Oliveira <[email protected]>

1 parent 76a565e

1 file changed (+45 −16)

src/linker.ld

@@ -30,9 +30,16 @@ SECTIONS
         *(.rdata .rodata .rodata.*)
     }
 
-    . = ALIGN(PAGE_SIZE); /* start RW sections in separate page */
-
-    .data : {
+    . = ALIGN(PAGE_SIZE); /* start RW sections in separate page */
+    _data_lma_start = .;
+
+#ifdef MEM_NON_UNIFIED
+    . = CONFIG_HYP_DATA_ADDR; /* Changed location counter to VMA address */
+#endif
+
+    _data_vma_start = .;
+
+    .data : AT (_data_lma_start) {
         *(.data .data.*)
         PROVIDE(__global_pointer$ = . + 0x800);
         *(.sdata .sdata.* .sdata2.*)
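
The essence of the change is in this first hunk: on platforms where the image is stored at a different address from where it executes (non-unified memory), .data now gets a distinct load address (LMA, where the bytes sit in the image) and virtual address (VMA, where code expects them at runtime), via AT(). A minimal standalone sketch of the same pattern; the 0x08000000 (storage) and 0x20000000 (runtime) addresses are hypothetical and not part of this commit:

    /* Sketch of an LMA/VMA split with AT(); all addresses are made up. */
    SECTIONS
    {
        .text 0x08000000 : { *(.text*) }    /* executes from where it is stored */

        _data_lma = .;                      /* load address: right after .text */
        .data 0x20000000 : AT(_data_lma) {  /* VMA in RAM, LMA in the image */
            _data_vma_start = .;
            *(.data .data.*)
            _data_vma_end = .;
        }
        /* Startup code must copy (_data_vma_end - _data_vma_start) bytes
         * from _data_lma to _data_vma_start before any .data access. */
    }

The commit follows the same shape: _data_lma_start marks where .data is stored in the image, and CONFIG_HYP_DATA_ADDR supplies the runtime VMA on MEM_NON_UNIFIED platforms.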
@@ -49,17 +56,39 @@ SECTIONS
         _ipi_cpumsg_handlers_id_end = .;
     }
 
-    . = ALIGN(PAGE_SIZE);
+    .pad_load : {
+        /**
+         * This section ensures the loadable portion of the image (_image_load_end) is page-aligned
+         * by adding padding if necessary. The BYTE(0x00) forces this section to be PROGBITS rather
+         * than NOBITS, ensuring any padding bytes are actually written to the file up to the alignment boundary.
+         */
+        BYTE(0x00)
+        . = ALIGN(PAGE_SIZE);
+    }
+
     _image_load_end = .;
 
+#ifdef MEM_NON_UNIFIED
+    /* Save the current location counter (VMA) and switch to LMA for .vm_images */
+    _vma_before_vm_images = .;
+    _image_load_end = _data_lma_start + (_image_load_end - _data_vma_start);
+    . = _image_load_end;
+#endif
+
     /* Sections to be allocated only in physical memory, not in VA space */
 
-    .vm_images : SUBALIGN(PAGE_SIZE) {
-        _vm_image_start = .;
-        KEEP(*(.vm_image*))
-    }
+    .vm_images : AT(_image_load_end) SUBALIGN(PAGE_SIZE) {
+        _vm_image_start = .;
+        KEEP(*(.vm_image*))
+    }
+
+#ifdef MEM_NON_UNIFIED
+    /* Restore the location counter (VMA) */
+    . = _vma_before_vm_images;
+#endif
+
     . = ALIGN(PAGE_SIZE);
-    _vm_image_end = .;
+    _vm_image_end = ALIGN(_vm_image_start + SIZEOF(.vm_images), PAGE_SIZE);
     _image_noload_start = .;
     extra_allocated_phys_mem = _image_noload_start - _image_load_end;
 
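Two less common tricks appear in this hunk. First, an output section that only advances the location counter is emitted as NOBITS and occupies no space in the file, so the new .pad_load section uses BYTE(0x00) to force PROGBITS and make the alignment padding real. Second, placing .vm_images at its load address requires temporarily moving the location counter, so the VMA is saved and restored around it, and the end symbol has to be computed with SIZEOF() because `.` no longer tracks the section's extent. A standalone sketch of both; .pad, .blob, and the 0x00100000 load address are hypothetical:

    /* Hypothetical sketch of file-backed padding and a VMA save/restore. */
    SECTIONS
    {
        /* 1) Force file-backed padding: BYTE() makes the section PROGBITS,
         *    so the zeros up to the 4 KiB boundary are written to the file. */
        .pad : {
            BYTE(0x00)
            . = ALIGN(0x1000);
        }

        /* 2) Emit a section at a load-only address without disturbing the
         *    virtual layout seen by later sections. */
        _blob_lma = 0x00100000;
        _saved_vma = .;              /* remember the runtime layout */
        . = _blob_lma;               /* VMA == LMA inside .blob */
        .blob : AT(_blob_lma) {
            _blob_start = .;
            KEEP(*(.blob*))
        }
        . = _saved_vma;              /* later sections resume at the old VMA */
        _blob_end = ALIGN(_blob_start + SIZEOF(.blob), 0x1000);
    }

The two-argument ALIGN(exp, align) form rounds the computed end of the section up to the boundary, mirroring the new _vm_image_end calculation above.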
@@ -73,28 +102,28 @@ SECTIONS
 #endif
 
     /* Only no load regions below */
-
-    .bss (NOLOAD) : {
+
+    .bss (NOLOAD) : {
         _bss_start = .;
-        *(.bss* .sbss*)
-        *(COMMON)
+        *(.bss* .sbss*)
+        *(COMMON)
         _bss_end = .;
-    }
+    }
 
     .glb_page_tables (NOLOAD) : ALIGN(PAGE_SIZE) {
         _page_tables_start = .;
         *(.glb_page_tables)
         _page_tables_end = .;
     }
-
+
     . = ALIGN(PAGE_SIZE);
     _image_end = ABSOLUTE(.);
     _dmem_phys_beg = ABSOLUTE(.) + extra_allocated_phys_mem;
 
     . = ALIGN(PAGE_SIZE);
     _dmem_beg = ABSOLUTE(.);
 
-#ifdef MEM_PROT_MMU
+#ifdef MEM_PROT_MMU
     /* Global dynamic memory virtual address space here */
 
     . = BAO_CPU_BASE;
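
One detail worth noting from the MEM_NON_UNIFIED path in the second hunk is the assignment `_image_load_end = _data_lma_start + (_image_load_end - _data_vma_start)`: once a region is split, a VMA inside it maps to the LMA at the same offset from the region's start. A tiny sketch of that arithmetic; every address and symbol here is hypothetical:

    /* Sketch: translating a VMA into its LMA after an AT() split. */
    SECTIONS
    {
        _rw_lma = 0x00100000;        /* where the RW region is stored */
        . = 0x40000000;              /* where it runs */
        _rw_vma = .;
        .data : AT(_rw_lma) { *(.data .data.*) }
        _rw_vma_end = .;
        /* Offsets match on both sides of the split, so:
         *     lma(x) = _rw_lma + (x - _rw_vma)              */
        _rw_lma_end = _rw_lma + (_rw_vma_end - _rw_vma);
    }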
