Commit 6416a18

add cache option api
1 parent ed0637f commit 6416a18

4 files changed: +292 additions, -1 deletion

libcpu/aarch64/cortex-a53/SConscript

Lines changed: 1 addition & 0 deletions
@@ -7,6 +7,7 @@ context_gcc.S
 vector_gcc.S
 entry_point.S
 cpu_gcc.S
+cache.S
 ''')
 CPPPATH = [cwd]

libcpu/aarch64/cortex-a53/cache.S

Lines changed: 151 additions & 0 deletions
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2006-2020, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author      Notes
+ * 2020-03-17     bigmagic    first version
+ */
+
+/*
+ * void __asm_dcache_level(level)
+ *
+ * flush or invalidate one cache level.
+ *
+ * x0: cache level
+ * x1: 0 clean & invalidate, 1 invalidate only
+ * x2~x9: clobbered
+ */
+.globl __asm_dcache_level
+__asm_dcache_level:
+    lsl     x12, x0, #1
+    msr     csselr_el1, x12     /* select cache level */
+    isb                         /* sync change of csselr_el1 */
+    mrs     x6, ccsidr_el1      /* read the new ccsidr_el1 */
+    and     x2, x6, #7          /* x2 <- log2(cache line size) - 4 */
+    add     x2, x2, #4          /* x2 <- log2(cache line size) */
+    mov     x3, #0x3ff
+    and     x3, x3, x6, lsr #3  /* x3 <- max number of #ways */
+    clz     w5, w3              /* bit position of #ways */
+    mov     x4, #0x7fff
+    and     x4, x4, x6, lsr #13 /* x4 <- max number of #sets */
+    /* x12 <- cache level << 1 */
+    /* x2 <- line length offset */
+    /* x3 <- number of cache ways - 1 */
+    /* x4 <- number of cache sets - 1 */
+    /* x5 <- bit position of #ways */
+
+loop_set:
+    mov     x6, x3              /* x6 <- working copy of #ways */
+loop_way:
+    lsl     x7, x6, x5
+    orr     x9, x12, x7         /* map way and level to cisw value */
+    lsl     x7, x4, x2
+    orr     x9, x9, x7          /* map set number to cisw value */
+    tbz     w1, #0, 1f
+    dc      isw, x9             /* invalidate by set/way */
+    b       2f
+1:  dc      cisw, x9            /* clean & invalidate by set/way */
+2:  subs    x6, x6, #1          /* decrement the way */
+    b.ge    loop_way
+    subs    x4, x4, #1          /* decrement the set */
+    b.ge    loop_set
+
+    ret
+
+/*
+ * void __asm_dcache_all(int invalidate_only)
+ *
+ * x0: 0 clean & invalidate, 1 invalidate only
+ *
+ * flush or invalidate all data cache by SET/WAY.
+ */
+.globl __asm_dcache_all
+__asm_dcache_all:
+    mov     x1, x0
+    dsb     sy
+    mrs     x10, clidr_el1      /* read clidr_el1 */
+    lsr     x11, x10, #24
+    and     x11, x11, #0x7      /* x11 <- loc */
+    cbz     x11, finished       /* if loc is 0, exit */
+    mov     x15, lr
+    mov     x0, #0              /* start flush at cache level 0 */
+    /* x0 <- cache level */
+    /* x10 <- clidr_el1 */
+    /* x11 <- loc */
+    /* x15 <- return address */
+
+loop_level:
+    lsl     x12, x0, #1
+    add     x12, x12, x0        /* x12 <- tripled cache level */
+    lsr     x12, x10, x12
+    and     x12, x12, #7        /* x12 <- cache type */
+    cmp     x12, #2
+    b.lt    skip                /* skip if no cache or icache only */
+    bl      __asm_dcache_level  /* x1 = 0 flush, 1 invalidate */
+skip:
+    add     x0, x0, #1          /* increment cache level */
+    cmp     x11, x0
+    b.gt    loop_level
+
+    mov     x0, #0
+    msr     csselr_el1, x0      /* restore csselr_el1 */
+    dsb     sy
+    isb
+    mov     lr, x15
+
+finished:
+    ret
+
+.globl __asm_flush_dcache_all
+__asm_flush_dcache_all:
+    mov     x0, #0
+    b       __asm_dcache_all
+
+.globl __asm_invalidate_dcache_all
+__asm_invalidate_dcache_all:
+    mov     x0, #0x1
+    b       __asm_dcache_all
+
+/*
+ * void __asm_flush_dcache_range(start, end)
+ *
+ * clean & invalidate data cache in the range [start, end)
+ *
+ * x0: start address
+ * x1: end address
+ */
+.globl __asm_flush_dcache_range
+__asm_flush_dcache_range:
+    mrs     x3, ctr_el0
+    lsr     x3, x3, #16
+    and     x3, x3, #0xf        /* x3 <- CTR_EL0.DminLine: log2(words) of the smallest line */
+    mov     x2, #4
+    lsl     x2, x2, x3          /* x2 <- minimal cache line size in bytes */
+
+    sub     x3, x2, #1
+    bic     x0, x0, x3          /* align start down to a line boundary */
+1:  dc      civac, x0           /* clean & invalidate data or unified cache */
+    add     x0, x0, x2
+    cmp     x0, x1
+    b.lo    1b
+    dsb     sy
+    ret
+
+/*
+ * void __asm_invalidate_icache_all(void)
+ *
+ * invalidate all icache entries, inner shareable.
+ */
+.globl __asm_invalidate_icache_all
+__asm_invalidate_icache_all:
+    ic      ialluis
+    isb     sy
+    ret
+
+.globl __asm_flush_l3_cache
+__asm_flush_l3_cache:
+    mov     x0, #0              /* return status as success */
+    ret
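For readers who find the set/way walk hard to follow, here is a rough C model of what __asm_dcache_level and __asm_dcache_all compute together. This is a sketch only (the function name is made up, and it is not part of the commit); the real code has to stay in assembly so it can run before the stack is safely cacheable:

    /* dcache_by_set_way: illustrative C model of the set/way traversal.
     * invalidate_only: 0 = clean & invalidate, 1 = invalidate only. */
    static void dcache_by_set_way(int invalidate_only)
    {
        unsigned long clidr, ccsidr, csselr;

        __asm__ volatile("dsb sy");
        __asm__ volatile("mrs %0, clidr_el1" : "=r"(clidr));
        unsigned int loc = (clidr >> 24) & 0x7;           /* level of coherency */

        for (unsigned int level = 0; level < loc; level++)
        {
            unsigned int type = (clidr >> (3 * level)) & 0x7;
            if (type < 2)                                 /* no cache, or icache only */
                continue;

            csselr = (unsigned long)level << 1;           /* select data/unified cache */
            __asm__ volatile("msr csselr_el1, %0\n\tisb" :: "r"(csselr));
            __asm__ volatile("mrs %0, ccsidr_el1" : "=r"(ccsidr));

            unsigned int line_shift = (ccsidr & 0x7) + 4;      /* log2(line bytes)  */
            unsigned int ways       = (ccsidr >> 3) & 0x3ff;   /* number of ways - 1 */
            unsigned int sets       = (ccsidr >> 13) & 0x7fff; /* number of sets - 1 */
            /* the way index is packed at the top of the set/way operand,
             * which is what the "clz w5, w3" computes in the assembly */
            unsigned int way_shift  = ways ? (unsigned int)__builtin_clz(ways) : 32;

            for (int set = (int)sets; set >= 0; set--)
            {
                for (int way = (int)ways; way >= 0; way--)
                {
                    unsigned long sw = ((unsigned long)way << way_shift)
                                     | ((unsigned long)set << line_shift)
                                     | csselr;
                    if (invalidate_only)
                        __asm__ volatile("dc isw, %0" :: "r"(sw));
                    else
                        __asm__ volatile("dc cisw, %0" :: "r"(sw));
                }
            }
        }

        __asm__ volatile("msr csselr_el1, xzr\n\tdsb sy\n\tisb");
    }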

libcpu/aarch64/cortex-a53/mmu.c

Lines changed: 94 additions & 0 deletions
@@ -9,6 +9,7 @@
  */
 #include <mmu.h>
 #include <stddef.h>
+#include <rthw.h>
 
 #define TTBR_CNP 1
 
@@ -35,6 +36,13 @@ static unsigned long main_tbl[512 * 20] __attribute__((aligned (4096)));
 
 int free_idx = 1;
 
+void __asm_invalidate_icache_all(void);
+void __asm_flush_dcache_all(void);
+int __asm_flush_l3_cache(void);
+void __asm_flush_dcache_range(unsigned long long start, unsigned long long end);
+void __asm_invalidate_dcache_all(void);
+void __asm_invalidate_icache_all(void);
+
 void mmu_memset(char *dst, char v, size_t len)
 {
     while (len--)
@@ -50,6 +58,20 @@ static unsigned long get_free_page(void)
     return (unsigned long)(main_tbl + __page_off);
 }
 
+static inline unsigned int get_sctlr(void)
+{
+    unsigned int val;
+    asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
+    return val;
+}
+
+static inline void set_sctlr(unsigned int val)
+{
+    asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
+    asm volatile("isb");
+}
+
 void mmu_init(void)
 {
     unsigned long val64;
@@ -101,6 +123,9 @@ void mmu_enable(void)
     __asm__ volatile("mrs %0, SCTLR_EL1\n":"=r"(val32));
     val32 |= 0x1005; //enable mmu, I C M
     __asm__ volatile("dmb sy\n msr SCTLR_EL1, %0\nisb sy\n"::"r"(val32));
+    rt_hw_icache_enable();
+    rt_hw_dcache_enable();
+
 }
 
 static int map_single_page_2M(unsigned long* lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
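Aside: the magic 0x1005 written to SCTLR_EL1 above is exactly the OR of three of the CR_* masks introduced in mmu.h below. A quick compile-time sanity check (illustrative only, not part of the commit):

    /* CR_M | CR_C | CR_I
     *   = (1 << 0) | (1 << 2) | (1 << 12)
     *   = 0x1 + 0x4 + 0x1000 = 0x1005  -> MMU, D-cache and I-cache on */
    _Static_assert((CR_M | CR_C | CR_I) == 0x1005, "SCTLR_EL1 enable mask");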
@@ -271,3 +296,72 @@ void armv8_map(unsigned long va, unsigned long pa, unsigned long size, unsigned
     map_region(va, pa, size, attr);
 }
 
+void rt_hw_dcache_enable(void)
+{
+    if (!(get_sctlr() & CR_M))
+    {
+        rt_kprintf("please init mmu!\n");
+    }
+    else
+    {
+        set_sctlr(get_sctlr() | CR_C);
+    }
+}
+
+void rt_hw_dcache_flush_all(void)
+{
+    int ret;
+
+    __asm_flush_dcache_all();
+    ret = __asm_flush_l3_cache();
+    if (ret)
+    {
+        rt_kprintf("flushing dcache returned 0x%x\n", ret);
+    }
+    else
+    {
+        rt_kprintf("flushed dcache successfully.\n");
+    }
+}
+
+void rt_hw_dcache_flush_range(unsigned long start_addr, unsigned long size)
+{
+    __asm_flush_dcache_range(start_addr, start_addr + size);
+}
+
+/* implemented as clean & invalidate (dc civac) rather than a pure
+ * invalidate, which is safe even if the range shares cache lines
+ * with unrelated data */
+void rt_hw_dcache_invalidate_range(unsigned long start_addr, unsigned long size)
+{
+    __asm_flush_dcache_range(start_addr, start_addr + size);
+}
+
+void rt_hw_dcache_invalidate_all(void)
+{
+    __asm_invalidate_dcache_all();
+}
+
+void rt_hw_dcache_disable(void)
+{
+    /* if the dcache isn't enabled, there is nothing to disable */
+    if (!(get_sctlr() & CR_C))
+    {
+        rt_kprintf("dcache is not enabled!\n");
+        return;
+    }
+    set_sctlr(get_sctlr() & ~CR_C);
+}
+
+/* icache */
+void rt_hw_icache_enable(void)
+{
+    __asm_invalidate_icache_all();
+    set_sctlr(get_sctlr() | CR_I);
+}
+
+void rt_hw_icache_invalidate_all(void)
+{
+    __asm_invalidate_icache_all();
+}
+
+void rt_hw_icache_disable(void)
+{
+    set_sctlr(get_sctlr() & ~CR_I);
+}
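A sketch of how a driver might use the new dcache range calls around a transfer with a non-coherent DMA master (the buffer, its size and the DMA steps are hypothetical). Because rt_hw_dcache_invalidate_range() is implemented with dc civac, the buffer should be cache-line aligned and padded so no line is shared with unrelated data:

    #include <mmu.h>

    #define DMA_BUF_LEN 512
    static char dma_buf[DMA_BUF_LEN] __attribute__((aligned(64)));

    void dma_example(void)
    {
        /* CPU -> device: push dirty lines to memory before the device reads */
        rt_hw_dcache_flush_range((unsigned long)dma_buf, DMA_BUF_LEN);
        /* ... start DMA from dma_buf, wait for completion ... */

        /* device -> CPU: drop stale cached copies before the CPU reads */
        rt_hw_dcache_invalidate_range((unsigned long)dma_buf, DMA_BUF_LEN);
        /* ... now read fresh data from dma_buf ... */
    }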

libcpu/aarch64/cortex-a53/mmu.h

Lines changed: 46 additions & 1 deletion
@@ -11,6 +11,37 @@
 #ifndef __MMU_H__
 #define __MMU_H__
 
+/*
+ * SCTLR_EL1 control bits (bit layout inherited from the ARMv7 CP15 c1
+ * control register; only a subset of these bits is defined on AArch64)
+ */
+#define CR_M    (1 << 0)    /* MMU enable */
+#define CR_A    (1 << 1)    /* Alignment abort enable */
+#define CR_C    (1 << 2)    /* Dcache enable */
+#define CR_W    (1 << 3)    /* Write buffer enable */
+#define CR_P    (1 << 4)    /* 32-bit exception handler */
+#define CR_D    (1 << 5)    /* 32-bit data address range */
+#define CR_L    (1 << 6)    /* Implementation defined */
+#define CR_B    (1 << 7)    /* Big endian */
+#define CR_S    (1 << 8)    /* System MMU protection */
+#define CR_R    (1 << 9)    /* ROM MMU protection */
+#define CR_F    (1 << 10)   /* Implementation defined */
+#define CR_Z    (1 << 11)   /* Implementation defined */
+#define CR_I    (1 << 12)   /* Icache enable */
+#define CR_V    (1 << 13)   /* Vectors relocated to 0xffff0000 */
+#define CR_RR   (1 << 14)   /* Round Robin cache replacement */
+#define CR_L4   (1 << 15)   /* LDR pc can set T bit */
+#define CR_DT   (1 << 16)
+#define CR_IT   (1 << 18)
+#define CR_ST   (1 << 19)
+#define CR_FI   (1 << 21)   /* Fast interrupt (lower latency mode) */
+#define CR_U    (1 << 22)   /* Unaligned access operation */
+#define CR_XP   (1 << 23)   /* Extended page tables */
+#define CR_VE   (1 << 24)   /* Vectored interrupts */
+#define CR_EE   (1 << 25)   /* Exception (Big) Endian */
+#define CR_TRE  (1 << 28)   /* TEX remap enable */
+#define CR_AFE  (1 << 29)   /* Access flag enable */
+#define CR_TE   (1 << 30)   /* Thumb exception enable */
+
 #define MMU_LEVEL_MASK 0x1ffUL
 #define MMU_MAP_ERROR_VANOTALIGN -1
 #define MMU_MAP_ERROR_PANOTALIGN -2
@@ -20,7 +51,7 @@
 #define MEM_ATTR_MEMORY ((0x1UL << 10) | (0x2UL << 8) | (0x0UL << 6) | (0x1UL << 2))
 #define MEM_ATTR_IO ((0x1UL << 10) | (0x2UL << 8) | (0x0UL << 6) | (0x2UL << 2))
 
-#define BUS_ADDRESS(phys) (((phys) & ~0xC0000000) | 0xC0000000)
+#define BUS_ADDRESS(phys) (((phys) & ~0xC0000000) | 0xC0000000)
 
 void mmu_init(void);
 
@@ -30,4 +61,18 @@ int armv8_map_2M(unsigned long va, unsigned long pa, int count, unsigned long at
 
 void armv8_map(unsigned long va, unsigned long pa, unsigned long size, unsigned long attr);
 
+/* dcache */
+void rt_hw_dcache_enable(void);
+void rt_hw_dcache_flush_all(void);
+void rt_hw_dcache_flush_range(unsigned long start_addr, unsigned long size);
+void rt_hw_dcache_invalidate_range(unsigned long start_addr, unsigned long size);
+void rt_hw_dcache_invalidate_all(void);
+void rt_hw_dcache_disable(void);
+
+/* icache */
+void rt_hw_icache_enable(void);
+void rt_hw_icache_invalidate_all(void);
+void rt_hw_icache_disable(void);
+
 #endif /*__MMU_H__*/
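Taken together, the new calls also support the usual cache teardown pattern, e.g. before handing control to another image. A minimal sketch using only the functions declared above (the helper name is made up):

    void cache_teardown(void)
    {
        rt_hw_dcache_flush_all();       /* write dirty lines back to memory */
        rt_hw_dcache_disable();         /* clear SCTLR_EL1.C */
        rt_hw_icache_invalidate_all();  /* discard stale instruction lines */
        rt_hw_icache_disable();         /* clear SCTLR_EL1.I */
    }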
