@@ -76,6 +76,8 @@
 static int l2_line_sz;
 int ioc_exists;
 
+void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
+			       unsigned long sz, const int cacheop);
 void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
 void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
 void (*__dma_cache_wback)(unsigned long start, unsigned long sz);
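The new `_cache_line_loop_ic_fn` pointer lets the icache line-loop variant be selected once at boot, when the cache geometry is probed, instead of being re-tested on every flush. As a standalone illustration of that dispatch pattern, here is a minimal sketch; the names (`line_loop_fn`, `cache_init`, the two variants) are made up for the example and only the shape matches the kernel code:

#include <stdio.h>

/* Hypothetical stand-ins for the two loop variants. */
static void line_loop_alias(unsigned long paddr, unsigned long vaddr)
{
	printf("aliasing variant: paddr=%#lx vaddr=%#lx\n", paddr, vaddr);
}

static void line_loop_plain(unsigned long paddr, unsigned long vaddr)
{
	printf("plain variant:    paddr=%#lx vaddr=%#lx\n", paddr, vaddr);
}

/* Bound once at init, then called on every icache maintenance op. */
static void (*line_loop_fn)(unsigned long paddr, unsigned long vaddr);

static void cache_init(int icache_aliases)
{
	line_loop_fn = icache_aliases ? line_loop_alias : line_loop_plain;
}

int main(void)
{
	cache_init(1);			/* pretend probe found an aliasing icache */
	line_loop_fn(0x8000, 0x4000);	/* dispatches without re-testing the config */
	return 0;
}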
@@ -320,6 +322,45 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
 	}
 }
 
+static inline void
+__cache_line_loop_ic_alias(unsigned long paddr, unsigned long vaddr,
+			   unsigned long sz, const int cacheop)
+{
+	unsigned int aux_cmd, aux_tag;
+	int num_lines;
+	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+
+	aux_cmd = ARC_REG_IC_IVIL;
+	aux_tag = ARC_REG_IC_PTAG;
+
+	/* Ensure we properly floor/ceil the non-line-aligned/sized requests
+	 * so that @paddr is cache-line aligned and @num_lines is integral.
+	 * This can be skipped for page-sized requests since:
+	 * - @paddr is already cache-line aligned (being page aligned)
+	 * - @sz is an integral multiple of the line size (being page sized)
+	 */
+	if (!full_page_op) {
+		sz += paddr & ~CACHE_LINE_MASK;
+		paddr &= CACHE_LINE_MASK;
+		vaddr &= CACHE_LINE_MASK;
+	} else {
+		/* V-P const, PTAG can be written once outside loop */
+		write_aux_reg(aux_tag, paddr);
+	}
+
+	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+	while (num_lines-- > 0) {
+		if (!full_page_op) {
+			write_aux_reg(aux_tag, paddr);
+			paddr += L1_CACHE_BYTES;
+		}
+
+		write_aux_reg(aux_cmd, vaddr);
+		vaddr += L1_CACHE_BYTES;
+	}
+}
+
 #endif
 
 
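The floor/ceil arithmetic in `__cache_line_loop_ic_alias` is easiest to see with concrete numbers: a 0x40-byte request at paddr 0x1234 straddles two lines, so the size is grown by the offset into the first line and the address is floored to the line boundary before dividing. For a full page both adjustments are no-ops, which is why the `full_page_op` path skips them, and `__builtin_constant_p(sz)` lets the compiler drop the branch entirely when the function is inlined with a literal `PAGE_SIZE`. A minimal standalone sketch, assuming a 64-byte line size (the real value comes from the probed cache geometry):

#include <stdio.h>

#define L1_CACHE_BYTES	64UL
#define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long paddr = 0x1234, sz = 0x40;

	sz += paddr & ~CACHE_LINE_MASK;	/* grow by offset into first line: +0x34 */
	paddr &= CACHE_LINE_MASK;	/* floor to the line boundary */

	/* the 0x40 bytes at 0x1234 straddle lines 0x1200 and 0x1240 */
	printf("paddr=%#lx num_lines=%lu\n",
	       paddr, DIV_ROUND_UP(sz, L1_CACHE_BYTES));	/* -> 0x1200, 2 */
	return 0;
}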
@@ -471,7 +512,7 @@ __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
+	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
 	local_irq_restore(flags);
 }
 
@@ -910,6 +951,17 @@ void arc_cache_init(void)
 		if (ic->ver != CONFIG_ARC_MMU_VER)
 			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
 			      ic->ver, CONFIG_ARC_MMU_VER);
+
+#if (CONFIG_ARC_MMU_VER >= 4)
+		/*
+		 * In MMU v4 (HS38x) the aliasing icache config uses the
+		 * IVIL/PTAG pair to provide vaddr/paddr, just as in MMU v3
+		 */
+		if (ic->alias)
+			_cache_line_loop_ic_fn = __cache_line_loop_ic_alias;
+		else
+#endif
+			_cache_line_loop_ic_fn = __cache_line_loop;
 	}
 
 	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
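One detail worth noting in the init hunk: on builds with CONFIG_ARC_MMU_VER below 4 the preprocessor removes the `if`/`else` pair entirely, leaving only the unconditional fallback, so the pointer is always bound before the flush path in `__ic_line_inv_vaddr_local()` can dereference it. The preprocessed result for such builds is just:

			_cache_line_loop_ic_fn = __cache_line_loop;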