| 1 | +/* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | +/* |
| 3 | + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited |
| 4 | + */ |
| 5 | +#ifndef _ASM_IO_H |
| 6 | +#define _ASM_IO_H |
| 7 | + |
| 8 | +#define ARCH_HAS_IOREMAP_WC |
| 9 | + |
| 10 | +#include <linux/compiler.h> |
| 11 | +#include <linux/kernel.h> |
| 12 | +#include <linux/types.h> |
| 13 | + |
| 14 | +#include <asm/addrspace.h> |
| 15 | +#include <asm/bug.h> |
| 16 | +#include <asm/byteorder.h> |
| 17 | +#include <asm/cpu.h> |
| 18 | +#include <asm/page.h> |
| 19 | +#include <asm/pgtable-bits.h> |
| 20 | +#include <asm/string.h> |
| 21 | + |
| 22 | +/* |
| 23 | + * On LoongArch, the I/O port space is laid out as follows: |
| 24 | + * |
| 25 | + *              |         ....          | |
| 26 | + *              |-----------------------| |
| 27 | + *              | pci io ports(16K~32M) | |
| 28 | + *              |-----------------------| |
| 29 | + *              | isa io ports(0  ~16K) | |
| 30 | + * PCI_IOBASE ->|-----------------------| |
| 31 | + *              |         ....          | |
| 32 | + */ |
| 33 | +#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE))) |
| 34 | +#define PCI_IOSIZE SZ_32M |
| 35 | +#define ISA_IOSIZE SZ_16K |
| 36 | +#define IO_SPACE_LIMIT (PCI_IOSIZE - 1) |
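| | + |
| | +/* |
| | + * A note on how this layout is used (illustrative, based on the generic |
| | + * helpers in <asm-generic/io.h> included below): port accessors such as |
| | + * inb()/outb() turn a port number into an MMIO access at PCI_IOBASE + port, |
| | + * so for example inb(0x3f8) reads the byte at PCI_IOBASE + 0x3f8 (0x3f8 |
| | + * being the legacy UART port). |
| | + */ |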
| 37 | + |
| 38 | +/* |
| 39 | + * Convert a "struct page" pointer to its physical address. |
| 40 | + */ |
| 41 | +#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT) |
| 42 | + |
| 43 | +extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size); |
| 44 | +extern void __init early_iounmap(void __iomem *addr, unsigned long size); |
| 45 | + |
| 46 | +#define early_memremap early_ioremap |
| 47 | +#define early_memunmap early_iounmap |
| 48 | + |
| 49 | +static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, |
| 50 | + unsigned long prot_val) |
| 51 | +{ |
| 52 | + if (prot_val == _CACHE_CC) |
| 53 | + return (void __iomem *)(unsigned long)(CACHE_BASE + offset); |
| 54 | + else |
| 55 | + return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset); |
| 56 | +} |
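| | + |
| | +/* |
| | + * Note (illustrative): CACHE_BASE and UNCACHE_BASE (from <asm/addrspace.h>, |
| | + * included above) are the bases of the cached and uncached direct-mapped |
| | + * windows, so the translation above is plain address arithmetic; no page |
| | + * tables are set up, which is also why iounmap() further down has nothing |
| | + * to undo. |
| | + */ |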
| 57 | + |
| 58 | +/* |
| 59 | + * ioremap - map bus memory into CPU space |
| 60 | + * @offset: bus address of the memory |
| 61 | + * @size: size of the resource to map |
| 62 | + * |
| 63 | + * ioremap performs a platform specific sequence of operations to |
| 64 | + * make bus memory CPU accessible via the readb/readw/readl/writeb/ |
| 65 | + * writew/writel functions and the other mmio helpers. The returned |
| 66 | + * address is not guaranteed to be usable directly as a virtual |
| 67 | + * address. |
| 68 | + */ |
| 69 | +#define ioremap(offset, size) \ |
| 70 | + ioremap_prot((offset), (size), _CACHE_SUC) |
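| | + |
| | +/* |
| | + * Illustrative usage (a sketch, not part of this header): a driver normally |
| | + * maps its MMIO window once and then uses the generic accessors; CTRL_REG |
| | + * and STATUS_REG below are made-up register offsets: |
| | + * |
| | + *	void __iomem *regs = ioremap(res->start, resource_size(res)); |
| | + * |
| | + *	if (!regs) |
| | + *		return -ENOMEM; |
| | + *	writel(1, regs + CTRL_REG); |
| | + *	status = readl(regs + STATUS_REG); |
| | + *	iounmap(regs); |
| | + */ |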
| 71 | + |
| 72 | +/* |
| 73 | + * ioremap_wc - map bus memory into CPU space |
| 74 | + * @offset: bus address of the memory |
| 75 | + * @size: size of the resource to map |
| 76 | + * |
| 77 | + * ioremap_wc performs a platform specific sequence of operations to |
| 78 | + * make bus memory CPU accessible via the readb/readw/readl/writeb/ |
| 79 | + * writew/writel functions and the other mmio helpers. The returned |
| 80 | + * address is not guaranteed to be usable directly as a virtual |
| 81 | + * address. |
| 82 | + * |
| 83 | + * This version of ioremap ensures that the memory is marked uncacheable |
| 84 | + * but accelerated by means of the write-combining feature. It is |
| 85 | + * specifically useful for PCIe prefetchable windows, which may vastly |
| 86 | + * improve communication performance. If it is determined at boot time |
| 87 | + * that the CPU's CCA does not support WUC, this method falls back to |
| 88 | + * the _CACHE_SUC option (see cpu_probe()). |
| 89 | + */ |
| 90 | +#define ioremap_wc(offset, size) \ |
| 91 | + ioremap_prot((offset), (size), _CACHE_WUC) |
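| | + |
| | +/* |
| | + * Illustrative usage (a sketch): a typical consumer is a framebuffer in a |
| | + * prefetchable PCI BAR, mapped write-combined with the usual PCI helpers: |
| | + * |
| | + *	fb = ioremap_wc(pci_resource_start(pdev, 0), |
| | + *			pci_resource_len(pdev, 0)); |
| | + */ |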
| 92 | + |
| 93 | +/* |
| 94 | + * ioremap_cache - map bus memory into CPU space |
| 95 | + * @offset: bus address of the memory |
| 96 | + * @size: size of the resource to map |
| 97 | + * |
| 98 | + * ioremap_cache performs a platform specific sequence of operations to |
| 99 | + * make bus memory CPU accessible via the readb/readw/readl/writeb/ |
| 100 | + * writew/writel functions and the other mmio helpers. The returned |
| 101 | + * address is not guaranteed to be usable directly as a virtual |
| 102 | + * address. |
| 103 | + * |
| 104 | + * This version of ioremap ensures that the memory is marked cacheable by |
| 105 | + * the CPU. It also enables full write-combining. This is useful for some |
| 106 | + * memory-like regions on I/O buses. |
| 107 | + */ |
| 108 | +#define ioremap_cache(offset, size) \ |
| 109 | + ioremap_prot((offset), (size), _CACHE_CC) |
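| | + |
| | +/* |
| | + * Illustrative usage (a sketch): a RAM-like region on a device, such as an |
| | + * on-card buffer, could be mapped with ioremap_cache() and then accessed |
| | + * with the string MMIO helpers declared further down. |
| | + */ |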
| 110 | + |
| 111 | +static inline void iounmap(const volatile void __iomem *addr) |
| 112 | +{ |
| 113 | +} |
| 114 | + |
| 115 | +#define mmiowb() asm volatile ("dbar 0" ::: "memory") |
| 116 | + |
| 117 | +/* |
| 118 | + * String version of I/O memory access operations. |
| 119 | + */ |
| 120 | +extern void __memset_io(volatile void __iomem *dst, int c, size_t count); |
| 121 | +extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count); |
| 122 | +extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count); |
| 123 | +#define memset_io(c, v, l) __memset_io((c), (v), (l)) |
| 124 | +#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l)) |
| 125 | +#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l)) |
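| | + |
| | +/* |
| | + * Illustrative usage (a sketch): memcpy_toio(regs + FIFO_OFF, buf, len) |
| | + * copies a kernel buffer into device memory in one call; FIFO_OFF is a |
| | + * made-up offset into a mapping obtained from ioremap() above. |
| | + */ |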
| 126 | + |
| 127 | +#include <asm-generic/io.h> |
| 128 | + |
| 129 | +#endif /* _ASM_IO_H */ |