/*
 * This file is part of the coreboot project.
 *
 * Copyright 2014 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __ARCH_ARM64_MMU_H__
#define __ARCH_ARM64_MMU_H__

#include <libpayload.h>

struct mmu_memrange {
	uint64_t base;
	uint64_t size;
	uint64_t type;
};

struct mmu_ranges {
	struct mmu_memrange entries[SYSINFO_MAX_MEM_RANGES];
	size_t used;
};

/*
 * Symbols taken from the linker script.
 * They mark the start and end of the region used by the payload.
 */
extern char _start[], _end[];

/* IMPORTANT!
 * Assumptions made:
 * - Granule size is 64KiB
 * - Virtual addresses are 33 bits wide
 * All the calculations for the L1, L2 and L3 tables are based on these
 * assumptions. If these values are changed, recalculate the macros below
 * as well.
 */

/* Memory attributes for mmap regions
 * These attributes act as tag values for memrange regions
 */

#define TYPE_NORMAL_MEM 1
#define TYPE_DEV_MEM 2
#define TYPE_DMA_MEM 3
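
/*
 * Illustrative sketch (not part of the API): how a caller might tag a region
 * before handing it to mmu_init(). The base/size values are made up for the
 * example.
 *
 *	struct mmu_ranges ranges = { .used = 0 };
 *
 *	ranges.entries[ranges.used].base = 0x80000000;
 *	ranges.entries[ranges.used].size = 0x40000000;
 *	ranges.entries[ranges.used].type = TYPE_NORMAL_MEM;
 *	ranges.used++;
 *
 *	mmu_init(&ranges);
 */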

/* Descriptor attributes */

#define INVALID_DESC 0x0
#define BLOCK_DESC 0x1
#define TABLE_DESC 0x3
#define PAGE_DESC 0x3

/* Block descriptor */
#define BLOCK_NS (1 << 5)

#define BLOCK_AP_RW (0 << 7)
#define BLOCK_AP_RO (1 << 7)

#define BLOCK_ACCESS (1 << 10)

/* XLAT Table Init Attributes */

#define VA_START 0x0
/* If BITS_PER_VA or GRANULE_SIZE is changed, recalculate and update the
   macros that follow. */
#define BITS_PER_VA 33
/* A granule size of 64KiB is used. */
#define MIN_64_BIT_ADDR (1UL << 32)
#define XLAT_TABLE_MASK ~(0xffffUL)
#define GRANULE_SIZE_SHIFT 16
#define GRANULE_SIZE (1 << GRANULE_SIZE_SHIFT)
#define GRANULE_SIZE_MASK ((1 << 16) - 1)

#define L1_ADDR_SHIFT 42
#define L2_ADDR_SHIFT 29
#define L3_ADDR_SHIFT 16

#define L1_ADDR_MASK (0UL << L1_ADDR_SHIFT)
#define L2_ADDR_MASK (0xfUL << L2_ADDR_SHIFT)
#define L3_ADDR_MASK (0x1fffUL << L3_ADDR_SHIFT)
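
/*
 * How the address split above follows from the assumptions (for reference):
 * with a 64KiB granule, the low 16 bits of a VA are the offset within a page,
 * and each 64KiB translation table holds 2^(16 - 3) = 8192 eight-byte entries,
 * i.e. 13 index bits per level. L3 therefore indexes VA bits [28:16]
 * (mask 0x1fff), L2 indexes VA bits [41:29], and L1 would index bits [47:42].
 * With BITS_PER_VA = 33, only VA bits [32:29] are left for L2 (mask 0xf) and
 * no bits remain for L1 (mask 0), which is why the walk starts at level 2
 * (see INIT_LEVEL below).
 */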

/* Dependent on BITS_PER_VA and GRANULE_SIZE */
#define INIT_LEVEL 2
#define XLAT_MAX_LEVEL 3

/* Each entry in XLAT table is 8 bytes */
#define XLAT_ENTRY_SHIFT 3
#define XLAT_ENTRY_SIZE (1 << XLAT_ENTRY_SHIFT)

#define XLAT_TABLE_SHIFT GRANULE_SIZE_SHIFT
#define XLAT_TABLE_SIZE (1 << XLAT_TABLE_SHIFT)

#define XLAT_NUM_ENTRIES_SHIFT (XLAT_TABLE_SHIFT - XLAT_ENTRY_SHIFT)
#define XLAT_NUM_ENTRIES (1 << XLAT_NUM_ENTRIES_SHIFT)
#define L3_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT)
#define L2_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT + XLAT_NUM_ENTRIES_SHIFT)
#define L1_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT + XLAT_NUM_ENTRIES_SHIFT * 2)

/* These macros give the size of the region addressed by each entry of an xlat
   table at any given level */
#define L3_XLAT_SIZE (1UL << L3_XLAT_SIZE_SHIFT)
#define L2_XLAT_SIZE (1UL << L2_XLAT_SIZE_SHIFT)
#define L1_XLAT_SIZE (1UL << L1_XLAT_SIZE_SHIFT)
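
/*
 * With the values above, each L3 entry maps 64KiB (2^16), each L2 entry maps
 * 512MiB (2^29) and each L1 entry maps 4TiB (2^42), matching the
 * L*_ADDR_SHIFT definitions.
 */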

/* Block indices required for MAIR */
#define BLOCK_INDEX_MEM_DEV_NGNRNE 0
#define BLOCK_INDEX_MEM_DEV_NGNRE 1
#define BLOCK_INDEX_MEM_DEV_GRE 2
#define BLOCK_INDEX_MEM_NORMAL_NC 3
#define BLOCK_INDEX_MEM_NORMAL 4

#define BLOCK_INDEX_SHIFT 2

/* MAIR attributes */
#define MAIR_ATTRIBUTES ((0x00 << (BLOCK_INDEX_MEM_DEV_NGNRNE*8)) | \
			 (0x04 << (BLOCK_INDEX_MEM_DEV_NGNRE*8)) | \
			 (0x0c << (BLOCK_INDEX_MEM_DEV_GRE*8)) | \
			 (0x44 << (BLOCK_INDEX_MEM_NORMAL_NC*8)) | \
			 (0xffUL << (BLOCK_INDEX_MEM_NORMAL*8)))
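
/*
 * Illustrative sketch (not necessarily how mmu.c composes its descriptors):
 * the attribute index selecting one of the MAIR fields above sits at bits
 * [4:2] of a block/page descriptor, hence BLOCK_INDEX_SHIFT. A normal-memory,
 * read-write, non-secure L3 page entry for a granule-aligned physical
 * address pa could look like:
 *
 *	uint64_t desc = pa | PAGE_DESC | BLOCK_NS | BLOCK_AP_RW |
 *			BLOCK_ACCESS |
 *			(BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT);
 */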

/* TCR attributes */
#define TCR_TOSZ (64 - BITS_PER_VA)

#define TCR_IRGN0_SHIFT 8
#define TCR_IRGN0_NM_NC (0x00 << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NM_WBWAC (0x01 << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NM_WTC (0x02 << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NM_WBNWAC (0x03 << TCR_IRGN0_SHIFT)

#define TCR_ORGN0_SHIFT 10
#define TCR_ORGN0_NM_NC (0x00 << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NM_WBWAC (0x01 << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NM_WTC (0x02 << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NM_WBNWAC (0x03 << TCR_ORGN0_SHIFT)

#define TCR_SH0_SHIFT 12
#define TCR_SH0_NC (0x0 << TCR_SH0_SHIFT)
#define TCR_SH0_OS (0x2 << TCR_SH0_SHIFT)
#define TCR_SH0_IS (0x3 << TCR_SH0_SHIFT)

#define TCR_TG0_SHIFT 14
#define TCR_TG0_4KB (0x0 << TCR_TG0_SHIFT)
#define TCR_TG0_64KB (0x1 << TCR_TG0_SHIFT)
#define TCR_TG0_16KB (0x2 << TCR_TG0_SHIFT)

#define TCR_PS_SHIFT 16
#define TCR_PS_4GB (0x0 << TCR_PS_SHIFT)
#define TCR_PS_64GB (0x1 << TCR_PS_SHIFT)
#define TCR_PS_1TB (0x2 << TCR_PS_SHIFT)
#define TCR_PS_4TB (0x3 << TCR_PS_SHIFT)
#define TCR_PS_16TB (0x4 << TCR_PS_SHIFT)
#define TCR_PS_256TB (0x5 << TCR_PS_SHIFT)

#define TCR_TBI_SHIFT 20
#define TCR_TBI_USED (0x0 << TCR_TBI_SHIFT)
#define TCR_TBI_IGNORED (0x1 << TCR_TBI_SHIFT)
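
/*
 * Illustrative sketch (the exact value is chosen by the MMU setup code, not
 * mandated by this header): a TCR value matching the assumptions above, i.e.
 * a 33-bit VA space with a 64KiB granule, inner-shareable write-back
 * write-allocate cacheable walks and a 64GB physical range, could be
 * composed as:
 *
 *	uint64_t tcr = TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
 *		       TCR_SH0_IS | TCR_TG0_64KB | TCR_PS_64GB | TCR_TBI_USED;
 */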

#define DMA_DEFAULT_SIZE (0x20 * GRANULE_SIZE)
#define TTB_DEFAULT_SIZE 0x100000

#define MB_SIZE (1UL << 20)

/* Initialize the MMU translation tables using mmu_ranges */
uint64_t mmu_init(struct mmu_ranges *mmu_ranges);

/* Enable the MMU using the tables set up by a previous mmu_init(). */
void mmu_enable(void);

/* Disable the MMU */
void mmu_disable(void);

/*
 * Based on the memory ranges provided in the coreboot tables,
 * initialize the mmu_memranges used for MMU initialization.
 * cb_ranges -> memory ranges present in the coreboot tables
 * mmu_ranges -> mmu_memranges initialized by this function
 */
struct mmu_memrange* mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
						  uint64_t ncb,
						  struct mmu_ranges *mmu_ranges);
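
/*
 * Typical call sequence once the coreboot tables have been parsed
 * (illustrative sketch only, assuming the usual lib_sysinfo memrange fields;
 * the real flow lives in the payload and libpayload sources):
 *
 *	struct mmu_ranges ranges;
 *
 *	mmu_init_ranges_from_sysinfo(lib_sysinfo.memrange,
 *				     lib_sysinfo.n_memranges, &ranges);
 *	mmu_init(&ranges);
 *	mmu_enable();
 */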

/*
 * Functions for handling the initialization of memory ranges and enabling the
 * MMU before the coreboot tables are parsed
 */
void mmu_presysinfo_memory_used(uint64_t base, uint64_t size);
void mmu_presysinfo_enable(void);
#endif // __ARCH_ARM64_MMU_H__