/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <arch/asm.h>

/*
 * flush_dcache_all()
 *
 * Flush the whole D-cache.
 *
 * Corrupted registers: x0-x7, x9-x11
 * From: Linux arch/arm64/mm/cache.S
 */
ENTRY(flush_dcache_all)
	dsb	sy			// ensure ordering with previous memory accesses
	mrs	x0, clidr_el1		// read clidr
	and	x3, x0, #0x7000000	// extract loc from clidr
	lsr	x3, x3, #23		// move LoC down to bits [3:1]: x3 = 2 * LoC
	cbz	x3, finished		// if loc is 0, then no need to clean
	mov	x10, #0			// start clean at cache level 0
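	/*
	 * x10 counts cache levels in CSSELR_EL1 format: Level in bits
	 * [3:1], with bit 0 (InD) clear so the data/unified cache is
	 * selected at each level.
	 */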
loop1:
	add	x2, x10, x10, lsr #1	// work out 3x current cache level
	lsr	x1, x0, x2		// extract cache type bits from clidr
	and	x1, x1, #7		// mask of the bits for current cache only
	cmp	x1, #2			// see what cache we have at this level
	b.lt	skip			// skip if no cache, or just i-cache
	mrs	x9, daif		// make CSSELR and CCSIDR access atomic
	msr	daifset, #2		// mask IRQs (Linux uses save_and_disable_irqs here)
	msr	csselr_el1, x10		// select current cache level in csselr
	isb				// isb to sync the new csselr and ccsidr
	mrs	x1, ccsidr_el1		// read the new ccsidr
	msr	daif, x9		// restore interrupt state
	and	x2, x1, #7		// extract the length of the cache lines
	add	x2, x2, #4		// x2 = log2(line size in bytes)
	mov	x4, #0x3ff
	and	x4, x4, x1, lsr #3	// x4 = maximum way number (ways - 1)
	clz	w5, w4			// find bit position of way size increment
	mov	x7, #0x7fff
	and	x7, x7, x1, lsr #13	// x7 = maximum index number (sets - 1)
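	/*
	 * Walk every set and way at this level. Each DC CISW operand is
	 * built as: Level in bits [3:1] (x10), Way shifted up by x5 into
	 * the top bits [31:32-A], and Set shifted up by x2 (log2 of the
	 * line size in bytes).
	 */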
loop2:
	mov	x9, x4			// create working copy of max way size
loop3:
	lsl	x6, x9, x5
	orr	x11, x10, x6		// factor way and cache number into x11
	lsl	x6, x7, x2
	orr	x11, x11, x6		// factor index number into x11
	dc	cisw, x11		// clean & invalidate by set/way
	subs	x9, x9, #1		// decrement the way
	b.ge	loop3
	subs	x7, x7, #1		// decrement the index
	b.ge	loop2
skip:
	add	x10, x10, #2		// increment cache number
	cmp	x3, x10
	b.gt	loop1
finished:
	mov	x10, #0			// switch back to cache level 0
	msr	csselr_el1, x10		// select current cache level in csselr
	dsb	sy
	isb
	ret
ENDPROC(flush_dcache_all)

/*
 * Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
 * known state regarding caches/SCTLR. Completely cleans and invalidates
 * icache/dcache, disables MMU and dcache (if active), and enables unaligned
 * accesses and the icache (if inactive). Clobbers x0-x11.
 */
ENTRY(arm_init_caches)
	/*
	 * The return address is kept in x8 for the whole function. SCTLR is
	 * read into x4 only after the flush below, because flush_dcache_all
	 * clobbers x0-x7 and x9-x11 (which includes x4).
	 */
	mov	x8, x30

	/* FIXME: How to enable branch prediction on ARMv8? */

	/* Flush and invalidate dcache */
	bl	flush_dcache_all

	/* XXX: Assume that we always start running at EL3 */
	mrs	x4, sctlr_el3

	/* Deactivate MMU (0), Alignment Check (1) and DCache (2) */
	and	x4, x4, # ~(1 << 0) & ~(1 << 1) & ~(1 << 2)
	/* Activate ICache (12) already for speed */
	orr	x4, x4, #(1 << 12)
	msr	sctlr_el3, x4

	/* Invalidate icache and TLB for good measure */
	ic	iallu
	tlbi	alle3
	dsb	sy
	isb

	ret	x8
ENDPROC(arm_init_caches)

/* Based on u-boot transition.S */
ENTRY(switch_el3_to_el2)
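	/*
	 * SCR_EL3 = 0x5b1: NS (bit 0), RES1 (bits 5:4), SMD (bit 7, SMC
	 * disabled), HCE (bit 8, HVC enabled) and RW (bit 10, EL2 is
	 * AArch64), per the ARMv8.0 SCR_EL3 layout.
	 */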
	mov	x0, #0x5b1		/* Non-secure EL0/EL1 | HVC | 64bit EL2 */
	msr	scr_el3, x0
	msr	cptr_el3, xzr		/* Disable coprocessor traps to EL3 */
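	/*
	 * 0x33ff sets only the bits that are RES1 in the ARMv8.0 CPTR_EL2
	 * layout (bits [13:12] and [9:0]); TCPAC, TTA and TFP stay clear,
	 * so nothing is trapped to EL2.
	 */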
	mov	x0, #0x33ff
	msr	cptr_el2, x0		/* Disable coprocessor traps to EL2 */

	/* Return to the EL2_SP2 mode from EL3 */
	mov	x0, sp
	msr	sp_el2, x0		/* Migrate SP */
	mrs	x0, vbar_el3
	msr	vbar_el2, x0		/* Migrate VBAR */
	mrs	x0, sctlr_el3
	msr	sctlr_el2, x0		/* Migrate SCTLR */
	mov	x0, #0x3c9
	msr	spsr_el3, x0		/* EL2_SP2 | D | A | I | F */
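	/*
	 * 0x3c9: M[3:0] = 0b1001 selects AArch64 EL2h (SP_EL2), and the
	 * D, A, I, F bits (9:6) keep all exceptions masked after eret.
	 * The eret below returns to the caller's return address at EL2.
	 */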
	msr	elr_el3, x30
	eret
ENDPROC(switch_el3_to_el2)