/*
 * Optimized assembly for low-level CPU operations on ARM64 processors.
 *
 * Copyright (c) 2010 Per Odlund <per.odlund@armagedon.se>
 * Copyright (c) 2014 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <arch/asm.h>
#include <arch/lib_helpers.h>

/*
 * dcache_apply_all \crm
 *
 * Walk every data/unified cache level up to the Level of Coherency (LoC)
 * reported by CLIDR_EL1 and apply "dc \crm" (isw/csw/cisw) to every
 * set/way of each level.  Handles both the classic CCSIDR_EL1 layout and
 * the extended FEAT_CCIDX layout (detected via ID_AA64MMFR2_EL1[23:20]).
 *
 * The macro body ends with "ret", so it must be expanded only as the
 * entire body of a function (see the ENTRY wrappers below).
 *
 * Clobbers: x0-x5, x7-x12, x16, x17, condition flags.
 * Performs no data memory accesses (registers only) — required by
 * callers such as mmu_disable that run it with the cache disabled.
 */
.macro dcache_apply_all crm
	dsb	sy				// complete outstanding writes first
	mrs	x0, clidr_el1			// read CLIDR
	and	w3, w0, #0x07000000		// narrow to LoC (CLIDR[26:24])
	lsr	w3, w3, #23			// left align LoC: w3 = 2 * LoC
	cbz	w3, 5f				// LoC == 0: no cache to maintain, done

	mov	w10, #0				// w10 = 2 * cache level (CSSELR format)
	mov	w8, #1				// w8 = constant 0b1 (shift seed)

	mrs	x12, id_aa64mmfr2_el1		// read ID_AA64MMFR2_EL1
	ubfx	x12, x12, #20, #4		// [23:20] - CCIDX support (0 = classic layout)

1:	//next_level
	add	w2, w10, w10, lsr #1		// calculate 3 * cache level
	lsr	w1, w0, w2			// extract 3-bit cache type for this level
	and	w1, w1, #0x7			// w1 = cache type
	cmp	w1, #2				// is it data or i&d? (type >= 2)
	b.lt	4f				// no cache / I-cache only: skip level
	msr	csselr_el1, x10			// select current cache level
	isb					// sync change of csselr before ccsidr read
	mrs	x1, ccsidr_el1			// w1 = read ccsidr
	and	w2, w1, #7			// w2 = log2(linelen_bytes) - 4
	add	w2, w2, #4			// w2 = log2(linelen_bytes) = set-field shift

	cbz	x12, 11f			// check FEAT_CCIDX for associativity;
						// branch to 11 if FEAT_CCIDX is not implemented
	ubfx	x4, x1, #3, #21			// x4 = associativity CCSIDR_EL1[23:3] (CCIDX)
	b	12f
11:
	ubfx	x4, x1, #3, #10			// x4 = associativity CCSIDR_EL1[12:3]
12:
	clz	w5, w4				// w5 = 32 - log2(ways)
						// (bit position of way in DC operand)
	lsl	w9, w4, w5			// w9 = max way number
						// (aligned for DC)
	lsl	w16, w8, w5			// w16 = amount to decrement (way
						// number per iteration)
2:	//next_way
	cbz	x12, 21f			// check FEAT_CCIDX for numsets;
						// branch to 21 if FEAT_CCIDX is not implemented
	ubfx	x7, x1, #32, #24		// x7(w7) = numsets CCSIDR_EL1[55:32] (CCIDX)
	b	22f
21:
	ubfx	w7, w1, #13, #15		// w7 = numsets CCSIDR_EL1[27:13]
22:
	lsl	w7, w7, w2			// w7 = max set #, DC aligned
	lsl	w17, w8, w2			// w17 = amount to decrement (set
						// number per iteration)

3:	//next_set
	orr	w11, w10, w9			// w11 = combine way # & cache level #
	orr	w11, w11, w7			// ... and set #
	dc	\crm, x11			// clean and/or invalidate line
	subs	w7, w7, w17			// decrement set number
	b.ge	3b				// more sets: next_set
	subs	x9, x9, x16			// decrement way number
	b.ge	2b				// more ways: next_way

4:	//skip
	add	w10, w10, #2			// increment 2 * cache level
	cmp	w3, w10				// went beyond LoC?
	b.gt	1b				// next_level
5:	//done
	dsb	sy				// ensure maintenance is visible
	isb
	ret
.endm
103
/*
 * void dcache_invalidate_all(void)
 * Invalidate every data/unified cache level up to the LoC by set/way
 * (DC ISW), discarding any dirty lines without writing them back.
 */
ENTRY(dcache_invalidate_all)
	dcache_apply_all isw
ENDPROC(dcache_invalidate_all)
107
/*
 * void dcache_clean_all(void)
 * Clean every data/unified cache level up to the LoC by set/way
 * (DC CSW), writing dirty lines back while keeping them valid.
 */
ENTRY(dcache_clean_all)
	dcache_apply_all csw
ENDPROC(dcache_clean_all)
111
/*
 * void dcache_clean_invalidate_all(void)
 * Clean and invalidate every data/unified cache level up to the LoC by
 * set/way (DC CISW): dirty lines are written back, then all lines dropped.
 */
ENTRY(dcache_clean_invalidate_all)
	dcache_apply_all cisw
ENDPROC(dcache_clean_invalidate_all)
Julius Wernerbf33b032020-02-14 12:42:01 -0800115
/* This must be implemented in assembly to ensure there are no accesses to
   memory (e.g. the stack) in between disabling and flushing the cache. */
/*
 * void mmu_disable(void)
 *
 * Turn off the MMU and data cache at EL2, then clean+invalidate the
 * entire data cache so no dirty lines are stranded behind the disabled
 * cache.  x30 (LR) is saved before the cache is disabled and reloaded
 * only after the flush, so no stack access happens in the critical
 * window.  Clobbers x0, x1 plus everything dcache_apply_all clobbers.
 *
 * Fix vs. previous revision: the LR was stashed at [sp, #-8] without
 * moving sp.  AArch64 has no red zone, so data below sp may be
 * clobbered by any exception taken in between; use a real pre-indexed
 * push instead, which also keeps sp 16-byte aligned per AAPCS64.
 */
ENTRY(mmu_disable)
	str	x30, [sp, #-16]!	// push LR (cache still on; flushed below)
	mrs	x0, sctlr_el2
	mov	x1, #~(SCTLR_C | SCTLR_M)
	and	x0, x0, x1		// clear cache-enable and MMU-enable bits
	msr	sctlr_el2, x0
	isb				// change takes effect before the flush
	bl	dcache_clean_invalidate_all
	ldr	x30, [sp], #16		// pop LR (read bypasses the now-off cache)
	ret
ENDPROC(mmu_disable)