source: mainline/kernel/arch/arm32/src/cpu/cpu.c@d701672

Last change on this file since d701672 was 93d8022, checked in by Jakub Jermar <jakub@…>, 10 years ago

ARM cache handling fixes

  • boot: Use the normal outer and inner WBWA attribute also for ARMv6
  • kernel: Fix comment in page_armv6.h:set_pt_level0_flags(). TEX=5, C=0, B=1 encodes outer and inner WBWA normal memory.
  • Treat all normal memory as non shareable also on ARMv6.
  • Make sure D$ is invalidated in cpu_arch_init() before it is enabled.
  • For non-cacheable ARMv6+ memory, use device memory type instead of strongly-ordered.
  • For ARMv5-, use either cached/buffered (CB=0b11) or uncached/unbuffered (CB=0b00).
  • Property mode set to 100644
File size: 10.5 KB
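The attribute encodings mentioned in the changelog come from the ARM short-descriptor page table format: with TEX remap disabled, TEX[2] = 1 selects cacheable Normal memory, and TEX[1:0] and C:B give the outer and inner cache policy, where 0b01 means write-back write-allocate. The sketch below only illustrates those encodings; the macro names and the small-page bit positions used here are assumptions made for the example, not the actual helpers from page_armv6.h.

    /* Illustration only; not the page_armv6.h helpers. Small-page layout
     * assumed: TEX[2:0] in bits [8:6], C in bit 3, B in bit 2. */
    #define PTE_B        (1U << 2)             /* bufferable */
    #define PTE_C        (1U << 3)             /* cacheable */
    #define PTE_TEX(x)   (((x) & 0x7U) << 6)   /* TEX[2:0] */

    /* ARMv6+: TEX = 5, C = 0, B = 1 encodes outer and inner write-back
     * write-allocate Normal memory (TEX[2] = 1 cacheable Normal,
     * TEX[1:0] = 0b01 outer WBWA, C:B = 0b01 inner WBWA). */
    #define PTE_ATTRS_NORMAL_WBWA   (PTE_TEX(5) | PTE_B)

    /* ARMv5 and older have no TEX field: CB = 0b11 is cached/buffered,
     * CB = 0b00 is uncached/unbuffered. */
    #define PTE_ATTRS_V5_CACHED     (PTE_C | PTE_B)
    #define PTE_ATTRS_V5_UNCACHED   0U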
/*
 * Copyright (c) 2007 Michal Kebrt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup arm32
 * @{
 */
/** @file
 * @brief CPU identification.
 */

#include <arch/cache.h>
#include <arch/cpu.h>
#include <arch/cp15.h>
#include <cpu.h>
#include <arch.h>
#include <print.h>

#ifdef CONFIG_FPU
#include <arch/fpu_context.h>
#endif

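/** Returns the base-2 logarithm of @a val, rounded up for values that are
 * not a power of two (e.g. log2(4) == 2, log2(5) == 3).
 */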
static inline unsigned log2(unsigned val)
{
	unsigned log = 0;
	--val;
	while (val) {
		++log;
		val >>= 1;
	}
	return log;
}

static unsigned dcache_ways(unsigned level);
static unsigned dcache_sets(unsigned level);
static unsigned dcache_linesize_log(unsigned level);


/** Implementers (vendor) names */
static const char *implementer(unsigned id)
{
	switch (id) {
	case 0x41: return "ARM Limited";
	case 0x44: return "Digital Equipment Corporation";
	case 0x4d: return "Motorola, Freescale Semiconductor Inc.";
	case 0x51: return "Qualcomm Inc.";
	case 0x56: return "Marvell Semiconductor Inc.";
	case 0x69: return "Intel Corporation";
	}
	return "Unknown implementer";
}

/** Architecture names */
static const char *architecture_string(cpu_arch_t *arch)
{
	static const char *arch_data[] = {
		"ARM",      /* 0x0 */
		"ARMv4",    /* 0x1 */
		"ARMv4T",   /* 0x2 */
		"ARMv5",    /* 0x3 */
		"ARMv5T",   /* 0x4 */
		"ARMv5TE",  /* 0x5 */
		"ARMv5TEJ", /* 0x6 */
		"ARMv6"     /* 0x7 */
	};
	if (arch->arch_num < (sizeof(arch_data) / sizeof(arch_data[0])))
		return arch_data[arch->arch_num];
	else
		return arch_data[0];
}


/** Retrieves processor identification from CP15 register 0.
 *
 * @param cpu Structure for storing CPU identification.
 * See page B4-1630 of ARM Architecture Reference Manual.
 */
static void arch_cpu_identify(cpu_arch_t *cpu)
{
	const uint32_t ident = MIDR_read();

	cpu->imp_num = (ident >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
	cpu->variant_num = (ident >> MIDR_VARIANT_SHIFT) & MIDR_VARIANT_MASK;
	cpu->arch_num = (ident >> MIDR_ARCHITECTURE_SHIFT) & MIDR_ARCHITECTURE_MASK;
	cpu->prim_part_num = (ident >> MIDR_PART_NUMBER_SHIFT) & MIDR_PART_NUMBER_MASK;
	cpu->rev_num = (ident >> MIDR_REVISION_SHIFT) & MIDR_REVISION_MASK;

	// TODO CPUs with arch_num == 0xf use CPUID scheme for identification
	cpu->dcache_levels = dcache_levels();

	for (unsigned i = 0; i < cpu->dcache_levels; ++i) {
		cpu->dcache[i].ways = dcache_ways(i);
		cpu->dcache[i].sets = dcache_sets(i);
		cpu->dcache[i].way_shift = 31 - log2(cpu->dcache[i].ways);
		cpu->dcache[i].set_shift = dcache_linesize_log(i);
		cpu->dcache[i].line_size = 1 << dcache_linesize_log(i);
		printf("Found DCache L%u: %u-way, %u sets, %u byte lines "
		    "(shifts: w%u, s%u)\n", i + 1, cpu->dcache[i].ways,
		    cpu->dcache[i].sets, cpu->dcache[i].line_size,
		    cpu->dcache[i].way_shift, cpu->dcache[i].set_shift);
	}
}

/** Enables unaligned access and caching for armv6+ */
void cpu_arch_init(void)
{
	uint32_t control_reg = SCTLR_read();

	dcache_invalidate();
	read_barrier();

	/* Turn off TEX remap, RAZ/WI prior to armv7 */
	control_reg &= ~SCTLR_TEX_REMAP_EN_FLAG;
	/* Turn off accessed flag, RAZ/WI prior to armv7 */
	control_reg &= ~(SCTLR_ACCESS_FLAG_EN_FLAG | SCTLR_HW_ACCESS_FLAG_EN_FLAG);

	/* Unaligned access is supported on armv6+ */
#if defined(PROCESSOR_ARCH_armv7_a) || defined(PROCESSOR_ARCH_armv6)
	/* Enable unaligned access: RAZ/WI prior to armv6,
	 * switchable on armv6, RAO/WI writes on armv7,
	 * see ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition
	 * L.3.1 (p. 2456). */
	control_reg |= SCTLR_UNALIGNED_EN_FLAG;
	/* Disable alignment checks; this makes unaligned access undefined,
	 * unless the U bit is set. */
	control_reg &= ~SCTLR_ALIGN_CHECK_EN_FLAG;
	/* Enable caching. On ARM prior to armv7 there is only one level
	 * of caches. The data cache is coherent:
	 * "This means that the behavior of accesses from the same observer to
	 * different VAs, that are translated to the same PA
	 * with the same memory attributes, is fully coherent."
	 * ARM Architecture Reference Manual ARMv7-A and ARMv7-R Edition
	 * B3.11.1 (p. 1383).
	 * We are safe to turn this on. For armv6 see ch. L.6.2 (p. 2469).
	 * The L2 cache for armv7 is enabled by default (i.e. controlled by
	 * this flag).
	 */
	control_reg |= SCTLR_CACHE_EN_FLAG;
#endif
#ifdef PROCESSOR_ARCH_armv7_a
	/* ICache coherency is elaborated on in barrier.h.
	 * VIPT and PIPT caches need maintenance only on code modification,
	 * so it should be safe for general use.
	 * Enable branch predictors too, as they follow the same rules
	 * as the ICache and they can be flushed together.
	 */
	if ((CTR_read() & CTR_L1I_POLICY_MASK) != CTR_L1I_POLICY_AIVIVT) {
		control_reg |=
		    SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG;
	} else {
		control_reg &=
		    ~(SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG);
	}
#endif
	SCTLR_write(control_reg);

#ifdef CONFIG_FPU
	fpu_setup();
#endif

#ifdef PROCESSOR_ARCH_armv7_a
	if ((ID_PFR1_read() & ID_PFR1_GEN_TIMER_EXT_MASK) !=
	    ID_PFR1_GEN_TIMER_EXT) {
		PMCR_write(PMCR_read() | PMCR_E_FLAG | PMCR_D_FLAG);
		PMCNTENSET_write(PMCNTENSET_CYCLE_COUNTER_EN_FLAG);
	}
#endif
}

/** Retrieves processor identification and stores it to #CPU.arch */
void cpu_identify(void)
{
	arch_cpu_identify(&CPU->arch);
}

/** Prints CPU identification. */
void cpu_print_report(cpu_t *m)
{
	printf("cpu%d: vendor=%s, architecture=%s, part number=%x, "
	    "variant=%x, revision=%x\n",
	    m->id, implementer(m->arch.imp_num),
	    architecture_string(&m->arch), m->arch.prim_part_num,
	    m->arch.variant_num, m->arch.rev_num);
}

/** See chapter B4.1.19 of ARM Architecture Reference Manual */
static unsigned dcache_linesize_log(unsigned level)
{
#ifdef PROCESSOR_ARCH_armv7_a
	CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
	const uint32_t ccsidr = CCSIDR_read();
	return CCSIDR_LINESIZE_LOG(ccsidr);
#endif
	return 0;
}

/** See chapter B4.1.19 of ARM Architecture Reference Manual */
static unsigned dcache_ways(unsigned level)
{
#ifdef PROCESSOR_ARCH_armv7_a
	CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
	const uint32_t ccsidr = CCSIDR_read();
	return CCSIDR_WAYS(ccsidr);
#endif
	return 0;
}

/** See chapter B4.1.19 of ARM Architecture Reference Manual */
static unsigned dcache_sets(unsigned level)
{
#ifdef PROCESSOR_ARCH_armv7_a
	CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
	const uint32_t ccsidr = CCSIDR_read();
	return CCSIDR_SETS(ccsidr);
#endif
	return 0;
}

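/** Returns the number of data/unified cache levels reported by CLIDR on
 * ARMv7; returns 0 on other architectures.
 */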
unsigned dcache_levels(void)
{
	unsigned levels = 0;
#ifdef PROCESSOR_ARCH_armv7_a
	const uint32_t val = CLIDR_read();
	for (unsigned i = 0; i < 8; ++i) {
		const unsigned ctype = CLIDR_CACHE(i, val);
		switch (ctype) {
		case CLIDR_DCACHE_ONLY:
		case CLIDR_SEP_CACHE:
		case CLIDR_UNI_CACHE:
			++levels;
		default:
			(void)0;
		}
	}
#endif
	return levels;
}

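/** Cleans (and optionally also invalidates) one cache level by set/way.
 *
 * Walks all ways and sets of the given level and issues DCCSW or DCCISW
 * for each line, using the supplied cache geometry.
 */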
static void dcache_clean_manual(unsigned level, bool invalidate,
    unsigned ways, unsigned sets, unsigned way_shift, unsigned set_shift)
{
	for (unsigned i = 0; i < ways; ++i) {
		for (unsigned j = 0; j < sets; ++j) {
			const uint32_t val =
			    ((level & 0x7) << 1) |
			    (j << set_shift) | (i << way_shift);
			if (invalidate)
				DCCISW_write(val);
			else
				DCCSW_write(val);
		}
	}
}

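/** Cleans all levels of the data/unified cache by set/way. */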
void dcache_flush(void)
{
	/* See ARM Architecture Reference Manual ch. B4.2.1 p. B4-1724 */
	const unsigned levels = dcache_levels();
	for (unsigned i = 0; i < levels; ++i) {
		const unsigned ways = dcache_ways(i);
		const unsigned sets = dcache_sets(i);
		const unsigned way_shift = 32 - log2(ways);
		const unsigned set_shift = dcache_linesize_log(i);
		dcache_clean_manual(i, false, ways, sets, way_shift, set_shift);
	}
}

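/** Cleans and invalidates all levels of the data/unified cache by set/way. */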
void dcache_flush_invalidate(void)
{
	/* See ARM Architecture Reference Manual ch. B4.2.1 p. B4-1724 */
	const unsigned levels = dcache_levels();
	for (unsigned i = 0; i < levels; ++i) {
		const unsigned ways = dcache_ways(i);
		const unsigned sets = dcache_sets(i);
		const unsigned way_shift = 32 - log2(ways);
		const unsigned set_shift = dcache_linesize_log(i);
		dcache_clean_manual(i, true, ways, sets, way_shift, set_shift);
	}
}


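/** Cleans the data caches of the current CPU, using the geometry detected
 * by arch_cpu_identify().
 */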
void cpu_dcache_flush(void)
{
	for (unsigned i = 0; i < CPU->arch.dcache_levels; ++i)
		dcache_clean_manual(i, false,
		    CPU->arch.dcache[i].ways, CPU->arch.dcache[i].sets,
		    CPU->arch.dcache[i].way_shift, CPU->arch.dcache[i].set_shift);
}

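/** Cleans and invalidates the data caches of the current CPU, using the
 * geometry detected by arch_cpu_identify().
 */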
void cpu_dcache_flush_invalidate(void)
{
	const unsigned levels = dcache_levels();
	for (unsigned i = 0; i < levels; ++i)
		dcache_clean_manual(i, true,
		    CPU->arch.dcache[i].ways, CPU->arch.dcache[i].sets,
		    CPU->arch.dcache[i].way_shift, CPU->arch.dcache[i].set_shift);
}

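/** Invalidates the entire instruction cache. */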
void icache_invalidate(void)
{
#if defined(PROCESSOR_ARCH_armv7_a)
	ICIALLU_write(0);
#else
	ICIALL_write(0);
#endif
}

#if !defined(PROCESSOR_ARCH_armv7_a)
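/** Tells whether the CPU has a unified cache, based on the CTR register
 * (pre-ARMv7 only).
 */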
static bool cache_is_unified(void)
{
	if (MIDR_read() != CTR_read()) {
		/* We have the CTR register */
		return (CTR_read() & CTR_SEP_FLAG) != CTR_SEP_FLAG;
	} else {
		panic("Unknown cache type");
	}
}
#endif

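/** Invalidates the whole data (or unified) cache.
 *
 * On ARMv7 this is done by cleaning and invalidating all levels by set/way.
 */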
void dcache_invalidate(void)
{
#if defined(PROCESSOR_ARCH_armv7_a)
	dcache_flush_invalidate();
#else
	if (cache_is_unified())
		CIALL_write(0);
	else
		DCIALL_write(0);
#endif
}

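/** Cleans the data (or unified) cache line containing @a mva to the point
 * of unification.
 */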
void dcache_clean_mva_pou(uintptr_t mva)
{
#if defined(PROCESSOR_ARCH_armv7_a)
	DCCMVAU_write(mva);
#else
	if (cache_is_unified())
		CCMVA_write(mva);
	else
		DCCMVA_write(mva);
#endif
}

/** @}
 */