source: mainline/kernel/arch/mips32/src/debug/stacktrace.c

Last change on this file was bab75df6, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Let kernel code get printf via the standard stdio header. Clean up unused includes.

/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_mips32
 * @{
 */
/** @file
 */

/*
 * This stack tracing code is based on the suggested algorithm described on
 * pages 3-27 and 3-28 of:
 *
 * SYSTEM V
 * APPLICATION BINARY INTERFACE
 *
 * MIPS RISC Processor
 * Supplement
 * 3rd Edition
 *
 * Unfortunately, GCC generates code which is not entirely compliant with this
 * method. For example, it places the "jr ra" instruction quite arbitrarily in
 * the middle of the function, which makes the original algorithm inapplicable.
 *
 * We deal with this problem by simply not using those parts of the algorithm
 * that rely on the "jr ra" instruction occurring in the last basic block of a
 * function. This still gives us a usable, but less reliable, stack tracer:
 * under some circumstances it can become confused and produce an incorrect or
 * incomplete stack trace. We apply extra sanity checks so that the algorithm
 * remains safe and should not crash the system.
 *
 * Even though not perfect, our solution is pretty lightweight, especially when
 * compared with a prospective alternative based on additional debugging
 * information stored directly in the kernel image.
 */
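
/*
 * For illustration only (the frame size and offset below are made up, not
 * taken from any particular binary), the scanner in this file expects
 * GCC-generated functions to open and close roughly like this:
 *
 *	addiu $sp, $sp, -32	# allocate a 32-byte stack frame
 *	sw    $ra, 28($sp)	# save the return address in the first basic block
 *	...
 *	lw    $ra, 28($sp)
 *	jr    $ra
 *	addiu $sp, $sp, 32	# frame released in the delay slot
 *
 * scan() walks backwards from ctx->pc to the "addiu $sp, $sp, -imm" that
 * allocated the frame, and then forwards through the first basic block to the
 * "sw $ra" that saved the return address.
 */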

#include <stacktrace.h>
#include <stddef.h>
#include <syscall/copy.h>
#include <typedefs.h>
#include <arch/debugger.h>

#define R0 0U
#define SP 29U
#define RA 31U

#define OP_SHIFT 26
#define RS_SHIFT 21
#define RT_SHIFT 16
#define RD_SHIFT 11

#define HINT_SHIFT 6
#define BASE_SHIFT RS_SHIFT
#define IMM_SHIFT 0
#define OFFSET_SHIFT IMM_SHIFT

#define RS_MASK (0x1f << RS_SHIFT)
#define RT_MASK (0x1f << RT_SHIFT)
#define RD_MASK (0x1f << RD_SHIFT)
#define HINT_MASK (0x1f << HINT_SHIFT)
#define BASE_MASK RS_MASK
#define IMM_MASK (0xffff << IMM_SHIFT)
#define OFFSET_MASK IMM_MASK

#define RS_GET(inst) (((inst) & RS_MASK) >> RS_SHIFT)
#define RD_GET(inst) (((inst) & RD_MASK) >> RD_SHIFT)
#define IMM_GET(inst) (int16_t)(((inst) & IMM_MASK) >> IMM_SHIFT)
#define BASE_GET(inst) RS_GET(inst)
#define OFFSET_GET(inst) IMM_GET(inst)

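/*
 * Templates for the MIPS32 instructions the tracer matches: "addu rd, $sp,
 * $zero" and "addu $sp, rs, $zero" (SPECIAL, function 0x21), "addi/addiu $sp,
 * $sp, imm" (opcodes 0x8/0x9), "jr $ra" (SPECIAL, function 0x8) and
 * "sw $ra, offset(base)" (opcode 0x2b). The IS_* predicates below mask out
 * the fields that are allowed to vary.
 */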
#define ADDU_R_SP_R0_TEMPL \
	((0x0 << OP_SHIFT) | (SP << RS_SHIFT) | (R0 << RT_SHIFT) | 0x21)
#define ADDU_SP_R_R0_TEMPL \
	((0x0 << OP_SHIFT) | (SP << RD_SHIFT) | (R0 << RT_SHIFT) | 0x21)
#define ADDI_SP_SP_IMM_TEMPL \
	((0x8 << OP_SHIFT) | (SP << RS_SHIFT) | (SP << RT_SHIFT))
#define ADDIU_SP_SP_IMM_TEMPL \
	((0x9 << OP_SHIFT) | (SP << RS_SHIFT) | (SP << RT_SHIFT))
#define JR_RA_TEMPL \
	((0x0 << OP_SHIFT) | (RA << RS_SHIFT) | (0x0 << HINT_SHIFT) | 0x8)
#define SW_RA_TEMPL \
	((0x2b << OP_SHIFT) | (RA << RT_SHIFT))

#define IS_ADDU_R_SP_R0(inst) \
	(((inst) & ~RD_MASK) == ADDU_R_SP_R0_TEMPL)
#define IS_ADDU_SP_R_R0(inst) \
	(((inst) & ~RS_MASK) == ADDU_SP_R_R0_TEMPL)
#define IS_ADDI_SP_SP_IMM(inst) \
	(((inst) & ~IMM_MASK) == ADDI_SP_SP_IMM_TEMPL)
#define IS_ADDIU_SP_SP_IMM(inst) \
	(((inst) & ~IMM_MASK) == ADDIU_SP_SP_IMM_TEMPL)
#define IS_JR_RA(inst) \
	(((inst) & ~HINT_MASK) == JR_RA_TEMPL)
#define IS_SW_RA(inst) \
	(((inst) & ~(BASE_MASK | OFFSET_MASK)) == SW_RA_TEMPL)

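/*
 * Boundaries of the kernel text section, within which it is safe to read and
 * decode instructions; the symbols themselves are expected to be provided by
 * the linker script.
 */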
extern char ktext_start;
extern char ktext_end;

static bool bounds_check(uintptr_t pc)
{
	return (pc >= (uintptr_t) &ktext_start) &&
	    (pc < (uintptr_t) &ktext_end);
}

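/** Decode the function containing ctx->pc to find the caller's frame.
 *
 * @param ctx     Tracing context (pc, fp and, optionally, istate).
 * @param prev_fp If not NULL, filled in with the caller's frame pointer.
 * @param prev_ra If not NULL, filled in with the saved return address.
 *
 * @return True on success, false if the frame could not be decoded safely.
 */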
static bool
scan(stack_trace_context_t *ctx, uintptr_t *prev_fp, uintptr_t *prev_ra)
{
	uint32_t *inst = (void *) ctx->pc;
	bool has_fp = false;
	size_t frame_size;
	unsigned int fp = SP;

	do {
		inst--;
		if (!bounds_check((uintptr_t) inst))
			return false;
#if 0
		/*
		 * This is one of the situations in which the theory (ABI) does
		 * not meet the practice (GCC). GCC simply does not place the
		 * JR $ra instruction as dictated by the ABI, rendering the
		 * official stack tracing algorithm somewhat inapplicable.
		 */

		if (IS_ADDU_R_SP_R0(*inst)) {
			uint32_t *cur;
			fp = RD_GET(*inst);
			/*
			 * We have a candidate for the frame pointer.
			 */

			/* Seek to the end of this function. */
			cur = inst + 1;
			while (!IS_JR_RA(*cur))
				cur++;

			/* Scan the last basic block. */
			for (cur--; !is_jump(*(cur - 1)); cur--) {
				if (IS_ADDU_SP_R_R0(*cur) &&
				    (fp == RS_GET(*cur))) {
					has_fp = true;
				}
			}
			continue;
		}

		if (IS_JR_RA(*inst)) {
			if (!ctx->istate)
				return false;
			/*
			 * No stack frame has been allocated yet.
			 * Use the values stored in istate.
			 */
			if (prev_fp)
				*prev_fp = ctx->istate->sp;
			if (prev_ra)
				*prev_ra = ctx->istate->ra - 8;
			ctx->istate = NULL;
			return true;
		}
#endif

	} while ((!IS_ADDIU_SP_SP_IMM(*inst) && !IS_ADDI_SP_SP_IMM(*inst)) ||
	    (IMM_GET(*inst) >= 0));

	/*
	 * We are at the instruction which allocates the space for the current
	 * stack frame.
	 */
	frame_size = -IMM_GET(*inst);
	if (prev_fp)
		*prev_fp = ctx->fp + frame_size;

	/*
	 * Scan the first basic block for the occurrence of
	 * SW $ra, OFFSET($base).
	 */
	for (inst++; !is_jump(*(inst - 1)) && (uintptr_t) inst < ctx->pc;
	    inst++) {
		if (IS_SW_RA(*inst)) {
			unsigned int base = BASE_GET(*inst);
			int16_t offset = OFFSET_GET(*inst);

			if (base == SP || (has_fp && base == fp)) {
				uint32_t *addr = (void *) (ctx->fp + offset);

				if (offset % 4 != 0)
					return false;
				/* cannot store below current stack pointer */
				if (offset < 0)
					return false;
				/* too big offsets are suspicious */
				if ((size_t) offset > sizeof(istate_t))
					return false;

				if (prev_ra)
					*prev_ra = *addr;
				return true;
			}
		}
	}

	/*
	 * The first basic block does not save the return address or saves it
	 * after ctx->pc, which means that the correct value is in istate.
	 */
	if (prev_ra) {
		if (!ctx->istate)
			return false;
		/*
		 * MIPS jal/jalr set $ra to the address of the call plus 8
		 * (i.e. past the delay slot); subtracting 8 makes the trace
		 * point at the call instruction itself.
		 */
		*prev_ra = ctx->istate->ra - 8;
		ctx->istate = NULL;
	}
	return true;
}

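/*
 * A tracing context is considered usable only if the frame pointer is
 * non-zero and 8-byte aligned (the stack is kept 8-byte aligned) and the PC
 * is word-aligned and falls within the kernel text section.
 */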
bool kernel_stack_trace_context_validate(stack_trace_context_t *ctx)
{
	return !((ctx->fp == 0) || ((ctx->fp % 8) != 0) ||
	    (ctx->pc % 4 != 0) || !bounds_check(ctx->pc));
}

bool kernel_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev)
{
	return scan(ctx, prev, NULL);
}

bool kernel_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra)
{
	return scan(ctx, NULL, ra);
}
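
/*
 * Illustration only (the loop itself lives in the architecture-independent
 * stack tracing code, not here): a walker is expected to drive the hooks
 * above roughly like this, stepping from the current frame to its caller
 * until validation fails:
 *
 *	while (kernel_stack_trace_context_validate(&ctx)) {
 *		uintptr_t ra, fp;
 *
 *		if (!kernel_return_address_get(&ctx, &ra))
 *			break;
 *		... report ra ...
 *		if (!kernel_frame_pointer_prev(&ctx, &fp))
 *			break;
 *		ctx.pc = ra;
 *		ctx.fp = fp;
 *	}
 */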

bool uspace_stack_trace_context_validate(stack_trace_context_t *ctx)
{
	return false;
}

bool uspace_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev)
{
	return false;
}

bool uspace_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra)
{
	return false;
}

/** @}
 */