source: mainline/kernel/arch/mips32/src/debug/stacktrace.c@ 76e1121f

Last change on this file was 76e1121f, checked in by Jakub Jermar <jakub@…>, 15 years ago

Add an explanatory comment to mips32 stacktrace.c.

/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup mips32
 * @{
 */
/** @file
 */

/*
 * This stack tracing code is based on the suggested algorithm described on
 * pages 3-27 and 3-28 of:
 *
 * SYSTEM V
 * APPLICATION BINARY INTERFACE
 *
 * MIPS RISC Processor
 * Supplement
 * 3rd Edition
 *
 * Unfortunately, GCC generates code which is not entirely compliant with this
 * method. For example, it places the "jr ra" instruction quite arbitrarily in
 * the middle of the function, which makes the original algorithm inapplicable.
 *
 * We deal with this problem by simply not using those parts of the algorithm
 * that rely on the "jr ra" instruction occurring in the last basic block of a
 * function, which still gives us a usable, but less reliable, stack tracer.
 * The unreliability stems from the fact that under some circumstances the
 * tracer can become confused and produce an incorrect or incomplete stack
 * trace. We apply extra sanity checks so that the algorithm remains safe and
 * should not crash the system.
 *
 * Even though not perfect, our solution is pretty lightweight, especially when
 * compared with a prospective alternative solution based on additional
 * debugging information stored directly in the kernel image.
 */
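
/*
 * For illustration only (the frame size and spill offset below are made up,
 * not taken from any particular build), a typical GCC-generated prologue that
 * the scanner in this file recognizes looks like:
 *
 *   addiu $sp, $sp, -32     # matched by IS_ADDIU_SP_SP_IMM, frame size 32
 *   sw    $ra, 28($sp)      # matched by IS_SW_RA, $ra spilled at $sp + 28
 *
 * Walking backwards from ctx->pc to the ADDIU gives the previous frame
 * pointer (ctx->fp + 32); scanning forward from the ADDIU for the SW gives
 * the saved return address at ctx->fp + 28.
 */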

#include <stacktrace.h>
#include <syscall/copy.h>
#include <typedefs.h>
#include <arch/debugger.h>
#include <print.h>

#define R0 0U
#define SP 29U
#define RA 31U

#define OP_SHIFT 26
#define RS_SHIFT 21
#define RT_SHIFT 16
#define RD_SHIFT 11

#define HINT_SHIFT 6
#define BASE_SHIFT RS_SHIFT
#define IMM_SHIFT 0
#define OFFSET_SHIFT IMM_SHIFT

#define RS_MASK (0x1f << RS_SHIFT)
#define RT_MASK (0x1f << RT_SHIFT)
#define RD_MASK (0x1f << RD_SHIFT)
#define HINT_MASK (0x1f << HINT_SHIFT)
#define BASE_MASK RS_MASK
#define IMM_MASK (0xffff << IMM_SHIFT)
#define OFFSET_MASK IMM_MASK

#define RS_GET(inst) (((inst) & RS_MASK) >> RS_SHIFT)
#define RD_GET(inst) (((inst) & RD_MASK) >> RD_SHIFT)
#define IMM_GET(inst) (int16_t)(((inst) & IMM_MASK) >> IMM_SHIFT)
#define BASE_GET(inst) RS_GET(inst)
#define OFFSET_GET(inst) IMM_GET(inst)

#define ADDU_R_SP_R0_TEMPL \
        ((0x0 << OP_SHIFT) | (SP << RS_SHIFT) | (R0 << RT_SHIFT) | 0x21)
#define ADDU_SP_R_R0_TEMPL \
        ((0x0 << OP_SHIFT) | (SP << RD_SHIFT) | (R0 << RT_SHIFT) | 0x21)
#define ADDI_SP_SP_IMM_TEMPL \
        ((0x8 << OP_SHIFT) | (SP << RS_SHIFT) | (SP << RT_SHIFT))
#define ADDIU_SP_SP_IMM_TEMPL \
        ((0x9 << OP_SHIFT) | (SP << RS_SHIFT) | (SP << RT_SHIFT))
#define JR_RA_TEMPL \
        ((0x0 << OP_SHIFT) | (RA << RS_SHIFT) | (0x0 << HINT_SHIFT) | 0x8)
#define SW_RA_TEMPL \
        ((0x2b << OP_SHIFT) | (RA << RT_SHIFT))

#define IS_ADDU_R_SP_R0(inst) \
        (((inst) & ~RD_MASK) == ADDU_R_SP_R0_TEMPL)
#define IS_ADDU_SP_R_R0(inst) \
        (((inst) & ~RS_MASK) == ADDU_SP_R_R0_TEMPL)
#define IS_ADDI_SP_SP_IMM(inst) \
        (((inst) & ~IMM_MASK) == ADDI_SP_SP_IMM_TEMPL)
#define IS_ADDIU_SP_SP_IMM(inst) \
        (((inst) & ~IMM_MASK) == ADDIU_SP_SP_IMM_TEMPL)
#define IS_JR_RA(inst) \
        (((inst) & ~HINT_MASK) == JR_RA_TEMPL)
#define IS_SW_RA(inst) \
        (((inst) & ~(BASE_MASK | OFFSET_MASK)) == SW_RA_TEMPL)
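
/*
 * Assembly forms matched by the templates above; rd, rs and base/offset are
 * the fields that the corresponding IS_* macros mask out:
 *
 *   addu  rd, $sp, $zero      ADDU_R_SP_R0     (copy $sp into a frame pointer)
 *   addu  $sp, rs, $zero      ADDU_SP_R_R0     (restore $sp from a register)
 *   addi  $sp, $sp, imm       ADDI_SP_SP_IMM   (allocate or release a frame)
 *   addiu $sp, $sp, imm       ADDIU_SP_SP_IMM  (ditto, non-trapping variant)
 *   jr    $ra                 JR_RA            (return from a function)
 *   sw    $ra, offset(base)   SW_RA            (spill the return address)
 */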

extern char ktext_start;
extern char ktext_end;

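/*
 * ktext_start and ktext_end are expected to be supplied by the linker script
 * and to delimit the kernel text section; bounds_check() makes sure that the
 * scanner never dereferences an address outside of it.
 */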
static bool bounds_check(uintptr_t pc)
{
        return (pc >= (uintptr_t) &ktext_start) &&
            (pc < (uintptr_t) &ktext_end);
}

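/*
 * Scan backwards from ctx->pc for the instruction that allocated the current
 * stack frame and then forward from it for the instruction that spills $ra.
 * On success, the caller's frame pointer and/or return address is written
 * through the optional prev_fp and prev_ra output arguments.
 */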
static bool
scan(stack_trace_context_t *ctx, uintptr_t *prev_fp, uintptr_t *prev_ra)
{
        uint32_t *inst = (void *) ctx->pc;
        bool has_fp = false;
        size_t frame_size;
        unsigned int fp = SP;

        do {
                inst--;
                if (!bounds_check((uintptr_t) inst))
                        return false;
#if 0
                /*
                 * This is one of the situations in which the theory (ABI) does
                 * not meet the practice (GCC). GCC simply does not place the
                 * JR $ra instruction as dictated by the ABI, rendering the
                 * official stack tracing algorithm somewhat inapplicable.
                 */

                if (IS_ADDU_R_SP_R0(*inst)) {
                        uint32_t *cur;
                        fp = RD_GET(*inst);
                        /*
                         * We have a candidate for the frame pointer.
                         */

                        /* Seek to the end of this function. */
                        for (cur = inst + 1; !IS_JR_RA(*cur); cur++)
                                ;
                        /* Scan the last basic block. */
                        for (cur--; !is_jump(*(cur - 1)); cur--) {
                                if (IS_ADDU_SP_R_R0(*cur) &&
                                    (fp == RS_GET(*cur))) {
                                        has_fp = true;
                                }
                        }
                        continue;
                }

                if (IS_JR_RA(*inst)) {
                        if (!ctx->istate)
                                return false;
                        /*
                         * No stack frame has been allocated yet.
                         * Use the values stored in istate.
                         */
                        if (prev_fp)
                                *prev_fp = ctx->istate->sp;
                        if (prev_ra)
                                *prev_ra = ctx->istate->ra - 8;
                        ctx->istate = NULL;
                        return true;
                }
#endif

        } while ((!IS_ADDIU_SP_SP_IMM(*inst) && !IS_ADDI_SP_SP_IMM(*inst)) ||
            (IMM_GET(*inst) >= 0));

        /*
         * We are at the instruction which allocates the space for the current
         * stack frame.
         */
        frame_size = -IMM_GET(*inst);
        if (prev_fp)
                *prev_fp = ctx->fp + frame_size;

        /*
         * Scan the first basic block for the occurrence of
         * SW $ra, OFFSET($base).
         */
        for (inst++; !is_jump(*(inst - 1)) && (uintptr_t) inst < ctx->pc;
            inst++) {
                if (IS_SW_RA(*inst)) {
                        unsigned int base = BASE_GET(*inst);
                        int16_t offset = OFFSET_GET(*inst);

                        if (base == SP || (has_fp && base == fp)) {
                                uint32_t *addr = (void *) (ctx->fp + offset);

                                if (offset % 4 != 0)
                                        return false;
                                /* Cannot store below the current stack pointer. */
                                if (offset < 0)
                                        return false;
                                /* Too big offsets are suspicious. */
                                if ((size_t) offset > sizeof(istate_t))
                                        return false;

                                if (prev_ra)
                                        *prev_ra = *addr;
                                return true;
                        }
                }
        }

        /*
         * The first basic block does not save the return address or saves it
         * after ctx->pc, which means that the correct value is in istate.
         */
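        /*
         * Note that $ra, as saved by jal/jalr, points past the delay slot of
         * the call, so subtracting 8 yields the address of the call
         * instruction itself (presumably so that the reported address lies
         * within the calling function's body).
         */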
        if (prev_ra) {
                if (!ctx->istate)
                        return false;
                *prev_ra = ctx->istate->ra - 8;
                ctx->istate = NULL;
        }
        return true;
}
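/*
 * A trace context is valid if its frame pointer is non-zero and 8-byte
 * aligned, and its program counter is 4-byte aligned and points into the
 * kernel text section.
 */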
bool kernel_stack_trace_context_validate(stack_trace_context_t *ctx)
{
        return !((ctx->fp == 0) || ((ctx->fp % 8) != 0) ||
            (ctx->pc % 4 != 0) || !bounds_check(ctx->pc));
}

bool kernel_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev)
{
        return scan(ctx, prev, NULL);
}

bool kernel_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra)
{
        return scan(ctx, NULL, ra);
}

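/*
 * Tracing of userspace stacks is not implemented on mips32, so the following
 * hooks reject every userspace context.
 */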
bool uspace_stack_trace_context_validate(stack_trace_context_t *ctx)
{
        return false;
}

bool uspace_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev)
{
        return false;
}

bool uspace_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra)
{
        return false;
}

/** @}
 */