source: mainline/kernel/arch/mips32/src/debug/stacktrace.c @ a35b458

Last change on this file since a35b458 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation, and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup mips32
 * @{
 */
/** @file
 */

/*
 * This stack tracing code is based on the suggested algorithm described on
 * pages 3-27 and 3-28 of:
 *
 * SYSTEM V
 * APPLICATION BINARY INTERFACE
 *
 * MIPS RISC Processor
 * Supplement
 * 3rd Edition
 *
 * Unfortunately, GCC generates code which is not entirely compliant with this
 * method. For example, it places the "jr ra" instruction quite arbitrarily in
 * the middle of a function, which makes the original algorithm inapplicable.
 *
 * We deal with this problem by simply not using those parts of the algorithm
 * that rely on the "jr ra" instruction occurring in the last basic block of a
 * function, which still gives us a usable, but less reliable, stack tracer.
 * The unreliability stems from the fact that under some circumstances it can
 * become confused and produce an incorrect or incomplete stack trace. We apply
 * extra sanity checks so that the algorithm is still safe and should not crash
 * the system.
 *
 * Even though not perfect, our solution is pretty lightweight, especially when
 * compared with a prospective alternative solution based on additional
 * debugging information stored directly in the kernel image.
 */
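
/*
 * For illustration, a typical GCC-generated mips32 function manipulates its
 * stack frame roughly like this (a generic sketch, not the output of any
 * particular build):
 *
 *    addiu $sp, $sp, -32     # allocate the stack frame
 *    sw    $ra, 28($sp)      # save the return address into the frame
 *    ...
 *    lw    $ra, 28($sp)      # restore the return address
 *    jr    $ra               # return
 *    addiu $sp, $sp, 32      # release the frame in the delay slot
 *
 * The tracer below keys on the first two shapes: the ADDIU that allocates the
 * frame reveals the frame size, and the SW reveals where the caller's return
 * address was spilled.
 */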

#include <stacktrace.h>
#include <stddef.h>
#include <syscall/copy.h>
#include <typedefs.h>
#include <arch/debugger.h>
#include <print.h>

#define R0 0U
#define SP 29U
#define RA 31U

#define OP_SHIFT 26
#define RS_SHIFT 21
#define RT_SHIFT 16
#define RD_SHIFT 11

#define HINT_SHIFT 6
#define BASE_SHIFT RS_SHIFT
#define IMM_SHIFT 0
#define OFFSET_SHIFT IMM_SHIFT

#define RS_MASK (0x1f << RS_SHIFT)
#define RT_MASK (0x1f << RT_SHIFT)
#define RD_MASK (0x1f << RD_SHIFT)
#define HINT_MASK (0x1f << HINT_SHIFT)
#define BASE_MASK RS_MASK
#define IMM_MASK (0xffff << IMM_SHIFT)
#define OFFSET_MASK IMM_MASK

#define RS_GET(inst) (((inst) & RS_MASK) >> RS_SHIFT)
#define RD_GET(inst) (((inst) & RD_MASK) >> RD_SHIFT)
#define IMM_GET(inst) (int16_t)(((inst) & IMM_MASK) >> IMM_SHIFT)
#define BASE_GET(inst) RS_GET(inst)
#define OFFSET_GET(inst) IMM_GET(inst)

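/*
 * Worked example: the frame-allocating instruction "addiu $sp, $sp, -32"
 * is encoded as 0x27bdffe0, i.e. op = 0x9, rs = 29 ($sp), rt = 29 ($sp),
 * imm = 0xffe0. IMM_GET() reinterprets the immediate as the signed value
 * -32, the negated frame size.
 */
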
#define ADDU_R_SP_R0_TEMPL \
    ((0x0 << OP_SHIFT) | (SP << RS_SHIFT) | (R0 << RT_SHIFT) | 0x21)
#define ADDU_SP_R_R0_TEMPL \
    ((0x0 << OP_SHIFT) | (SP << RD_SHIFT) | (R0 << RT_SHIFT) | 0x21)
#define ADDI_SP_SP_IMM_TEMPL \
    ((0x8 << OP_SHIFT) | (SP << RS_SHIFT) | (SP << RT_SHIFT))
#define ADDIU_SP_SP_IMM_TEMPL \
    ((0x9 << OP_SHIFT) | (SP << RS_SHIFT) | (SP << RT_SHIFT))
#define JR_RA_TEMPL \
    ((0x0 << OP_SHIFT) | (RA << RS_SHIFT) | (0x0 << HINT_SHIFT) | 0x8)
#define SW_RA_TEMPL \
    ((0x2b << OP_SHIFT) | (RA << RT_SHIFT))

#define IS_ADDU_R_SP_R0(inst) \
    (((inst) & ~RD_MASK) == ADDU_R_SP_R0_TEMPL)
#define IS_ADDU_SP_R_R0(inst) \
    (((inst) & ~RS_MASK) == ADDU_SP_R_R0_TEMPL)
#define IS_ADDI_SP_SP_IMM(inst) \
    (((inst) & ~IMM_MASK) == ADDI_SP_SP_IMM_TEMPL)
#define IS_ADDIU_SP_SP_IMM(inst) \
    (((inst) & ~IMM_MASK) == ADDIU_SP_SP_IMM_TEMPL)
#define IS_JR_RA(inst) \
    (((inst) & ~HINT_MASK) == JR_RA_TEMPL)
#define IS_SW_RA(inst) \
    (((inst) & ~(BASE_MASK | OFFSET_MASK)) == SW_RA_TEMPL)

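/*
 * For example, "sw $ra, 28($sp)" is encoded as 0xafbf001c; masking out the
 * base and offset fields leaves 0xac1f0000, which equals SW_RA_TEMPL, so
 * IS_SW_RA() matches the store no matter which slot of the frame holds the
 * saved return address.
 */
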
extern char ktext_start;
extern char ktext_end;

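/** Check whether an address lies within the kernel text section.
 *
 * The backward scan in scan() dereferences candidate instruction addresses,
 * so each candidate is first checked against [ktext_start, ktext_end).
 */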
static bool bounds_check(uintptr_t pc)
{
    return (pc >= (uintptr_t) &ktext_start) &&
        (pc < (uintptr_t) &ktext_end);
}

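/** Find the previous frame pointer and/or return address for ctx.
 *
 * The function scans backwards from ctx->pc for the ADDIU/ADDI
 * $sp, $sp, -imm instruction that allocated the current stack frame; the
 * negated immediate is the frame size, so the previous frame pointer is
 * ctx->fp plus that size. It then scans the function's first basic block
 * forward for the SW that spills $ra into the frame and, if the store
 * precedes ctx->pc, reads the saved return address back from the stack.
 * Otherwise the return address still lives in a register and is taken from
 * istate; the "- 8" turns the value stored by JAL (the address of the
 * instruction after the delay slot) back into the address of the call site.
 */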
static bool
scan(stack_trace_context_t *ctx, uintptr_t *prev_fp, uintptr_t *prev_ra)
{
    uint32_t *inst = (void *) ctx->pc;
    bool has_fp = false;
    size_t frame_size;
    unsigned int fp = SP;

    do {
        inst--;
        if (!bounds_check((uintptr_t) inst))
            return false;
#if 0
        /*
         * This is one of the situations in which the theory (ABI) does
         * not meet the practice (GCC). GCC simply does not place the
         * JR $ra instruction as dictated by the ABI, rendering the
         * official stack tracing algorithm somewhat inapplicable.
         */

        if (IS_ADDU_R_SP_R0(*inst)) {
            uint32_t *cur;
            fp = RD_GET(*inst);
            /*
             * We have a candidate for the frame pointer.
             */

            /* Seek to the end of this function. */
            for (cur = inst + 1; !IS_JR_RA(*cur); cur++)
                ;
            /* Scan the last basic block. */
            for (cur--; !is_jump(*(cur - 1)); cur--) {
                if (IS_ADDU_SP_R_R0(*cur) &&
                    (fp == RS_GET(*cur))) {
                    has_fp = true;
                }
            }
            continue;
        }

        if (IS_JR_RA(*inst)) {
            if (!ctx->istate)
                return false;
            /*
             * No stack frame has been allocated yet.
             * Use the values stored in istate.
             */
            if (prev_fp)
                *prev_fp = ctx->istate->sp;
            if (prev_ra)
                *prev_ra = ctx->istate->ra - 8;
            ctx->istate = NULL;
            return true;
        }
#endif

    } while ((!IS_ADDIU_SP_SP_IMM(*inst) && !IS_ADDI_SP_SP_IMM(*inst)) ||
        (IMM_GET(*inst) >= 0));

    /*
     * We are at the instruction which allocates the space for the current
     * stack frame.
     */
    frame_size = -IMM_GET(*inst);
    if (prev_fp)
        *prev_fp = ctx->fp + frame_size;

    /*
     * Scan the first basic block for the occurrence of
     * SW $ra, OFFSET($base).
     */
    for (inst++; !is_jump(*(inst - 1)) && (uintptr_t) inst < ctx->pc;
        inst++) {
        if (IS_SW_RA(*inst)) {
            unsigned int base = BASE_GET(*inst);
            int16_t offset = OFFSET_GET(*inst);

            if (base == SP || (has_fp && base == fp)) {
                uint32_t *addr = (void *) (ctx->fp + offset);

                if (offset % 4 != 0)
                    return false;
                /* Cannot store below the current stack pointer. */
                if (offset < 0)
                    return false;
                /* Too large an offset is suspicious. */
                if ((size_t) offset > sizeof(istate_t))
                    return false;

                if (prev_ra)
                    *prev_ra = *addr;
                return true;
            }
        }
    }

    /*
     * The first basic block does not save the return address or saves it
     * after ctx->pc, which means that the correct value is in istate.
     */
    if (prev_ra) {
        if (!ctx->istate)
            return false;
        *prev_ra = ctx->istate->ra - 8;
        ctx->istate = NULL;
    }
    return true;
}
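
/*
 * Hooks for the architecture-independent kernel stack tracer (see
 * <stacktrace.h>): context validation plus the two unwinding primitives,
 * both of which are thin wrappers around scan().
 */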
bool kernel_stack_trace_context_validate(stack_trace_context_t *ctx)
{
    return !((ctx->fp == 0) || ((ctx->fp % 8) != 0) ||
        (ctx->pc % 4 != 0) || !bounds_check(ctx->pc));
}

bool kernel_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev)
{
    return scan(ctx, prev, NULL);
}

bool kernel_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra)
{
    return scan(ctx, NULL, ra);
}

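/*
 * Userspace stack tracing is not implemented for mips32; every hook simply
 * reports failure.
 */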
bool uspace_stack_trace_context_validate(stack_trace_context_t *ctx)
{
    return false;
}

bool uspace_frame_pointer_prev(stack_trace_context_t *ctx, uintptr_t *prev)
{
    return false;
}

bool uspace_return_address_get(stack_trace_context_t *ctx, uintptr_t *ra)
{
    return false;
}

/** @}
 */