source: mainline/kernel/arch/ia32/src/asm.S@ 1433ecda

Last change on this file since 1433ecda was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

  • Property mode set to 100644
File size: 13.1 KB
/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** Very low-level and hardware-specific functions
 *
 */

#include <abi/asmtool.h>
#include <arch/pm.h>
#include <arch/cpu.h>
#include <arch/mm/page.h>
#include <arch/istate_struct.h>
#include <arch/smp/apic.h>

.text

#define MEMCPY_DST 4
#define MEMCPY_SRC 8
#define MEMCPY_SIZE 12

/** Copy memory to/from userspace.
 *
 * This is almost a conventional memcpy().
 * The difference is that there is a failover part
 * to which control is returned from the page fault handler
 * if a page fault occurs during copy_from_uspace()
 * or copy_to_uspace().
 *
 * @param MEMCPY_DST(%esp)  Destination address.
 * @param MEMCPY_SRC(%esp)  Source address.
 * @param MEMCPY_SIZE(%esp) Size.
 *
 * @return MEMCPY_DST(%esp) on success and 0 on failure.
 *
 */
FUNCTION_BEGIN(memcpy_from_uspace)
FUNCTION_BEGIN(memcpy_to_uspace)
        movl %edi, %edx  /* save %edi */
        movl %esi, %eax  /* save %esi */

        movl MEMCPY_SIZE(%esp), %ecx
        shrl $2, %ecx  /* size / 4 */

        movl MEMCPY_DST(%esp), %edi
        movl MEMCPY_SRC(%esp), %esi

        /* Copy whole words */
        rep movsl

        movl MEMCPY_SIZE(%esp), %ecx
        andl $3, %ecx  /* size % 4 */
        jz 0f

        /* Copy the rest byte by byte */
        rep movsb

0:

        movl %edx, %edi
        movl %eax, %esi

        /* MEMCPY_DST(%esp), success */
        movl MEMCPY_DST(%esp), %eax
        ret
FUNCTION_END(memcpy_from_uspace)
FUNCTION_END(memcpy_to_uspace)
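
/*
 * For reference, the C prototypes corresponding to the two entry points
 * above are assumed to look roughly like
 *
 *     void *memcpy_from_uspace(void *dst, const void *uspace_src, size_t size);
 *     void *memcpy_to_uspace(void *uspace_dst, const void *src, size_t size);
 *
 * i.e. they return the destination address on success and zero on failure,
 * matching the @return note above. The authoritative declarations live in
 * the kernel headers, not here.
 */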

/*
 * We got here from as_page_fault() after the memory operations
 * above had caused a page fault.
 */
SYMBOL(memcpy_from_uspace_failover_address)
SYMBOL(memcpy_to_uspace_failover_address)
        movl %edx, %edi
        movl %eax, %esi

        /* Return 0, failure */
        xorl %eax, %eax
        ret
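
/*
 * The redirection itself happens outside this file: when a page fault hits
 * inside the copy loops above, the page fault path is expected to rewrite
 * the saved EIP to one of the failover addresses, which is how the zero
 * return value reaches the caller. The exact mechanism lives in the generic
 * and ia32 page fault code, not here.
 */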

/** Turn paging on
 *
 * Enable paging and write-back caching in CR0.
 *
 */
FUNCTION_BEGIN(paging_on)
        movl %cr0, %edx
        orl $CR0_PG, %edx  /* paging on */

        /* Clear the Cache Disable and Not Write-through bits */
        andl $~(CR0_CD | CR0_NW), %edx
        movl %edx, %cr0
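
        /*
         * The jump below flushes the instruction prefetch queue after
         * paging has been enabled.
         */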
        jmp 0f

0:
        ret
FUNCTION_END(paging_on)

/** Enable local APIC
 *
 * Enable local APIC in MSR.
 *
 */
FUNCTION_BEGIN(enable_l_apic_in_msr)
        movl $IA32_MSR_APIC_BASE, %ecx
        rdmsr
        orl $(L_APIC_BASE | IA32_APIC_BASE_GE), %eax
        wrmsr
        ret
FUNCTION_END(enable_l_apic_in_msr)

/*
 * Size of the istate structure without the hardware-saved part
 * and without the error word.
 */
#define ISTATE_SOFT_SIZE ISTATE_SIZE - (6 * 4)
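
/*
 * The 6 * 4 bytes subtracted above cover the five doublewords the CPU pushes
 * on a privilege-level change (EIP, CS, EFLAGS, ESP, SS) plus the error word.
 */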

/*
 * The SYSENTER syscall mechanism can be used for syscalls with
 * four or fewer arguments. To pass these four arguments, we
 * use four registers: EDX, ECX, EBX, ESI. The syscall number
 * is passed in EAX. We use EDI to remember the return address
 * and EBP to remember the userspace stack. The INT-based syscall
 * mechanism can handle six arguments plus the syscall number
 * entirely in registers.
 */
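
/*
 * For reference, the C-level handler reached from both entry paths below is
 * assumed to have a signature along the lines of
 *
 *     sysarg_t syscall_handler(sysarg_t a1, sysarg_t a2, sysarg_t a3,
 *         sysarg_t a4, sysarg_t a5, sysarg_t a6, sysarg_t id);
 *
 * so on the SYSENTER path EDX, ECX, EBX and ESI carry a1-a4 and EAX carries
 * the syscall number. The exact prototype (including the sysarg_t type) is
 * defined by the generic syscall code, not here.
 */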
SYMBOL(sysenter_handler)

        /*
         * Note that the space needed for the istate structure has been
         * preallocated on the stack by before_thread_runs_arch().
         */

        /*
         * Save the return address and the userspace stack pointer in the
         * istate structure at the locations where they would normally
         * be stored.
         */
        movl %ebp, ISTATE_OFFSET_ESP(%esp)
        movl %edi, ISTATE_OFFSET_EIP(%esp)

        /*
         * Push syscall arguments onto the stack
         */
        movl %eax, ISTATE_OFFSET_EAX(%esp)
        movl %ebx, ISTATE_OFFSET_EBX(%esp)
        movl %ecx, ISTATE_OFFSET_ECX(%esp)
        movl %edx, ISTATE_OFFSET_EDX(%esp)
        movl %esi, ISTATE_OFFSET_ESI(%esp)
        movl %edi, ISTATE_OFFSET_EDI(%esp)  /* observability; not needed */
        movl %ebp, ISTATE_OFFSET_EBP(%esp)  /* observability; not needed */

        /*
         * Fake up the stack trace linkage.
         */
        movl %edi, ISTATE_OFFSET_EIP_FRAME(%esp)
        movl $0, ISTATE_OFFSET_EBP_FRAME(%esp)
        leal ISTATE_OFFSET_EBP_FRAME(%esp), %ebp

        /*
         * Switch to kernel selectors.
         */
        movl $(GDT_SELECTOR(KDATA_DES)), %eax
        movl %eax, %ds
        movl %eax, %es
        movl $(GDT_SELECTOR(VREG_DES)), %eax
        movl %eax, %gs

        /*
         * Sanitize EFLAGS.
         *
         * SYSENTER does not clear the NT flag, which could thus proliferate
         * from here to the IRET instruction via a context switch and result
         * in a crash.
         *
         * SYSENTER does not clear DF, which the ABI assumes to be cleared.
         *
         * SYSENTER clears IF, which we would like to be set for syscalls.
         *
         */
        pushl $(EFLAGS_IF)  /* specify EFLAGS bits that we want to set */
        popfl               /* set bits from the mask, clear or ignore others */

        call syscall_handler

        /*
         * Prepare the return address and the userspace stack for SYSEXIT.
         */
        movl ISTATE_OFFSET_EIP(%esp), %edx
        movl ISTATE_OFFSET_ESP(%esp), %ecx

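        /*
         * SYSEXIT returns to the EIP held in EDX with the userspace stack
         * taken from ECX; the userspace CS and SS selectors are derived
         * from the IA32_SYSENTER_CS MSR set up elsewhere.
         */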
        sysexit  /* return to userspace */

/*
 * This is the legacy syscall handler using the interrupt mechanism.
 */
SYMBOL(int_syscall)
        subl $(ISTATE_SOFT_SIZE + 4), %esp

        /*
         * Push syscall arguments onto the stack
         *
         * NOTE: The idea behind the order of the arguments passed
         *       in registers is to use all scratch registers
         *       first and preserved registers next. An optimized
         *       libc syscall wrapper can make use of this setup.
         *       The istate structure is laid out in a way that
         *       supports this idea.
         *
         */
        movl %eax, ISTATE_OFFSET_EAX(%esp)
        movl %ebx, ISTATE_OFFSET_EBX(%esp)
        movl %ecx, ISTATE_OFFSET_ECX(%esp)
        movl %edx, ISTATE_OFFSET_EDX(%esp)
        movl %edi, ISTATE_OFFSET_EDI(%esp)
        movl %esi, ISTATE_OFFSET_ESI(%esp)
        movl %ebp, ISTATE_OFFSET_EBP(%esp)

        /*
         * Save the segment registers.
         */
        movl %gs, %ecx
        movl %fs, %edx

        movl %ecx, ISTATE_OFFSET_GS(%esp)
        movl %edx, ISTATE_OFFSET_FS(%esp)

        movl %es, %ecx
        movl %ds, %edx

        movl %ecx, ISTATE_OFFSET_ES(%esp)
        movl %edx, ISTATE_OFFSET_DS(%esp)

        /*
         * Switch to kernel selectors.
         */
        movl $(GDT_SELECTOR(KDATA_DES)), %eax
        movl %eax, %ds
        movl %eax, %es
        movl $(GDT_SELECTOR(VREG_DES)), %eax
        movl %eax, %gs

        movl $0, ISTATE_OFFSET_EBP_FRAME(%esp)
        movl ISTATE_OFFSET_EIP(%esp), %eax
        movl %eax, ISTATE_OFFSET_EIP_FRAME(%esp)
        leal ISTATE_OFFSET_EBP_FRAME(%esp), %ebp

        cld

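        /*
         * No arguments are pushed explicitly here: the beginning of istate_t
         * is laid out so that the saved EDX, ECX, EBX, ESI, EDI, EBP and EAX
         * already sit on the stack in the cdecl argument order expected by
         * syscall_handler() (see the NOTE above).
         */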
        /* Call syscall_handler(edx, ecx, ebx, esi, edi, ebp, eax) */
        call syscall_handler

        /*
         * Restore the segment registers.
         */
        movl ISTATE_OFFSET_GS(%esp), %ecx
        movl ISTATE_OFFSET_FS(%esp), %edx

        movl %ecx, %gs
        movl %edx, %fs

        movl ISTATE_OFFSET_ES(%esp), %ecx
        movl ISTATE_OFFSET_DS(%esp), %edx

        movl %ecx, %es
        movl %edx, %ds

        /*
         * Restore the preserved registers that the handler itself
         * clobbered (i.e. EBP).
         */
        movl ISTATE_OFFSET_EBP(%esp), %ebp

        addl $(ISTATE_SOFT_SIZE + 4), %esp
        iret

/**
 * Bitmask for interrupts 0 - 31 (bits 0 - 31): a 0 bit means the
 * interrupt has no error word and a 1 bit means it comes with one.
 *
 */
#define ERROR_WORD_INTERRUPT_LIST 0x00027d00
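
/*
 * In the mask above, bits 8, 10, 11, 12, 13, 14 and 17 are set, i.e. the
 * exceptions that push an error word: #DF, #TS, #NP, #SS, #GP, #PF and #AC.
 */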

.macro handler i
SYMBOL(int_\i)
        /*
         * This macro distinguishes between two kinds of ia32
         * exceptions: those that come with an error word and
         * those that do not. For the latter kind (and for plain
         * interrupts) a fake error word is pushed onto the stack
         * so that the handlers and istate_t can be the same for
         * both kinds.
         */
        .iflt \i - 32
                .if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
                        /*
                         * Exception with error word.
                         */
                        subl $ISTATE_SOFT_SIZE, %esp
                .else
                        /*
                         * Exception without error word: fake up one.
                         */
                        subl $(ISTATE_SOFT_SIZE + 4), %esp
                .endif
        .else
                /*
                 * Interrupt: fake up an error word.
                 */
                subl $(ISTATE_SOFT_SIZE + 4), %esp
        .endif

        /*
         * Save the general purpose registers.
         */
        movl %eax, ISTATE_OFFSET_EAX(%esp)
        movl %ebx, ISTATE_OFFSET_EBX(%esp)
        movl %ecx, ISTATE_OFFSET_ECX(%esp)
        movl %edx, ISTATE_OFFSET_EDX(%esp)
        movl %edi, ISTATE_OFFSET_EDI(%esp)
        movl %esi, ISTATE_OFFSET_ESI(%esp)
        movl %ebp, ISTATE_OFFSET_EBP(%esp)

        /*
         * Save the segment registers.
         */
        movl %gs, %ecx
        movl %fs, %edx

        movl %ecx, ISTATE_OFFSET_GS(%esp)
        movl %edx, ISTATE_OFFSET_FS(%esp)

        movl %es, %ecx
        movl %ds, %edx

        movl %ecx, ISTATE_OFFSET_ES(%esp)
        movl %edx, ISTATE_OFFSET_DS(%esp)

        /*
         * Switch to kernel selectors.
         */
        movl $(GDT_SELECTOR(KDATA_DES)), %eax
        movl %eax, %ds
        movl %eax, %es
        movl $(GDT_SELECTOR(VREG_DES)), %eax
        movl %eax, %gs

        /*
         * Imitate a regular stack frame linkage.
         * Stop stack traces here if we came from userspace.
         */
        xorl %eax, %eax
        cmpl $(GDT_SELECTOR(KTEXT_DES)), ISTATE_OFFSET_CS(%esp)
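        /*
         * CMOVcc is not implemented on the i486, so the PROCESSOR_i486
         * build below falls back to a conditional branch instead.
         */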
#ifdef PROCESSOR_i486
        jz 0f
        movl %eax, %ebp
0:
#else
        cmovnzl %eax, %ebp
#endif

        movl %ebp, ISTATE_OFFSET_EBP_FRAME(%esp)
        movl ISTATE_OFFSET_EIP(%esp), %eax
        movl %eax, ISTATE_OFFSET_EIP_FRAME(%esp)
        leal ISTATE_OFFSET_EBP_FRAME(%esp), %ebp

        cld

        pushl %esp  /* pass istate address */
        pushl $(\i)  /* pass intnum */

        /* Call exc_dispatch(intnum, istate) */
        call exc_dispatch

        addl $8, %esp  /* clear arguments from the stack */


        /*
         * Restore the selector registers.
         */
        movl ISTATE_OFFSET_GS(%esp), %ecx
        movl ISTATE_OFFSET_FS(%esp), %edx

        movl %ecx, %gs
        movl %edx, %fs

        movl ISTATE_OFFSET_ES(%esp), %ecx
        movl ISTATE_OFFSET_DS(%esp), %edx

        movl %ecx, %es
        movl %edx, %ds

        /*
         * Restore the scratch registers and the preserved
         * registers that the handler itself clobbered
         * (i.e. EBP).
         */
        movl ISTATE_OFFSET_EAX(%esp), %eax
        movl ISTATE_OFFSET_ECX(%esp), %ecx
        movl ISTATE_OFFSET_EDX(%esp), %edx
        movl ISTATE_OFFSET_EBP(%esp), %ebp

        addl $(ISTATE_SOFT_SIZE + 4), %esp
        iret
.endm

#define LIST_0_63 \
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, \
        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, \
        38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, \
        56, 57, 58, 59, 60, 61, 62, 63

interrupt_handlers:
.irp cnt, LIST_0_63
        handler \cnt
.endr
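
/*
 * The .irp loop above expands the handler macro once for each vector in
 * LIST_0_63, producing the int_0 through int_63 entry stubs contiguously
 * at interrupt_handlers, which the interrupt gate setup code is expected
 * to point the IDT entries at.
 */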

/** Print a Unicode character to the EGA display.
 *
 * If CONFIG_EGA is undefined or CONFIG_FB is defined,
 * this function does nothing.
 *
 * Since the EGA can only display Extended ASCII (usually
 * ISO Latin 1) characters, some Unicode characters may be
 * displayed incorrectly. Only newline and backspace are
 * interpreted; all other characters (even unprintable ones)
 * are printed verbatim.
 *
 * @param %ebp+0x08 Unicode character to be printed.
 *
 */
FUNCTION_BEGIN(early_putchar)

#if ((defined(CONFIG_EGA)) && (!defined(CONFIG_FB)))

        /* Prologue, save preserved registers */
        pushl %ebp
        movl %esp, %ebp
        pushl %ebx
        pushl %esi
        pushl %edi

        movl $(PA2KA(0xb8000)), %edi  /* base of EGA text mode memory */
        xorl %eax, %eax

        /* Read bits 8 - 15 of the cursor address */
        movw $0x3d4, %dx
        movb $0xe, %al
        outb %al, %dx

        movw $0x3d5, %dx
        inb %dx, %al
        shl $8, %ax

        /* Read bits 0 - 7 of the cursor address */
        movw $0x3d4, %dx
        movb $0xf, %al
        outb %al, %dx

        movw $0x3d5, %dx
        inb %dx, %al

        /* Sanity check for the cursor on screen */
        cmp $2000, %ax
        jb early_putchar_cursor_ok

        movw $1998, %ax

early_putchar_cursor_ok:

        movw %ax, %bx
        shl $1, %eax
        addl %eax, %edi

        movl 0x08(%ebp), %eax

        cmp $0x0a, %al
        jne early_putchar_backspace

        /* Interpret newline */

        movw %bx, %ax  /* %bx -> %dx:%ax */
        xorw %dx, %dx

        movw $80, %cx
        idivw %cx, %ax  /* %dx = %bx % 80 */

        /* %bx <- %bx + 80 - (%bx % 80) */
        addw %cx, %bx
        subw %dx, %bx
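        /*
         * Example: a cursor at offset 165 (row 2, column 5) yields
         * 165 % 80 = 5, so %bx becomes 165 + 80 - 5 = 240, i.e. the
         * first column of row 3.
         */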

        jmp early_putchar_skip

early_putchar_backspace:

        cmp $0x08, %al
        jne early_putchar_print

        /* Interpret backspace */

        cmp $0x0000, %bx
        je early_putchar_skip

        dec %bx
        jmp early_putchar_skip

early_putchar_print:

        /* Print character */

        movb $0x0e, %ah  /* black background, yellow foreground */
        stosw
        inc %bx

early_putchar_skip:

        /* Sanity check for the cursor on the last line */
        cmp $2000, %bx
        jb early_putchar_no_scroll

        /* Scroll the screen (24 rows) */
        movl $(PA2KA(0xb80a0)), %esi
        movl $(PA2KA(0xb8000)), %edi
        movl $960, %ecx
        rep movsl
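        /*
         * 960 doublewords are 3840 bytes, i.e. 24 rows of 80 two-byte
         * cells, and 0xb80a0 is the start of row 1, so the copy shifts
         * rows 1 - 24 up by one row.
         */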

        /* Clear the 24th row */
        xorl %eax, %eax
        movl $40, %ecx
        rep stosl

        /* Go to row 24 */
        movw $1920, %bx

early_putchar_no_scroll:

        /* Write bits 8 - 15 of the cursor address */
        movw $0x3d4, %dx
        movb $0xe, %al
        outb %al, %dx

        movw $0x3d5, %dx
        movb %bh, %al
        outb %al, %dx

        /* Write bits 0 - 7 of the cursor address */
        movw $0x3d4, %dx
        movb $0xf, %al
        outb %al, %dx

        movw $0x3d5, %dx
        movb %bl, %al
        outb %al, %dx

        /* Epilogue, restore preserved registers */
        popl %edi
        popl %esi
        popl %ebx
        leave

#endif

        ret
FUNCTION_END(early_putchar)
