source: mainline/kernel/arch/ia32/src/pm.c @ 8844e70

Last change on this file since 8844e70 was d242cb6, checked in by Martin Decky <martin@…>, 12 years ago

Make sure we configure two distinct segment descriptors and set the read bit on the code segment before switching back to real mode for VESA/VBE.
This fixes execution on the latest (3.9.1) Linux KVM, whose real-mode x86 code emulation is particularly picky about the code-segment permission bits.
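
For context, the descriptors this change refers to are the two VESA init entries in the gdt[] initializer below (under CONFIG_FB): a separate code segment carrying AR_READABLE and a separate data segment carrying AR_WRITABLE. As a rough illustrative sketch of what the "read bit" means at the raw descriptor level (the constants below are made up for illustration and are not part of this file; the real AR_* flags presumably come from the <arch/pm.h> header included at the top):

    /* Type nibble of an x86 code-segment descriptor: bit 3 = code, bit 1 = readable. */
    enum {
        SEG_TYPE_CODE_EXEC_ONLY = 0x8,  /* 1000b: execute-only; strict real-mode emulation may reject it */
        SEG_TYPE_CODE_READABLE  = 0xa   /* 1010b: execute/read; what the VESA init code segment now uses */
    };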

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia32
 * @{
 */
/** @file
 */

#include <arch/pm.h>
#include <config.h>
#include <typedefs.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <arch/context.h>
#include <panic.h>
#include <arch/mm/page.h>
#include <mm/slab.h>
#include <memstr.h>
#include <arch/boot/boot.h>
#include <interrupt.h>

/*
 * Early ia32 configuration functions and data structures.
 */

/*
 * We have no use for segmentation, so we set up flat mode. In this
 * mode, we use, for each privilege level, two segments spanning the
 * whole memory: one for code and one for data.
 *
 * An additional segment is used for the GS register, which holds a
 * pointer to the TLS thread structure in its base.
 */
descriptor_t gdt[GDT_ITEMS] = {
    /* NULL descriptor */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* KTEXT descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
    /* KDATA descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 1, 1, 0 },
    /* UTEXT descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_CODE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* UDATA descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* TSS descriptor - setup will be completed later */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* TLS descriptor */
    { 0xffff, 0, 0, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER, 0xf, 0, 0, 1, 1, 0 },
    /* VESA Init descriptors (code and data) */
#ifdef CONFIG_FB
    { 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_CODE | AR_READABLE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 },
    { 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 }
#endif
};

static idescriptor_t idt[IDT_ITEMS];

static tss_t tss;

tss_t *tss_p = NULL;

/* gdtr is changed by kmp before the next CPU is initialized */
ptr_16_32_t gdtr = {
    .limit = sizeof(gdt),
    .base = (uintptr_t) gdt
};

void gdt_setbase(descriptor_t *d, uintptr_t base)
{
    d->base_0_15 = base & 0xffff;
    d->base_16_23 = ((base) >> 16) & 0xff;
    d->base_24_31 = ((base) >> 24) & 0xff;
}

void gdt_setlimit(descriptor_t *d, uint32_t limit)
{
    d->limit_0_15 = limit & 0xffff;
    d->limit_16_19 = (limit >> 16) & 0xf;
}

void idt_setoffset(idescriptor_t *d, uintptr_t offset)
{
    /*
     * Offset is a linear address.
     */
    d->offset_0_15 = offset & 0xffff;
    d->offset_16_31 = offset >> 16;
}

void tss_initialize(tss_t *t)
{
    memsetb(t, sizeof(tss_t), 0);
}

/*
 * This function takes care of proper setup of IDT and IDTR.
 */
void idt_init(void)
{
    idescriptor_t *d;
    unsigned int i;

    for (i = 0; i < IDT_ITEMS; i++) {
        d = &idt[i];

        d->unused = 0;
        d->selector = GDT_SELECTOR(KTEXT_DES);

        if (i == VECTOR_SYSCALL) {
            /*
             * The syscall trap gate must be callable from
             * userland. Interrupts will remain enabled.
             */
            d->access = AR_PRESENT | AR_TRAP | DPL_USER;
        } else {
            /*
             * Other interrupts use interrupt gates which
             * disable interrupts.
             */
            d->access = AR_PRESENT | AR_INTERRUPT;
        }
    }

    d = &idt[0];
    idt_setoffset(d++, (uintptr_t) &int_0);
    idt_setoffset(d++, (uintptr_t) &int_1);
    idt_setoffset(d++, (uintptr_t) &int_2);
    idt_setoffset(d++, (uintptr_t) &int_3);
    idt_setoffset(d++, (uintptr_t) &int_4);
    idt_setoffset(d++, (uintptr_t) &int_5);
    idt_setoffset(d++, (uintptr_t) &int_6);
    idt_setoffset(d++, (uintptr_t) &int_7);
    idt_setoffset(d++, (uintptr_t) &int_8);
    idt_setoffset(d++, (uintptr_t) &int_9);
    idt_setoffset(d++, (uintptr_t) &int_10);
    idt_setoffset(d++, (uintptr_t) &int_11);
    idt_setoffset(d++, (uintptr_t) &int_12);
    idt_setoffset(d++, (uintptr_t) &int_13);
    idt_setoffset(d++, (uintptr_t) &int_14);
    idt_setoffset(d++, (uintptr_t) &int_15);
    idt_setoffset(d++, (uintptr_t) &int_16);
    idt_setoffset(d++, (uintptr_t) &int_17);
    idt_setoffset(d++, (uintptr_t) &int_18);
    idt_setoffset(d++, (uintptr_t) &int_19);
    idt_setoffset(d++, (uintptr_t) &int_20);
    idt_setoffset(d++, (uintptr_t) &int_21);
    idt_setoffset(d++, (uintptr_t) &int_22);
    idt_setoffset(d++, (uintptr_t) &int_23);
    idt_setoffset(d++, (uintptr_t) &int_24);
    idt_setoffset(d++, (uintptr_t) &int_25);
    idt_setoffset(d++, (uintptr_t) &int_26);
    idt_setoffset(d++, (uintptr_t) &int_27);
    idt_setoffset(d++, (uintptr_t) &int_28);
    idt_setoffset(d++, (uintptr_t) &int_29);
    idt_setoffset(d++, (uintptr_t) &int_30);
    idt_setoffset(d++, (uintptr_t) &int_31);
    idt_setoffset(d++, (uintptr_t) &int_32);
    idt_setoffset(d++, (uintptr_t) &int_33);
    idt_setoffset(d++, (uintptr_t) &int_34);
    idt_setoffset(d++, (uintptr_t) &int_35);
    idt_setoffset(d++, (uintptr_t) &int_36);
    idt_setoffset(d++, (uintptr_t) &int_37);
    idt_setoffset(d++, (uintptr_t) &int_38);
    idt_setoffset(d++, (uintptr_t) &int_39);
    idt_setoffset(d++, (uintptr_t) &int_40);
    idt_setoffset(d++, (uintptr_t) &int_41);
    idt_setoffset(d++, (uintptr_t) &int_42);
    idt_setoffset(d++, (uintptr_t) &int_43);
    idt_setoffset(d++, (uintptr_t) &int_44);
    idt_setoffset(d++, (uintptr_t) &int_45);
    idt_setoffset(d++, (uintptr_t) &int_46);
    idt_setoffset(d++, (uintptr_t) &int_47);
    idt_setoffset(d++, (uintptr_t) &int_48);
    idt_setoffset(d++, (uintptr_t) &int_49);
    idt_setoffset(d++, (uintptr_t) &int_50);
    idt_setoffset(d++, (uintptr_t) &int_51);
    idt_setoffset(d++, (uintptr_t) &int_52);
    idt_setoffset(d++, (uintptr_t) &int_53);
    idt_setoffset(d++, (uintptr_t) &int_54);
    idt_setoffset(d++, (uintptr_t) &int_55);
    idt_setoffset(d++, (uintptr_t) &int_56);
    idt_setoffset(d++, (uintptr_t) &int_57);
    idt_setoffset(d++, (uintptr_t) &int_58);
    idt_setoffset(d++, (uintptr_t) &int_59);
    idt_setoffset(d++, (uintptr_t) &int_60);
    idt_setoffset(d++, (uintptr_t) &int_61);
    idt_setoffset(d++, (uintptr_t) &int_62);
    idt_setoffset(d++, (uintptr_t) &int_63);

    idt_setoffset(&idt[VECTOR_SYSCALL], (uintptr_t) &int_syscall);
}

/* Clean IOPL(12,13) and NT(14) flags in EFLAGS register */
static void clean_IOPL_NT_flags(void)
{
    asm volatile (
        "pushfl\n"
        "pop %%eax\n"
        "and $0xffff8fff, %%eax\n"
        "push %%eax\n"
        "popfl\n"
        ::: "eax"
    );
}

/* Clean AM(18) flag in CR0 register */
static void clean_AM_flag(void)
{
    asm volatile (
        "mov %%cr0, %%eax\n"
        "and $0xfffbffff, %%eax\n"
        "mov %%eax, %%cr0\n"
        ::: "eax"
    );
}

void pm_init(void)
{
    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
    ptr_16_32_t idtr;

    /*
     * Update addresses in GDT and IDT to their virtual counterparts.
     */
    idtr.limit = sizeof(idt);
    idtr.base = (uintptr_t) idt;
    gdtr_load(&gdtr);
    idtr_load(&idtr);

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: bootstrap CPU has statically allocated TSS, because
         * the heap hasn't been initialized so far.
         */
        tss_p = &tss;
    }
    else {
        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("Cannot allocate TSS.");
    }

    tss_initialize(tss_p);

    gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
    gdt_p[TSS_DES].special = 1;
    gdt_p[TSS_DES].granularity = 0;

    gdt_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
    gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    tr_load(GDT_SELECTOR(TSS_DES));

    clean_IOPL_NT_flags();  /* Disable I/O on nonprivileged levels and clear NT flag. */
    clean_AM_flag();        /* Disable alignment check */
}

void set_tls_desc(uintptr_t tls)
{
    ptr_16_32_t cpugdtr;
    descriptor_t *gdt_p;

    gdtr_store(&cpugdtr);
    gdt_p = (descriptor_t *) cpugdtr.base;
    gdt_setbase(&gdt_p[TLS_DES], tls);
    /* Reload gdt register to update GS in CPU */
    gdtr_load(&cpugdtr);
}

/** @}
 */