- Timestamp: 2006-04-13T17:38:03Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: e185136
- Parents: 897ad60
- Location: arch/ia32
- Files: 5 edited
Legend (for the diffs below):
- unchanged context lines are prefixed with a space
- added lines are prefixed with "+"
- removed lines are prefixed with "-"
arch/ia32/include/asm.h
(r897ad60 → r39cea6a)

  * @param gdtr_reg Address of memory from where to load GDTR.
  */
-static inline void gdtr_load(struct ptr_16_32 *gdtr_reg)
+static inline void gdtr_load(ptr_16_32_t *gdtr_reg)
 {
     __asm__ volatile ("lgdt %0\n" : : "m" (*gdtr_reg));
…
  * @param gdtr_reg Address of memory to where to load GDTR.
  */
-static inline void gdtr_store(struct ptr_16_32 *gdtr_reg)
+static inline void gdtr_store(ptr_16_32_t *gdtr_reg)
 {
     __asm__ volatile ("sgdt %0\n" : : "m" (*gdtr_reg));
…
  * @param idtr_reg Address of memory from where to load IDTR.
  */
-static inline void idtr_load(struct ptr_16_32 *idtr_reg)
+static inline void idtr_load(ptr_16_32_t *idtr_reg)
 {
     __asm__ volatile ("lidt %0\n" : : "m" (*idtr_reg));
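For context, the ptr_16_32_t type these wrappers now take (defined in pm.h below) is the 6-byte pseudo-descriptor that the LGDT/SGDT/LIDT instructions expect: a 16-bit limit followed by a 32-bit linear base. A minimal standalone sketch of that layout and of the load pattern used by gdtr_load(); the names pseudo_descriptor_t and load_gdt are illustrative, not part of the kernel:

    #include <stdint.h>

    /* Same layout as struct ptr_16_32 / ptr_16_32_t in pm.h: the 6-byte
     * memory operand consumed by LGDT, SGDT and LIDT. */
    typedef struct {
        uint16_t limit;     /* size of the table in bytes, minus 1 */
        uint32_t base;      /* 32-bit linear address of the table */
    } __attribute__ ((packed)) pseudo_descriptor_t;

    /* Hypothetical helper: point GDTR at a new table (mirrors gdtr_load()). */
    static inline void load_gdt(void *table, uint16_t size)
    {
        pseudo_descriptor_t gdtr = {
            .limit = size - 1,                  /* the limit field is inclusive */
            .base  = (uint32_t) (uintptr_t) table,
        };
        __asm__ volatile ("lgdt %0" : : "m" (gdtr));
    }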
arch/ia32/include/pm.h
(r897ad60 → r39cea6a)

 #define DPL_USER    (PL_USER<<5)
 
-#define IO_MAP_BASE (104)
+#define TSS_BASIC_SIZE 104
 
 #ifndef __ASM__
…
     __u32 base;
 } __attribute__ ((packed));
+typedef struct ptr_16_32 ptr_16_32_t;
 
 struct descriptor {
…
     unsigned base_24_31: 8;
 } __attribute__ ((packed));
+typedef struct descriptor descriptor_t;
 
 struct idescriptor {
…
     unsigned offset_16_31: 16;
 } __attribute__ ((packed));
+typedef struct idescriptor idescriptor_t;
 
 struct tss {
…
     __u8 iomap[0x10000+1];    /* 64K + 1 terminating byte */
 } __attribute__ ((packed));
+typedef struct tss tss_t;
 
-extern struct ptr_16_32 gdtr;
-extern struct ptr_16_32 bootstrap_gdtr;
-extern struct ptr_16_32 protected_ap_gdtr;
+extern ptr_16_32_t gdtr;
+extern ptr_16_32_t bootstrap_gdtr;
+extern ptr_16_32_t protected_ap_gdtr;
 extern struct tss *tss_p;
 
-extern struct descriptor gdt[];
+extern descriptor_t gdt[];
 
 extern void pm_init(void);
 
-extern void gdt_setbase(struct descriptor *d, __address base);
-extern void gdt_setlimit(struct descriptor *d, __u32 limit);
+extern void gdt_setbase(descriptor_t *d, __address base);
+extern void gdt_setlimit(descriptor_t *d, __u32 limit);
 
 extern void idt_init(void);
-extern void idt_setoffset(struct idescriptor *d, __address offset);
+extern void idt_setoffset(idescriptor_t *d, __address offset);
 
-extern void tss_initialize(struct tss *t);
+extern void tss_initialize(tss_t *t);
 extern void set_tls_desc(__address tls);
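TSS_BASIC_SIZE (104) is the size of the architectural 32-bit TSS without the I/O permission bitmap, so the iomap member is expected to start right at that offset; the scheduler change further below sizes the TSS segment as TSS_BASIC_SIZE + iomap_size - 1. A small sketch of that arithmetic, assuming one bitmap bit per port and an inclusive segment limit (the helper names are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdint.h>

    #define TSS_BASIC_SIZE 104      /* architectural TSS, without the iomap */

    /* Bytes of bitmap needed to cover ports 0 .. highest_port (1 bit per port). */
    static size_t iomap_bytes_for(uint16_t highest_port)
    {
        return highest_port / 8 + 1;
    }

    /* TSS segment limit covering the basic TSS plus that bitmap prefix;
     * segment limits are inclusive, hence the trailing -1. */
    static size_t tss_limit_for(uint16_t highest_port)
    {
        return TSS_BASIC_SIZE + iomap_bytes_for(highest_port) - 1;
    }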
arch/ia32/src/cpu/cpu.c
(r897ad60 → r39cea6a)

 }
 
-
-
-
 void cpu_arch_init(void)
 {
-    __u32 help=0;
+    cpuid_feature_info fi;
+    cpuid_extended_feature_info efi;
+    cpu_info_t info;
+    __u32 help = 0;
 
     CPU->arch.tss = tss_p;
-    CPU->fpu_owner=NULL;
+    CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((__u8 *) CPU->arch.tss);
 
-    cpuid_feature_info fi;
-    cpuid_extended_feature_info efi;
+    CPU->fpu_owner = NULL;
 
-    cpu_info_t info;
     cpuid(1, &info);
 
-    fi.word=info.cpuid_edx;
-    efi.word=info.cpuid_ecx;
+    fi.word = info.cpuid_edx;
+    efi.word = info.cpuid_ecx;
 
-    if(fi.bits.fxsr) fpu_fxsr();
-    else fpu_fsr();
+    if (fi.bits.fxsr)
+        fpu_fxsr();
+    else
+        fpu_fsr();
 
-    if(fi.bits.sse) asm volatile (
-        "mov %%cr4,%0;\n"
-        "or %1,%0;\n"
-        "mov %0,%%cr4;\n"
-        :"+r"(help)
-        :"i"(CR4_OSFXSR_MASK|(1<<10))
-    );
-
+    if (fi.bits.sse) {
+        asm volatile (
+            "mov %%cr4,%0\n"
+            "or %1,%0\n"
+            "mov %0,%%cr4\n"
+            : "+r" (help)
+            : "i" (CR4_OSFXSR_MASK|(1<<10))
+        );
+    }
 }
 
 void cpu_identify(void)
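The SSE branch in cpu_arch_init() ORs two bits into CR4. For reference, per the IA-32 manuals, CR4.OSFXSR is bit 9 (which is presumably what the kernel's CR4_OSFXSR_MASK expands to) and the literal (1<<10) is CR4.OSXMMEXCPT; the CPUID leaf-1 EDX flags consulted through fi.bits are bit 24 (FXSR) and bit 25 (SSE). A short sketch spelling those constants out; the macro names here are descriptive stand-ins, not the kernel's:

    /* CR4 bits that must be set before executing SSE instructions. */
    #define CR4_OSFXSR      (1 << 9)    /* OS uses FXSAVE/FXRSTOR for FPU/SSE state */
    #define CR4_OSXMMEXCPT  (1 << 10)   /* OS handles unmasked SIMD FP exceptions (#XM) */

    /* CPUID leaf 1, EDX feature flags behind fi.bits.fxsr / fi.bits.sse. */
    #define CPUID_FEAT_EDX_FXSR (1 << 24)
    #define CPUID_FEAT_EDX_SSE  (1 << 25)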
arch/ia32/src/pm.c
(r897ad60 → r39cea6a)

  * structure in it's base.
  */
-struct descriptor gdt[GDT_ITEMS] = {
+descriptor_t gdt[GDT_ITEMS] = {
     /* NULL descriptor */
     { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
…
 };
 
-static struct idescriptor idt[IDT_ITEMS];
-
-static struct tss tss;
-
-struct tss *tss_p = NULL;
+static idescriptor_t idt[IDT_ITEMS];
+
+static tss_t tss;
+
+tss_t *tss_p = NULL;
 
 /* gdtr is changed by kmp before next CPU is initialized */
-struct ptr_16_32 bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
-struct ptr_16_32 gdtr = { .limit = sizeof(gdt), .base = (__address) gdt };
-
-void gdt_setbase(struct descriptor *d, __address base)
+ptr_16_32_t bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
+ptr_16_32_t gdtr = { .limit = sizeof(gdt), .base = (__address) gdt };
+
+void gdt_setbase(descriptor_t *d, __address base)
 {
     d->base_0_15 = base & 0xffff;
…
 }
 
-void gdt_setlimit(struct descriptor *d, __u32 limit)
+void gdt_setlimit(descriptor_t *d, __u32 limit)
 {
     d->limit_0_15 = limit & 0xffff;
…
 }
 
-void idt_setoffset(struct idescriptor *d, __address offset)
+void idt_setoffset(idescriptor_t *d, __address offset)
 {
     /*
…
      */
 }
 
-void tss_initialize(struct tss *t)
+void tss_initialize(tss_t *t)
 {
     memsetb((__address) t, sizeof(struct tss), 0);
…
 void idt_init(void)
 {
-    struct idescriptor *d;
+    idescriptor_t *d;
     int i;
…
 static void clean_IOPL_NT_flags(void)
 {
-    asm
-    (
-        "pushfl;"
-        "pop %%eax;"
-        "and $0xffff8fff,%%eax;"
-        "push %%eax;"
-        "popfl;"
-        :
-        :
-        :"%eax"
+    __asm__ volatile (
+        "pushfl\n"
+        "pop %%eax\n"
+        "and $0xffff8fff, %%eax\n"
+        "push %%eax\n"
+        "popfl\n"
+        : : : "eax"
     );
 }
…
 static void clean_AM_flag(void)
 {
-    asm
-    (
-        "mov %%cr0,%%eax;"
-        "and $0xFFFBFFFF,%%eax;"
-        "mov %%eax,%%cr0;"
-        :
-        :
-        :"%eax"
+    __asm__ volatile (
+        "mov %%cr0, %%eax\n"
+        "and $0xfffbffff, %%eax\n"
+        "mov %%eax, %%cr0\n"
+        : : : "eax"
     );
 }
…
 void pm_init(void)
 {
-    struct descriptor *gdt_p = (struct descriptor *) gdtr.base;
-    struct ptr_16_32 idtr;
+    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
+    ptr_16_32_t idtr;
 
     /*
…
     }
     else {
-        tss_p = (struct tss *) malloc(sizeof(struct tss), FRAME_ATOMIC);
+        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
         if (!tss_p)
             panic("could not allocate TSS\n");
…
 
     gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p);
-    gdt_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1);
+    gdt_setlimit(&gdt_p[TSS_DES], sizeof(tss_t) - 1);
 
     /*
…
 void set_tls_desc(__address tls)
 {
-    struct ptr_16_32 cpugdtr;
-    struct descriptor *gdt_p = (struct descriptor *) cpugdtr.base;
+    ptr_16_32_t cpugdtr;
+    descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
 
     gdtr_store(&cpugdtr);
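As a worked example of what gdt_setbase() and gdt_setlimit() do above: the 32-bit base is scattered across the descriptor's base_0_15/base_16_23/base_24_31 fields and the 20-bit limit across limit_0_15/limit_16_19 (only the first assignment of each function is visible in the hunks). A small self-contained sketch of that splitting; the struct here lists only the relevant bitfields, in an illustrative order, and is not the kernel's descriptor_t:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative subset of the segment-descriptor bitfields. */
    struct seg_fields {
        unsigned limit_0_15  : 16;
        unsigned base_0_15   : 16;
        unsigned base_16_23  : 8;
        unsigned limit_16_19 : 4;
        unsigned base_24_31  : 8;
    };

    int main(void)
    {
        struct seg_fields d = { 0 };
        uint32_t base = 0x12345678;
        uint32_t limit = 0x000fffff;    /* 20-bit limit */

        /* The same splitting gdt_setbase()/gdt_setlimit() perform in pm.c. */
        d.base_0_15   = base & 0xffff;
        d.base_16_23  = (base >> 16) & 0xff;
        d.base_24_31  = (base >> 24) & 0xff;
        d.limit_0_15  = limit & 0xffff;
        d.limit_16_19 = (limit >> 16) & 0xf;

        /* Prints 0x5678 0x34 0x12 and 0xffff 0xf. */
        printf("base:  %#x %#x %#x\n", (unsigned) d.base_0_15,
            (unsigned) d.base_16_23, (unsigned) d.base_24_31);
        printf("limit: %#x %#x\n", (unsigned) d.limit_0_15,
            (unsigned) d.limit_16_19);
        return 0;
    }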
arch/ia32/src/proc/scheduler.c
(r897ad60 → r39cea6a)

 #include <proc/scheduler.h>
 #include <cpu.h>
+#include <proc/task.h>
 #include <proc/thread.h>
 #include <arch.h>
…
 #include <arch/debugger.h>
 #include <arch/pm.h>
+#include <arch/asm.h>
 
+/** Perform ia32 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
+/** Perform ia32 specific tasks needed before the new thread is scheduled.
+ *
+ * THREAD is locked and interrupts are disabled.
+ */
 void before_thread_runs_arch(void)
 {
+    size_t iomap_size;
+    ptr_16_32_t cpugdtr;
+    descriptor_t *gdt_p;
+
     CPU->arch.tss->esp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
     CPU->arch.tss->ss0 = selector(KDATA_DES);
…
     /* Set up TLS in GS register */
     set_tls_desc(THREAD->arch.tls);
+
+    /*
+     * Switch the I/O Permission Bitmap, if necessary.
+     *
+     * First, copy the I/O Permission Bitmap.
+     * This needs to be changed so that the
+     * copying is avoided if the same task
+     * was already running and the iomap did
+     * not change.
+     */
+    spinlock_lock(&TASK->lock);
+    iomap_size = TASK->arch.iomap_size;
+    if (iomap_size) {
+        ASSERT(TASK->arch.iomap);
+        memcpy(CPU->arch.tss->iomap, TASK->arch.iomap, iomap_size);
+        CPU->arch.tss->iomap[iomap_size] = 0xff;    /* terminating byte */
+    }
+    spinlock_unlock(&TASK->lock);
+
+    /* Second, adjust TSS segment limit. */
+    gdtr_store(&cpugdtr);
+    gdt_p = (descriptor_t *) cpugdtr.base;
+    gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + iomap_size - 1);
+    gdtr_load(&cpugdtr);
 
 #ifdef CONFIG_DEBUG_AS_WATCHPOINT
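The copied bitmap follows the standard x86 I/O permission bitmap encoding: one bit per port, a clear bit grants access at CPL > IOPL, while a set bit, or any port whose bitmap byte lies beyond the TSS segment limit set above, makes IN/OUT raise #GP. A hedged sketch of how a per-task bitmap such as TASK->arch.iomap might be populated; the array and helper names are illustrative, not the kernel's API:

    #include <stdint.h>
    #include <string.h>

    /* 65536 ports, one bit each. */
    static uint8_t example_iomap[65536 / 8];

    /* Start from "every port denied". */
    static void iomap_deny_all(void)
    {
        memset(example_iomap, 0xff, sizeof(example_iomap));
    }

    /* Clear the bit for one port, granting user-level access to it. */
    static void iomap_allow_port(uint16_t port)
    {
        example_iomap[port / 8] &= (uint8_t) ~(1u << (port % 8));
    }

Only the first iomap_size bytes of such a bitmap would then be copied into the TSS, with the extra 0xff terminator byte appended, as the new before_thread_runs_arch() does above.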