Changeset dd0c8a0 in mainline for kernel/arch
- Timestamp:
- 2013-09-29T06:56:33Z (12 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- a9bd960d
- Parents:
- 3deb0155 (diff), 13be2583 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Location:
- kernel/arch
- Files:
-
- 55 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/abs32le/Makefile.inc
r3deb0155 rdd0c8a0 39 39 endif 40 40 41 ifeq ($(COMPILER),clang)42 CLANG_ARCH = i38643 endif44 45 41 BITS = 32 46 42 ENDIANESS = LE -
kernel/arch/abs32le/include/arch/mm/frame.h
r3deb0155 rdd0c8a0 39 39 #define FRAME_SIZE (1 << FRAME_WIDTH) 40 40 41 #define FRAME_LOWPRIO 0 42 41 43 #include <typedefs.h> 42 44 -
kernel/arch/abs32le/include/arch/mm/page.h
r3deb0155 rdd0c8a0 57 57 58 58 /* Page table sizes for each level. */ 59 #define PTL0_ SIZE_ARCH ONE_FRAME60 #define PTL1_ SIZE_ARCH 061 #define PTL2_ SIZE_ARCH 062 #define PTL3_ SIZE_ARCH ONE_FRAME59 #define PTL0_FRAMES_ARCH 1 60 #define PTL1_FRAMES_ARCH 1 61 #define PTL2_FRAMES_ARCH 1 62 #define PTL3_FRAMES_ARCH 1 63 63 64 64 /* Macros calculating indices for each level. */ -
kernel/arch/amd64/Makefile.inc
r3deb0155 rdd0c8a0 30 30 BFD_ARCH = i386:x86-64 31 31 BFD = binary 32 CLANG_ARCH = x86_6433 32 34 33 FPU_NO_CFLAGS = -mno-sse -mno-sse2 … … 36 35 GCC_CFLAGS += $(CMN1) 37 36 ICC_CFLAGS += $(CMN1) 37 CLANG_CFLAGS += $(CMN1) 38 38 39 39 BITS = 64 -
kernel/arch/amd64/include/arch/mm/frame.h
r3deb0155 rdd0c8a0 39 39 #define FRAME_SIZE (1 << FRAME_WIDTH) 40 40 41 #define FRAME_LOWPRIO 0x1000 42 41 43 #ifndef __ASM__ 42 44 -
kernel/arch/amd64/include/arch/mm/page.h
r3deb0155 rdd0c8a0 61 61 62 62 /* Page table sizes for each level. */ 63 #define PTL0_ SIZE_ARCH ONE_FRAME64 #define PTL1_ SIZE_ARCH ONE_FRAME65 #define PTL2_ SIZE_ARCH ONE_FRAME66 #define PTL3_ SIZE_ARCH ONE_FRAME63 #define PTL0_FRAMES_ARCH 1 64 #define PTL1_FRAMES_ARCH 1 65 #define PTL2_FRAMES_ARCH 1 66 #define PTL3_FRAMES_ARCH 1 67 67 68 68 /* Macros calculating indices into page tables in each level. */ -
kernel/arch/amd64/include/arch/pm.h
r3deb0155 rdd0c8a0 57 57 #ifdef CONFIG_FB 58 58 59 #define VESA_INIT_DES 860 59 #define VESA_INIT_SEGMENT 0x8000 60 #define VESA_INIT_CODE_DES 8 61 #define VESA_INIT_DATA_DES 9 61 62 62 63 #undef GDT_ITEMS 63 #define GDT_ITEMS 964 #define GDT_ITEMS 10 64 65 65 66 #endif /* CONFIG_FB */ -
kernel/arch/amd64/src/ddi/ddi.c
r3deb0155 rdd0c8a0 42 42 #include <errno.h> 43 43 #include <arch/cpu.h> 44 #include <cpu.h> 44 45 #include <arch.h> 45 46 #include <align.h> … … 58 59 int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size) 59 60 { 60 size_t bits = ioaddr + size;61 if ( bits > IO_PORTS)61 size_t elements = ioaddr + size; 62 if (elements > IO_PORTS) 62 63 return ENOENT; 63 64 64 if (task->arch.iomap. bits < bits) {65 if (task->arch.iomap.elements < elements) { 65 66 /* 66 67 * The I/O permission bitmap is too small and needs to be grown. 67 68 */ 68 69 69 uint8_t *newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);70 if (! newmap)70 void *store = malloc(bitmap_size(elements), FRAME_ATOMIC); 71 if (!store) 71 72 return ENOMEM; 72 73 73 74 bitmap_t oldiomap; 74 bitmap_initialize(&oldiomap, task->arch.iomap. map,75 bitmap_initialize(&oldiomap, task->arch.iomap.elements, 75 76 task->arch.iomap.bits); 76 bitmap_initialize(&task->arch.iomap, newmap, bits); 77 78 bitmap_initialize(&task->arch.iomap, elements, store); 77 79 78 80 /* 79 81 * Mark the new range inaccessible. 80 82 */ 81 bitmap_set_range(&task->arch.iomap, oldiomap. bits,82 bits - oldiomap.bits);83 bitmap_set_range(&task->arch.iomap, oldiomap.elements, 84 elements - oldiomap.elements); 83 85 84 86 /* … … 88 90 if (oldiomap.bits) { 89 91 bitmap_copy(&task->arch.iomap, &oldiomap, 90 oldiomap.bits); 91 free(oldiomap.map); 92 oldiomap.elements); 93 94 free(oldiomap.bits); 92 95 } 93 96 } … … 96 99 * Enable the range and we are done. 97 100 */ 98 bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, (size_t)size);101 bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, size); 99 102 100 103 /* … … 118 121 /* First, copy the I/O Permission Bitmap. 
*/ 119 122 irq_spinlock_lock(&TASK->lock, false); 123 120 124 size_t ver = TASK->arch.iomapver; 121 size_t bits = TASK->arch.iomap.bits; 122 if (bits) { 123 ASSERT(TASK->arch.iomap.map); 125 size_t elements = TASK->arch.iomap.elements; 126 127 if (elements > 0) { 128 ASSERT(TASK->arch.iomap.bits); 124 129 125 130 bitmap_t iomap; 126 bitmap_initialize(&iomap, CPU->arch.tss->iomap,127 TSS_IOMAP_SIZE * 8);128 bitmap_copy(&iomap, &TASK->arch.iomap, bits);131 bitmap_initialize(&iomap, TSS_IOMAP_SIZE * 8, 132 CPU->arch.tss->iomap); 133 bitmap_copy(&iomap, &TASK->arch.iomap, elements); 129 134 130 135 /* … … 132 137 * I/O access. 133 138 */ 134 bitmap_set_range(&iomap, bits, ALIGN_UP(bits, 8) - bits); 139 bitmap_set_range(&iomap, elements, 140 ALIGN_UP(elements, 8) - elements); 141 135 142 /* 136 143 * It is safe to set the trailing eight bits because of the 137 144 * extra convenience byte in TSS_IOMAP_SIZE. 138 145 */ 139 bitmap_set_range(&iomap, ALIGN_UP( bits, 8), 8);146 bitmap_set_range(&iomap, ALIGN_UP(elements, 8), 8); 140 147 } 148 141 149 irq_spinlock_unlock(&TASK->lock, false); 142 150 143 151 /* 144 152 * Second, adjust TSS segment limit. 145 * Take the extra ending byte wi ll all bits set into account.153 * Take the extra ending byte with all bits set into account. 146 154 */ 147 155 ptr_16_64_t cpugdtr; … … 149 157 150 158 descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base; 151 gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + BITS2BYTES(bits)); 159 size_t size = bitmap_size(elements); 160 gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + size); 152 161 gdtr_load(&cpugdtr); 153 162 -
kernel/arch/amd64/src/pm.c
r3deb0155 rdd0c8a0 112 112 /* VESA Init descriptor */ 113 113 #ifdef CONFIG_FB 114 { 115 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_CODE | DPL_KERNEL, 116 0xf, 0, 0, 0, 0, 0 117 } 114 { 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_CODE | AR_READABLE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 }, 115 { 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 } 118 116 #endif 119 117 }; -
kernel/arch/amd64/src/proc/task.c
r3deb0155 rdd0c8a0 34 34 35 35 #include <proc/task.h> 36 #include <typedefs.h> 37 #include <adt/bitmap.h> 36 38 #include <mm/slab.h> 37 #include <typedefs.h>38 39 39 40 /** Perform amd64 specific task initialization. … … 45 46 { 46 47 task->arch.iomapver = 0; 47 bitmap_initialize(&task->arch.iomap, NULL, 0);48 bitmap_initialize(&task->arch.iomap, 0, NULL); 48 49 } 49 50 … … 55 56 void task_destroy_arch(task_t *task) 56 57 { 57 if (task->arch.iomap. map)58 free(task->arch.iomap. map);58 if (task->arch.iomap.bits != NULL) 59 free(task->arch.iomap.bits); 59 60 } 60 61 -
kernel/arch/arm32/include/arch/asm.h
r3deb0155 rdd0c8a0 38 38 39 39 #include <typedefs.h> 40 #include <arch/cp15.h> 40 41 #include <arch/stack.h> 41 42 #include <config.h> … … 51 52 * chapter 2.3.8 p.2-22 (52 in the PDF) 52 53 * 53 * @note Although mcr p15, 0, R0, c7, c0, 4 is defined in ARM Architecture54 * reference manual for armv4/5 CP15 implementation is mandatory only for55 * armv6+.54 * @note Although CP15WFI (mcr p15, 0, R0, c7, c0, 4) is defined in ARM 55 * Architecture reference manual for armv4/5, CP15 implementation is mandatory 56 * only for armv6+. 56 57 */ 57 58 NO_TRACE static inline void cpu_sleep(void) … … 60 61 asm volatile ( "wfe" ); 61 62 #elif defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_arm926ej_s) | defined(PROCESSOR_arm920t) 62 asm volatile ( "mcr p15, 0, R0, c7, c0, 4");63 WFI_write(0); 63 64 #endif 64 65 } -
kernel/arch/arm32/include/arch/cp15.h
r3deb0155 rdd0c8a0 171 171 CCSIDR_LINESIZE_MASK = 0x7, 172 172 CCSIDR_LINESIZE_SHIFT = 0, 173 #define CCSIDR_SETS(val) \ 174 (((val >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK) + 1) 175 #define CCSIDR_WAYS(val) \ 176 (((val >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK) + 1) 177 /* The register value is log(linesize_in_words) - 2 */ 178 #define CCSIDR_LINESIZE_LOG(val) \ 179 (((val >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK) + 2 + 2) 173 180 }; 174 181 CONTROL_REG_GEN_READ(CCSIDR, c0, 1, c0, 0); … … 187 194 CLIDR_UNI_CACHE = 0x4, 188 195 CLIDR_CACHE_MASK = 0x7, 189 #define CLIDR_CACHE(level, val) ((val >> (level - 1) * 3) & CLIDR_CACHE_MASK) 196 /** levels counted from 0 */ 197 #define CLIDR_CACHE(level, val) ((val >> (level * 3)) & CLIDR_CACHE_MASK) 190 198 }; 191 199 CONTROL_REG_GEN_READ(CLIDR, c0, 1, c0, 1); … … 294 302 295 303 /* Memory protection and control registers */ 304 enum { 305 TTBR_ADDR_MASK = 0xffffff80, 306 TTBR_NOS_FLAG = 1 << 5, 307 TTBR_RGN_MASK = 0x3 << 3, 308 TTBR_RGN_NO_CACHE = 0x0 << 3, 309 TTBR_RGN_WBWA_CACHE = 0x1 << 3, 310 TTBR_RGN_WT_CACHE = 0x2 << 3, 311 TTBR_RGN_WB_CACHE = 0x3 << 3, 312 TTBR_S_FLAG = 1 << 1, 313 TTBR_C_FLAG = 1 << 0, 314 }; 296 315 CONTROL_REG_GEN_READ(TTBR0, c2, 0, c0, 0); 297 316 CONTROL_REG_GEN_WRITE(TTBR0, c2, 0, c0, 0); … … 364 383 365 384 CONTROL_REG_GEN_WRITE(DCIMVAC, c7, 0, c6, 1); 366 CONTROL_REG_GEN_WRITE(DCI MSW, c7, 0, c6, 2);385 CONTROL_REG_GEN_WRITE(DCISW, c7, 0, c6, 2); 367 386 368 387 CONTROL_REG_GEN_WRITE(ATS1CPR, c7, 0, c8, 0); … … 370 389 CONTROL_REG_GEN_WRITE(ATS1CUR, c7, 0, c8, 2); 371 390 CONTROL_REG_GEN_WRITE(ATS1CUW, c7, 0, c8, 3); 372 CONTROL_REG_GEN_WRITE(ATS1 NSOPR, c7, 0, c8, 4);373 CONTROL_REG_GEN_WRITE(ATS1 NSOPW, c7, 0, c8, 5);374 CONTROL_REG_GEN_WRITE(ATS1 NSOUR, c7, 0, c8, 6);375 CONTROL_REG_GEN_WRITE(ATS1 NSOUW, c7, 0, c8, 7);391 CONTROL_REG_GEN_WRITE(ATS12NSOPR, c7, 0, c8, 4); 392 CONTROL_REG_GEN_WRITE(ATS12NSOPW, c7, 0, c8, 5); 393 CONTROL_REG_GEN_WRITE(ATS12NSOUR, c7, 0, c8, 
6); 394 CONTROL_REG_GEN_WRITE(ATS12NSOUW, c7, 0, c8, 7); 376 395 377 396 -
kernel/arch/arm32/include/arch/mm/frame.h
r3deb0155 rdd0c8a0 39 39 #define FRAME_WIDTH 12 /* 4KB frames */ 40 40 #define FRAME_SIZE (1 << FRAME_WIDTH) 41 42 #define FRAME_LOWPRIO 0 41 43 42 44 #ifndef __ASM__ -
kernel/arch/arm32/include/arch/mm/page.h
r3deb0155 rdd0c8a0 41 41 #include <arch/exception.h> 42 42 #include <arch/barrier.h> 43 #include <arch/cp15.h> 43 44 #include <trace.h> 44 45 … … 72 73 73 74 /* Page table sizes for each level. */ 74 #define PTL0_ SIZE_ARCH FOUR_FRAMES75 #define PTL1_ SIZE_ARCH 076 #define PTL2_ SIZE_ARCH 077 #define PTL3_ SIZE_ARCH ONE_FRAME75 #define PTL0_FRAMES_ARCH 4 76 #define PTL1_FRAMES_ARCH 1 77 #define PTL2_FRAMES_ARCH 1 78 #define PTL3_FRAMES_ARCH 1 78 79 79 80 /* Macros calculating indices into page tables for each level. */ … … 95 96 /* Set PTE address accessors for each level. */ 96 97 #define SET_PTL0_ADDRESS_ARCH(ptl0) \ 97 (set_ptl0_addr((pte_t *) (ptl0)))98 set_ptl0_addr((pte_t *) (ptl0)) 98 99 #define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \ 99 (((pte_t *) (ptl0))[(i)].l0.coarse_table_addr = (a) >> 10)100 set_ptl1_addr((pte_t*) (ptl0), i, a) 100 101 #define SET_PTL2_ADDRESS_ARCH(ptl1, i, a) 101 102 #define SET_PTL3_ADDRESS_ARCH(ptl2, i, a) 102 103 #define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \ 103 (((pte_t *) (ptl3))[(i)].l1.frame_base_addr = (a) >> 12)104 set_ptl3_addr((pte_t*) (ptl3), i, a) 104 105 105 106 /* Get PTE flags accessors for each level. */ … … 129 130 set_pt_level1_present((pte_t *) (ptl3), (size_t) (i)) 130 131 132 133 #define pt_coherence(page) pt_coherence_m(page, 1) 134 131 135 #if defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_ARCH_armv7_a) 132 136 #include "page_armv6.h" … … 137 141 #endif 138 142 143 /** Sets the address of level 0 page table. 144 * 145 * @param pt Pointer to the page table to set. 146 * 147 * Page tables are always in cacheable memory. 
148 * Make sure the memory type is correct, and in sync with: 149 * init_boot_pt (boot/arch/arm32/src/mm.c) 150 * init_ptl0_section (boot/arch/arm32/src/mm.c) 151 * set_pt_level1_flags (kernel/arch/arm32/include/arch/mm/page_armv6.h) 152 */ 153 NO_TRACE static inline void set_ptl0_addr(pte_t *pt) 154 { 155 uint32_t val = (uint32_t)pt & TTBR_ADDR_MASK; 156 val |= TTBR_RGN_WBWA_CACHE | TTBR_C_FLAG; 157 TTBR0_write(val); 158 } 159 160 NO_TRACE static inline void set_ptl1_addr(pte_t *pt, size_t i, uintptr_t address) 161 { 162 pt[i].l0.coarse_table_addr = address >> 10; 163 pt_coherence(&pt[i].l0); 164 } 165 166 NO_TRACE static inline void set_ptl3_addr(pte_t *pt, size_t i, uintptr_t address) 167 { 168 pt[i].l1.frame_base_addr = address >> 12; 169 pt_coherence(&pt[i].l1); 170 } 171 139 172 #endif 140 173 -
kernel/arch/arm32/include/arch/mm/page_armv4.h
r3deb0155 rdd0c8a0 120 120 #define PTE_DESCRIPTOR_SMALL_PAGE 2 121 121 122 123 /** Sets the address of level 0 page table. 124 * 125 * @param pt Pointer to the page table to set. 126 * 127 */ 128 NO_TRACE static inline void set_ptl0_addr(pte_t *pt) 129 { 130 asm volatile ( 131 "mcr p15, 0, %[pt], c2, c0, 0\n" 132 :: [pt] "r" (pt) 133 ); 134 } 135 122 #define pt_coherence_m(pt, count) \ 123 do { \ 124 for (unsigned i = 0; i < count; ++i) \ 125 DCCMVAU_write((uintptr_t)(pt + i)); \ 126 read_barrier(); \ 127 } while (0) 136 128 137 129 /** Returns level 0 page table entry flags. … … 223 215 224 216 /* default access permission */ 225 p->access_permission_0 = p->access_permission_1 = 217 p->access_permission_0 = p->access_permission_1 = 226 218 p->access_permission_2 = p->access_permission_3 = 227 219 PTE_AP_USER_NO_KERNEL_RW; … … 229 221 if (flags & PAGE_USER) { 230 222 if (flags & PAGE_READ) { 231 p->access_permission_0 = p->access_permission_1 = 232 p->access_permission_2 = p->access_permission_3 = 223 p->access_permission_0 = p->access_permission_1 = 224 p->access_permission_2 = p->access_permission_3 = 233 225 PTE_AP_USER_RO_KERNEL_RW; 234 226 } 235 227 if (flags & PAGE_WRITE) { 236 p->access_permission_0 = p->access_permission_1 = 237 p->access_permission_2 = p->access_permission_3 = 238 PTE_AP_USER_RW_KERNEL_RW; 228 p->access_permission_0 = p->access_permission_1 = 229 p->access_permission_2 = p->access_permission_3 = 230 PTE_AP_USER_RW_KERNEL_RW; 239 231 } 240 232 } -
kernel/arch/arm32/include/arch/mm/page_armv6.h
r3deb0155 rdd0c8a0 40 40 #error "Do not include arch specific page.h directly use generic page.h instead" 41 41 #endif 42 42 43 43 44 /* Macros for querying the last-level PTE entries. */ … … 125 126 #define PTE_DESCRIPTOR_SMALL_PAGE_NX 3 126 127 127 /** Sets the address of level 0 page table. 128 * 129 * @param pt Pointer to the page table to set. 130 * 131 */ 132 NO_TRACE static inline void set_ptl0_addr(pte_t *pt) 133 { 134 asm volatile ( 135 "mcr p15, 0, %[pt], c2, c0, 0\n" 136 :: [pt] "r" (pt) 137 ); 138 } 128 129 /** 130 * For an ARMv7 implementation that does not include the Large Physical Address Extension, 131 * and in implementations of architecture versions before ARMv7, if the translation tables 132 * are held in Write-Back Cacheable memory, the caches must be cleaned to the point of 133 * unification after writing to the translation tables and before the DSB instruction. This 134 * ensures that the updated translation table are visible to a hardware translation table walk. 135 * 136 * Therefore, an example instruction sequence for writing a translation table entry, 137 * covering changes to the instruction 138 * or data mappings in a uniprocessor system is: 139 * STR rx, [Translation table entry] 140 * ; write new entry to the translation table 141 * Clean cache line [Translation table entry] : This operation is not required with the 142 * ; Multiprocessing Extensions. 143 * DSB 144 * ; ensures visibility of the data cleaned from the D Cache 145 * Invalidate TLB entry by MVA (and ASID if non-global) [page address] 146 * Invalidate BTC 147 * DSB 148 * ; ensure completion of the Invalidate TLB operation 149 * ISB 150 * ; ensure table changes visible to instruction fetch 151 * 152 * ARM Architecture reference chp. B3.10.1 p. 
B3-1375 153 * @note: see TTRB0/1 for pt memory type 154 */ 155 #define pt_coherence_m(pt, count) \ 156 do { \ 157 for (unsigned i = 0; i < count; ++i) \ 158 DCCMVAU_write((uintptr_t)(pt + i)); \ 159 read_barrier(); \ 160 } while (0) 139 161 140 162 … … 206 228 p->ns = 0; 207 229 } 230 pt_coherence(p); 208 231 } 209 232 … … 232 255 p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE_NX; 233 256 } 234 235 /* tex=0 buf=1 and cache=1 => normal memory 236 * tex=0 buf=1 and cache=0 => shareable device mmio 237 */ 238 p->cacheable = (flags & PAGE_CACHEABLE); 239 p->bufferable = 1; 240 p->tex = 0; 257 258 if (flags & PAGE_CACHEABLE) { 259 /* 260 * Write-through, no write-allocate memory, see ch. B3.8.2 261 * (p. B3-1358) of ARM Architecture reference manual. 262 * Make sure the memory type is correct, and in sync with: 263 * init_boot_pt (boot/arch/arm32/src/mm.c) 264 * init_ptl0_section (boot/arch/arm32/src/mm.c) 265 * set_ptl0_addr (kernel/arch/arm32/include/arch/mm/page.h) 266 */ 267 p->tex = 5; 268 p->cacheable = 0; 269 p->bufferable = 1; 270 } else { 271 /* 272 * Shareable device memory, see ch. B3.8.2 (p. B3-1358) of 273 * ARM Architecture reference manual. 274 */ 275 p->tex = 0; 276 p->cacheable = 0; 277 p->bufferable = 1; 278 } 241 279 242 280 /* Shareable is ignored for devices (non-cacheable), 243 * turn it o nfor normal memory. */244 p->shareable = 1;281 * turn it off for normal memory. */ 282 p->shareable = 0; 245 283 246 284 p->non_global = !(flags & PAGE_GLOBAL); … … 256 294 p->access_permission_1 = PTE_AP1_RO; 257 295 } 296 pt_coherence(p); 258 297 } 259 298 … … 264 303 p->should_be_zero_0 = 0; 265 304 p->should_be_zero_1 = 0; 266 write_barrier();267 305 p->descriptor_type = PTE_DESCRIPTOR_COARSE_TABLE; 306 pt_coherence(p); 268 307 } 269 308 … … 273 312 274 313 p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE; 314 pt_coherence(p); 275 315 } 276 316 -
kernel/arch/arm32/src/cpu/cpu.c
r3deb0155 rdd0c8a0 157 157 #endif 158 158 #ifdef PROCESSOR_ARCH_armv7_a 159 /* ICache coherency is elaborate on in barrier.h.159 /* ICache coherency is elaborated on in barrier.h. 160 160 * VIPT and PIPT caches need maintenance only on code modify, 161 161 * so it should be safe for general use. … … 166 166 control_reg |= 167 167 SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG; 168 } else { 169 control_reg &= 170 ~(SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG); 168 171 } 169 172 #endif … … 204 207 #ifdef PROCESSOR_ARCH_armv7_a 205 208 CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT); 206 const unsigned ls_log = 2 + 207 ((CCSIDR_read() >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK); 208 return ls_log + 2; //return log2(bytes) 209 const uint32_t ccsidr = CCSIDR_read(); 210 return CCSIDR_LINESIZE_LOG(ccsidr); 209 211 #endif 210 212 return 0; … … 217 219 #ifdef PROCESSOR_ARCH_armv7_a 218 220 CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT); 219 const unsigned ways = 1 + 220 ((CCSIDR_read() >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK); 221 return ways; 221 const uint32_t ccsidr = CCSIDR_read(); 222 return CCSIDR_WAYS(ccsidr); 222 223 #endif 223 224 return 0; … … 229 230 #ifdef PROCESSOR_ARCH_armv7_a 230 231 CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT); 231 const unsigned sets = 1 + 232 ((CCSIDR_read() >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK); 233 return sets; 232 const uint32_t ccsidr = CCSIDR_read(); 233 return CCSIDR_SETS(ccsidr); 234 234 #endif 235 235 return 0; … … 241 241 #ifdef PROCESSOR_ARCH_armv7_a 242 242 const uint32_t val = CLIDR_read(); 243 for (unsigned i = 1; i <= 7; ++i) {243 for (unsigned i = 0; i < 8; ++i) { 244 244 const unsigned ctype = CLIDR_CACHE(i, val); 245 245 switch (ctype) { … … 280 280 const unsigned ways = dcache_ways(i); 281 281 const unsigned sets = dcache_sets(i); 282 const unsigned way_shift = 31- log2(ways);282 const unsigned way_shift = 32 - log2(ways); 283 283 
const unsigned set_shift = dcache_linesize_log(i); 284 284 dcache_clean_manual(i, false, ways, sets, way_shift, set_shift); … … 293 293 const unsigned ways = dcache_ways(i); 294 294 const unsigned sets = dcache_sets(i); 295 const unsigned way_shift = 31- log2(ways);295 const unsigned way_shift = 32 - log2(ways); 296 296 const unsigned set_shift = dcache_linesize_log(i); 297 297 dcache_clean_manual(i, true, ways, sets, way_shift, set_shift); -
kernel/arch/arm32/src/mach/beagleboardxm/beagleboardxm.c
r3deb0155 rdd0c8a0 60 60 61 61 static struct beagleboard { 62 amdm37x_irc_regs_t *irc_addr;63 amdm37x_uart_t uart;62 omap_irc_regs_t *irc_addr; 63 omap_uart_t uart; 64 64 amdm37x_gpt_t timer; 65 65 } beagleboard; … … 85 85 static void bb_timer_irq_handler(irq_t *irq) 86 86 { 87 amdm37x_gpt_irq_ack(&beagleboard.timer); 88 87 89 /* 88 90 * We are holding a lock which prevents preemption. 89 91 * Release the lock, call clock() and reacquire the lock again. 90 92 */ 91 amdm37x_gpt_irq_ack(&beagleboard.timer);92 93 spinlock_unlock(&irq->lock); 93 94 clock(); … … 102 103 PAGE_NOT_CACHEABLE); 103 104 ASSERT(beagleboard.irc_addr); 104 amdm37x_irc_init(beagleboard.irc_addr);105 omap_irc_init(beagleboard.irc_addr); 105 106 106 107 /* Initialize timer. Use timer1, because it is in WKUP power domain … … 122 123 123 124 /* Enable timer interrupt */ 124 amdm37x_irc_enable(beagleboard.irc_addr, AMDM37x_GPT1_IRQ);125 omap_irc_enable(beagleboard.irc_addr, AMDM37x_GPT1_IRQ); 125 126 126 127 /* Start timer here */ … … 146 147 static void bbxm_irq_exception(unsigned int exc_no, istate_t *istate) 147 148 { 148 const unsigned inum = amdm37x_irc_inum_get(beagleboard.irc_addr); 149 amdm37x_irc_irq_ack(beagleboard.irc_addr); 149 const unsigned inum = omap_irc_inum_get(beagleboard.irc_addr); 150 150 151 151 irq_t *irq = irq_dispatch_and_lock(inum); … … 159 159 CPU->id, inum); 160 160 } 161 /** amdm37x manual ch. 12.5.2 (p. 2428) places irc ack at the end 162 * of ISR. DO this to avoid strange behavior. 
*/ 163 omap_irc_irq_ack(beagleboard.irc_addr); 161 164 } 162 165 … … 167 170 static void bbxm_output_init(void) 168 171 { 172 #ifdef CONFIG_OMAP_UART 169 173 /* UART3 is wired to external RS232 connector */ 170 const bool ok = amdm37x_uart_init(&beagleboard.uart,174 const bool ok = omap_uart_init(&beagleboard.uart, 171 175 AMDM37x_UART3_IRQ, AMDM37x_UART3_BASE_ADDRESS, AMDM37x_UART3_SIZE); 172 176 if (ok) { 173 177 stdout_wire(&beagleboard.uart.outdev); 174 178 } 179 #endif 175 180 } 176 181 177 182 static void bbxm_input_init(void) 178 183 { 184 #ifdef CONFIG_OMAP_UART 179 185 srln_instance_t *srln_instance = srln_init(); 180 186 if (srln_instance) { 181 187 indev_t *sink = stdin_wire(); 182 188 indev_t *srln = srln_wire(srln_instance, sink); 183 amdm37x_uart_input_wire(&beagleboard.uart, srln);184 amdm37x_irc_enable(beagleboard.irc_addr, AMDM37x_UART3_IRQ);189 omap_uart_input_wire(&beagleboard.uart, srln); 190 omap_irc_enable(beagleboard.irc_addr, AMDM37x_UART3_IRQ); 185 191 } 192 #endif 186 193 } 187 194 -
kernel/arch/arm32/src/mach/beaglebone/beaglebone.c
r3deb0155 rdd0c8a0 63 63 64 64 static struct beaglebone { 65 am335x_irc_regs_t *irc_addr;65 omap_irc_regs_t *irc_addr; 66 66 am335x_cm_per_regs_t *cm_per_addr; 67 67 am335x_cm_dpll_regs_t *cm_dpll_addr; 68 68 am335x_ctrl_module_t *ctrl_module; 69 69 am335x_timer_t timer; 70 am335x_uart_t uart;70 omap_uart_t uart; 71 71 } bbone; 72 72 … … 104 104 105 105 /* Initialize the interrupt controller */ 106 am335x_irc_init(bbone.irc_addr);106 omap_irc_init(bbone.irc_addr); 107 107 } 108 108 … … 153 153 } 154 154 /* Enable the interrupt */ 155 am335x_irc_enable(bbone.irc_addr, AM335x_DMTIMER2_IRQ);155 omap_irc_enable(bbone.irc_addr, AM335x_DMTIMER2_IRQ); 156 156 /* Start the timer */ 157 157 am335x_timer_start(&bbone.timer); … … 176 176 static void bbone_irq_exception(unsigned int exc_no, istate_t *istate) 177 177 { 178 const unsigned inum = am335x_irc_inum_get(bbone.irc_addr); 179 am335x_irc_irq_ack(bbone.irc_addr); 178 const unsigned inum = omap_irc_inum_get(bbone.irc_addr); 180 179 181 180 irq_t *irq = irq_dispatch_and_lock(inum); … … 187 186 printf("Spurious interrupt\n"); 188 187 } 188 189 omap_irc_irq_ack(bbone.irc_addr); 189 190 } 190 191 … … 195 196 static void bbone_output_init(void) 196 197 { 197 const bool ok = am335x_uart_init(&bbone.uart, 198 #ifdef CONFIG_OMAP_UART 199 const bool ok = omap_uart_init(&bbone.uart, 198 200 AM335x_UART0_IRQ, AM335x_UART0_BASE_ADDRESS, 199 201 AM335x_UART0_SIZE); … … 201 203 if (ok) 202 204 stdout_wire(&bbone.uart.outdev); 205 #endif 203 206 } 204 207 205 208 static void bbone_input_init(void) 206 209 { 210 #ifdef CONFIG_OMAP_UART 207 211 srln_instance_t *srln_instance = srln_init(); 208 212 if (srln_instance) { 209 213 indev_t *sink = stdin_wire(); 210 214 indev_t *srln = srln_wire(srln_instance, sink); 211 am335x_uart_input_wire(&bbone.uart, srln);212 am335x_irc_enable(bbone.irc_addr, AM335x_UART0_IRQ);215 omap_uart_input_wire(&bbone.uart, srln); 216 omap_irc_enable(bbone.irc_addr, AM335x_UART0_IRQ); 213 217 } 218 #endif 214 219 } 
215 220 -
kernel/arch/arm32/src/mm/frame.c
r3deb0155 rdd0c8a0 88 88 void boot_page_table_free(void) 89 89 { 90 unsigned int i; 91 for (i = 0; i < BOOT_PAGE_TABLE_SIZE_IN_FRAMES; i++) 92 frame_free(i * FRAME_SIZE + BOOT_PAGE_TABLE_ADDRESS); 90 frame_free(BOOT_PAGE_TABLE_ADDRESS, 91 BOOT_PAGE_TABLE_SIZE_IN_FRAMES); 93 92 } 94 93 -
kernel/arch/arm32/src/mm/page.c
r3deb0155 rdd0c8a0 73 73 #ifdef HIGH_EXCEPTION_VECTORS 74 74 /* Create mapping for exception table at high offset */ 75 uintptr_t ev_frame = (uintptr_t) frame_alloc(ONE_FRAME, FRAME_NONE);75 uintptr_t ev_frame = frame_alloc(1, FRAME_NONE, 0); 76 76 page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, ev_frame, flags); 77 77 #else -
kernel/arch/arm32/src/mm/tlb.c
r3deb0155 rdd0c8a0 37 37 #include <arch/mm/asid.h> 38 38 #include <arch/asm.h> 39 #include <arch/cp15.h> 39 40 #include <typedefs.h> 40 41 #include <arch/mm/page.h> 42 #include <arch/cache.h> 41 43 42 44 /** Invalidate all entries in TLB. … … 46 48 void tlb_invalidate_all(void) 47 49 { 48 asm volatile ( 49 "eor r1, r1\n" 50 "mcr p15, 0, r1, c8, c7, 0\n" 51 ::: "r1" 52 ); 50 TLBIALL_write(0); 51 /* 52 * "A TLB maintenance operation is only guaranteed to be complete after 53 * the execution of a DSB instruction." 54 * "An ISB instruction, or a return from an exception, causes the 55 * effect of all completed TLB maintenance operations that appear in 56 * program order before the ISB or return from exception to be visible 57 * to all subsequent instructions, including the instruction fetches 58 * for those instructions." 59 * ARM Architecture reference Manual ch. B3.10.1 p. B3-1374 B3-1375 60 */ 61 read_barrier(); 62 inst_barrier(); 53 63 } 54 64 … … 60 70 { 61 71 tlb_invalidate_all(); 72 // TODO: why not TLBIASID_write(asid) ? 62 73 } 63 74 … … 65 76 * 66 77 * @param page Virtual adress of the page 67 */ 78 */ 68 79 static inline void invalidate_page(uintptr_t page) 69 80 { 70 asm volatile ( 71 "mcr p15, 0, %[page], c8, c7, 1\n" 72 :: [page] "r" (page) 73 ); 81 //TODO: What about TLBIMVAA? 82 TLBIMVA_write(page); 83 /* 84 * "A TLB maintenance operation is only guaranteed to be complete after 85 * the execution of a DSB instruction." 86 * "An ISB instruction, or a return from an exception, causes the 87 * effect of all completed TLB maintenance operations that appear in 88 * program order before the ISB or return from exception to be visible 89 * to all subsequent instructions, including the instruction fetches 90 * for those instructions." 91 * ARM Architecture reference Manual ch. B3.10.1 p. 
B3-1374 B3-1375 92 */ 93 read_barrier(); 94 inst_barrier(); 74 95 } 75 96 … … 83 104 void tlb_invalidate_pages(asid_t asid __attribute__((unused)), uintptr_t page, size_t cnt) 84 105 { 85 unsigned int i; 86 87 for (i = 0; i < cnt; i++) 106 for (unsigned i = 0; i < cnt; i++) 88 107 invalidate_page(page + i * PAGE_SIZE); 89 108 } -
kernel/arch/arm32/src/ras.c
r3deb0155 rdd0c8a0 51 51 void ras_init(void) 52 52 { 53 uintptr_t frame; 54 55 frame = (uintptr_t) frame_alloc(ONE_FRAME, 56 FRAME_ATOMIC | FRAME_HIGHMEM); 53 uintptr_t frame = 54 frame_alloc(1, FRAME_ATOMIC | FRAME_HIGHMEM, 0); 57 55 if (!frame) 58 frame = (uintptr_t) frame_alloc(ONE_FRAME, FRAME_LOWMEM); 56 frame = frame_alloc(1, FRAME_LOWMEM, 0); 57 59 58 ras_page = (uintptr_t *) km_map(frame, 60 59 PAGE_SIZE, PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_CACHEABLE); 61 60 62 61 memsetb(ras_page, PAGE_SIZE, 0); 63 62 ras_page[RAS_START] = 0; -
kernel/arch/ia32/Makefile.inc
r3deb0155 rdd0c8a0 30 30 BFD_ARCH = i386 31 31 BFD = binary 32 CLANG_ARCH = i38633 32 34 33 BITS = 32 -
kernel/arch/ia32/include/arch/cpu.h
r3deb0155 rdd0c8a0 41 41 #define EFLAGS_RF (1 << 16) 42 42 43 #define CR4_OSFXSR_MASK (1<<9) 43 #define CR4_OSFXSR_MASK (1 << 9) 44 #define CR4_OSXMMEXCPT_MASK (1 << 10) 44 45 45 46 /* Support for SYSENTER and SYSEXIT */ … … 59 60 unsigned int model; 60 61 unsigned int stepping; 61 cpuid_feature_info fi;62 62 cpuid_feature_info_t fi; 63 63 64 tss_t *tss; 64 65 -
kernel/arch/ia32/include/arch/cpuid.h
r3deb0155 rdd0c8a0 50 50 uint32_t cpuid_ecx; 51 51 uint32_t cpuid_edx; 52 } __attribute__ 52 } __attribute__((packed)) cpu_info_t; 53 53 54 struct __cpuid_extended_feature_info {55 unsigned sse3 :1;56 unsigned 57 } __attribute__ 54 struct cpuid_extended_feature_info { 55 unsigned int sse3 : 1; 56 unsigned int : 31; 57 } __attribute__((packed)); 58 58 59 typedef union cpuid_extended_feature_info{60 struct __cpuid_extended_feature_info bits;59 typedef union { 60 struct cpuid_extended_feature_info bits; 61 61 uint32_t word; 62 } cpuid_extended_feature_info ;62 } cpuid_extended_feature_info_t; 63 63 64 struct __cpuid_feature_info {65 unsigned 66 unsigned sep :1;67 unsigned 68 unsigned mmx :1;69 unsigned fxsr :1;70 unsigned sse :1;71 unsigned sse2 :1;72 unsigned :5;73 } __attribute__ 64 struct cpuid_feature_info { 65 unsigned int : 11; 66 unsigned int sep : 1; 67 unsigned int : 11; 68 unsigned int mmx : 1; 69 unsigned int fxsr : 1; 70 unsigned int sse : 1; 71 unsigned int sse2 : 1; 72 unsigned int : 5; 73 } __attribute__((packed)); 74 74 75 typedef union cpuid_feature_info{76 struct __cpuid_feature_info bits;75 typedef union { 76 struct cpuid_feature_info bits; 77 77 uint32_t word; 78 } cpuid_feature_info; 79 78 } cpuid_feature_info_t; 80 79 81 80 static inline uint32_t has_cpuid(void) 82 81 { 83 uint32_t val, ret; 82 uint32_t val; 83 uint32_t ret; 84 84 85 85 asm volatile ( -
kernel/arch/ia32/include/arch/mm/frame.h
r3deb0155 rdd0c8a0 39 39 #define FRAME_SIZE (1 << FRAME_WIDTH) 40 40 41 #define FRAME_LOWPRIO 0x1000 42 41 43 #ifndef __ASM__ 42 44 -
kernel/arch/ia32/include/arch/mm/page.h
r3deb0155 rdd0c8a0 66 66 67 67 /* Page table sizes for each level. */ 68 #define PTL0_ SIZE_ARCH ONE_FRAME69 #define PTL1_ SIZE_ARCH 070 #define PTL2_ SIZE_ARCH 071 #define PTL3_ SIZE_ARCH ONE_FRAME68 #define PTL0_FRAMES_ARCH 1 69 #define PTL1_FRAMES_ARCH 1 70 #define PTL2_FRAMES_ARCH 1 71 #define PTL3_FRAMES_ARCH 1 72 72 73 73 /* Macros calculating indices for each level. */ -
kernel/arch/ia32/include/arch/pm.h
r3deb0155 rdd0c8a0 50 50 51 51 #define VESA_INIT_SEGMENT 0x8000 52 #define VESA_INIT_DES 7 52 #define VESA_INIT_CODE_DES 7 53 #define VESA_INIT_DATA_DES 8 53 54 #define KTEXT32_DES KTEXT_DES 54 55 55 56 #undef GDT_ITEMS 56 #define GDT_ITEMS 857 #define GDT_ITEMS 9 57 58 58 59 #endif /* CONFIG_FB */ … … 67 68 #define AR_CODE (3 << 3) 68 69 #define AR_WRITABLE (1 << 1) 70 #define AR_READABLE (1 << 1) 69 71 #define AR_INTERRUPT (0xe) 70 72 #define AR_TRAP (0xf) -
kernel/arch/ia32/src/boot/vesa_real.inc
r3deb0155 rdd0c8a0 31 31 vesa_init: 32 32 lidtl vesa_idtr 33 jmp $GDT_SELECTOR(VESA_INIT_DES), $vesa_init_real - vesa_init 33 34 mov $GDT_SELECTOR(VESA_INIT_DATA_DES), %bx 35 36 mov %bx, %es 37 mov %bx, %fs 38 mov %bx, %gs 39 mov %bx, %ds 40 mov %bx, %ss 41 42 jmp $GDT_SELECTOR(VESA_INIT_CODE_DES), $vesa_init_real - vesa_init 34 43 35 44 vesa_idtr: … … 39 48 .code16 40 49 vesa_init_real: 41 42 50 mov %cr0, %eax 43 51 and $~1, %eax … … 45 53 46 54 jmp $VESA_INIT_SEGMENT, $vesa_init_real2 - vesa_init 47 55 48 56 vesa_init_real2: 49 57 mov $VESA_INIT_SEGMENT, %bx -
kernel/arch/ia32/src/cpu/cpu.c
r3deb0155 rdd0c8a0 115 115 "mov %[help], %%cr4\n" 116 116 : [help] "+r" (help) 117 : [mask] "i" (CR4_OSFXSR_MASK | (1 << 10))117 : [mask] "i" (CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK) 118 118 ); 119 119 } 120 120 121 121 #ifndef PROCESSOR_i486 122 122 if (CPU->arch.fi.bits.sep) { -
kernel/arch/ia32/src/ddi/ddi.c
r3deb0155 rdd0c8a0 59 59 int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size) 60 60 { 61 size_t bits = ioaddr + size;62 if ( bits > IO_PORTS)61 size_t elements = ioaddr + size; 62 if (elements > IO_PORTS) 63 63 return ENOENT; 64 64 65 if (task->arch.iomap. bits < bits) {65 if (task->arch.iomap.elements < elements) { 66 66 /* 67 67 * The I/O permission bitmap is too small and needs to be grown. 68 68 */ 69 69 70 uint8_t *newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);71 if (! newmap)70 void *store = malloc(bitmap_size(elements), FRAME_ATOMIC); 71 if (!store) 72 72 return ENOMEM; 73 73 74 74 bitmap_t oldiomap; 75 bitmap_initialize(&oldiomap, task->arch.iomap. map,75 bitmap_initialize(&oldiomap, task->arch.iomap.elements, 76 76 task->arch.iomap.bits); 77 bitmap_initialize(&task->arch.iomap, newmap, bits); 77 78 bitmap_initialize(&task->arch.iomap, elements, store); 78 79 79 80 /* 80 81 * Mark the new range inaccessible. 81 82 */ 82 bitmap_set_range(&task->arch.iomap, oldiomap. bits,83 bits - oldiomap.bits);83 bitmap_set_range(&task->arch.iomap, oldiomap.elements, 84 elements - oldiomap.elements); 84 85 85 86 /* … … 89 90 if (oldiomap.bits) { 90 91 bitmap_copy(&task->arch.iomap, &oldiomap, 91 oldiomap.bits); 92 free(oldiomap.map); 92 oldiomap.elements); 93 94 free(oldiomap.bits); 93 95 } 94 96 } … … 97 99 * Enable the range and we are done. 98 100 */ 99 bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, (size_t)size);101 bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, size); 100 102 101 103 /* … … 119 121 /* First, copy the I/O Permission Bitmap. 
*/ 120 122 irq_spinlock_lock(&TASK->lock, false); 123 121 124 size_t ver = TASK->arch.iomapver; 122 size_t bits = TASK->arch.iomap.bits; 123 if (bits) { 124 ASSERT(TASK->arch.iomap.map); 125 size_t elements = TASK->arch.iomap.elements; 126 127 if (elements > 0) { 128 ASSERT(TASK->arch.iomap.bits); 125 129 126 130 bitmap_t iomap; 127 bitmap_initialize(&iomap, CPU->arch.tss->iomap,128 TSS_IOMAP_SIZE * 8);129 bitmap_copy(&iomap, &TASK->arch.iomap, bits);131 bitmap_initialize(&iomap, TSS_IOMAP_SIZE * 8, 132 CPU->arch.tss->iomap); 133 bitmap_copy(&iomap, &TASK->arch.iomap, elements); 130 134 131 135 /* … … 133 137 * I/O access. 134 138 */ 135 bitmap_set_range(&iomap, bits, ALIGN_UP(bits, 8) - bits); 139 bitmap_set_range(&iomap, elements, 140 ALIGN_UP(elements, 8) - elements); 141 136 142 /* 137 143 * It is safe to set the trailing eight bits because of the 138 144 * extra convenience byte in TSS_IOMAP_SIZE. 139 145 */ 140 bitmap_set_range(&iomap, ALIGN_UP( bits, 8), 8);146 bitmap_set_range(&iomap, ALIGN_UP(elements, 8), 8); 141 147 } 148 142 149 irq_spinlock_unlock(&TASK->lock, false); 143 150 … … 150 157 151 158 descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base; 152 gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + BITS2BYTES(bits)); 159 size_t size = bitmap_size(elements); 160 gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + size); 153 161 gdtr_load(&cpugdtr); 154 162 -
kernel/arch/ia32/src/fpu_context.c
r3deb0155 rdd0c8a0 27 27 */ 28 28 29 /** @addtogroup ia32 29 /** @addtogroup ia32 30 30 * @{ 31 31 */ … … 37 37 #include <arch.h> 38 38 #include <cpu.h> 39 40 39 41 40 /** x87 FPU scr values (P3+ MMX2) */ … … 60 59 X87_DENORMAL_EXC_FLAG = (1 << 1), 61 60 X87_INVALID_OP_EXC_FLAG = (1 << 0), 62 61 63 62 X87_ALL_MASK = X87_PRECISION_MASK | X87_UNDERFLOW_MASK | X87_OVERFLOW_MASK | X87_ZERO_DIV_MASK | X87_DENORMAL_OP_MASK | X87_INVALID_OP_MASK, 64 63 }; 65 64 66 67 65 typedef void (*fpu_context_function)(fpu_context_t *fctx); 68 66 69 static fpu_context_function fpu_save, fpu_restore; 67 static fpu_context_function fpu_save; 68 static fpu_context_function fpu_restore; 70 69 71 70 static void fpu_context_f_save(fpu_context_t *fctx) … … 104 103 void fpu_fxsr(void) 105 104 { 106 fpu_save =fpu_context_fx_save;107 fpu_restore =fpu_context_fx_restore;105 fpu_save = fpu_context_fx_save; 106 fpu_restore = fpu_context_fx_restore; 108 107 } 109 108 -
kernel/arch/ia32/src/pm.c
r3deb0155 rdd0c8a0 75 75 /* VESA Init descriptor */ 76 76 #ifdef CONFIG_FB 77 { 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 } 77 { 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_CODE | AR_READABLE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 }, 78 { 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 } 78 79 #endif 79 80 }; -
kernel/arch/ia32/src/proc/task.c
r3deb0155 rdd0c8a0 40 40 /** Perform ia32 specific task initialization. 41 41 * 42 * @param t Task to be initialized. 42 * @param task Task to be initialized. 43 * 43 44 */ 44 void task_create_arch(task_t *t )45 void task_create_arch(task_t *task) 45 46 { 46 t ->arch.iomapver = 0;47 bitmap_initialize(&t ->arch.iomap, NULL, 0);47 task->arch.iomapver = 0; 48 bitmap_initialize(&task->arch.iomap, 0, NULL); 48 49 } 49 50 50 51 /** Perform ia32 specific task destruction. 51 52 * 52 * @param t Task to be initialized. 53 * @param task Task to be initialized. 54 * 53 55 */ 54 void task_destroy_arch(task_t *t )56 void task_destroy_arch(task_t *task) 55 57 { 56 if (t ->arch.iomap.map)57 free(t ->arch.iomap.map);58 if (task->arch.iomap.bits != NULL) 59 free(task->arch.iomap.bits); 58 60 } 59 61 -
kernel/arch/ia64/include/arch/mm/frame.h
r3deb0155 rdd0c8a0 39 39 #define FRAME_SIZE (1 << FRAME_WIDTH) 40 40 41 #define FRAME_LOWPRIO 0 42 41 43 #ifndef __ASM__ 42 44 -
kernel/arch/ia64/src/ddi/ddi.c
r3deb0155 rdd0c8a0 1 1 /* 2 2 * Copyright (c) 2006 Jakub Jermar 3 * Copyright (c) 2008 Jakub vana3 * Copyright (c) 2008 Jakub Vana 4 4 * All rights reserved. 5 5 * … … 56 56 { 57 57 if (!task->arch.iomap) { 58 uint8_t *map;59 60 58 task->arch.iomap = malloc(sizeof(bitmap_t), 0); 61 map = malloc(BITS2BYTES(IO_MEMMAP_PAGES), 0); 62 if(!map) 59 if (task->arch.iomap == NULL) 63 60 return ENOMEM; 64 bitmap_initialize(task->arch.iomap, map, IO_MEMMAP_PAGES); 61 62 void *store = malloc(bitmap_size(IO_MEMMAP_PAGES), 0); 63 if (store == NULL) 64 return ENOMEM; 65 66 bitmap_initialize(task->arch.iomap, IO_MEMMAP_PAGES, store); 65 67 bitmap_clear_range(task->arch.iomap, 0, IO_MEMMAP_PAGES); 66 68 } … … 69 71 size = ALIGN_UP(size + ioaddr - 4 * iopage, PORTS_PER_PAGE); 70 72 bitmap_set_range(task->arch.iomap, iopage, size / 4); 71 73 72 74 return 0; 73 75 } -
kernel/arch/ia64/src/mm/vhpt.c
r3deb0155 rdd0c8a0 42 42 uintptr_t vhpt_set_up(void) 43 43 { 44 vhpt_base = frame_alloc(VHPT_WIDTH - FRAME_WIDTH,45 FRAME_KA | FRAME_ATOMIC);46 if (!vhpt_ base)44 uintptr_t vhpt_frame = 45 frame_alloc(SIZE2FRAMES(VHPT_SIZE), FRAME_ATOMIC, 0); 46 if (!vhpt_frame) 47 47 panic("Kernel configured with VHPT but no memory for table."); 48 49 vhpt_base = (vhpt_entry_t *) PA2KA(vhpt_frame); 48 50 vhpt_invalidate_all(); 49 51 return (uintptr_t) vhpt_base; … … 82 84 void vhpt_invalidate_all() 83 85 { 84 memsetb(vhpt_base, 1 << VHPT_WIDTH, 0);86 memsetb(vhpt_base, VHPT_SIZE, 0); 85 87 } 86 88 -
kernel/arch/mips32/include/arch/asm.h
r3deb0155 rdd0c8a0 42 42 NO_TRACE static inline void cpu_sleep(void) 43 43 { 44 /* 45 * Unfortunatelly most of the simulators do not support 46 * 47 * asm volatile ( 48 * "wait" 49 * ); 50 * 51 */ 44 asm volatile ("wait"); 52 45 } 53 46 -
kernel/arch/mips32/include/arch/mm/frame.h
r3deb0155 rdd0c8a0 39 39 #define FRAME_SIZE (1 << FRAME_WIDTH) 40 40 41 #define FRAME_LOWPRIO 0 42 41 43 #ifndef __ASM__ 42 44 -
kernel/arch/mips32/include/arch/mm/page.h
r3deb0155 rdd0c8a0 27 27 */ 28 28 29 /** @addtogroup mips32mm 29 /** @addtogroup mips32mm 30 30 * @{ 31 31 */ … … 70 70 * - PTL3 has 4096 entries (12 bits) 71 71 */ 72 72 73 73 /* Macros describing number of entries in each level. */ 74 #define PTL0_ENTRIES_ARCH 75 #define PTL1_ENTRIES_ARCH 76 #define PTL2_ENTRIES_ARCH 77 #define PTL3_ENTRIES_ARCH 74 #define PTL0_ENTRIES_ARCH 64 75 #define PTL1_ENTRIES_ARCH 0 76 #define PTL2_ENTRIES_ARCH 0 77 #define PTL3_ENTRIES_ARCH 4096 78 78 79 79 /* Macros describing size of page tables in each level. */ 80 #define PTL0_ SIZE_ARCH ONE_FRAME81 #define PTL1_ SIZE_ARCH 082 #define PTL2_ SIZE_ARCH 083 #define PTL3_ SIZE_ARCH ONE_FRAME80 #define PTL0_FRAMES_ARCH 1 81 #define PTL1_FRAMES_ARCH 1 82 #define PTL2_FRAMES_ARCH 1 83 #define PTL3_FRAMES_ARCH 1 84 84 85 85 /* Macros calculating entry indices for each level. */ 86 #define PTL0_INDEX_ARCH(vaddr) ((vaddr) >> 26)87 #define PTL1_INDEX_ARCH(vaddr) 88 #define PTL2_INDEX_ARCH(vaddr) 89 #define PTL3_INDEX_ARCH(vaddr) 86 #define PTL0_INDEX_ARCH(vaddr) ((vaddr) >> 26) 87 #define PTL1_INDEX_ARCH(vaddr) 0 88 #define PTL2_INDEX_ARCH(vaddr) 0 89 #define PTL3_INDEX_ARCH(vaddr) (((vaddr) >> 14) & 0xfff) 90 90 91 91 /* Set accessor for PTL0 address. */ 92 92 #define SET_PTL0_ADDRESS_ARCH(ptl0) 93 93 94 /* Get PTE address accessors for each level. */ 94 /* Get PTE address accessors for each level. */ 95 95 #define GET_PTL1_ADDRESS_ARCH(ptl0, i) \ 96 96 (((pte_t *) (ptl0))[(i)].pfn << 12) … … 196 196 p->p = 1; 197 197 } 198 199 198 200 199 extern void page_arch_init(void); -
kernel/arch/mips32/src/mach/malta/malta.c
r3deb0155 rdd0c8a0 103 103 void malta_input_init(void) 104 104 { 105 (void) stdin_wire(); 105 106 } 106 107 -
kernel/arch/mips32/src/mm/tlb.c
r3deb0155 rdd0c8a0 48 48 #include <symtab.h> 49 49 50 #define PFN_SHIFT 12 51 #define VPN_SHIFT 12 52 #define ADDR2VPN(a) ((a) >> VPN_SHIFT) 53 #define ADDR2VPN2(a) (ADDR2VPN((a)) >> 1) 54 #define VPN2ADDR(vpn) ((vpn) << VPN_SHIFT) 55 #define VPN22ADDR(vpn2) (VPN2ADDR(vpn2) << 1) 56 #define PFN2ADDR(pfn) ((pfn) << PFN_SHIFT) 57 58 #define BANK_SELECT_BIT(a) (((a) >> PAGE_WIDTH) & 1) 59 50 #define PFN_SHIFT 12 51 #define VPN_SHIFT 12 52 53 #define ADDR2HI_VPN(a) ((a) >> VPN_SHIFT) 54 #define ADDR2HI_VPN2(a) (ADDR2HI_VPN((a)) >> 1) 55 56 #define HI_VPN2ADDR(vpn) ((vpn) << VPN_SHIFT) 57 #define HI_VPN22ADDR(vpn2) (HI_VPN2ADDR(vpn2) << 1) 58 59 #define LO_PFN2ADDR(pfn) ((pfn) << PFN_SHIFT) 60 61 #define BANK_SELECT_BIT(a) (((a) >> PAGE_WIDTH) & 1) 60 62 61 63 /** Initialize TLB. … … 266 268 { 267 269 hi->value = 0; 268 hi->vpn2 = ADDR2 VPN2(ALIGN_DOWN(addr, PAGE_SIZE));270 hi->vpn2 = ADDR2HI_VPN2(ALIGN_DOWN(addr, PAGE_SIZE)); 269 271 hi->asid = asid; 270 272 } … … 295 297 296 298 printf("%-4u %-6u %0#10x %-#6x %1u%1u%1u%1u %0#10x\n", 297 i, hi.asid, VPN22ADDR(hi.vpn2), mask.mask,298 lo0.g, lo0.v, lo0.d, lo0.c, PFN2ADDR(lo0.pfn));299 i, hi.asid, HI_VPN22ADDR(hi.vpn2), mask.mask, 300 lo0.g, lo0.v, lo0.d, lo0.c, LO_PFN2ADDR(lo0.pfn)); 299 301 printf(" %1u%1u%1u%1u %0#10x\n", 300 lo1.g, lo1.v, lo1.d, lo1.c, PFN2ADDR(lo1.pfn));302 lo1.g, lo1.v, lo1.d, lo1.c, LO_PFN2ADDR(lo1.pfn)); 301 303 } 302 304 -
kernel/arch/mips64/include/arch/mm/frame.h
r3deb0155 rdd0c8a0 39 39 #define FRAME_SIZE (1 << FRAME_WIDTH) 40 40 41 #define FRAME_LOWPRIO 0 42 41 43 #ifndef __ASM__ 42 44 -
kernel/arch/ppc32/include/arch/mm/frame.h
r3deb0155 rdd0c8a0 39 39 #define FRAME_SIZE (1 << FRAME_WIDTH) 40 40 41 #define FRAME_LOWPRIO 0 42 41 43 #ifndef __ASM__ 42 44 -
kernel/arch/ppc32/include/arch/mm/page.h
r3deb0155 rdd0c8a0 70 70 71 71 /* Page table sizes for each level. */ 72 #define PTL0_ SIZE_ARCH ONE_FRAME73 #define PTL1_ SIZE_ARCH 074 #define PTL2_ SIZE_ARCH 075 #define PTL3_ SIZE_ARCH ONE_FRAME72 #define PTL0_FRAMES_ARCH 1 73 #define PTL1_FRAMES_ARCH 1 74 #define PTL2_FRAMES_ARCH 1 75 #define PTL3_FRAMES_ARCH 1 76 76 77 77 /* Macros calculating indices into page tables on each level. */ -
kernel/arch/sparc64/include/arch/mm/frame.h
r3deb0155 rdd0c8a0 46 46 #endif 47 47 48 #ifndef __ASM__ 49 50 #include <typedefs.h> 51 52 extern uintptr_t end_of_identity; 53 54 extern void frame_low_arch_init(void); 55 extern void frame_high_arch_init(void); 56 #define physmem_print() 57 58 #endif 59 48 60 #endif 49 61 -
kernel/arch/sparc64/include/arch/mm/sun4u/frame.h
r3deb0155 rdd0c8a0 27 27 */ 28 28 29 /** @addtogroup sparc64mm 29 /** @addtogroup sparc64mm 30 30 * @{ 31 31 */ … … 41 41 * Therefore, the kernel uses 8K only internally on the TLB and TSB levels. 42 42 */ 43 #define MMU_FRAME_WIDTH 13/* 8K */44 #define MMU_FRAME_SIZE 43 #define MMU_FRAME_WIDTH 13 /* 8K */ 44 #define MMU_FRAME_SIZE (1 << MMU_FRAME_WIDTH) 45 45 46 46 /* … … 49 49 * each 16K page with a pair of adjacent 8K pages. 50 50 */ 51 #define FRAME_WIDTH 14 /* 16K */ 52 #define FRAME_SIZE (1 << FRAME_WIDTH) 51 #define FRAME_WIDTH 14 /* 16K */ 52 #define FRAME_SIZE (1 << FRAME_WIDTH) 53 54 #define FRAME_LOWPRIO 0 53 55 54 56 #ifndef __ASM__ … … 72 74 typedef union frame_address frame_address_t; 73 75 74 extern uintptr_t end_of_identity;75 76 extern void frame_low_arch_init(void);77 extern void frame_high_arch_init(void);78 #define physmem_print()79 80 76 #endif 81 77 -
kernel/arch/sparc64/include/arch/mm/sun4v/frame.h
r3deb0155 rdd0c8a0 27 27 */ 28 28 29 /** @addtogroup sparc64mm 29 /** @addtogroup sparc64mm 30 30 * @{ 31 31 */ … … 36 36 #define KERN_sparc64_sun4v_FRAME_H_ 37 37 38 #define MMU_FRAME_WIDTH 13/* 8K */39 #define MMU_FRAME_SIZE 38 #define MMU_FRAME_WIDTH 13 /* 8K */ 39 #define MMU_FRAME_SIZE (1 << MMU_FRAME_WIDTH) 40 40 41 #define FRAME_WIDTH 42 #define FRAME_SIZE 41 #define FRAME_WIDTH 13 42 #define FRAME_SIZE (1 << FRAME_WIDTH) 43 43 44 #ifndef __ASM__ 45 46 #include <typedefs.h> 47 48 extern void frame_low_arch_init(void); 49 extern void frame_high_arch_init(void); 50 #define physmem_print() 51 52 #endif 44 #define FRAME_LOWPRIO 0 53 45 54 46 #endif -
kernel/arch/sparc64/include/arch/trap/sun4v/mmu.h
r3deb0155 rdd0c8a0 102 102 nop 103 103 104 /* exclude pages beyond the end of memory from the identity mapping */ 105 sethi %hi(end_of_identity), %g4 106 ldx [%g4 + %lo(end_of_identity)], %g4 107 cmp %g1, %g4 108 bgeu %xcc, 0f 109 nop 110 104 111 /* 105 112 * Installing the identity does not fit into 32 instructions, call -
kernel/arch/sparc64/src/mm/sun4u/as.c
r3deb0155 rdd0c8a0 63 63 { 64 64 #ifdef CONFIG_TSB 65 /* 66 * The order must be calculated with respect to the emulated 67 * 16K page size. 68 * 69 */ 70 uint8_t order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * 71 sizeof(tsb_entry_t)) >> FRAME_WIDTH); 72 73 uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA); 74 75 if (!tsb) 65 uintptr_t tsb_phys = 66 frame_alloc(SIZE2FRAMES((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * 67 sizeof(tsb_entry_t)), flags, 0); 68 if (!tsb_phys) 76 69 return -1; 77 70 78 as->arch.itsb = (tsb_entry_t *) tsb; 79 as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * 71 tsb_entry_t *tsb = (tsb_entry_t *) PA2KA(tsb_phys); 72 73 as->arch.itsb = tsb; 74 as->arch.dtsb = tsb + ITSB_ENTRY_COUNT; 75 76 memsetb(as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * 77 sizeof(tsb_entry_t), 0); 78 #endif 79 80 return 0; 81 } 82 83 int as_destructor_arch(as_t *as) 84 { 85 #ifdef CONFIG_TSB 86 size_t frames = SIZE2FRAMES((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * 80 87 sizeof(tsb_entry_t)); 81 82 memsetb(as->arch.itsb, 83 (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0); 84 #endif 85 86 return 0; 87 } 88 89 int as_destructor_arch(as_t *as) 90 { 91 #ifdef CONFIG_TSB 92 /* 93 * The count must be calculated with respect to the emualted 16K page 94 * size. 95 */ 96 size_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * 97 sizeof(tsb_entry_t)) >> FRAME_WIDTH; 98 frame_free(KA2PA((uintptr_t) as->arch.itsb)); 99 100 return cnt; 88 frame_free(KA2PA((uintptr_t) as->arch.itsb), frames); 89 90 return frames; 101 91 #else 102 92 return 0; -
kernel/arch/sparc64/src/mm/sun4v/as.c
r3deb0155 rdd0c8a0 66 66 { 67 67 #ifdef CONFIG_TSB 68 uint8_t order = fnzb32( 69 (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH); 70 71 uintptr_t tsb = (uintptr_t) frame_alloc(order, flags); 72 68 uintptr_t tsb = 69 frame_alloc(SIZE2FRAMES(TSB_ENTRY_COUNT * sizeof(tsb_entry_t)), 70 flags, 0); 73 71 if (!tsb) 74 72 return -1; … … 92 90 { 93 91 #ifdef CONFIG_TSB 94 size_t cnt = (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH;95 frame_free( (uintptr_t) as->arch.tsb_description.tsb_base);92 size_t frames = SIZE2FRAMES(TSB_ENTRY_COUNT * sizeof(tsb_entry_t)); 93 frame_free(as->arch.tsb_description.tsb_base, frames); 96 94 97 return cnt;95 return frames; 98 96 #else 99 97 return 0; -
kernel/arch/sparc64/src/mm/sun4v/frame.c
r3deb0155 rdd0c8a0 101 101 */ 102 102 frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1); 103 104 /* PA2KA will work only on low-memory. */ 105 end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE; 103 106 } 104 107 -
kernel/arch/sparc64/src/mm/sun4v/tlb.c
r3deb0155 rdd0c8a0 251 251 uintptr_t va = DMISS_ADDRESS(page_and_ctx); 252 252 uint16_t ctx = DMISS_CONTEXT(page_and_ctx); 253 as_t *as = AS; 253 254 254 255 if (ctx == ASID_KERNEL) { … … 256 257 /* NULL access in kernel */ 257 258 panic("NULL pointer dereference."); 259 } else if (va >= end_of_identity) { 260 /* Kernel non-identity */ 261 as = AS_KERNEL; 262 } else { 263 panic("Unexpected kernel page fault."); 258 264 } 259 panic("Unexpected kernel page fault."); 260 } 261 262 t = page_mapping_find(AS, va, true); 265 } 266 267 t = page_mapping_find(as, va, true); 263 268 if (t) { 264 269 /* … … 295 300 uintptr_t va = DMISS_ADDRESS(page_and_ctx); 296 301 uint16_t ctx = DMISS_CONTEXT(page_and_ctx); 297 298 t = page_mapping_find(AS, va, true); 302 as_t *as = AS; 303 304 if (ctx == ASID_KERNEL) 305 as = AS_KERNEL; 306 307 t = page_mapping_find(as, va, true); 299 308 if (t && PTE_WRITABLE(t)) { 300 309 /* -
kernel/arch/sparc64/src/sun4v/start.S
r3deb0155 rdd0c8a0 345 345 .quad 0 346 346 347 /* 348 * This variable is used by the fast_data_access_MMU_miss trap handler. 349 * In runtime, it is modified to contain the address of the end of physical 350 * memory. 351 */ 352 .global end_of_identity 353 end_of_identity: 354 .quad -1 355 347 356 .global kernel_8k_tlb_data_template 348 357 kernel_8k_tlb_data_template:
Note:
See TracChangeset
for help on using the changeset viewer.