Changeset ca62f86 in mainline for kernel/arch
- Timestamp:
- 2013-09-09T17:52:40Z (12 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- f7bb6d1
- Parents:
- 6ad185d (diff), a1ecb88 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location:
- kernel/arch
- Files:
- 21 edited
Legend:
- Unmodified (context lines, prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/arch/amd64/src/ddi/ddi.c
r6ad185d → rca62f86

  #include <errno.h>
  #include <arch/cpu.h>
+ #include <cpu.h>
  #include <arch.h>
  #include <align.h>
…
  int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
  {
-     size_t bits = ioaddr + size;
-     if (bits > IO_PORTS)
+     size_t elements = ioaddr + size;
+     if (elements > IO_PORTS)
          return ENOENT;

-     if (task->arch.iomap.bits < bits) {
+     if (task->arch.iomap.elements < elements) {
          /*
           * The I/O permission bitmap is too small and needs to be grown.
           */

-         uint8_t *newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
-         if (!newmap)
+         void *store = malloc(bitmap_size(elements, 0), FRAME_ATOMIC);
+         if (!store)
              return ENOMEM;

          bitmap_t oldiomap;
-         bitmap_initialize(&oldiomap, task->arch.iomap.map,
+         bitmap_initialize(&oldiomap, task->arch.iomap.elements, 0,
              task->arch.iomap.bits);
-         bitmap_initialize(&task->arch.iomap, newmap, bits);
+
+         bitmap_initialize(&task->arch.iomap, elements, 0, store);

          /*
           * Mark the new range inaccessible.
           */
-         bitmap_set_range(&task->arch.iomap, oldiomap.bits,
-             bits - oldiomap.bits);
+         bitmap_set_range(&task->arch.iomap, oldiomap.elements,
+             elements - oldiomap.elements);

          /*
…
          if (oldiomap.bits) {
              bitmap_copy(&task->arch.iomap, &oldiomap,
-                 oldiomap.bits);
-             free(oldiomap.map);
+                 oldiomap.elements);
+
+             free(oldiomap.bits);
          }
      }
…
       * Enable the range and we are done.
       */
-     bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, (size_t) size);
+     bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, size);

      /*
…
      /* First, copy the I/O Permission Bitmap. */
      irq_spinlock_lock(&TASK->lock, false);
+
      size_t ver = TASK->arch.iomapver;
-     size_t bits = TASK->arch.iomap.bits;
-     if (bits) {
-         ASSERT(TASK->arch.iomap.map);
+     size_t elements = TASK->arch.iomap.elements;
+
+     if (elements > 0) {
+         ASSERT(TASK->arch.iomap.bits);

          bitmap_t iomap;
-         bitmap_initialize(&iomap, CPU->arch.tss->iomap,
-             TSS_IOMAP_SIZE * 8);
-         bitmap_copy(&iomap, &TASK->arch.iomap, bits);
+         bitmap_initialize(&iomap, TSS_IOMAP_SIZE * 8, 0,
+             CPU->arch.tss->iomap);
+         bitmap_copy(&iomap, &TASK->arch.iomap, elements);

          /*
…
           * I/O access.
           */
-         bitmap_set_range(&iomap, bits, ALIGN_UP(bits, 8) - bits);
+         bitmap_set_range(&iomap, elements,
+             ALIGN_UP(elements, 8) - elements);
+
          /*
           * It is safe to set the trailing eight bits because of the
           * extra convenience byte in TSS_IOMAP_SIZE.
           */
-         bitmap_set_range(&iomap, ALIGN_UP(bits, 8), 8);
+         bitmap_set_range(&iomap, ALIGN_UP(elements, 8), 8);
      }
+
      irq_spinlock_unlock(&TASK->lock, false);

      /*
       * Second, adjust TSS segment limit.
-      * Take the extra ending byte will all bits set into account.
+      * Take the extra ending byte with all bits set into account.
       */
      ptr_16_64_t cpugdtr;
…
      descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
-     gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + BITS2BYTES(bits));
+     size_t size = bitmap_size(elements, 0);
+     gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + size);
      gdtr_load(&cpugdtr);
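A note on the API change running through this file (and its ia32 and ia64 counterparts below): bitmap_t now carries its element count in `elements` and its backing store in `bits`, and bitmap_size()/bitmap_initialize() take an element count plus a block-size argument (0 here). The sketch below condenses the grow-and-copy pattern above into one helper; the helper name is hypothetical and the signatures are assumed from this diff rather than from adt/bitmap.h.

    /* Hypothetical helper condensing the grow-and-copy pattern above.
     * Assumes the new bitmap API as it appears in this diff: the backing
     * store lives in bitmap_t.bits and sizes are given in elements. */
    static int iomap_grow(bitmap_t *iomap, size_t elements)
    {
        void *store = malloc(bitmap_size(elements, 0), FRAME_ATOMIC);
        if (!store)
            return ENOMEM;

        bitmap_t old;
        bitmap_initialize(&old, iomap->elements, 0, iomap->bits);
        bitmap_initialize(iomap, elements, 0, store);

        /* Newly added ports default to "inaccessible" (bit set). */
        bitmap_set_range(iomap, old.elements, elements - old.elements);

        if (old.elements) {
            bitmap_copy(iomap, &old, old.elements);
            free(old.bits);
        }

        return 0;
    }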
kernel/arch/amd64/src/proc/task.c
r6ad185d → rca62f86

  #include <proc/task.h>
+ #include <typedefs.h>
+ #include <adt/bitmap.h>
  #include <mm/slab.h>
- #include <typedefs.h>

  /** Perform amd64 specific task initialization.
…
  {
      task->arch.iomapver = 0;
-     bitmap_initialize(&task->arch.iomap, NULL, 0);
+     bitmap_initialize(&task->arch.iomap, 0, 0, NULL);
  }
…
  void task_destroy_arch(task_t *task)
  {
-     if (task->arch.iomap.map)
-         free(task->arch.iomap.map);
+     if (task->arch.iomap.bits != NULL)
+         free(task->arch.iomap.bits);
  }
kernel/arch/arm32/include/arch/asm.h
r6ad185d → rca62f86

  #include <typedefs.h>
+ #include <arch/cp15.h>
  #include <arch/stack.h>
  #include <config.h>
…
   * chapter 2.3.8 p.2-22 (52 in the PDF)
   *
-  * @note Although mcr p15, 0, R0, c7, c0, 4 is defined in ARM Architecture
-  * reference manual for armv4/5 CP15 implementation is mandatory only for
-  * armv6+.
+  * @note Although CP15WFI (mcr p15, 0, R0, c7, c0, 4) is defined in ARM
+  * Architecture reference manual for armv4/5, CP15 implementation is mandatory
+  * only for armv6+.
   */
  NO_TRACE static inline void cpu_sleep(void)
…
      asm volatile ( "wfe" );
  #elif defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_arm926ej_s) | defined(PROCESSOR_arm920t)
-     asm volatile ( "mcr p15, 0, R0, c7, c0, 4" );
+     WFI_write(0);
  #endif
  }
kernel/arch/arm32/include/arch/cp15.h
r6ad185d → rca62f86

      CCSIDR_LINESIZE_MASK = 0x7,
      CCSIDR_LINESIZE_SHIFT = 0,
+ #define CCSIDR_SETS(val) \
+     (((val >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK) + 1)
+ #define CCSIDR_WAYS(val) \
+     (((val >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK) + 1)
+ /* The register value is log(linesize_in_words) - 2 */
+ #define CCSIDR_LINESIZE_LOG(val) \
+     (((val >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK) + 2 + 2)
  };
  CONTROL_REG_GEN_READ(CCSIDR, c0, 1, c0, 0);
…
      CLIDR_UNI_CACHE = 0x4,
      CLIDR_CACHE_MASK = 0x7,
- #define CLIDR_CACHE(level, val) ((val >> (level - 1) * 3) & CLIDR_CACHE_MASK)
+ /** levels counted from 0 */
+ #define CLIDR_CACHE(level, val) ((val >> (level * 3)) & CLIDR_CACHE_MASK)
  };
  CONTROL_REG_GEN_READ(CLIDR, c0, 1, c0, 1);
…
  /* Memory protection and control registers */
+ enum {
+     TTBR_ADDR_MASK = 0xffffff80,
+     TTBR_NOS_FLAG = 1 << 5,
+     TTBR_RGN_MASK = 0x3 << 3,
+     TTBR_RGN_NO_CACHE = 0x0 << 3,
+     TTBR_RGN_WBWA_CACHE = 0x1 << 3,
+     TTBR_RGN_WT_CACHE = 0x2 << 3,
+     TTBR_RGN_WB_CACHE = 0x3 << 3,
+     TTBR_S_FLAG = 1 << 1,
+     TTBR_C_FLAG = 1 << 0,
+ };
  CONTROL_REG_GEN_READ(TTBR0, c2, 0, c0, 0);
  CONTROL_REG_GEN_WRITE(TTBR0, c2, 0, c0, 0);
…
  CONTROL_REG_GEN_WRITE(DCIMVAC, c7, 0, c6, 1);
- CONTROL_REG_GEN_WRITE(DCIMSW, c7, 0, c6, 2);
+ CONTROL_REG_GEN_WRITE(DCISW, c7, 0, c6, 2);

  CONTROL_REG_GEN_WRITE(ATS1CPR, c7, 0, c8, 0);
…
  CONTROL_REG_GEN_WRITE(ATS1CUR, c7, 0, c8, 2);
  CONTROL_REG_GEN_WRITE(ATS1CUW, c7, 0, c8, 3);
- CONTROL_REG_GEN_WRITE(ATS1NSOPR, c7, 0, c8, 4);
- CONTROL_REG_GEN_WRITE(ATS1NSOPW, c7, 0, c8, 5);
- CONTROL_REG_GEN_WRITE(ATS1NSOUR, c7, 0, c8, 6);
- CONTROL_REG_GEN_WRITE(ATS1NSOUW, c7, 0, c8, 7);
+ CONTROL_REG_GEN_WRITE(ATS12NSOPR, c7, 0, c8, 4);
+ CONTROL_REG_GEN_WRITE(ATS12NSOPW, c7, 0, c8, 5);
+ CONTROL_REG_GEN_WRITE(ATS12NSOUR, c7, 0, c8, 6);
+ CONTROL_REG_GEN_WRITE(ATS12NSOUW, c7, 0, c8, 7);
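The new CCSIDR_* macros fold in the offsets mandated by the ARMv7 CCSIDR encoding: associativity and set count are stored minus one, and line size is stored as log2(words) minus two. A worked decode, assuming the CCSIDR_*_MASK/_SHIFT values defined earlier in this header; the sample register value is illustrative:

    /* Decoding a sample CCSIDR value (0x701fe019: a common 32 KiB,
     * 4-way, 32 B/line data cache) with the macros added above. */
    uint32_t ccsidr = 0x701fe019;
    unsigned sets = CCSIDR_SETS(ccsidr);              /* 255 + 1 = 256 */
    unsigned ways = CCSIDR_WAYS(ccsidr);              /* 3 + 1 = 4 */
    unsigned line_log = CCSIDR_LINESIZE_LOG(ccsidr);  /* 1 + 2 + 2 = 5, i.e. 32 B */
    size_t bytes = (size_t) sets * ways << line_log;  /* 256 * 4 * 32 = 32 KiB */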
kernel/arch/arm32/include/arch/mm/page.h
r6ad185d → rca62f86

  #include <arch/exception.h>
  #include <arch/barrier.h>
+ #include <arch/cp15.h>
  #include <trace.h>
…
  /* Set PTE address accessors for each level. */
  #define SET_PTL0_ADDRESS_ARCH(ptl0) \
-     (set_ptl0_addr((pte_t *) (ptl0)))
+     set_ptl0_addr((pte_t *) (ptl0))
  #define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
-     (((pte_t *) (ptl0))[(i)].l0.coarse_table_addr = (a) >> 10)
+     set_ptl1_addr((pte_t*) (ptl0), i, a)
  #define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
  #define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
  #define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
-     (((pte_t *) (ptl3))[(i)].l1.frame_base_addr = (a) >> 12)
+     set_ptl3_addr((pte_t*) (ptl3), i, a)

  /* Get PTE flags accessors for each level. */
…
      set_pt_level1_present((pte_t *) (ptl3), (size_t) (i))

+
+ #define pt_coherence(page) pt_coherence_m(page, 1)
+
  #if defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_ARCH_armv7_a)
  #include "page_armv6.h"
…
  #endif

+ /** Sets the address of level 0 page table.
+  *
+  * @param pt Pointer to the page table to set.
+  *
+  * Page tables are always in cacheable memory.
+  * Make sure the memory type is correct, and in sync with:
+  * init_boot_pt (boot/arch/arm32/src/mm.c)
+  * init_ptl0_section (boot/arch/arm32/src/mm.c)
+  * set_pt_level1_flags (kernel/arch/arm32/include/arch/mm/page_armv6.h)
+  */
+ NO_TRACE static inline void set_ptl0_addr(pte_t *pt)
+ {
+     uint32_t val = (uint32_t)pt & TTBR_ADDR_MASK;
+     val |= TTBR_RGN_WBWA_CACHE | TTBR_C_FLAG;
+     TTBR0_write(val);
+ }
+
+ NO_TRACE static inline void set_ptl1_addr(pte_t *pt, size_t i, uintptr_t address)
+ {
+     pt[i].l0.coarse_table_addr = address >> 10;
+     pt_coherence(&pt[i].l0);
+ }
+
+ NO_TRACE static inline void set_ptl3_addr(pte_t *pt, size_t i, uintptr_t address)
+ {
+     pt[i].l1.frame_base_addr = address >> 12;
+     pt_coherence(&pt[i].l1);
+ }
+
  #endif
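set_ptl0_addr() now programs TTBR0 with an explicit memory type instead of writing the bare pointer, using the TTBR_* constants introduced in cp15.h above. A worked illustration of the resulting register value; the page-table address is made up:

    /* Illustration only: composing TTBR0 the way set_ptl0_addr() does,
     * for a hypothetical page table at physical address 0x80008000. */
    uint32_t pt = 0x80008000;
    uint32_t val = (pt & TTBR_ADDR_MASK)   /* translation table base */
        | TTBR_RGN_WBWA_CACHE              /* outer write-back, write-allocate */
        | TTBR_C_FLAG;                     /* inner cacheable */
    /* val == 0x80008009 */
    TTBR0_write(val);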
kernel/arch/arm32/include/arch/mm/page_armv4.h
r6ad185d → rca62f86

  #define PTE_DESCRIPTOR_SMALL_PAGE  2

-
- /** Sets the address of level 0 page table.
-  *
-  * @param pt Pointer to the page table to set.
-  *
-  */
- NO_TRACE static inline void set_ptl0_addr(pte_t *pt)
- {
-     asm volatile (
-         "mcr p15, 0, %[pt], c2, c0, 0\n"
-         :: [pt] "r" (pt)
-     );
- }
-
+ #define pt_coherence_m(pt, count) \
+ do { \
+     for (unsigned i = 0; i < count; ++i) \
+         DCCMVAU_write((uintptr_t)(pt + i)); \
+     read_barrier(); \
+ } while (0)

  /** Returns level 0 page table entry flags.
…
      /* default access permission */
-     p->access_permission_0 = p->access_permission_1 = 
+     p->access_permission_0 = p->access_permission_1 =
          p->access_permission_2 = p->access_permission_3 =
          PTE_AP_USER_NO_KERNEL_RW;

      if (flags & PAGE_USER) {
          if (flags & PAGE_READ) {
-             p->access_permission_0 = p->access_permission_1 = 
-                 p->access_permission_2 = p->access_permission_3 = 
+             p->access_permission_0 = p->access_permission_1 =
+                 p->access_permission_2 = p->access_permission_3 =
                  PTE_AP_USER_RO_KERNEL_RW;
          }
          if (flags & PAGE_WRITE) {
-             p->access_permission_0 = p->access_permission_1 = 
-                 p->access_permission_2 = p->access_permission_3 = 
-                 PTE_AP_USER_RW_KERNEL_RW; 
+             p->access_permission_0 = p->access_permission_1 =
+                 p->access_permission_2 = p->access_permission_3 =
+                 PTE_AP_USER_RW_KERNEL_RW;
          }
      }
kernel/arch/arm32/include/arch/mm/page_armv6.h
r6ad185d → rca62f86

  #error "Do not include arch specific page.h directly use generic page.h instead"
  #endif
+

  /* Macros for querying the last-level PTE entries. */
…
  #define PTE_DESCRIPTOR_SMALL_PAGE_NX  3

- /** Sets the address of level 0 page table.
-  *
-  * @param pt Pointer to the page table to set.
-  *
-  */
- NO_TRACE static inline void set_ptl0_addr(pte_t *pt)
- {
-     asm volatile (
-         "mcr p15, 0, %[pt], c2, c0, 0\n"
-         :: [pt] "r" (pt)
-     );
- }
+
+ /**
+  * For an ARMv7 implementation that does not include the Large Physical Address Extension,
+  * and in implementations of architecture versions before ARMv7, if the translation tables
+  * are held in Write-Back Cacheable memory, the caches must be cleaned to the point of
+  * unification after writing to the translation tables and before the DSB instruction. This
+  * ensures that the updated translation table are visible to a hardware translation table walk.
+  *
+  * Therefore, an example instruction sequence for writing a translation table entry,
+  * covering changes to the instruction or data mappings in a uniprocessor system is:
+  * STR rx, [Translation table entry]
+  *     ; write new entry to the translation table
+  * Clean cache line [Translation table entry]
+  *     ; This operation is not required with the Multiprocessing Extensions.
+  * DSB
+  *     ; ensures visibility of the data cleaned from the D Cache
+  * Invalidate TLB entry by MVA (and ASID if non-global) [page address]
+  * Invalidate BTC
+  * DSB
+  *     ; ensure completion of the Invalidate TLB operation
+  * ISB
+  *     ; ensure table changes visible to instruction fetch
+  *
+  * ARM Architecture reference chp. B3.10.1 p. B3-1375
+  * @note: see TTRB0/1 for pt memory type
+  */
+ #define pt_coherence_m(pt, count) \
+ do { \
+     for (unsigned i = 0; i < count; ++i) \
+         DCCMVAU_write((uintptr_t)(pt + i)); \
+     read_barrier(); \
+ } while (0)
…
          p->ns = 0;
      }
+     pt_coherence(p);
  }
…
          p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE_NX;
      }
-
-     /* tex=0 buf=1 and cache=1 => normal memory
-      * tex=0 buf=1 and cache=0 => shareable device mmio
-      */
-     p->cacheable = (flags & PAGE_CACHEABLE);
-     p->bufferable = 1;
-     p->tex = 0;
+
+     if (flags & PAGE_CACHEABLE) {
+         /*
+          * Write-through, no write-allocate memory, see ch. B3.8.2
+          * (p. B3-1358) of ARM Architecture reference manual.
+          * Make sure the memory type is correct, and in sync with:
+          * init_boot_pt (boot/arch/arm32/src/mm.c)
+          * init_ptl0_section (boot/arch/arm32/src/mm.c)
+          * set_ptl0_addr (kernel/arch/arm32/include/arch/mm/page.h)
+          */
+         p->tex = 5;
+         p->cacheable = 0;
+         p->bufferable = 1;
+     } else {
+         /*
+          * Shareable device memory, see ch. B3.8.2 (p. B3-1358) of
+          * ARM Architecture reference manual.
+          */
+         p->tex = 0;
+         p->cacheable = 0;
+         p->bufferable = 1;
+     }

      /* Shareable is ignored for devices (non-cacheable),
-      * turn it on for normal memory. */
-     p->shareable = 1;
+      * turn it off for normal memory. */
+     p->shareable = 0;

      p->non_global = !(flags & PAGE_GLOBAL);
…
          p->access_permission_1 = PTE_AP1_RO;
      }
+     pt_coherence(p);
  }
…
      p->should_be_zero_0 = 0;
      p->should_be_zero_1 = 0;
-     write_barrier();
      p->descriptor_type = PTE_DESCRIPTOR_COARSE_TABLE;
+     pt_coherence(p);
  }
…
      p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE;
+     pt_coherence(p);
  }
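Condensing the sequence quoted in the comment above: every PTE store is now followed by pt_coherence() (DCCMVAU plus a barrier), and the TLB side of the sequence lives in arm32/src/mm/tlb.c below. A sketch of a complete entry update under these rules; the wrapper name and parameter list are illustrative:

    /* Sketch: a full small-page remap following the quoted ARM ARM sequence.
     * set_ptl3_addr() and tlb_invalidate_pages() are the kernel's own; this
     * wrapper and its parameters are hypothetical. */
    static inline void remap_small_page(pte_t *ptl3, size_t i,
        uintptr_t frame, asid_t asid, uintptr_t va)
    {
        /* STR + clean to PoU + DSB: done inside set_ptl3_addr()
         * via pt_coherence(). */
        set_ptl3_addr(ptl3, i, frame);

        /* TLBIMVA + DSB + ISB: done inside tlb_invalidate_pages()
         * (see kernel/arch/arm32/src/mm/tlb.c below). */
        tlb_invalidate_pages(asid, va, 1);
    }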
kernel/arch/arm32/src/cpu/cpu.c
r6ad185d → rca62f86

  #endif
  #ifdef PROCESSOR_ARCH_armv7_a
-     /* ICache coherency is elaborate on in barrier.h.
+     /* ICache coherency is elaborated on in barrier.h.
       * VIPT and PIPT caches need maintenance only on code modify,
       * so it should be safe for general use.
…
          control_reg |=
              SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG;
+     } else {
+         control_reg &=
+             ~(SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG);
      }
  #endif
…
  #ifdef PROCESSOR_ARCH_armv7_a
      CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
-     const unsigned ls_log = 2 +
-         ((CCSIDR_read() >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK);
-     return ls_log + 2; //return log2(bytes)
+     const uint32_t ccsidr = CCSIDR_read();
+     return CCSIDR_LINESIZE_LOG(ccsidr);
  #endif
      return 0;
…
  #ifdef PROCESSOR_ARCH_armv7_a
      CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
-     const unsigned ways = 1 +
-         ((CCSIDR_read() >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK);
-     return ways;
+     const uint32_t ccsidr = CCSIDR_read();
+     return CCSIDR_WAYS(ccsidr);
  #endif
      return 0;
…
  #ifdef PROCESSOR_ARCH_armv7_a
      CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
-     const unsigned sets = 1 +
-         ((CCSIDR_read() >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK);
-     return sets;
+     const uint32_t ccsidr = CCSIDR_read();
+     return CCSIDR_SETS(ccsidr);
  #endif
      return 0;
…
  #ifdef PROCESSOR_ARCH_armv7_a
      const uint32_t val = CLIDR_read();
-     for (unsigned i = 1; i <= 7; ++i) {
+     for (unsigned i = 0; i < 8; ++i) {
          const unsigned ctype = CLIDR_CACHE(i, val);
          switch (ctype) {
…
          const unsigned ways = dcache_ways(i);
          const unsigned sets = dcache_sets(i);
-         const unsigned way_shift = 31 - log2(ways);
+         const unsigned way_shift = 32 - log2(ways);
          const unsigned set_shift = dcache_linesize_log(i);
          dcache_clean_manual(i, false, ways, sets, way_shift, set_shift);
…
          const unsigned ways = dcache_ways(i);
          const unsigned sets = dcache_sets(i);
-         const unsigned way_shift = 31 - log2(ways);
+         const unsigned way_shift = 32 - log2(ways);
          const unsigned set_shift = dcache_linesize_log(i);
          dcache_clean_manual(i, true, ways, sets, way_shift, set_shift);
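The way_shift fix (31 → 32, minus log2(ways)) matters because, in the set/way operand format used by DCISW/DCCSW and friends, the way index occupies the topmost log2(ways) bits. A short illustration of the operand layout; the operand-building helper is assumed to be roughly what dcache_clean_manual() does internally, which this diff does not show:

    /* Set/way operand layout (ARM ARM ch. B4.2.1): way in the top
     * log2(ways) bits, set above the line-offset bits, cache level
     * in bits [3:1]. */
    static inline uint32_t set_way_op(unsigned level, unsigned way,
        unsigned set, unsigned way_shift, unsigned set_shift)
    {
        return (way << way_shift) | (set << set_shift) | (level << 1);
    }

    /* For a 4-way cache, log2(4) = 2, so way_shift = 32 - 2 = 30 and
     * way 3 encodes as 0xC0000000. The old 31 - log2(ways) = 29 would
     * have produced 0x60000000, i.e. the wrong way (and set) bits. */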
kernel/arch/arm32/src/mach/beagleboardxm/beagleboardxm.c
r6ad185d → rca62f86

  static void bb_timer_irq_handler(irq_t *irq)
  {
+     amdm37x_gpt_irq_ack(&beagleboard.timer);
+
      /*
       * We are holding a lock which prevents preemption.
       * Release the lock, call clock() and reacquire the lock again.
       */
-     amdm37x_gpt_irq_ack(&beagleboard.timer);
      spinlock_unlock(&irq->lock);
      clock();
…
  {
      const unsigned inum = amdm37x_irc_inum_get(beagleboard.irc_addr);
-     amdm37x_irc_irq_ack(beagleboard.irc_addr);

      irq_t *irq = irq_dispatch_and_lock(inum);
…
          CPU->id, inum);
      }
+     /** amdm37x manual ch. 12.5.2 (p. 2428) places irc ack at the end
+      * of ISR. DO this to avoid strange behavior. */
+     amdm37x_irc_irq_ack(beagleboard.irc_addr);
  }
kernel/arch/arm32/src/mm/tlb.c
r6ad185d → rca62f86

  #include <arch/mm/asid.h>
  #include <arch/asm.h>
+ #include <arch/cp15.h>
  #include <typedefs.h>
  #include <arch/mm/page.h>
+ #include <arch/cache.h>

  /** Invalidate all entries in TLB.
…
  void tlb_invalidate_all(void)
  {
-     asm volatile (
-         "eor r1, r1\n"
-         "mcr p15, 0, r1, c8, c7, 0\n"
-         ::: "r1"
-     );
+     TLBIALL_write(0);
+     /*
+      * "A TLB maintenance operation is only guaranteed to be complete after
+      * the execution of a DSB instruction."
+      * "An ISB instruction, or a return from an exception, causes the
+      * effect of all completed TLB maintenance operations that appear in
+      * program order before the ISB or return from exception to be visible
+      * to all subsequent instructions, including the instruction fetches
+      * for those instructions."
+      * ARM Architecture reference Manual ch. B3.10.1 p. B3-1374 B3-1375
+      */
+     read_barrier();
+     inst_barrier();
  }
…
  {
      tlb_invalidate_all();
+     // TODO: why not TLBIASID_write(asid) ?
  }
…
   *
   * @param page Virtual adress of the page
   */
  static inline void invalidate_page(uintptr_t page)
  {
-     asm volatile (
-         "mcr p15, 0, %[page], c8, c7, 1\n"
-         :: [page] "r" (page)
-     );
+     //TODO: What about TLBIMVAA?
+     TLBIMVA_write(page);
+     /*
+      * "A TLB maintenance operation is only guaranteed to be complete after
+      * the execution of a DSB instruction."
+      * "An ISB instruction, or a return from an exception, causes the
+      * effect of all completed TLB maintenance operations that appear in
+      * program order before the ISB or return from exception to be visible
+      * to all subsequent instructions, including the instruction fetches
+      * for those instructions."
+      * ARM Architecture reference Manual ch. B3.10.1 p. B3-1374 B3-1375
+      */
+     read_barrier();
+     inst_barrier();
  }
…
  void tlb_invalidate_pages(asid_t asid __attribute__((unused)), uintptr_t page, size_t cnt)
  {
-     unsigned int i;
-
-     for (i = 0; i < cnt; i++)
+     for (unsigned i = 0; i < cnt; i++)
          invalidate_page(page + i * PAGE_SIZE);
  }
kernel/arch/ia32/src/ddi/ddi.c
r6ad185d → rca62f86

  int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
  {
-     size_t bits = ioaddr + size;
-     if (bits > IO_PORTS)
+     size_t elements = ioaddr + size;
+     if (elements > IO_PORTS)
          return ENOENT;

-     if (task->arch.iomap.bits < bits) {
+     if (task->arch.iomap.elements < elements) {
          /*
           * The I/O permission bitmap is too small and needs to be grown.
           */

-         uint8_t *newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
-         if (!newmap)
+         void *store = malloc(bitmap_size(elements, 0), FRAME_ATOMIC);
+         if (!store)
              return ENOMEM;

          bitmap_t oldiomap;
-         bitmap_initialize(&oldiomap, task->arch.iomap.map,
+         bitmap_initialize(&oldiomap, task->arch.iomap.elements, 0,
              task->arch.iomap.bits);
-         bitmap_initialize(&task->arch.iomap, newmap, bits);
+
+         bitmap_initialize(&task->arch.iomap, elements, 0, store);

          /*
           * Mark the new range inaccessible.
           */
-         bitmap_set_range(&task->arch.iomap, oldiomap.bits,
-             bits - oldiomap.bits);
+         bitmap_set_range(&task->arch.iomap, oldiomap.elements,
+             elements - oldiomap.elements);

          /*
…
          if (oldiomap.bits) {
              bitmap_copy(&task->arch.iomap, &oldiomap,
-                 oldiomap.bits);
-             free(oldiomap.map);
+                 oldiomap.elements);
+
+             free(oldiomap.bits);
          }
      }
…
       * Enable the range and we are done.
       */
-     bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, (size_t) size);
+     bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, size);

      /*
…
      /* First, copy the I/O Permission Bitmap. */
      irq_spinlock_lock(&TASK->lock, false);
+
      size_t ver = TASK->arch.iomapver;
-     size_t bits = TASK->arch.iomap.bits;
-     if (bits) {
-         ASSERT(TASK->arch.iomap.map);
+     size_t elements = TASK->arch.iomap.elements;
+
+     if (elements > 0) {
+         ASSERT(TASK->arch.iomap.bits);

          bitmap_t iomap;
-         bitmap_initialize(&iomap, CPU->arch.tss->iomap,
-             TSS_IOMAP_SIZE * 8);
-         bitmap_copy(&iomap, &TASK->arch.iomap, bits);
+         bitmap_initialize(&iomap, TSS_IOMAP_SIZE * 8, 0,
+             CPU->arch.tss->iomap);
+         bitmap_copy(&iomap, &TASK->arch.iomap, elements);

          /*
…
           * I/O access.
           */
-         bitmap_set_range(&iomap, bits, ALIGN_UP(bits, 8) - bits);
+         bitmap_set_range(&iomap, elements,
+             ALIGN_UP(elements, 8) - elements);
+
          /*
           * It is safe to set the trailing eight bits because of the
           * extra convenience byte in TSS_IOMAP_SIZE.
           */
-         bitmap_set_range(&iomap, ALIGN_UP(bits, 8), 8);
+         bitmap_set_range(&iomap, ALIGN_UP(elements, 8), 8);
      }
+
      irq_spinlock_unlock(&TASK->lock, false);
…
      descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
-     gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + BITS2BYTES(bits));
+     size_t size = bitmap_size(elements, 0);
+     gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + size);
      gdtr_load(&cpugdtr);
kernel/arch/ia32/src/mm/frame.c
r6ad185d → rca62f86

  #define PHYSMEM_LIMIT32  UINT64_C(0x100000000)
+ #define PHYSMEM_LIMIT_DMA  UINT64_C(0x1000000)

  size_t hardcoded_unmapped_ktext_size = 0;
…
              else
                  conf = minconf;
-             zone_create(pfn, count, conf,
-                 ZONE_AVAILABLE | ZONE_LOWMEM);
+
+             if ((pfn * PAGE_SIZE) < PHYSMEM_LIMIT_DMA) {
+                 size_t dma_count = min(
+                     PHYSMEM_LIMIT_DMA / PAGE_SIZE - pfn,
+                     count);
+                 zone_create(pfn, dma_count, conf,
+                     ZONE_AVAILABLE | ZONE_DMA);
+                 count -= dma_count;
+                 pfn += dma_count;
+             }
+
+             conf = pfn;
+             if (count) {
+                 zone_create(pfn, count, conf,
+                     ZONE_AVAILABLE | ZONE_LOWMEM);
+             }
          } else {
              conf = zone_external_conf_alloc(count);
-             if (conf != 0)
+             if (conf != 0) {
                  zone_create(pfn, count, conf,
                      ZONE_AVAILABLE | ZONE_HIGHMEM);
+             }
          }
      } else if ((e820table[i].type == MEMMAP_MEMORY_ACPI) ||
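A worked example of the split introduced above, with PHYSMEM_LIMIT_DMA = 16 MiB and 4 KiB frames (so the DMA boundary sits at pfn 4096); the region values are made up:

    /* A low-memory region starting at pfn 256 with 8192 frames. */
    size_t pfn = 256, count = 8192;
    size_t dma_count = min(0x1000000 / PAGE_SIZE - pfn, count); /* 3840 */
    /* -> zone_create(256, 3840, ..., ZONE_AVAILABLE | ZONE_DMA);
     *    the remaining 4352 frames, from pfn 4096 up, go to ZONE_LOWMEM. */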
kernel/arch/ia32/src/proc/task.c
r6ad185d → rca62f86

  /** Perform ia32 specific task initialization.
   *
-  * @param t Task to be initialized.
+  * @param task Task to be initialized.
+  *
   */
- void task_create_arch(task_t *t)
+ void task_create_arch(task_t *task)
  {
-     t->arch.iomapver = 0;
-     bitmap_initialize(&t->arch.iomap, NULL, 0);
+     task->arch.iomapver = 0;
+     bitmap_initialize(&task->arch.iomap, 0, 0, NULL);
  }

  /** Perform ia32 specific task destruction.
   *
-  * @param t Task to be initialized.
+  * @param task Task to be initialized.
+  *
   */
- void task_destroy_arch(task_t *t)
+ void task_destroy_arch(task_t *task)
  {
-     if (t->arch.iomap.map)
-         free(t->arch.iomap.map);
+     if (task->arch.iomap.bits != NULL)
+         free(task->arch.iomap.bits);
  }
kernel/arch/ia64/src/ddi/ddi.c
r6ad185d → rca62f86

  {
      if (!task->arch.iomap) {
-         uint8_t *map;
-
          task->arch.iomap = malloc(sizeof(bitmap_t), 0);
-         map = malloc(BITS2BYTES(IO_MEMMAP_PAGES), 0);
-         if(!map)
+         if (task->arch.iomap == NULL)
              return ENOMEM;
-         bitmap_initialize(task->arch.iomap, map, IO_MEMMAP_PAGES);
+
+         void *store = malloc(bitmap_size(IO_MEMMAP_PAGES, 0), 0);
+         if (store == NULL)
+             return ENOMEM;
+
+         bitmap_initialize(task->arch.iomap, IO_MEMMAP_PAGES, 0, store);
          bitmap_clear_range(task->arch.iomap, 0, IO_MEMMAP_PAGES);
      }
…
      size = ALIGN_UP(size + ioaddr - 4 * iopage, PORTS_PER_PAGE);
      bitmap_set_range(task->arch.iomap, iopage, size / 4);

      return 0;
  }
kernel/arch/sparc64/include/arch/mm/frame.h
r6ad185d → rca62f86

  #endif

+ #ifndef __ASM__
+
+ #include <typedefs.h>
+
+ extern uintptr_t end_of_identity;
+
+ extern void frame_low_arch_init(void);
+ extern void frame_high_arch_init(void);
+ #define physmem_print()
+
+ #endif
+
  #endif
kernel/arch/sparc64/include/arch/mm/sun4u/frame.h
r6ad185d → rca62f86

  typedef union frame_address frame_address_t;

- extern uintptr_t end_of_identity;
-
- extern void frame_low_arch_init(void);
- extern void frame_high_arch_init(void);
- #define physmem_print()
-
  #endif
kernel/arch/sparc64/include/arch/mm/sun4v/frame.h
r6ad185d → rca62f86

  #define FRAME_SIZE  (1 << FRAME_WIDTH)

- #ifndef __ASM__
-
- #include <typedefs.h>
-
- extern void frame_low_arch_init(void);
- extern void frame_high_arch_init(void);
- #define physmem_print()
-
- #endif
-
  #endif
kernel/arch/sparc64/include/arch/trap/sun4v/mmu.h
r6ad185d → rca62f86

      nop

+     /* exclude pages beyond the end of memory from the identity mapping */
+     sethi %hi(end_of_identity), %g4
+     ldx [%g4 + %lo(end_of_identity)], %g4
+     cmp %g1, %g4
+     bgeu %xcc, 0f
+     nop
+
      /*
       * Installing the identity does not fit into 32 instructions, call
kernel/arch/sparc64/src/mm/sun4v/frame.c
r6ad185d → rca62f86

       */
      frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
+
+     /* PA2KA will work only on low-memory. */
+     end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE;
  }
kernel/arch/sparc64/src/mm/sun4v/tlb.c
r6ad185d → rca62f86

      uintptr_t va = DMISS_ADDRESS(page_and_ctx);
      uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
+     as_t *as = AS;

      if (ctx == ASID_KERNEL) {
…
              /* NULL access in kernel */
              panic("NULL pointer dereference.");
+         } else if (va >= end_of_identity) {
+             /* Kernel non-identity */
+             as = AS_KERNEL;
+         } else {
+             panic("Unexpected kernel page fault.");
          }
-         panic("Unexpected kernel page fault.");
-     }
-
-     t = page_mapping_find(AS, va, true);
+     }
+
+     t = page_mapping_find(as, va, true);
      if (t) {
          /*
…
      uintptr_t va = DMISS_ADDRESS(page_and_ctx);
      uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
-
-     t = page_mapping_find(AS, va, true);
+     as_t *as = AS;
+
+     if (ctx == ASID_KERNEL)
+         as = AS_KERNEL;
+
+     t = page_mapping_find(as, va, true);
      if (t && PTE_WRITABLE(t)) {
          /*
kernel/arch/sparc64/src/sun4v/start.S
r6ad185d → rca62f86

      .quad 0

+ /*
+  * This variable is used by the fast_data_access_MMU_miss trap handler.
+  * In runtime, it is modified to contain the address of the end of physical
+  * memory.
+  */
+ .global end_of_identity
+ end_of_identity:
+     .quad -1
+
  .global kernel_8k_tlb_data_template
  kernel_8k_tlb_data_template: