- Timestamp: 2013-09-09T17:52:40Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f7bb6d1
- Parents: 6ad185d, a1ecb88
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel
- Files: 32 edited
kernel/arch/amd64/src/ddi/ddi.c
r6ad185d → rca62f86

	  #include <errno.h>
	  #include <arch/cpu.h>
	+ #include <cpu.h>
	  #include <arch.h>
	  #include <align.h>
	…
	  int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
	  {
	- 	size_t bits = ioaddr + size;
	- 	if (bits > IO_PORTS)
	+ 	size_t elements = ioaddr + size;
	+ 	if (elements > IO_PORTS)
	  		return ENOENT;
	  
	- 	if (task->arch.iomap.bits < bits) {
	+ 	if (task->arch.iomap.elements < elements) {
	  		/*
	  		 * The I/O permission bitmap is too small and needs to be grown.
	  		 */
	  
	- 		uint8_t *newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
	- 		if (!newmap)
	+ 		void *store = malloc(bitmap_size(elements, 0), FRAME_ATOMIC);
	+ 		if (!store)
	  			return ENOMEM;
	  
	  		bitmap_t oldiomap;
	- 		bitmap_initialize(&oldiomap, task->arch.iomap.map,
	+ 		bitmap_initialize(&oldiomap, task->arch.iomap.elements, 0,
	  		    task->arch.iomap.bits);
	- 		bitmap_initialize(&task->arch.iomap, newmap, bits);
	+ 
	+ 		bitmap_initialize(&task->arch.iomap, elements, 0, store);
	  
	  		/*
	  		 * Mark the new range inaccessible.
	  		 */
	- 		bitmap_set_range(&task->arch.iomap, oldiomap.bits,
	- 		    bits - oldiomap.bits);
	+ 		bitmap_set_range(&task->arch.iomap, oldiomap.elements,
	+ 		    elements - oldiomap.elements);
	…
	  		if (oldiomap.bits) {
	  			bitmap_copy(&task->arch.iomap, &oldiomap,
	- 			    oldiomap.bits);
	- 			free(oldiomap.map);
	+ 			    oldiomap.elements);
	+ 
	+ 			free(oldiomap.bits);
	  		}
	  	}
	…
	  	 * Enable the range and we are done.
	  	 */
	- 	bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, (size_t) size);
	+ 	bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, size);
	…
	  	/* First, copy the I/O Permission Bitmap. */
	  	irq_spinlock_lock(&TASK->lock, false);
	+ 
	  	size_t ver = TASK->arch.iomapver;
	- 	size_t bits = TASK->arch.iomap.bits;
	- 	if (bits) {
	- 		ASSERT(TASK->arch.iomap.map);
	+ 	size_t elements = TASK->arch.iomap.elements;
	+ 
	+ 	if (elements > 0) {
	+ 		ASSERT(TASK->arch.iomap.bits);
	  
	  		bitmap_t iomap;
	- 		bitmap_initialize(&iomap, CPU->arch.tss->iomap,
	- 		    TSS_IOMAP_SIZE * 8);
	- 		bitmap_copy(&iomap, &TASK->arch.iomap, bits);
	+ 		bitmap_initialize(&iomap, TSS_IOMAP_SIZE * 8, 0,
	+ 		    CPU->arch.tss->iomap);
	+ 		bitmap_copy(&iomap, &TASK->arch.iomap, elements);
	…
	  		 * I/O access.
	  		 */
	- 		bitmap_set_range(&iomap, bits, ALIGN_UP(bits, 8) - bits);
	+ 		bitmap_set_range(&iomap, elements,
	+ 		    ALIGN_UP(elements, 8) - elements);
	+ 
	  		/*
	  		 * It is safe to set the trailing eight bits because of the
	  		 * extra convenience byte in TSS_IOMAP_SIZE.
	  		 */
	- 		bitmap_set_range(&iomap, ALIGN_UP(bits, 8), 8);
	+ 		bitmap_set_range(&iomap, ALIGN_UP(elements, 8), 8);
	  	}
	+ 
	  	irq_spinlock_unlock(&TASK->lock, false);
	…
	  	/*
	  	 * Second, adjust TSS segment limit.
	- 	 * Take the extra ending byte will all bits set into account.
	+ 	 * Take the extra ending byte with all bits set into account.
	  	 */
	  	ptr_16_64_t cpugdtr;
	…
	  	descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
	- 	gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + BITS2BYTES(bits));
	+ 	size_t size = bitmap_size(elements, 0);
	+ 	gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + size);
	  	gdtr_load(&cpugdtr);
kernel/arch/amd64/src/proc/task.c
r6ad185d → rca62f86

	  #include <proc/task.h>
	+ #include <typedefs.h>
	+ #include <adt/bitmap.h>
	  #include <mm/slab.h>
	- #include <typedefs.h>
	
	  /** Perform amd64 specific task initialization.
	…
	  {
	  	task->arch.iomapver = 0;
	- 	bitmap_initialize(&task->arch.iomap, NULL, 0);
	+ 	bitmap_initialize(&task->arch.iomap, 0, 0, NULL);
	  }
	…
	  void task_destroy_arch(task_t *task)
	  {
	- 	if (task->arch.iomap.map)
	- 		free(task->arch.iomap.map);
	+ 	if (task->arch.iomap.bits != NULL)
	+ 		free(task->arch.iomap.bits);
	  }
kernel/arch/arm32/include/arch/asm.h
r6ad185d → rca62f86

	  #include <typedefs.h>
	+ #include <arch/cp15.h>
	  #include <arch/stack.h>
	  #include <config.h>
	…
	   * chapter 2.3.8 p.2-22 (52 in the PDF)
	   *
	- * @note Although mcr p15, 0, R0, c7, c0, 4 is defined in ARM Architecture
	- * reference manual for armv4/5 CP15 implementation is mandatory only for
	- * armv6+.
	+ * @note Although CP15WFI (mcr p15, 0, R0, c7, c0, 4) is defined in ARM
	+ * Architecture reference manual for armv4/5, CP15 implementation is mandatory
	+ * only for armv6+.
	   */
	  NO_TRACE static inline void cpu_sleep(void)
	…
	  	asm volatile ( "wfe" );
	  #elif defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_arm926ej_s) | defined(PROCESSOR_arm920t)
	- 	asm volatile ( "mcr p15, 0, R0, c7, c0, 4" );
	+ 	WFI_write(0);
	  #endif
	  }
kernel/arch/arm32/include/arch/cp15.h
r6ad185d → rca62f86

	  	CCSIDR_LINESIZE_MASK = 0x7,
	  	CCSIDR_LINESIZE_SHIFT = 0,
	+ #define CCSIDR_SETS(val) \
	+ 	(((val >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK) + 1)
	+ #define CCSIDR_WAYS(val) \
	+ 	(((val >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK) + 1)
	+ /* The register value is log(linesize_in_words) - 2 */
	+ #define CCSIDR_LINESIZE_LOG(val) \
	+ 	(((val >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK) + 2 + 2)
	  };
	  CONTROL_REG_GEN_READ(CCSIDR, c0, 1, c0, 0);
	…
	  	CLIDR_UNI_CACHE = 0x4,
	  	CLIDR_CACHE_MASK = 0x7,
	- #define CLIDR_CACHE(level, val) ((val >> (level - 1) * 3) & CLIDR_CACHE_MASK)
	+ /** levels counted from 0 */
	+ #define CLIDR_CACHE(level, val) ((val >> (level * 3)) & CLIDR_CACHE_MASK)
	  };
	  CONTROL_REG_GEN_READ(CLIDR, c0, 1, c0, 1);
	…
	  /* Memory protection and control registers */
	+ enum {
	+ 	TTBR_ADDR_MASK = 0xffffff80,
	+ 	TTBR_NOS_FLAG = 1 << 5,
	+ 	TTBR_RGN_MASK = 0x3 << 3,
	+ 	TTBR_RGN_NO_CACHE = 0x0 << 3,
	+ 	TTBR_RGN_WBWA_CACHE = 0x1 << 3,
	+ 	TTBR_RGN_WT_CACHE = 0x2 << 3,
	+ 	TTBR_RGN_WB_CACHE = 0x3 << 3,
	+ 	TTBR_S_FLAG = 1 << 1,
	+ 	TTBR_C_FLAG = 1 << 0,
	+ };
	  CONTROL_REG_GEN_READ(TTBR0, c2, 0, c0, 0);
	  CONTROL_REG_GEN_WRITE(TTBR0, c2, 0, c0, 0);
	…
	  CONTROL_REG_GEN_WRITE(DCIMVAC, c7, 0, c6, 1);
	- CONTROL_REG_GEN_WRITE(DCIMSW, c7, 0, c6, 2);
	+ CONTROL_REG_GEN_WRITE(DCISW, c7, 0, c6, 2);
	
	  CONTROL_REG_GEN_WRITE(ATS1CPR, c7, 0, c8, 0);
	…
	  CONTROL_REG_GEN_WRITE(ATS1CUR, c7, 0, c8, 2);
	  CONTROL_REG_GEN_WRITE(ATS1CUW, c7, 0, c8, 3);
	- CONTROL_REG_GEN_WRITE(ATS1NSOPR, c7, 0, c8, 4);
	- CONTROL_REG_GEN_WRITE(ATS1NSOPW, c7, 0, c8, 5);
	- CONTROL_REG_GEN_WRITE(ATS1NSOUR, c7, 0, c8, 6);
	- CONTROL_REG_GEN_WRITE(ATS1NSOUW, c7, 0, c8, 7);
	+ CONTROL_REG_GEN_WRITE(ATS12NSOPR, c7, 0, c8, 4);
	+ CONTROL_REG_GEN_WRITE(ATS12NSOPW, c7, 0, c8, 5);
	+ CONTROL_REG_GEN_WRITE(ATS12NSOUR, c7, 0, c8, 6);
	+ CONTROL_REG_GEN_WRITE(ATS12NSOUW, c7, 0, c8, 7);
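The new CCSIDR/CLIDR helpers compose naturally when walking the cache hierarchy. A minimal sketch (the enumeration loop and the printf reporting are illustrative, not part of the changeset; a CLIDR cache-type field of 0 means no cache at that level):

	static void print_cache_geometry(void)
	{
		const uint32_t clidr = CLIDR_read();
	
		for (unsigned level = 0; level < 8; ++level) {
			/* Levels are counted from 0 after this changeset. */
			if (CLIDR_CACHE(level, clidr) == 0)
				break;	/* 0 = no cache at this level */
	
			/* Select the cache at this level, then read its geometry. */
			CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
			const uint32_t ccsidr = CCSIDR_read();
	
			printf("L%u: %u sets, %u ways, %u-byte lines\n", level + 1,
			    CCSIDR_SETS(ccsidr), CCSIDR_WAYS(ccsidr),
			    1 << CCSIDR_LINESIZE_LOG(ccsidr));
		}
	}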
kernel/arch/arm32/include/arch/mm/page.h
r6ad185d → rca62f86

	  #include <arch/exception.h>
	  #include <arch/barrier.h>
	+ #include <arch/cp15.h>
	  #include <trace.h>
	…
	  /* Set PTE address accessors for each level. */
	  #define SET_PTL0_ADDRESS_ARCH(ptl0) \
	- 	(set_ptl0_addr((pte_t *) (ptl0)))
	+ 	set_ptl0_addr((pte_t *) (ptl0))
	  #define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
	- 	(((pte_t *) (ptl0))[(i)].l0.coarse_table_addr = (a) >> 10)
	+ 	set_ptl1_addr((pte_t*) (ptl0), i, a)
	  #define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
	  #define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
	  #define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
	- 	(((pte_t *) (ptl3))[(i)].l1.frame_base_addr = (a) >> 12)
	+ 	set_ptl3_addr((pte_t*) (ptl3), i, a)
	
	  /* Get PTE flags accessors for each level. */
	…
	  	set_pt_level1_present((pte_t *) (ptl3), (size_t) (i))
	
	+ 
	+ #define pt_coherence(page) pt_coherence_m(page, 1)
	+ 
	  #if defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_ARCH_armv7_a)
	  #include "page_armv6.h"
	…
	  #endif
	
	+ /** Sets the address of level 0 page table.
	+  *
	+  * @param pt Pointer to the page table to set.
	+  *
	+  * Page tables are always in cacheable memory.
	+  * Make sure the memory type is correct, and in sync with:
	+  * init_boot_pt (boot/arch/arm32/src/mm.c)
	+  * init_ptl0_section (boot/arch/arm32/src/mm.c)
	+  * set_pt_level1_flags (kernel/arch/arm32/include/arch/mm/page_armv6.h)
	+  */
	+ NO_TRACE static inline void set_ptl0_addr(pte_t *pt)
	+ {
	+ 	uint32_t val = (uint32_t)pt & TTBR_ADDR_MASK;
	+ 	val |= TTBR_RGN_WBWA_CACHE | TTBR_C_FLAG;
	+ 	TTBR0_write(val);
	+ }
	+ 
	+ NO_TRACE static inline void set_ptl1_addr(pte_t *pt, size_t i, uintptr_t address)
	+ {
	+ 	pt[i].l0.coarse_table_addr = address >> 10;
	+ 	pt_coherence(&pt[i].l0);
	+ }
	+ 
	+ NO_TRACE static inline void set_ptl3_addr(pte_t *pt, size_t i, uintptr_t address)
	+ {
	+ 	pt[i].l1.frame_base_addr = address >> 12;
	+ 	pt_coherence(&pt[i].l1);
	+ }
	+ 
	  #endif
kernel/arch/arm32/include/arch/mm/page_armv4.h
r6ad185d → rca62f86

	  #define PTE_DESCRIPTOR_SMALL_PAGE	2
	
	- /** Sets the address of level 0 page table.
	-  *
	-  * @param pt Pointer to the page table to set.
	-  *
	-  */
	- NO_TRACE static inline void set_ptl0_addr(pte_t *pt)
	- {
	- 	asm volatile (
	- 		"mcr p15, 0, %[pt], c2, c0, 0\n"
	- 		:: [pt] "r" (pt)
	- 	);
	- }
	+ #define pt_coherence_m(pt, count) \
	+ do { \
	+ 	for (unsigned i = 0; i < count; ++i) \
	+ 		DCCMVAU_write((uintptr_t)(pt + i)); \
	+ 	read_barrier(); \
	+ } while (0)
	
	  /** Returns level 0 page table entry flags.
	… (the remaining hunks only strip trailing whitespace around the access_permission assignments)
kernel/arch/arm32/include/arch/mm/page_armv6.h
r6ad185d → rca62f86

	  #error "Do not include arch specific page.h directly use generic page.h instead"
	  #endif
	
	+ 
	  /* Macros for querying the last-level PTE entries. */
	…
	  #define PTE_DESCRIPTOR_SMALL_PAGE_NX	3
	
	- /** Sets the address of level 0 page table.
	-  *
	-  * @param pt Pointer to the page table to set.
	-  *
	-  */
	- NO_TRACE static inline void set_ptl0_addr(pte_t *pt)
	- {
	- 	asm volatile (
	- 		"mcr p15, 0, %[pt], c2, c0, 0\n"
	- 		:: [pt] "r" (pt)
	- 	);
	- }
	+ /**
	+  * For an ARMv7 implementation that does not include the Large Physical
	+  * Address Extension, and in implementations of architecture versions before
	+  * ARMv7, if the translation tables are held in Write-Back Cacheable memory,
	+  * the caches must be cleaned to the point of unification after writing to the
	+  * translation tables and before the DSB instruction. This ensures that the
	+  * updated translation tables are visible to a hardware translation table walk.
	+  *
	+  * Therefore, an example instruction sequence for writing a translation table
	+  * entry, covering changes to the instruction or data mappings in a
	+  * uniprocessor system is:
	+  * STR rx, [Translation table entry]
	+  * 	; write new entry to the translation table
	+  * Clean cache line [Translation table entry] : This operation is not required
	+  * 	; with the Multiprocessing Extensions.
	+  * DSB
	+  * 	; ensures visibility of the data cleaned from the D Cache
	+  * Invalidate TLB entry by MVA (and ASID if non-global) [page address]
	+  * Invalidate BTC
	+  * DSB
	+  * 	; ensure completion of the Invalidate TLB operation
	+  * ISB
	+  * 	; ensure table changes visible to instruction fetch
	+  *
	+  * ARM Architecture reference chp. B3.10.1 p. B3-1375
	+  * @note: see TTBR0/1 for pt memory type
	+  */
	+ #define pt_coherence_m(pt, count) \
	+ do { \
	+ 	for (unsigned i = 0; i < count; ++i) \
	+ 		DCCMVAU_write((uintptr_t)(pt + i)); \
	+ 	read_barrier(); \
	+ } while (0)
	…
	  		p->ns = 0;
	  	}
	+ 	pt_coherence(p);
	  }
	…
	  		p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE_NX;
	  	}
	
	- 	/* tex=0 buf=1 and cache=1 => normal memory
	- 	 * tex=0 buf=1 and cache=0 => shareable device mmio
	- 	 */
	- 	p->cacheable = (flags & PAGE_CACHEABLE);
	- 	p->bufferable = 1;
	- 	p->tex = 0;
	+ 	if (flags & PAGE_CACHEABLE) {
	+ 		/*
	+ 		 * Write-through, no write-allocate memory, see ch. B3.8.2
	+ 		 * (p. B3-1358) of ARM Architecture reference manual.
	+ 		 * Make sure the memory type is correct, and in sync with:
	+ 		 * init_boot_pt (boot/arch/arm32/src/mm.c)
	+ 		 * init_ptl0_section (boot/arch/arm32/src/mm.c)
	+ 		 * set_ptl0_addr (kernel/arch/arm32/include/arch/mm/page.h)
	+ 		 */
	+ 		p->tex = 5;
	+ 		p->cacheable = 0;
	+ 		p->bufferable = 1;
	+ 	} else {
	+ 		/*
	+ 		 * Shareable device memory, see ch. B3.8.2 (p. B3-1358) of
	+ 		 * ARM Architecture reference manual.
	+ 		 */
	+ 		p->tex = 0;
	+ 		p->cacheable = 0;
	+ 		p->bufferable = 1;
	+ 	}
	
	  	/* Shareable is ignored for devices (non-cacheable),
	- 	 * turn it on for normal memory. */
	- 	p->shareable = 1;
	+ 	 * turn it off for normal memory. */
	+ 	p->shareable = 0;
	
	  	p->non_global = !(flags & PAGE_GLOBAL);
	…
	  		p->access_permission_1 = PTE_AP1_RO;
	  	}
	+ 	pt_coherence(p);
	  }
	…
	  	p->should_be_zero_0 = 0;
	  	p->should_be_zero_1 = 0;
	- 	write_barrier();
	  	p->descriptor_type = PTE_DESCRIPTOR_COARSE_TABLE;
	+ 	pt_coherence(p);
	  }
	…
	
	  	p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE;
	+ 	pt_coherence(p);
	  }
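Taken together, the new helpers mean that every PTE store is followed by a D-cache clean to the point of unification before any DSB/TLB maintenance. A minimal sketch of the resulting store-then-clean pattern (update_ptes and the values it writes are hypothetical, for illustration only):

	/* Hypothetical helper: write a batch of last-level entries, then make
	 * them visible to the hardware table walker via pt_coherence_m. */
	static void update_ptes(pte_t *pt, size_t idx, size_t count, uintptr_t frame)
	{
		for (size_t i = 0; i < count; ++i)
			pt[idx + i].l1.frame_base_addr = (frame >> 12) + i;
	
		/* Clean the cache lines holding the entries (DCCMVAU per entry),
		 * then barrier so the cleans complete before TLB maintenance. */
		pt_coherence_m(&pt[idx], count);
	}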
kernel/arch/arm32/src/cpu/cpu.c
r6ad185d → rca62f86

	  #endif
	  #ifdef PROCESSOR_ARCH_armv7_a
	- 	/* ICache coherency is elaborate on in barrier.h.
	+ 	/* ICache coherency is elaborated on in barrier.h.
	  	 * VIPT and PIPT caches need maintenance only on code modify,
	  	 * so it should be safe for general use.
	…
	  		control_reg |=
	  		    SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG;
	+ 	} else {
	+ 		control_reg &=
	+ 		    ~(SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG);
	  	}
	  #endif
	…
	  #ifdef PROCESSOR_ARCH_armv7_a
	  	CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
	- 	const unsigned ls_log = 2 +
	- 	    ((CCSIDR_read() >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK);
	- 	return ls_log + 2; //return log2(bytes)
	+ 	const uint32_t ccsidr = CCSIDR_read();
	+ 	return CCSIDR_LINESIZE_LOG(ccsidr);
	  #endif
	  	return 0;
	…
	  #ifdef PROCESSOR_ARCH_armv7_a
	  	CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
	- 	const unsigned ways = 1 +
	- 	    ((CCSIDR_read() >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK);
	- 	return ways;
	+ 	const uint32_t ccsidr = CCSIDR_read();
	+ 	return CCSIDR_WAYS(ccsidr);
	  #endif
	  	return 0;
	…
	  #ifdef PROCESSOR_ARCH_armv7_a
	  	CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
	- 	const unsigned sets = 1 +
	- 	    ((CCSIDR_read() >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK);
	- 	return sets;
	+ 	const uint32_t ccsidr = CCSIDR_read();
	+ 	return CCSIDR_SETS(ccsidr);
	  #endif
	  	return 0;
	…
	  #ifdef PROCESSOR_ARCH_armv7_a
	  	const uint32_t val = CLIDR_read();
	- 	for (unsigned i = 1; i <= 7; ++i) {
	+ 	for (unsigned i = 0; i < 8; ++i) {
	  		const unsigned ctype = CLIDR_CACHE(i, val);
	  		switch (ctype) {
	…
	  		const unsigned ways = dcache_ways(i);
	  		const unsigned sets = dcache_sets(i);
	- 		const unsigned way_shift = 31 - log2(ways);
	+ 		const unsigned way_shift = 32 - log2(ways);
	  		const unsigned set_shift = dcache_linesize_log(i);
	  		dcache_clean_manual(i, false, ways, sets, way_shift, set_shift);
	…
	  		const unsigned ways = dcache_ways(i);
	  		const unsigned sets = dcache_sets(i);
	- 		const unsigned way_shift = 31 - log2(ways);
	+ 		const unsigned way_shift = 32 - log2(ways);
	  		const unsigned set_shift = dcache_linesize_log(i);
	  		dcache_clean_manual(i, true, ways, sets, way_shift, set_shift);
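The way_shift fix is the classic off-by-one in the set/way operand encoding: the way index must occupy the most significant bits of the operand. A worked example with illustrative numbers:

	/* For a 4-way cache: log2(4) = 2, so the way index lives in bits
	 * [31:30] of the set/way operand and way_shift must be
	 * 32 - log2(ways) = 30. The old 31 - log2(ways) = 29 placed the
	 * way index one bit too low, corrupting the operand. */
	const unsigned ways = 4;
	const unsigned way_shift = 32 - log2(ways);	/* == 30 */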
kernel/arch/arm32/src/mach/beagleboardxm/beagleboardxm.c
r6ad185d → rca62f86

	  static void bb_timer_irq_handler(irq_t *irq)
	  {
	+ 	amdm37x_gpt_irq_ack(&beagleboard.timer);
	+ 
	  	/*
	  	 * We are holding a lock which prevents preemption.
	  	 * Release the lock, call clock() and reacquire the lock again.
	  	 */
	- 	amdm37x_gpt_irq_ack(&beagleboard.timer);
	  	spinlock_unlock(&irq->lock);
	  	clock();
	…
	  {
	  	const unsigned inum = amdm37x_irc_inum_get(beagleboard.irc_addr);
	- 	amdm37x_irc_irq_ack(beagleboard.irc_addr);
	
	  	irq_t *irq = irq_dispatch_and_lock(inum);
	…
	  		    CPU->id, inum);
	  	}
	+ 	/** amdm37x manual ch. 12.5.2 (p. 2428) places irc ack at the end
	+ 	 * of ISR. DO this to avoid strange behavior. */
	+ 	amdm37x_irc_irq_ack(beagleboard.irc_addr);
	  }
kernel/arch/arm32/src/mm/tlb.c
r6ad185d → rca62f86

	  #include <arch/mm/asid.h>
	  #include <arch/asm.h>
	+ #include <arch/cp15.h>
	  #include <typedefs.h>
	  #include <arch/mm/page.h>
	+ #include <arch/cache.h>
	
	  /** Invalidate all entries in TLB.
	…
	  void tlb_invalidate_all(void)
	  {
	- 	asm volatile (
	- 		"eor r1, r1\n"
	- 		"mcr p15, 0, r1, c8, c7, 0\n"
	- 		::: "r1"
	- 	);
	+ 	TLBIALL_write(0);
	+ 	/*
	+ 	 * "A TLB maintenance operation is only guaranteed to be complete after
	+ 	 * the execution of a DSB instruction."
	+ 	 * "An ISB instruction, or a return from an exception, causes the
	+ 	 * effect of all completed TLB maintenance operations that appear in
	+ 	 * program order before the ISB or return from exception to be visible
	+ 	 * to all subsequent instructions, including the instruction fetches
	+ 	 * for those instructions."
	+ 	 * ARM Architecture reference Manual ch. B3.10.1 p. B3-1374 B3-1375
	+ 	 */
	+ 	read_barrier();
	+ 	inst_barrier();
	  }
	…
	  {
	  	tlb_invalidate_all();
	+ 	// TODO: why not TLBIASID_write(asid) ?
	  }
	…
	   * @param page Virtual adress of the page
	   */
	  static inline void invalidate_page(uintptr_t page)
	  {
	- 	asm volatile (
	- 		"mcr p15, 0, %[page], c8, c7, 1\n"
	- 		:: [page] "r" (page)
	- 	);
	+ 	//TODO: What about TLBIMVAA?
	+ 	TLBIMVA_write(page);
	+ 	/*
	+ 	 * "A TLB maintenance operation is only guaranteed to be complete after
	+ 	 * the execution of a DSB instruction."
	+ 	 * "An ISB instruction, or a return from an exception, causes the
	+ 	 * effect of all completed TLB maintenance operations that appear in
	+ 	 * program order before the ISB or return from exception to be visible
	+ 	 * to all subsequent instructions, including the instruction fetches
	+ 	 * for those instructions."
	+ 	 * ARM Architecture reference Manual ch. B3.10.1 p. B3-1374 B3-1375
	+ 	 */
	+ 	read_barrier();
	+ 	inst_barrier();
	  }
	…
	  void tlb_invalidate_pages(asid_t asid __attribute__((unused)), uintptr_t page, size_t cnt)
	  {
	- 	unsigned int i;
	- 
	- 	for (i = 0; i < cnt; i++)
	+ 	for (unsigned i = 0; i < cnt; i++)
	  		invalidate_page(page + i * PAGE_SIZE);
	  }
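The DSB/ISB pairing that both functions now repeat can be read as one idiom; a minimal sketch (the helper name is made up, the cp15 accessors and barriers are the ones used above):

	/* Invalidate one TLB entry and wait until the effect is architecturally
	 * visible, per ARM ARM B3.10.1. */
	static inline void tlb_invalidate_page_sync(uintptr_t page)
	{
		TLBIMVA_write(page);	/* invalidate by MVA (and ASID) */
		read_barrier();		/* DSB: wait for the TLB op to complete */
		inst_barrier();		/* ISB: resynchronize instruction fetch */
	}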
kernel/arch/ia32/src/ddi/ddi.c
r6ad185d → rca62f86

	  int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
	  {
	- 	size_t bits = ioaddr + size;
	- 	if (bits > IO_PORTS)
	+ 	size_t elements = ioaddr + size;
	+ 	if (elements > IO_PORTS)
	  		return ENOENT;
	  
	- 	if (task->arch.iomap.bits < bits) {
	+ 	if (task->arch.iomap.elements < elements) {
	  		/*
	  		 * The I/O permission bitmap is too small and needs to be grown.
	  		 */
	  
	- 		uint8_t *newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
	- 		if (!newmap)
	+ 		void *store = malloc(bitmap_size(elements, 0), FRAME_ATOMIC);
	+ 		if (!store)
	  			return ENOMEM;
	  
	  		bitmap_t oldiomap;
	- 		bitmap_initialize(&oldiomap, task->arch.iomap.map,
	+ 		bitmap_initialize(&oldiomap, task->arch.iomap.elements, 0,
	  		    task->arch.iomap.bits);
	- 		bitmap_initialize(&task->arch.iomap, newmap, bits);
	+ 
	+ 		bitmap_initialize(&task->arch.iomap, elements, 0, store);
	  
	  		/*
	  		 * Mark the new range inaccessible.
	  		 */
	- 		bitmap_set_range(&task->arch.iomap, oldiomap.bits,
	- 		    bits - oldiomap.bits);
	+ 		bitmap_set_range(&task->arch.iomap, oldiomap.elements,
	+ 		    elements - oldiomap.elements);
	…
	  		if (oldiomap.bits) {
	  			bitmap_copy(&task->arch.iomap, &oldiomap,
	- 			    oldiomap.bits);
	- 			free(oldiomap.map);
	+ 			    oldiomap.elements);
	+ 
	+ 			free(oldiomap.bits);
	  		}
	  	}
	…
	  	 * Enable the range and we are done.
	  	 */
	- 	bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, (size_t) size);
	+ 	bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, size);
	…
	  	/* First, copy the I/O Permission Bitmap. */
	  	irq_spinlock_lock(&TASK->lock, false);
	+ 
	  	size_t ver = TASK->arch.iomapver;
	- 	size_t bits = TASK->arch.iomap.bits;
	- 	if (bits) {
	- 		ASSERT(TASK->arch.iomap.map);
	+ 	size_t elements = TASK->arch.iomap.elements;
	+ 
	+ 	if (elements > 0) {
	+ 		ASSERT(TASK->arch.iomap.bits);
	  
	  		bitmap_t iomap;
	- 		bitmap_initialize(&iomap, CPU->arch.tss->iomap,
	- 		    TSS_IOMAP_SIZE * 8);
	- 		bitmap_copy(&iomap, &TASK->arch.iomap, bits);
	+ 		bitmap_initialize(&iomap, TSS_IOMAP_SIZE * 8, 0,
	+ 		    CPU->arch.tss->iomap);
	+ 		bitmap_copy(&iomap, &TASK->arch.iomap, elements);
	…
	  		 * I/O access.
	  		 */
	- 		bitmap_set_range(&iomap, bits, ALIGN_UP(bits, 8) - bits);
	+ 		bitmap_set_range(&iomap, elements,
	+ 		    ALIGN_UP(elements, 8) - elements);
	+ 
	  		/*
	  		 * It is safe to set the trailing eight bits because of the
	  		 * extra convenience byte in TSS_IOMAP_SIZE.
	  		 */
	- 		bitmap_set_range(&iomap, ALIGN_UP(bits, 8), 8);
	+ 		bitmap_set_range(&iomap, ALIGN_UP(elements, 8), 8);
	  	}
	+ 
	  	irq_spinlock_unlock(&TASK->lock, false);
	…
	  	descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
	- 	gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + BITS2BYTES(bits));
	+ 	size_t size = bitmap_size(elements, 0);
	+ 	gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + size);
	  	gdtr_load(&cpugdtr);
kernel/arch/ia32/src/mm/frame.c
r6ad185d → rca62f86

	  #define PHYSMEM_LIMIT32  UINT64_C(0x100000000)
	+ #define PHYSMEM_LIMIT_DMA  UINT64_C(0x1000000)
	
	  size_t hardcoded_unmapped_ktext_size = 0;
	…
	  			else
	  				conf = minconf;
	- 			zone_create(pfn, count, conf,
	- 			    ZONE_AVAILABLE | ZONE_LOWMEM);
	+ 
	+ 			if ((pfn * PAGE_SIZE) < PHYSMEM_LIMIT_DMA) {
	+ 				size_t dma_count = min(
	+ 				    PHYSMEM_LIMIT_DMA / PAGE_SIZE - pfn,
	+ 				    count);
	+ 				zone_create(pfn, dma_count, conf,
	+ 				    ZONE_AVAILABLE | ZONE_DMA);
	+ 				count -= dma_count;
	+ 				pfn += dma_count;
	+ 			}
	+ 
	+ 			conf = pfn;
	+ 			if (count) {
	+ 				zone_create(pfn, count, conf,
	+ 				    ZONE_AVAILABLE | ZONE_LOWMEM);
	+ 			}
	  		} else {
	  			conf = zone_external_conf_alloc(count);
	- 			if (conf != 0)
	+ 			if (conf != 0) {
	  				zone_create(pfn, count, conf,
	  				    ZONE_AVAILABLE | ZONE_HIGHMEM);
	+ 			}
	  		}
	  	} else if ((e820table[i].type == MEMMAP_MEMORY_ACPI) ||
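To see what the split does, a worked example with purely illustrative numbers:

	/* A low-memory zone of 8192 frames starting at pfn 0 (32 MiB with
	 * 4 KiB frames) straddles PHYSMEM_LIMIT_DMA (16 MiB), so it is split
	 * into a 4096-frame ZONE_DMA zone and a 4096-frame ZONE_LOWMEM zone.
	 * A zone that starts entirely above 16 MiB is left untouched. */
	size_t pfn = 0;
	size_t count = 8192;
	size_t dma_count = min(PHYSMEM_LIMIT_DMA / PAGE_SIZE - pfn, count); /* 4096 */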
kernel/arch/ia32/src/proc/task.c
r6ad185d → rca62f86

	  /** Perform ia32 specific task initialization.
	   *
	- * @param t Task to be initialized.
	+ * @param task Task to be initialized.
	+ *
	   */
	- void task_create_arch(task_t *t)
	+ void task_create_arch(task_t *task)
	  {
	- 	t->arch.iomapver = 0;
	- 	bitmap_initialize(&t->arch.iomap, NULL, 0);
	+ 	task->arch.iomapver = 0;
	+ 	bitmap_initialize(&task->arch.iomap, 0, 0, NULL);
	  }
	
	  /** Perform ia32 specific task destruction.
	   *
	- * @param t Task to be initialized.
	+ * @param task Task to be initialized.
	+ *
	   */
	- void task_destroy_arch(task_t *t)
	+ void task_destroy_arch(task_t *task)
	  {
	- 	if (t->arch.iomap.map)
	- 		free(t->arch.iomap.map);
	+ 	if (task->arch.iomap.bits != NULL)
	+ 		free(task->arch.iomap.bits);
	  }
kernel/arch/ia64/src/ddi/ddi.c
r6ad185d → rca62f86

	  {
	  	if (!task->arch.iomap) {
	- 		uint8_t *map;
	- 
	  		task->arch.iomap = malloc(sizeof(bitmap_t), 0);
	- 		map = malloc(BITS2BYTES(IO_MEMMAP_PAGES), 0);
	- 		if(!map)
	+ 		if (task->arch.iomap == NULL)
	  			return ENOMEM;
	- 		bitmap_initialize(task->arch.iomap, map, IO_MEMMAP_PAGES);
	+ 
	+ 		void *store = malloc(bitmap_size(IO_MEMMAP_PAGES, 0), 0);
	+ 		if (store == NULL)
	+ 			return ENOMEM;
	+ 
	+ 		bitmap_initialize(task->arch.iomap, IO_MEMMAP_PAGES, 0, store);
	  		bitmap_clear_range(task->arch.iomap, 0, IO_MEMMAP_PAGES);
	  	}
	…
	  	size = ALIGN_UP(size + ioaddr - 4 * iopage, PORTS_PER_PAGE);
	  	bitmap_set_range(task->arch.iomap, iopage, size / 4);
	
	  	return 0;
	  }
kernel/arch/sparc64/include/arch/mm/frame.h
r6ad185d → rca62f86

	  #endif
	
	+ #ifndef __ASM__
	+ 
	+ #include <typedefs.h>
	+ 
	+ extern uintptr_t end_of_identity;
	+ 
	+ extern void frame_low_arch_init(void);
	+ extern void frame_high_arch_init(void);
	+ #define physmem_print()
	+ 
	+ #endif
	+ 
	  #endif
kernel/arch/sparc64/include/arch/mm/sun4u/frame.h
r6ad185d → rca62f86

	  typedef union frame_address frame_address_t;
	
	- extern uintptr_t end_of_identity;
	- 
	- extern void frame_low_arch_init(void);
	- extern void frame_high_arch_init(void);
	- #define physmem_print()
	- 
	  #endif
kernel/arch/sparc64/include/arch/mm/sun4v/frame.h
r6ad185d → rca62f86

	  #define FRAME_SIZE  (1 << FRAME_WIDTH)
	
	- #ifndef __ASM__
	- 
	- #include <typedefs.h>
	- 
	- extern void frame_low_arch_init(void);
	- extern void frame_high_arch_init(void);
	- #define physmem_print()
	- 
	- #endif
	- 
	  #endif
kernel/arch/sparc64/include/arch/trap/sun4v/mmu.h
r6ad185d → rca62f86

	  	nop
	
	+ 	/* exclude pages beyond the end of memory from the identity mapping */
	+ 	sethi %hi(end_of_identity), %g4
	+ 	ldx [%g4 + %lo(end_of_identity)], %g4
	+ 	cmp %g1, %g4
	+ 	bgeu %xcc, 0f
	+ 	nop
	+ 
	  	/*
	  	 * Installing the identity does not fit into 32 instructions, call
kernel/arch/sparc64/src/mm/sun4v/frame.c
r6ad185d → rca62f86

	  	 */
	  	frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
	+ 
	+ 	/* PA2KA will work only on low-memory. */
	+ 	end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE;
	  }
kernel/arch/sparc64/src/mm/sun4v/tlb.c
r6ad185d → rca62f86

	  	uintptr_t va = DMISS_ADDRESS(page_and_ctx);
	  	uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
	+ 	as_t *as = AS;
	
	  	if (ctx == ASID_KERNEL) {
	…
	  			/* NULL access in kernel */
	  			panic("NULL pointer dereference.");
	+ 		} else if (va >= end_of_identity) {
	+ 			/* Kernel non-identity */
	+ 			as = AS_KERNEL;
	+ 		} else {
	+ 			panic("Unexpected kernel page fault.");
	  		}
	- 		panic("Unexpected kernel page fault.");
	- 	}
	- 
	- 	t = page_mapping_find(AS, va, true);
	+ 	}
	+ 
	+ 	t = page_mapping_find(as, va, true);
	  	if (t) {
	  		/*
	…
	  	uintptr_t va = DMISS_ADDRESS(page_and_ctx);
	  	uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
	- 
	- 	t = page_mapping_find(AS, va, true);
	+ 	as_t *as = AS;
	+ 
	+ 	if (ctx == ASID_KERNEL)
	+ 		as = AS_KERNEL;
	+ 
	+ 	t = page_mapping_find(as, va, true);
	  	if (t && PTE_WRITABLE(t)) {
	  		/*
kernel/arch/sparc64/src/sun4v/start.S
r6ad185d → rca62f86

	  	.quad 0
	
	+ /*
	+  * This variable is used by the fast_data_access_MMU_miss trap handler.
	+  * In runtime, it is modified to contain the address of the end of physical
	+  * memory.
	+  */
	+ .global end_of_identity
	+ end_of_identity:
	+ 	.quad -1
	+ 
	  .global kernel_8k_tlb_data_template
	  kernel_8k_tlb_data_template:
kernel/genarch/include/genarch/drivers/amdm37x/gpt.h
r6ad185d → rca62f86

	  #include <typedefs.h>
	  #include <mm/km.h>
	+ #include <time/clock.h>
	
	  /* AMDM37x TRM p. 2740 */
	…
	  #define AMDM37x_GPT_TCLR_CE_FLAG  (1 << 6)
	  #define AMDM37x_GPT_TCLR_SCPWM  (1 << 7)
	- #define AMDM37x_GPT_TCLR_TCM_MASK  (0x3)
	- #define AMDM37x_GPT_TCLR_TCM_SHIFT  (8)
	- #define AMDM37x_GPT_TCLR_TRG_MASK  (0x3)
	- #define AMDM37x_GPT_TCLR_TRG_SHIFT  (10)
	+ #define AMDM37x_GPT_TCLR_TCM_MASK  (0x3 << 8)
	+ #define AMDM37x_GPT_TCLR_TCM_NO_CAPTURE  (0x0 << 8)
	+ #define AMDM37x_GPT_TCLR_TCM_RAISE_CAPTURE  (0x1 << 8)
	+ #define AMDM37x_GPT_TCLR_TCM_FALL_CAPTURE  (0x2 << 8)
	+ #define AMDM37x_GPT_TCLR_TCM_BOTH_CAPTURE  (0x3 << 8)
	+ #define AMDM37x_GPT_TCLR_TRG_MASK  (0x3 << 10)
	+ #define AMDM37x_GPT_TCLR_TRG_NO  (0x0 << 10)
	+ #define AMDM37x_GPT_TCLR_TRG_OVERFLOW  (0x1 << 10)
	+ #define AMDM37x_GPT_TCLR_TRG_OVERMATCH  (0x2 << 10)
	  #define AMDM37x_GPT_TCLR_PT_FLAG  (1 << 12)
	  #define AMDM37x_GPT_TCLR_CAPT_MODE_FLAG  (1 << 13)
	…
	  	timer->regs = (void*) km_map(ioregs, iosize, PAGE_NOT_CACHEABLE);
	
	+ 	/* Reset the timer */
	+ 	timer->regs->tiocp_cfg |= AMDM37x_GPT_TIOCP_CFG_SOFTRESET_FLAG;
	+ 
	+ 	while (!(timer->regs->tistat & AMDM37x_GPT_TISTAT_RESET_DONE_FLAG));
	+ 
	  	/* Set autoreload */
	- 	timer->regs->tclr = AMDM37x_GPT_TCLR_AR_FLAG;
	+ 	timer->regs->tclr |= AMDM37x_GPT_TCLR_AR_FLAG;
	
	  	timer->special_available = (
	…
	  	    (ioregs == AMDM37x_GPT2_BASE_ADDRESS) ||
	  	    (ioregs == AMDM37x_GPT10_BASE_ADDRESS));
	+ 	/* Select reload value */
	  	timer->regs->tldr = 0xffffffff - (32768 / hz) + 1;
	+ 	/* Set current counter value */
	  	timer->regs->tccr = 0xffffffff - (32768 / hz) + 1;
	+ 
	  	if (timer->special_available) {
	- 		/* Set values foraccording to formula (manual p. 2733) */
	+ 		/* Set values according to formula (manual p. 2733) */
	  		/* Use temporary variables for easier debugging */
	  		const uint32_t tpir =
	  		    ((32768 / hz + 1) * 1000000) - (32768000L * (1000 / hz));
	  		const uint32_t tnir =
	- 		    ((32768 / hz) * 1000000) - (32768000 * (1000 / hz));
	+ 		    ((32768 / hz) * 1000000) - (32768000L * (1000 / hz));
	  		timer->regs->tpir = tpir;
	  		timer->regs->tnir = tnir;
	…
	- static inline void amdm37x_gpt_irq_ack(amdm37x_gpt_t* timer)
	+ static inline bool amdm37x_gpt_irq_ack(amdm37x_gpt_t* timer)
	  {
	  	ASSERT(timer);
	  	ASSERT(timer->regs);
	  	/* Clear all pending interrupts */
	- 	timer->regs->tisr = timer->regs->tisr;
	+ 	const uint32_t tisr = timer->regs->tisr;
	+ 	timer->regs->tisr = tisr;
	+ 	return tisr != 0;
	  }
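The new bool return value lets a handler on a potentially shared line check whether this timer actually raised the interrupt before doing any work. A hypothetical handler fragment, mirroring the BeagleBoard-xM handler above:

	static void timer_irq_handler(irq_t *irq, amdm37x_gpt_t *timer)
	{
		if (!amdm37x_gpt_irq_ack(timer))
			return;	/* no interrupt was pending in this timer */
	
		/* Release the preemption-preventing lock around clock(). */
		spinlock_unlock(&irq->lock);
		clock();
		spinlock_lock(&irq->lock);
	}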
kernel/genarch/src/mm/page_pt.c
r6ad185d → rca62f86

	… (first hunk is a whitespace-only change after write_barrier(), near SET_PTL2_PRESENT)
	…
	  	 * Destroy the mapping.
	  	 * Setting to PAGE_NOT_PRESENT is not sufficient.
	- 	 */
	+ 	 * But we need SET_FRAME for possible PT coherence maintenance.
	+ 	 * At least on ARM.
	+ 	 */
	+ 	//TODO: Fix this inconsistency
	+ 	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), PAGE_NOT_PRESENT);
	  	memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);
kernel/generic/include/adt/bitmap.h
r6ad185d → rca62f86

	  #include <typedefs.h>
	
	- #define BITS2BYTES(bits)  (bits ? ((((bits) - 1) >> 3) + 1) : 0)
	+ #define BITMAP_ELEMENT   8
	+ #define BITMAP_REMAINER  7
	
	  typedef struct {
	- 	uint8_t *map;
	- 	size_t bits;
	+ 	size_t elements;
	+ 	uint8_t *bits;
	+ 
	+ 	size_t block_size;
	+ 	uint8_t *blocks;
	  } bitmap_t;
	
	- extern void bitmap_initialize(bitmap_t *bitmap, uint8_t *map, size_t bits);
	- extern void bitmap_set_range(bitmap_t *bitmap, size_t start, size_t bits);
	- extern void bitmap_clear_range(bitmap_t *bitmap, size_t start, size_t bits);
	- extern void bitmap_copy(bitmap_t *dst, bitmap_t *src, size_t bits);
	+ static inline void bitmap_set(bitmap_t *bitmap, size_t element,
	+     unsigned int value)
	+ {
	+ 	if (element < bitmap->elements) {
	+ 		/*
	+ 		 * The 2nd level bitmap is conservative.
	+ 		 * Make sure we update it properly.
	+ 		 */
	+ 
	+ 		if (value) {
	+ 			bitmap->bits[element / BITMAP_ELEMENT] |=
	+ 			    (1 << (element & BITMAP_REMAINER));
	+ 		} else {
	+ 			bitmap->bits[element / BITMAP_ELEMENT] &=
	+ 			    ~(1 << (element & BITMAP_REMAINER));
	+ 
	+ 			if (bitmap->block_size > 0) {
	+ 				size_t block = element / bitmap->block_size;
	+ 
	+ 				bitmap->blocks[block / BITMAP_ELEMENT] &=
	+ 				    ~(1 << (block & BITMAP_REMAINER));
	+ 			}
	+ 		}
	+ 	}
	+ }
	
	- static inline int bitmap_get(bitmap_t *bitmap, size_t bit)
	+ static inline unsigned int bitmap_get(bitmap_t *bitmap, size_t element)
	  {
	- 	if (bit >= bitmap->bits)
	+ 	if (element >= bitmap->elements)
	  		return 0;
	
	- 	return !!((bitmap->map)[bit / 8] & (1 << (bit & 7)));
	+ 	return !!((bitmap->bits)[element / BITMAP_ELEMENT] &
	+ 	    (1 << (element & BITMAP_REMAINER)));
	  }
	
	+ extern size_t bitmap_size(size_t, size_t);
	+ extern void bitmap_initialize(bitmap_t *, size_t, size_t, void *);
	+ 
	+ extern void bitmap_set_range(bitmap_t *, size_t, size_t);
	+ extern void bitmap_clear_range(bitmap_t *, size_t, size_t);
	+ 
	+ extern int bitmap_find_range(bitmap_t *, size_t, size_t, size_t);
	+ extern int bitmap_allocate_range(bitmap_t *, size_t, size_t, size_t, size_t *);
	+ extern void bitmap_free_range(bitmap_t *, size_t, size_t);
	+ extern void bitmap_copy(bitmap_t *, bitmap_t *, size_t);
	
	  #endif
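The allocation pattern under the new API is: compute the backing-store size with bitmap_size() first, then hand the storage to bitmap_initialize(). A minimal sketch (the wrapper function is illustrative; the malloc/FRAME_ATOMIC usage mirrors the ddi.c changes above):

	#include <adt/bitmap.h>
	#include <mm/slab.h>
	#include <mm/frame.h>
	#include <errno.h>
	
	static int make_bitmap(bitmap_t *bitmap, size_t elements)
	{
		/* block_size = 0 means no 2nd-level (block) bitmap */
		void *store = malloc(bitmap_size(elements, 0), FRAME_ATOMIC);
		if (!store)
			return ENOMEM;
	
		bitmap_initialize(bitmap, elements, 0, store);
		bitmap_clear_range(bitmap, 0, elements);
		return EOK;
	}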
kernel/generic/include/adt/list.h
r6ad185d → rca62f86

	  /*
	   * Copyright (c) 2001-2004 Jakub Jermar
	- * Copyright (c) 2011 Jiri Svoboda
	+ * Copyright (c) 2013 Jiri Svoboda
	   * All rights reserved.
	…
	  #define list_get_instance(link, type, member) \
	- 	((type *) (((void *)(link)) - ((void *) &(((type *) NULL)->member))))
	+ 	((type *) (((void *)(link)) - list_link_to_void(&(((type *) NULL)->member))))
	
	  #define list_foreach(list, iterator) \
	…
	  }
	
	+ /** Verify that argument type is a pointer to link_t (at compile time).
	+  *
	+  * This can be used to check argument type in a macro.
	+  */
	+ static inline const void *list_link_to_void(const link_t *link)
	+ {
	+ 	return link;
	+ }
	+ 
	  extern int list_member(const link_t *, const list_t *);
	  extern void list_concat(list_t *, list_t *);
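The point of routing the member offset through list_link_to_void() is that a wrong third argument to list_get_instance() now fails to compile instead of silently computing a bogus pointer. An illustrative sketch (item_t and first_value are made up; list_first is assumed to be the usual accessor):

	typedef struct {
		int value;
		link_t link;
	} item_t;
	
	static int first_value(list_t *list)
	{
		item_t *item = list_get_instance(list_first(list), item_t, link);
		/* list_get_instance(..., item_t, value) would no longer compile:
		 * &((item_t *) NULL)->value is an int *, not a link_t *. */
		return item->value;
	}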
kernel/generic/include/mm/as.h
r6ad185d → rca62f86

	  extern int as_area_change_flags(as_t *, unsigned int, uintptr_t);
	
	+ //TODO REMOVE!
	+ extern as_area_t * find_locked_area(as_t *as, uintptr_t va);
	+ 
	  extern unsigned int as_area_get_flags(as_area_t *);
	  extern bool as_area_check_access(as_area_t *, pf_access_t);
kernel/generic/include/mm/frame.h
r6ad185d → rca62f86

	  /** Allocate a frame which cannot be identity-mapped. */
	  #define FRAME_HIGHMEM  0x20
	+ /** Allocate a frame which needs to be from DMA zone. */
	+ #define FRAME_DMA  0x40
	
	  typedef uint8_t zone_flags_t;
	…
	  /** Zone contains memory that cannot be identity-mapped */
	  #define ZONE_HIGHMEM  0x10
	+ /** Zone contains memory suitable for old ISA DMA */
	+ #define ZONE_DMA  0x20
	
	  /** Mask of zone bits that must be matched exactly. */
	…
	  #define FRAME_TO_ZONE_FLAGS(ff) \
	- 	((((ff) & FRAME_LOWMEM) ? ZONE_LOWMEM : \
	+ 	((((ff) & FRAME_DMA) ? ZONE_DMA : \
	+ 	    (((ff) & FRAME_LOWMEM) ? ZONE_LOWMEM : \
	  	    (((ff) & FRAME_HIGHMEM) ? ZONE_HIGHMEM : \
	- 	    ZONE_LOWMEM /* | ZONE_HIGHMEM */)) | \
	+ 	    ZONE_LOWMEM /* | ZONE_HIGHMEM */))) | \
	  	    ZONE_AVAILABLE)
	
	  #define ZONE_FLAGS_MATCH(zf, f) \
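With the new flag, a caller requests ISA-DMA-capable memory directly; a one-line sketch mirroring the generic DDI change below (order is assumed to be in scope):

	/* FRAME_TO_ZONE_FLAGS(FRAME_DMA) evaluates to (ZONE_DMA | ZONE_AVAILABLE),
	 * so the allocation is confined to zones below the 16 MiB ISA limit. */
	void *frame = frame_alloc_noreserve(order, FRAME_DMA);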
kernel/generic/src/adt/bitmap.c
r6ad185d → rca62f86

	   *
	   * This file implements bitmap ADT and provides functions for
	- * setting and clearing ranges of bits.
	+ * setting and clearing ranges of bits and for finding ranges
	+ * of unset bits.
	+ *
	+ * The bitmap ADT can optionally implement a two-level hierarchy
	+ * for faster range searches. The second level bitmap (of blocks)
	+ * is not precise, but conservative. This means that if the block
	+ * bit is set, it guarantees that all bits in the block are set.
	+ * But if the block bit is unset, nothing can be said about the
	+ * bits in the block.
	+ *
	   */
	…
	  #include <macros.h>
	
	  #define ALL_ONES    0xff
	  #define ALL_ZEROES  0x00
	
	+ /** Get bitmap size
	+  *
	+  * Return the size (in bytes) required for the bitmap.
	+  *
	+  * @param elements   Number bits stored in bitmap.
	+  * @param block_size Block size of the 2nd level bitmap.
	+  *                   If set to zero, no 2nd level is used.
	+  *
	+  * @return Size (in bytes) required for the bitmap.
	+  *
	+  */
	+ size_t bitmap_size(size_t elements, size_t block_size)
	+ {
	+ 	size_t size = elements / BITMAP_ELEMENT;
	+ 
	+ 	if ((elements % BITMAP_ELEMENT) != 0)
	+ 		size++;
	+ 
	+ 	if (block_size > 0) {
	+ 		size += elements / block_size;
	+ 
	+ 		if ((elements % block_size) != 0)
	+ 			size++;
	+ 	}
	+ 
	+ 	return size;
	+ }
	
	  /** Initialize bitmap.
	   *
	   * No portion of the bitmap is set or cleared by this function.
	   *
	- * @param bitmap Bitmap structure.
	- * @param map    Address of the memory used to hold the map.
	- * @param bits   Number of bits stored in bitmap.
	- */
	- void bitmap_initialize(bitmap_t *bitmap, uint8_t *map, size_t bits)
	- {
	- 	bitmap->map = map;
	- 	bitmap->bits = bits;
	- }
	+ * @param bitmap     Bitmap structure.
	+ * @param elements   Number of bits stored in bitmap.
	+ * @param block_size Block size of the 2nd level bitmap.
	+ *                   If set to zero, no 2nd level is used.
	+ * @param data       Address of the memory used to hold the map.
	+ *                   The optional 2nd level bitmap follows the 1st
	+ *                   level bitmap.
	+ *
	+ */
	+ void bitmap_initialize(bitmap_t *bitmap, size_t elements, size_t block_size,
	+     void *data)
	+ {
	+ 	bitmap->elements = elements;
	+ 	bitmap->bits = (uint8_t *) data;
	+ 
	+ 	if (block_size > 0) {
	+ 		bitmap->block_size = block_size;
	+ 		bitmap->blocks = bitmap->bits +
	+ 		    bitmap_size(elements, 0);
	+ 	} else {
	+ 		bitmap->block_size = 0;
	+ 		bitmap->blocks = NULL;
	+ 	}
	+ }
	
	- /** Set range of bits.
	-  *
	-  * @param bitmap Bitmap structure.
	-  * @param start  Starting bit.
	-  * @param bits   Number of bits to set.
	-  */
	- void bitmap_set_range(bitmap_t *bitmap, size_t start, size_t bits)
	- {
	- 	size_t i = 0;
	- 	size_t aligned_start;
	- 	size_t lub;  /* leading unaligned bits */
	- 	size_t amb;  /* aligned middle bits */
	- 	size_t tab;  /* trailing aligned bits */
	- 
	- 	ASSERT(start + bits <= bitmap->bits);
	- 
	- 	aligned_start = ALIGN_UP(start, 8);
	- 	lub = min(aligned_start - start, bits);
	- 	amb = bits > lub ? bits - lub : 0;
	- 	tab = amb % 8;
	- 
	- 	if (!bits)
	- 		return;
	- 
	- 	if (start + bits < aligned_start) {
	- 		/* Set bits in the middle of byte. */
	- 		bitmap->map[start / 8] |= ((1 << lub) - 1) << (start & 7);
	- 		return;
	- 	}
	- 
	- 	if (lub) {
	- 		/* Make sure to set any leading unaligned bits. */
	- 		bitmap->map[start / 8] |= ~((1 << (8 - lub)) - 1);
	- 	}
	- 	for (i = 0; i < amb / 8; i++) {
	- 		/* The middle bits can be set byte by byte. */
	- 		bitmap->map[aligned_start / 8 + i] = ALL_ONES;
	- 	}
	- 	if (tab) {
	- 		/* Make sure to set any trailing aligned bits. */
	- 		bitmap->map[aligned_start / 8 + i] |= (1 << tab) - 1;
	- 	}
	- }
	+ static void bitmap_set_range_internal(uint8_t *bits, size_t start, size_t count)
	+ {
	+ 	if (count == 0)
	+ 		return;
	+ 
	+ 	size_t aligned_start = ALIGN_UP(start, BITMAP_ELEMENT);
	+ 
	+ 	/* Leading unaligned bits */
	+ 	size_t lub = min(aligned_start - start, count);
	+ 
	+ 	/* Aligned middle bits */
	+ 	size_t amb = (count > lub) ? (count - lub) : 0;
	+ 
	+ 	/* Trailing aligned bits */
	+ 	size_t tab = amb % BITMAP_ELEMENT;
	+ 
	+ 	if (start + count < aligned_start) {
	+ 		/* Set bits in the middle of byte. */
	+ 		bits[start / BITMAP_ELEMENT] |=
	+ 		    ((1 << lub) - 1) << (start & BITMAP_REMAINER);
	+ 		return;
	+ 	}
	+ 
	+ 	if (lub) {
	+ 		/* Make sure to set any leading unaligned bits. */
	+ 		bits[start / BITMAP_ELEMENT] |=
	+ 		    ~((1 << (BITMAP_ELEMENT - lub)) - 1);
	+ 	}
	+ 
	+ 	size_t i;
	+ 
	+ 	for (i = 0; i < amb / BITMAP_ELEMENT; i++) {
	+ 		/* The middle bits can be set byte by byte. */
	+ 		bits[aligned_start / BITMAP_ELEMENT + i] = ALL_ONES;
	+ 	}
	+ 
	+ 	if (tab) {
	+ 		/* Make sure to set any trailing aligned bits. */
	+ 		bits[aligned_start / BITMAP_ELEMENT + i] |= (1 << tab) - 1;
	+ 	}
	+ }
	+ 
	+ /** Set range of bits.
	+  *
	+  * @param bitmap Bitmap structure.
	+  * @param start  Starting bit.
	+  * @param count  Number of bits to set.
	+  *
	+  */
	+ void bitmap_set_range(bitmap_t *bitmap, size_t start, size_t count)
	+ {
	+ 	ASSERT(start + count <= bitmap->elements);
	+ 
	+ 	bitmap_set_range_internal(bitmap->bits, start, count);
	+ 
	+ 	if (bitmap->block_size > 0) {
	+ 		size_t aligned_start = ALIGN_UP(start, bitmap->block_size);
	+ 
	+ 		/* Leading unaligned bits */
	+ 		size_t lub = min(aligned_start - start, count);
	+ 
	+ 		/* Aligned middle bits */
	+ 		size_t amb = (count > lub) ? (count - lub) : 0;
	+ 
	+ 		size_t aligned_size = amb / bitmap->block_size;
	+ 
	+ 		bitmap_set_range_internal(bitmap->blocks, aligned_start,
	+ 		    aligned_size);
	+ 	}
	+ }
	
	- /** Clear range of bits.
	-  *
	-  * @param bitmap Bitmap structure.
	-  * @param start  Starting bit.
	-  * @param bits   Number of bits to clear.
	-  */
	- void bitmap_clear_range(bitmap_t *bitmap, size_t start, size_t bits)
	- {
	- 	size_t i = 0;
	- 	size_t aligned_start;
	- 	size_t lub;  /* leading unaligned bits */
	- 	size_t amb;  /* aligned middle bits */
	- 	size_t tab;  /* trailing aligned bits */
	- 
	- 	ASSERT(start + bits <= bitmap->bits);
	- 
	- 	aligned_start = ALIGN_UP(start, 8);
	- 	lub = min(aligned_start - start, bits);
	- 	amb = bits > lub ? bits - lub : 0;
	- 	tab = amb % 8;
	- 
	- 	if (!bits)
	- 		return;
	- 
	- 	if (start + bits < aligned_start) {
	- 		/* Set bits in the middle of byte */
	- 		bitmap->map[start / 8] &= ~(((1 << lub) - 1) << (start & 7));
	- 		return;
	- 	}
	- 
	- 	if (lub) {
	- 		/* Make sure to clear any leading unaligned bits. */
	- 		bitmap->map[start / 8] &= (1 << (8 - lub)) - 1;
	- 	}
	- 	for (i = 0; i < amb / 8; i++) {
	- 		/* The middle bits can be cleared byte by byte. */
	- 		bitmap->map[aligned_start / 8 + i] = ALL_ZEROES;
	- 	}
	- 	if (tab) {
	- 		/* Make sure to clear any trailing aligned bits. */
	- 		bitmap->map[aligned_start / 8 + i] &= ~((1 << tab) - 1);
	- 	}
	- }
	+ static void bitmap_clear_range_internal(uint8_t *bits, size_t start,
	+     size_t count)
	+ {
	+ 	if (count == 0)
	+ 		return;
	+ 
	+ 	size_t aligned_start = ALIGN_UP(start, BITMAP_ELEMENT);
	+ 
	+ 	/* Leading unaligned bits */
	+ 	size_t lub = min(aligned_start - start, count);
	+ 
	+ 	/* Aligned middle bits */
	+ 	size_t amb = (count > lub) ? (count - lub) : 0;
	+ 
	+ 	/* Trailing aligned bits */
	+ 	size_t tab = amb % BITMAP_ELEMENT;
	+ 
	+ 	if (start + count < aligned_start) {
	+ 		/* Set bits in the middle of byte */
	+ 		bits[start / BITMAP_ELEMENT] &=
	+ 		    ~(((1 << lub) - 1) << (start & BITMAP_REMAINER));
	+ 		return;
	+ 	}
	+ 
	+ 	if (lub) {
	+ 		/* Make sure to clear any leading unaligned bits. */
	+ 		bits[start / BITMAP_ELEMENT] &=
	+ 		    (1 << (BITMAP_ELEMENT - lub)) - 1;
	+ 	}
	+ 
	+ 	size_t i;
	+ 
	+ 	for (i = 0; i < amb / BITMAP_ELEMENT; i++) {
	+ 		/* The middle bits can be cleared byte by byte. */
	+ 		bits[aligned_start / BITMAP_ELEMENT + i] = ALL_ZEROES;
	+ 	}
	+ 
	+ 	if (tab) {
	+ 		/* Make sure to clear any trailing aligned bits. */
	+ 		bits[aligned_start / BITMAP_ELEMENT + i] &= ~((1 << tab) - 1);
	+ 	}
	+ }
	+ 
	+ /** Clear range of bits.
	+  *
	+  * @param bitmap Bitmap structure.
	+  * @param start  Starting bit.
	+  * @param count  Number of bits to clear.
	+  *
	+  */
	+ void bitmap_clear_range(bitmap_t *bitmap, size_t start, size_t count)
	+ {
	+ 	ASSERT(start + count <= bitmap->elements);
	+ 
	+ 	bitmap_clear_range_internal(bitmap->bits, start, count);
	+ 
	+ 	if (bitmap->block_size > 0) {
	+ 		size_t aligned_start = start / bitmap->block_size;
	+ 
	+ 		size_t aligned_end = (start + count) / bitmap->block_size;
	+ 
	+ 		if (((start + count) % bitmap->block_size) != 0)
	+ 			aligned_end++;
	+ 
	+ 		size_t aligned_size = aligned_end - aligned_start;
	+ 
	+ 		bitmap_clear_range_internal(bitmap->blocks, aligned_start,
	+ 		    aligned_size);
	+ 	}
	+ }
	
	  /** Copy portion of one bitmap into another bitmap.
	   *
	- * @param dst  Destination bitmap.
	- * @param src  Source bitmap.
	- * @param bits Number of bits to copy.
	- */
	- void bitmap_copy(bitmap_t *dst, bitmap_t *src, size_t bits)
	- {
	+ * @param dst   Destination bitmap.
	+ * @param src   Source bitmap.
	+ * @param count Number of bits to copy.
	+ *
	+ */
	+ void bitmap_copy(bitmap_t *dst, bitmap_t *src, size_t count)
	+ {
	+ 	ASSERT(count <= dst->elements);
	+ 	ASSERT(count <= src->elements);
	+ 
	  	size_t i;
	
	- 	ASSERT(bits <= dst->bits);
	- 	ASSERT(bits <= src->bits);
	- 
	- 	for (i = 0; i < bits / 8; i++)
	- 		dst->map[i] = src->map[i];
	- 
	- 	if (bits % 8) {
	- 		bitmap_clear_range(dst, i * 8, bits % 8);
	- 		dst->map[i] |= src->map[i] & ((1 << (bits % 8)) - 1);
	+ 	for (i = 0; i < count / BITMAP_ELEMENT; i++)
	+ 		dst->bits[i] = src->bits[i];
	+ 
	+ 	if (count % BITMAP_ELEMENT) {
	+ 		bitmap_clear_range(dst, i * BITMAP_ELEMENT,
	+ 		    count % BITMAP_ELEMENT);
	+ 		dst->bits[i] |= src->bits[i] &
	+ 		    ((1 << (count % BITMAP_ELEMENT)) - 1);
	  	}
	  }
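A small demonstration of the conservative second level (the block size of 8 and all values are chosen arbitrarily for illustration):

	static void bitmap_block_demo(void)
	{
		uint8_t storage[16];	/* 16 == bitmap_size(64, 8) */
		bitmap_t bm;
	
		bitmap_initialize(&bm, 64, 8, storage);
		bitmap_clear_range(&bm, 0, 64);
	
		bitmap_set_range(&bm, 0, 16);	/* bits 0-15 set; block bits 0, 1 set */
		bitmap_set(&bm, 4, 0);		/* clears bit 4 and block bit 0 */
	
		/* Block bit 1 still set => all of bits 8-15 are guaranteed set.
		 * Block bit 0 clear => bits 0-7 must be inspected individually. */
	}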
kernel/generic/src/console/kconsole.c
r6ad185d → rca62f86

	  #include <func.h>
	  #include <str.h>
	- #include <macros.h>
	  #include <sysinfo/sysinfo.h>
	  #include <ddi/device.h>
kernel/generic/src/ddi/ddi.c
r6ad185d → rca62f86

	  	order = fnzb(pages - 1) + 1;
	
	- 	*phys = frame_alloc_noreserve(order, 0);
	+ 	*phys = frame_alloc_noreserve(order, FRAME_DMA);
	  	if (*phys == NULL)
	  		return ENOMEM;
	…
	  NO_TRACE static int dmamem_unmap_anonymous(uintptr_t virt)
	  {
	- 	// TODO: implement unlocking & unmap
	- 	return EOK;
	+ 	// TODO: This is an ugly hack
	+ 	as_t *as = TASK->as;
	+ 
	+ 	mutex_lock(&as->lock);
	+ 	as_area_t *area = find_locked_area(as, virt);
	+ 	if (!area) {
	+ 		mutex_unlock(&as->lock);
	+ 		return ENOENT;
	+ 	}
	+ 	frame_free_noreserve(area->backend_data.base);
	+ 	area->backend_data.base = 0;
	+ 	area->backend_data.frames = 0;
	+ 	mutex_unlock(&area->lock);
	+ 	mutex_unlock(&as->lock);
	+ 
	+ 	return as_area_destroy(as, virt);
	  }
kernel/generic/src/mm/as.c
r6ad185d → rca62f86

	  	return NULL;
	  }
	
	+ /** UGLY! UGLY! UGLY! */
	+ // TODO: REMOVE ASAP!
	+ as_area_t * find_locked_area(as_t *as, uintptr_t va)
	+ {
	+ 	return find_area_and_lock(as, va);
	+ }
kernel/generic/src/mm/frame.c
r6ad185d → rca62f86

	  NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx)
	  {
	- 	ASSERT(zone->flags & ZONE_AVAILABLE);
	+ 	if (!(zone->flags & ZONE_AVAILABLE))
	+ 		return;
	+ //	ASSERT(zone->flags & ZONE_AVAILABLE);
	
	  	frame_t *frame = zone_get_frame(zone, frame_idx);
	…
	  	}
	
	- 	if (confframe >= start + count)
	- 		panic("Cannot find configuration data for zone.");
	+ 	if (confframe >= start + count) {
	+ 		flags &= ~ZONE_AVAILABLE;
	+ 		goto nonavail;
	+ //		panic("Cannot find configuration data for zone.");
	+ 	}
	  	}
	…
	  		return znum;
	  	}
	- 
	+ nonavail:
	+ 	(void)0; // label trick
	  	/* Non-available zone */
	  	size_t znum = zones_insert_zone(start, count, flags);