- Timestamp: 2013-09-02T20:14:11Z (12 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 8c95dff
- Parents: 0435fe41 (diff), 61ab4a9 (diff)
  Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
  Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel
- Files: 16 edited
- arch/arm32/include/arch/asm.h (modified) (3 diffs)
- arch/arm32/include/arch/cp15.h (modified) (5 diffs)
- arch/arm32/include/arch/mm/page.h (modified) (4 diffs)
- arch/arm32/include/arch/mm/page_armv4.h (modified) (3 diffs)
- arch/arm32/include/arch/mm/page_armv6.h (modified) (7 diffs)
- arch/arm32/src/cpu/cpu.c (modified) (8 diffs)
- arch/arm32/src/mach/beagleboardxm/beagleboardxm.c (modified) (3 diffs)
- arch/arm32/src/mm/tlb.c (modified) (5 diffs)
- arch/ia32/src/mm/frame.c (modified) (2 diffs)
- genarch/include/genarch/drivers/amdm37x/gpt.h (modified) (5 diffs)
- genarch/src/mm/page_pt.c (modified) (2 diffs)
- generic/include/mm/as.h (modified) (1 diff)
- generic/include/mm/frame.h (modified) (3 diffs)
- generic/src/ddi/ddi.c (modified) (2 diffs)
- generic/src/mm/as.c (modified) (1 diff)
- generic/src/mm/frame.c (modified) (3 diffs)
kernel/arch/arm32/include/arch/asm.h
r0435fe41 → r802898f

 
 #include <typedefs.h>
+#include <arch/cp15.h>
 #include <arch/stack.h>
 #include <config.h>
…
  * chapter 2.3.8 p.2-22 (52 in the PDF)
  *
- * @note Although mcr p15, 0, R0, c7, c0, 4 is defined in ARM Architecture
- * reference manual for armv4/5 CP15 implementation is mandatory only for
- * armv6+.
+ * @note Although CP15WFI (mcr p15, 0, R0, c7, c0, 4) is defined in ARM
+ * Architecture reference manual for armv4/5, CP15 implementation is mandatory
+ * only for armv6+.
  */
 NO_TRACE static inline void cpu_sleep(void)
…
     asm volatile ( "wfe" );
 #elif defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_arm926ej_s) | defined(PROCESSOR_arm920t)
-    asm volatile ( "mcr p15, 0, R0, c7, c0, 4");
+    WFI_write(0);
 #endif
 }
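For context on the armv6 branch above: the removed open-coded mcr and the new WFI_write(0) call perform the same operation, a write to the CP15 c7/c0/4 "Wait For Interrupt" function. A rough sketch of what such a generated accessor from <arch/cp15.h> might expand to is shown below; the body is an assumption for illustration, not the kernel's actual macro output.

    #include <stdint.h>

    /* Illustrative only: an accessor in the spirit of the
     * CONTROL_REG_GEN_WRITE() macros in <arch/cp15.h>;
     * the real WFI_write is macro-generated. */
    static inline void WFI_write(uint32_t val)
    {
        asm volatile (
            /* CP15 c7, c0, 4: Wait For Interrupt (pre-ARMv7 encoding). */
            "mcr p15, 0, %[val], c7, c0, 4"
            :: [val] "r" (val)
        );
    }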
kernel/arch/arm32/include/arch/cp15.h
r0435fe41 → r802898f

     CCSIDR_LINESIZE_MASK = 0x7,
     CCSIDR_LINESIZE_SHIFT = 0,
+#define CCSIDR_SETS(val) \
+    (((val >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK) + 1)
+#define CCSIDR_WAYS(val) \
+    (((val >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK) + 1)
+/* The register value is log(linesize_in_words) - 2 */
+#define CCSIDR_LINESIZE_LOG(val) \
+    (((val >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK) + 2 + 2)
 };
 CONTROL_REG_GEN_READ(CCSIDR, c0, 1, c0, 0);
…
     CLIDR_UNI_CACHE = 0x4,
     CLIDR_CACHE_MASK = 0x7,
-#define CLIDR_CACHE(level, val) ((val >> (level - 1) * 3) & CLIDR_CACHE_MASK)
+/** levels counted from 0 */
+#define CLIDR_CACHE(level, val) ((val >> (level * 3)) & CLIDR_CACHE_MASK)
 };
 CONTROL_REG_GEN_READ(CLIDR, c0, 1, c0, 1);
…
 
 /* Memory protection and control registers */
+enum {
+    TTBR_ADDR_MASK = 0xffffff80,
+    TTBR_NOS_FLAG = 1 << 5,
+    TTBR_RGN_MASK = 0x3 << 3,
+    TTBR_RGN_NO_CACHE = 0x0 << 3,
+    TTBR_RGN_WBWA_CACHE = 0x1 << 3,
+    TTBR_RGN_WT_CACHE = 0x2 << 3,
+    TTBR_RGN_WB_CACHE = 0x3 << 3,
+    TTBR_S_FLAG = 1 << 1,
+    TTBR_C_FLAG = 1 << 0,
+};
 CONTROL_REG_GEN_READ(TTBR0, c2, 0, c0, 0);
 CONTROL_REG_GEN_WRITE(TTBR0, c2, 0, c0, 0);
…
 
 CONTROL_REG_GEN_WRITE(DCIMVAC, c7, 0, c6, 1);
-CONTROL_REG_GEN_WRITE(DCIMSW, c7, 0, c6, 2);
+CONTROL_REG_GEN_WRITE(DCISW, c7, 0, c6, 2);
 
 CONTROL_REG_GEN_WRITE(ATS1CPR, c7, 0, c8, 0);
…
 CONTROL_REG_GEN_WRITE(ATS1CUR, c7, 0, c8, 2);
 CONTROL_REG_GEN_WRITE(ATS1CUW, c7, 0, c8, 3);
-CONTROL_REG_GEN_WRITE(ATS1NSOPR, c7, 0, c8, 4);
-CONTROL_REG_GEN_WRITE(ATS1NSOPW, c7, 0, c8, 5);
-CONTROL_REG_GEN_WRITE(ATS1NSOUR, c7, 0, c8, 6);
-CONTROL_REG_GEN_WRITE(ATS1NSOUW, c7, 0, c8, 7);
+CONTROL_REG_GEN_WRITE(ATS12NSOPR, c7, 0, c8, 4);
+CONTROL_REG_GEN_WRITE(ATS12NSOPW, c7, 0, c8, 5);
+CONTROL_REG_GEN_WRITE(ATS12NSOUR, c7, 0, c8, 6);
+CONTROL_REG_GEN_WRITE(ATS12NSOUW, c7, 0, c8, 7);
 
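The new CCSIDR helpers are plain field extractions with the architectural +1 bias for sets/ways and a +2+2 bias that turns the encoded line size into log2(bytes). A small, self-contained check (compilable on any host) is sketched below; the shift/mask constants are restated from the ARMv7 CCSIDR layout and the sample register value is made up for illustration.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* ARMv7 CCSIDR field layout (assumed; defined elsewhere in cp15.h). */
    #define CCSIDR_NUMSETS_SHIFT   13
    #define CCSIDR_NUMSETS_MASK    0x7fff
    #define CCSIDR_ASSOC_SHIFT     3
    #define CCSIDR_ASSOC_MASK      0x3ff
    #define CCSIDR_LINESIZE_SHIFT  0
    #define CCSIDR_LINESIZE_MASK   0x7

    /* Macros as added by this changeset. */
    #define CCSIDR_SETS(val) \
        (((val >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK) + 1)
    #define CCSIDR_WAYS(val) \
        (((val >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK) + 1)
    /* The register value is log2(linesize_in_words) - 2 */
    #define CCSIDR_LINESIZE_LOG(val) \
        (((val >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK) + 2 + 2)

    int main(void)
    {
        /* Hypothetical CCSIDR for a 32 KiB, 4-way cache with 64-byte lines:
         * sets = 32768 / (64 * 4) = 128, ways = 4, linesize = 16 words. */
        const uint32_t ccsidr = (127u << 13) | (3u << 3) | 2u;

        printf("sets=%u ways=%u linesize=%u bytes\n",
            CCSIDR_SETS(ccsidr), CCSIDR_WAYS(ccsidr),
            1u << CCSIDR_LINESIZE_LOG(ccsidr));

        assert(CCSIDR_SETS(ccsidr) == 128);
        assert(CCSIDR_WAYS(ccsidr) == 4);
        assert(CCSIDR_LINESIZE_LOG(ccsidr) == 6); /* 2^6 = 64-byte lines */
        return 0;
    }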
kernel/arch/arm32/include/arch/mm/page.h
r0435fe41 → r802898f

 #include <arch/exception.h>
 #include <arch/barrier.h>
+#include <arch/cp15.h>
 #include <trace.h>
 
…
 /* Set PTE address accessors for each level. */
 #define SET_PTL0_ADDRESS_ARCH(ptl0) \
-    (set_ptl0_addr((pte_t *) (ptl0)))
+    set_ptl0_addr((pte_t *) (ptl0))
 #define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
-    (((pte_t *) (ptl0))[(i)].l0.coarse_table_addr = (a) >> 10)
+    set_ptl1_addr((pte_t*) (ptl0), i, a)
 #define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
 #define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
 #define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
-    (((pte_t *) (ptl3))[(i)].l1.frame_base_addr = (a) >> 12)
+    set_ptl3_addr((pte_t*) (ptl3), i, a)
 
 /* Get PTE flags accessors for each level. */
…
     set_pt_level1_present((pte_t *) (ptl3), (size_t) (i))
 
+
+#define pt_coherence(page) pt_coherence_m(page, 1)
+
 #if defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_ARCH_armv7_a)
 #include "page_armv6.h"
…
 #endif
 
+/** Sets the address of level 0 page table.
+ *
+ * @param pt Pointer to the page table to set.
+ *
+ * Page tables are always in cacheable memory.
+ * Make sure the memory type is correct, and in sync with:
+ * init_boot_pt (boot/arch/arm32/src/mm.c)
+ * init_ptl0_section (boot/arch/arm32/src/mm.c)
+ * set_pt_level1_flags (kernel/arch/arm32/include/arch/mm/page_armv6.h)
+ */
+NO_TRACE static inline void set_ptl0_addr(pte_t *pt)
+{
+    uint32_t val = (uint32_t)pt & TTBR_ADDR_MASK;
+    val |= TTBR_RGN_WBWA_CACHE | TTBR_C_FLAG;
+    TTBR0_write(val);
+}
+
+NO_TRACE static inline void set_ptl1_addr(pte_t *pt, size_t i, uintptr_t address)
+{
+    pt[i].l0.coarse_table_addr = address >> 10;
+    pt_coherence(&pt[i].l0);
+}
+
+NO_TRACE static inline void set_ptl3_addr(pte_t *pt, size_t i, uintptr_t address)
+{
+    pt[i].l1.frame_base_addr = address >> 12;
+    pt_coherence(&pt[i].l1);
+}
+
 #endif
 
kernel/arch/arm32/include/arch/mm/page_armv4.h
r0435fe41 → r802898f

 #define PTE_DESCRIPTOR_SMALL_PAGE    2
 
-
-/** Sets the address of level 0 page table.
- *
- * @param pt Pointer to the page table to set.
- *
- */
-NO_TRACE static inline void set_ptl0_addr(pte_t *pt)
-{
-    asm volatile (
-        "mcr p15, 0, %[pt], c2, c0, 0\n"
-        :: [pt] "r" (pt)
-    );
-}
-
+#define pt_coherence_m(pt, count) \
+do { \
+    for (unsigned i = 0; i < count; ++i) \
+        DCCMVAU_write((uintptr_t)(pt + i)); \
+    read_barrier(); \
+} while (0)
 
 /** Returns level 0 page table entry flags.
…
 
     /* default access permission */
-    p->access_permission_0 = p->access_permission_1 = 
+    p->access_permission_0 = p->access_permission_1 =
         p->access_permission_2 = p->access_permission_3 =
         PTE_AP_USER_NO_KERNEL_RW;
…
     if (flags & PAGE_USER) {
         if (flags & PAGE_READ) {
-            p->access_permission_0 = p->access_permission_1 = 
-                p->access_permission_2 = p->access_permission_3 = 
+            p->access_permission_0 = p->access_permission_1 =
+                p->access_permission_2 = p->access_permission_3 =
                 PTE_AP_USER_RO_KERNEL_RW;
         }
         if (flags & PAGE_WRITE) {
-            p->access_permission_0 = p->access_permission_1 = 
-                p->access_permission_2 = p->access_permission_3 = 
-                PTE_AP_USER_RW_KERNEL_RW; 
+            p->access_permission_0 = p->access_permission_1 =
+                p->access_permission_2 = p->access_permission_3 =
+                PTE_AP_USER_RW_KERNEL_RW;
         }
     }
kernel/arch/arm32/include/arch/mm/page_armv6.h
r0435fe41 → r802898f

 #error "Do not include arch specific page.h directly use generic page.h instead"
 #endif
 
+
 /* Macros for querying the last-level PTE entries. */
…
 #define PTE_DESCRIPTOR_SMALL_PAGE_NX    3
 
-/** Sets the address of level 0 page table.
- *
- * @param pt Pointer to the page table to set.
- *
- */
-NO_TRACE static inline void set_ptl0_addr(pte_t *pt)
-{
-    asm volatile (
-        "mcr p15, 0, %[pt], c2, c0, 0\n"
-        :: [pt] "r" (pt)
-    );
-}
+
+/**
+ * For an ARMv7 implementation that does not include the Large Physical Address Extension,
+ * and in implementations of architecture versions before ARMv7, if the translation tables
+ * are held in Write-Back Cacheable memory, the caches must be cleaned to the point of
+ * unification after writing to the translation tables and before the DSB instruction. This
+ * ensures that the updated translation table are visible to a hardware translation table walk.
+ *
+ * Therefore, an example instruction sequence for writing a translation table entry,
+ * covering changes to the instruction
+ * or data mappings in a uniprocessor system is:
+ * STR rx, [Translation table entry]
+ * ; write new entry to the translation table
+ * Clean cache line [Translation table entry] : This operation is not required with the
+ * ; Multiprocessing Extensions.
+ * DSB
+ * ; ensures visibility of the data cleaned from the D Cache
+ * Invalidate TLB entry by MVA (and ASID if non-global) [page address]
+ * Invalidate BTC
+ * DSB
+ * ; ensure completion of the Invalidate TLB operation
+ * ISB
+ * ; ensure table changes visible to instruction fetch
+ *
+ * ARM Architecture reference chp. B3.10.1 p. B3-1375
+ * @note: see TTRB0/1 for pt memory type
+ */
+#define pt_coherence_m(pt, count) \
+do { \
+    for (unsigned i = 0; i < count; ++i) \
+        DCCMVAU_write((uintptr_t)(pt + i)); \
+    read_barrier(); \
+} while (0)
 
 
…
         p->ns = 0;
     }
+    pt_coherence(p);
 }
 
…
         p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE_NX;
     }
-
-    /* tex=0 buf=1 and cache=1 => normal memory
-     * tex=0 buf=1 and cache=0 => shareable device mmio
-     */
-    p->cacheable = (flags & PAGE_CACHEABLE);
-    p->bufferable = 1;
-    p->tex = 0;
+
+    if (flags & PAGE_CACHEABLE) {
+        /*
+         * Write-through, no write-allocate memory, see ch. B3.8.2
+         * (p. B3-1358) of ARM Architecture reference manual.
+         * Make sure the memory type is correct, and in sync with:
+         * init_boot_pt (boot/arch/arm32/src/mm.c)
+         * init_ptl0_section (boot/arch/arm32/src/mm.c)
+         * set_ptl0_addr (kernel/arch/arm32/include/arch/mm/page.h)
+         */
+        p->tex = 5;
+        p->cacheable = 0;
+        p->bufferable = 1;
+    } else {
+        /*
+         * Shareable device memory, see ch. B3.8.2 (p. B3-1358) of
+         * ARM Architecture reference manual.
+         */
+        p->tex = 0;
+        p->cacheable = 0;
+        p->bufferable = 1;
+    }
 
     /* Shareable is ignored for devices (non-cacheable),
-     * turn it on for normal memory. */
-    p->shareable = 1;
+     * turn it off for normal memory. */
+    p->shareable = 0;
 
     p->non_global = !(flags & PAGE_GLOBAL);
…
         p->access_permission_1 = PTE_AP1_RO;
     }
+    pt_coherence(p);
 }
 
…
     p->should_be_zero_0 = 0;
     p->should_be_zero_1 = 0;
-    write_barrier();
     p->descriptor_type = PTE_DESCRIPTOR_COARSE_TABLE;
+    pt_coherence(p);
 }
 
…
 
     p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE;
+    pt_coherence(p);
 }
 
kernel/arch/arm32/src/cpu/cpu.c
r0435fe41 → r802898f

 #endif
 #ifdef PROCESSOR_ARCH_armv7_a
-    /* ICache coherency is elaborate on in barrier.h.
+    /* ICache coherency is elaborated on in barrier.h.
      * VIPT and PIPT caches need maintenance only on code modify,
      * so it should be safe for general use.
…
         control_reg |=
             SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG;
+    } else {
+        control_reg &=
+            ~(SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG);
     }
 #endif
…
 #ifdef PROCESSOR_ARCH_armv7_a
     CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
-    const unsigned ls_log = 2 +
-        ((CCSIDR_read() >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK);
-    return ls_log + 2; //return log2(bytes)
+    const uint32_t ccsidr = CCSIDR_read();
+    return CCSIDR_LINESIZE_LOG(ccsidr);
 #endif
     return 0;
…
 #ifdef PROCESSOR_ARCH_armv7_a
     CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
-    const unsigned ways = 1 +
-        ((CCSIDR_read() >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK);
-    return ways;
+    const uint32_t ccsidr = CCSIDR_read();
+    return CCSIDR_WAYS(ccsidr);
 #endif
     return 0;
…
 #ifdef PROCESSOR_ARCH_armv7_a
     CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
-    const unsigned sets = 1 +
-        ((CCSIDR_read() >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK);
-    return sets;
+    const uint32_t ccsidr = CCSIDR_read();
+    return CCSIDR_SETS(ccsidr);
 #endif
     return 0;
…
 #ifdef PROCESSOR_ARCH_armv7_a
     const uint32_t val = CLIDR_read();
-    for (unsigned i = 1; i <= 7; ++i) {
+    for (unsigned i = 0; i < 8; ++i) {
         const unsigned ctype = CLIDR_CACHE(i, val);
         switch (ctype) {
…
         const unsigned ways = dcache_ways(i);
         const unsigned sets = dcache_sets(i);
-        const unsigned way_shift = 31 - log2(ways);
+        const unsigned way_shift = 32 - log2(ways);
         const unsigned set_shift = dcache_linesize_log(i);
         dcache_clean_manual(i, false, ways, sets, way_shift, set_shift);
…
         const unsigned ways = dcache_ways(i);
         const unsigned sets = dcache_sets(i);
-        const unsigned way_shift = 31 - log2(ways);
+        const unsigned way_shift = 32 - log2(ways);
         const unsigned set_shift = dcache_linesize_log(i);
         dcache_clean_manual(i, true, ways, sets, way_shift, set_shift);
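The way_shift fix matters because, for set/way operations such as DCCSW and DCCISW, the way index occupies the topmost log2(ways) bits of the operand, so the correct shift is 32 - log2(ways); with 31 - log2(ways) the way index would sit one bit too low and overlap the set field. The helper below is only an illustration of how such an operand is composed; dcache_clean_manual itself is not part of this hunk.

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative only: compose a DCCSW/DCCISW operand for a given cache
     * level, set and way (ARMv7 set/way encoding). */
    static uint32_t set_way_op(unsigned level, unsigned set, unsigned way,
        unsigned way_shift, unsigned set_shift)
    {
        return ((uint32_t) way << way_shift) |
            ((uint32_t) set << set_shift) |
            ((uint32_t) level << 1);
    }

    int main(void)
    {
        /* 4-way cache: log2(4) = 2, so the way index must occupy bits
         * [31:30], i.e. way_shift = 32 - 2 = 30. The previous
         * "31 - log2(ways)" would have placed it one bit too low. */
        const unsigned way_shift = 32 - 2;
        const unsigned set_shift = 6;   /* log2(64-byte line) */

        uint32_t op = set_way_op(0, 1, 3, way_shift, set_shift);
        assert(op == ((3u << 30) | (1u << 6)));
        return 0;
    }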
kernel/arch/arm32/src/mach/beagleboardxm/beagleboardxm.c
r0435fe41 → r802898f

 static void bb_timer_irq_handler(irq_t *irq)
 {
+    amdm37x_gpt_irq_ack(&beagleboard.timer);
+
     /*
      * We are holding a lock which prevents preemption.
      * Release the lock, call clock() and reacquire the lock again.
      */
-    amdm37x_gpt_irq_ack(&beagleboard.timer);
     spinlock_unlock(&irq->lock);
     clock();
…
 {
     const unsigned inum = amdm37x_irc_inum_get(beagleboard.irc_addr);
-    amdm37x_irc_irq_ack(beagleboard.irc_addr);
 
     irq_t *irq = irq_dispatch_and_lock(inum);
…
             CPU->id, inum);
     }
+    /** amdm37x manual ch. 12.5.2 (p. 2428) places irc ack at the end
+     * of ISR. DO this to avoid strange behavior. */
+    amdm37x_irc_irq_ack(beagleboard.irc_addr);
 }
 
kernel/arch/arm32/src/mm/tlb.c
r0435fe41 → r802898f

 #include <arch/mm/asid.h>
 #include <arch/asm.h>
+#include <arch/cp15.h>
 #include <typedefs.h>
 #include <arch/mm/page.h>
+#include <arch/cache.h>
 
 /** Invalidate all entries in TLB.
…
 void tlb_invalidate_all(void)
 {
-    asm volatile (
-        "eor r1, r1\n"
-        "mcr p15, 0, r1, c8, c7, 0\n"
-        ::: "r1"
-    );
+    TLBIALL_write(0);
+    /*
+     * "A TLB maintenance operation is only guaranteed to be complete after
+     * the execution of a DSB instruction."
+     * "An ISB instruction, or a return from an exception, causes the
+     * effect of all completed TLB maintenance operations that appear in
+     * program order before the ISB or return from exception to be visible
+     * to all subsequent instructions, including the instruction fetches
+     * for those instructions."
+     * ARM Architecture reference Manual ch. B3.10.1 p. B3-1374 B3-1375
+     */
+    read_barrier();
+    inst_barrier();
 }
 
…
 {
     tlb_invalidate_all();
+    // TODO: why not TLBIASID_write(asid) ?
 }
 
…
  *
  * @param page Virtual adress of the page
- */ 
+ */
 static inline void invalidate_page(uintptr_t page)
 {
-    asm volatile (
-        "mcr p15, 0, %[page], c8, c7, 1\n"
-        :: [page] "r" (page)
-    );
+    //TODO: What about TLBIMVAA?
+    TLBIMVA_write(page);
+    /*
+     * "A TLB maintenance operation is only guaranteed to be complete after
+     * the execution of a DSB instruction."
+     * "An ISB instruction, or a return from an exception, causes the
+     * effect of all completed TLB maintenance operations that appear in
+     * program order before the ISB or return from exception to be visible
+     * to all subsequent instructions, including the instruction fetches
+     * for those instructions."
+     * ARM Architecture reference Manual ch. B3.10.1 p. B3-1374 B3-1375
+     */
+    read_barrier();
+    inst_barrier();
 }
 
…
 void tlb_invalidate_pages(asid_t asid __attribute__((unused)), uintptr_t page, size_t cnt)
 {
-    unsigned int i;
-
-    for (i = 0; i < cnt; i++)
+    for (unsigned i = 0; i < cnt; i++)
         invalidate_page(page + i * PAGE_SIZE);
 }
kernel/arch/ia32/src/mm/frame.c
r0435fe41 → r802898f

 
 #define PHYSMEM_LIMIT32  UINT64_C(0x100000000)
+#define PHYSMEM_LIMIT_DMA  UINT64_C(0x1000000)
 
 size_t hardcoded_unmapped_ktext_size = 0;
…
             else
                 conf = minconf;
-            zone_create(pfn, count, conf,
-                ZONE_AVAILABLE | ZONE_LOWMEM);
+
+            if ((pfn * PAGE_SIZE) < PHYSMEM_LIMIT_DMA) {
+                size_t dma_count = min(
+                    PHYSMEM_LIMIT_DMA / PAGE_SIZE - pfn,
+                    count);
+                zone_create(pfn, dma_count, conf,
+                    ZONE_AVAILABLE | ZONE_DMA);
+                count -= dma_count;
+                pfn += dma_count;
+            }
+
+            conf = pfn;
+            if (count) {
+                zone_create(pfn, count, conf,
+                    ZONE_AVAILABLE | ZONE_LOWMEM);
+            }
         } else {
             conf = zone_external_conf_alloc(count);
-            if (conf != 0)
+            if (conf != 0) {
                 zone_create(pfn, count, conf,
                     ZONE_AVAILABLE | ZONE_HIGHMEM);
+            }
         }
     } else if ((e820table[i].type == MEMMAP_MEMORY_ACPI) ||
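The new ia32 code splits any low-memory zone at the 16 MiB ISA DMA boundary: frames below PHYSMEM_LIMIT_DMA go into a ZONE_DMA zone and the remainder stays ZONE_LOWMEM. The arithmetic can be sanity-checked in isolation; the sketch below assumes 4 KiB frames and a made-up zone (1 MiB start, 256 MiB size).

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SIZE          0x1000ULL      /* assumed: 4 KiB frames */
    #define PHYSMEM_LIMIT_DMA  0x1000000ULL   /* 16 MiB, as in the hunk */

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    int main(void)
    {
        /* Hypothetical zone starting at 1 MiB with 256 MiB of frames. */
        size_t pfn = 0x100000ULL / PAGE_SIZE;        /* 0x100 */
        size_t count = 0x10000000ULL / PAGE_SIZE;    /* 0x10000 frames */

        /* Same split as the new ia32 code: everything below 16 MiB goes
         * into the DMA zone, the rest stays low memory. */
        size_t dma_count = min_sz(PHYSMEM_LIMIT_DMA / PAGE_SIZE - pfn, count);
        assert(dma_count == 0xf00);                  /* 15 MiB worth of frames */

        count -= dma_count;
        pfn += dma_count;
        assert(pfn == 0x1000);                       /* first frame above 16 MiB */
        assert(count == 0xf100);
        return 0;
    }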
kernel/genarch/include/genarch/drivers/amdm37x/gpt.h
r0435fe41 → r802898f

 #include <typedefs.h>
 #include <mm/km.h>
+#include <time/clock.h>
 
 /* AMDM37x TRM p. 2740 */
…
 #define AMDM37x_GPT_TCLR_CE_FLAG (1 << 6)
 #define AMDM37x_GPT_TCLR_SCPWM (1 << 7)
-#define AMDM37x_GPT_TCLR_TCM_MASK (0x3)
-#define AMDM37x_GPT_TCLR_TCM_SHIFT (8)
-#define AMDM37x_GPT_TCLR_TRG_MASK (0x3)
-#define AMDM37x_GPT_TCLR_TRG_SHIFT (10)
+#define AMDM37x_GPT_TCLR_TCM_MASK (0x3 << 8)
+#define AMDM37x_GPT_TCLR_TCM_NO_CAPTURE (0x0 << 8)
+#define AMDM37x_GPT_TCLR_TCM_RAISE_CAPTURE (0x1 << 8)
+#define AMDM37x_GPT_TCLR_TCM_FALL_CAPTURE (0x2 << 8)
+#define AMDM37x_GPT_TCLR_TCM_BOTH_CAPTURE (0x3 << 8)
+#define AMDM37x_GPT_TCLR_TRG_MASK (0x3 << 10)
+#define AMDM37x_GPT_TCLR_TRG_NO (0x0 << 10)
+#define AMDM37x_GPT_TCLR_TRG_OVERFLOW (0x1 << 10)
+#define AMDM37x_GPT_TCLR_TRG_OVERMATCH (0x2 << 10)
 #define AMDM37x_GPT_TCLR_PT_FLAG (1 << 12)
 #define AMDM37x_GPT_TCLR_CAPT_MODE_FLAG (1 << 13)
…
     timer->regs = (void*) km_map(ioregs, iosize, PAGE_NOT_CACHEABLE);
 
+    /* Reset the timer */
+    timer->regs->tiocp_cfg |= AMDM37x_GPT_TIOCP_CFG_SOFTRESET_FLAG;
+
+    while (!(timer->regs->tistat & AMDM37x_GPT_TISTAT_RESET_DONE_FLAG));
+
     /* Set autoreload */
-    timer->regs->tclr = AMDM37x_GPT_TCLR_AR_FLAG;
+    timer->regs->tclr |= AMDM37x_GPT_TCLR_AR_FLAG;
 
     timer->special_available = (
…
         (ioregs == AMDM37x_GPT2_BASE_ADDRESS) ||
         (ioregs == AMDM37x_GPT10_BASE_ADDRESS));
+    /* Select reload value */
     timer->regs->tldr = 0xffffffff - (32768 / hz) + 1;
+    /* Set current counter value */
     timer->regs->tccr = 0xffffffff - (32768 / hz) + 1;
+
     if (timer->special_available) {
-        /* Set values for according to formula (manual p. 2733) */
+        /* Set values according to formula (manual p. 2733) */
         /* Use temporary variables for easier debugging */
         const uint32_t tpir =
             ((32768 / hz + 1) * 1000000) - (32768000L * (1000 / hz));
         const uint32_t tnir =
-            ((32768 / hz) * 1000000) - (32768000 * (1000 / hz));
+            ((32768 / hz) * 1000000) - (32768000L * (1000 / hz));
         timer->regs->tpir = tpir;
         timer->regs->tnir = tnir;
…
 }
 
-static inline void amdm37x_gpt_irq_ack(amdm37x_gpt_t* timer)
+static inline bool amdm37x_gpt_irq_ack(amdm37x_gpt_t* timer)
 {
     ASSERT(timer);
     ASSERT(timer->regs);
     /* Clear all pending interrupts */
-    timer->regs->tisr = timer->regs->tisr;
+    const uint32_t tisr = timer->regs->tisr;
+    timer->regs->tisr = tisr;
+    return tisr != 0;
 }
 
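The reload values program the GPT to overflow every 32768/hz ticks of its 32.768 kHz clock, and tpir/tnir are the 1 ms positive/negative increment values from the manual's formula. Below is a worked example of that arithmetic, assuming hz = 100 (the tick rate is not fixed by this hunk); signed types are used here only to make the sign of tnir visible, whereas the driver stores both values as uint32_t.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Assumed kernel tick rate; 100 Hz is only an example. */
        const int hz = 100;

        /* Reload value: count up from tldr to overflow, i.e. 32768/hz
         * ticks of the 32.768 kHz clock per interrupt (integer division). */
        const uint32_t tldr = 0xffffffff - (32768 / hz) + 1;
        assert(32768 / hz == 327);
        assert(tldr == 0xfffffeb9);

        /* 1 ms tick adjustment values (manual p. 2733), as in the hunk. */
        const int32_t tpir =
            ((32768 / hz + 1) * 1000000) - (32768000L * (1000 / hz));
        const int32_t tnir =
            ((32768 / hz) * 1000000) - (32768000L * (1000 / hz));
        printf("tldr=0x%x tpir=%d tnir=%d\n", tldr, tpir, tnir);

        assert(tpir == 320000);
        assert(tnir == -680000);
        return 0;
    }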
kernel/genarch/src/mm/page_pt.c
r0435fe41 → r802898f

      */
     write_barrier();
-    SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page)); 
+    SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page));
 }
 
…
      * Destroy the mapping.
      * Setting to PAGE_NOT_PRESENT is not sufficient.
-     */
+     * But we need SET_FRAME for possible PT coherence maintenance.
+     * At least on ARM.
+     */
+    //TODO: Fix this inconsistency
+    SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), PAGE_NOT_PRESENT);
     memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);
 
kernel/generic/include/mm/as.h
r0435fe41 → r802898f

 extern int as_area_change_flags(as_t *, unsigned int, uintptr_t);
 
+//TODO REMOVE!
+extern as_area_t * find_locked_area(as_t *as, uintptr_t va);
+
 extern unsigned int as_area_get_flags(as_area_t *);
 extern bool as_area_check_access(as_area_t *, pf_access_t);
kernel/generic/include/mm/frame.h
r0435fe41 → r802898f

 /** Allocate a frame which cannot be identity-mapped. */
 #define FRAME_HIGHMEM  0x20
+/** Allocate a frame which needs to be from DMA zone. */
+#define FRAME_DMA  0x40
 
 typedef uint8_t zone_flags_t;
…
 /** Zone contains memory that cannot be identity-mapped */
 #define ZONE_HIGHMEM  0x10
+/** Zone contains memory suitable for old ISA DMA */
+#define ZONE_DMA  0x20
 
 /** Mask of zone bits that must be matched exactly. */
…
 
 #define FRAME_TO_ZONE_FLAGS(ff) \
-    ((((ff) & FRAME_LOWMEM) ? ZONE_LOWMEM : \
+    ((((ff) & FRAME_DMA) ? ZONE_DMA : \
+    (((ff) & FRAME_LOWMEM) ? ZONE_LOWMEM : \
     (((ff) & FRAME_HIGHMEM) ? ZONE_HIGHMEM : \
-    ZONE_LOWMEM /* | ZONE_HIGHMEM */)) | \
-    ZONE_AVAILABLE)
+    ZONE_LOWMEM /* | ZONE_HIGHMEM */))) | \
+    ZONE_AVAILABLE)
 
 #define ZONE_FLAGS_MATCH(zf, f) \
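With the extra ternary level, FRAME_DMA now takes precedence over the low/high placement flags, and the added closing parenthesis keeps ZONE_AVAILABLE OR-ed onto whichever zone flag was selected. The quick check below copies the rewritten macro; the FRAME_LOWMEM, ZONE_LOWMEM and ZONE_AVAILABLE values are stand-ins, since they are not visible in this hunk.

    #include <assert.h>

    /* Values from the hunk. */
    #define FRAME_HIGHMEM  0x20
    #define FRAME_DMA      0x40
    #define ZONE_HIGHMEM   0x10
    #define ZONE_DMA       0x20

    /* Stand-in values; the real ones live elsewhere in frame.h. */
    #define FRAME_LOWMEM    0x10
    #define ZONE_AVAILABLE  0x01
    #define ZONE_LOWMEM     0x08

    /* Macro as rewritten by this changeset. */
    #define FRAME_TO_ZONE_FLAGS(ff) \
        ((((ff) & FRAME_DMA) ? ZONE_DMA : \
        (((ff) & FRAME_LOWMEM) ? ZONE_LOWMEM : \
        (((ff) & FRAME_HIGHMEM) ? ZONE_HIGHMEM : \
        ZONE_LOWMEM /* | ZONE_HIGHMEM */))) | \
        ZONE_AVAILABLE)

    int main(void)
    {
        /* DMA wins even if another placement flag is also set. */
        assert(FRAME_TO_ZONE_FLAGS(FRAME_DMA | FRAME_LOWMEM) ==
            (ZONE_DMA | ZONE_AVAILABLE));
        assert(FRAME_TO_ZONE_FLAGS(FRAME_LOWMEM) ==
            (ZONE_LOWMEM | ZONE_AVAILABLE));
        assert(FRAME_TO_ZONE_FLAGS(FRAME_HIGHMEM) ==
            (ZONE_HIGHMEM | ZONE_AVAILABLE));
        assert(FRAME_TO_ZONE_FLAGS(0) == (ZONE_LOWMEM | ZONE_AVAILABLE));
        return 0;
    }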
kernel/generic/src/ddi/ddi.c
r0435fe41 → r802898f

         order = fnzb(pages - 1) + 1;
 
-    *phys = frame_alloc_noreserve(order, 0);
+    *phys = frame_alloc_noreserve(order, FRAME_DMA);
     if (*phys == NULL)
         return ENOMEM;
…
 NO_TRACE static int dmamem_unmap_anonymous(uintptr_t virt)
 {
-    // TODO: implement unlocking & unmap
-    return EOK;
+    // TODO: This is an ugly hack
+    as_t *as = TASK->as;
+
+    mutex_lock(&as->lock);
+    as_area_t *area = find_locked_area(as, virt);
+    if (!area) {
+        mutex_unlock(&as->lock);
+        return ENOENT;
+    }
+    frame_free_noreserve(area->backend_data.base);
+    area->backend_data.base = 0;
+    area->backend_data.frames = 0;
+    mutex_unlock(&area->lock);
+    mutex_unlock(&as->lock);
+
+    return as_area_destroy(as, virt);
 }
 
kernel/generic/src/mm/as.c
r0435fe41 → r802898f

 
     return NULL;
+}
+
+/** UGLY! UGLY! UGLY! */
+// TODO: REMOVE ASAP!
+as_area_t * find_locked_area(as_t *as, uintptr_t va)
+{
+    return find_area_and_lock(as, va);
 }
 
kernel/generic/src/mm/frame.c
r0435fe41 → r802898f

 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx)
 {
-    ASSERT(zone->flags & ZONE_AVAILABLE);
+    if (!(zone->flags & ZONE_AVAILABLE))
+        return;
+    // ASSERT(zone->flags & ZONE_AVAILABLE);
 
     frame_t *frame = zone_get_frame(zone, frame_idx);
…
         }
 
-        if (confframe >= start + count)
-            panic("Cannot find configuration data for zone.");
+        if (confframe >= start + count) {
+            flags &= ~ZONE_AVAILABLE;
+            goto nonavail;
+            // panic("Cannot find configuration data for zone.");
+        }
     }
 
…
         return znum;
     }
-
+nonavail:
+    (void)0; // label trick
     /* Non-available zone */
     size_t znum = zones_insert_zone(start, count, flags);