- Timestamp: 2006-12-09T20:20:50Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: b82a13c
- Parents: 9ab9c2ec
- Location: kernel
- Files: 19 edited
Legend:
- Unchanged lines have no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
- … marks elided context
kernel/arch/ia32/src/drivers/ega.c
 #include <console/console.h>
 #include <sysinfo/sysinfo.h>
+#include <ddi/ddi.h>
 
 /*
…
  * Simple and short. Function for displaying characters and "scrolling".
  */
+
+static parea_t ega_parea;    /**< Physical memory area for EGA video RAM. */
 
 SPINLOCK_INITIALIZE(egalock);
…
     stdout = &ega_console;
 
+    ega_parea.pbase = VIDEORAM;
+    ega_parea.vbase = (uintptr_t) videoram;
+    ega_parea.frames = 1;
+    ega_parea.cacheable = false;
+    ddi_parea_register(&ega_parea);
+
     sysinfo_set_item_val("fb", NULL, true);
     sysinfo_set_item_val("fb.kind", NULL, 2);
…
     sysinfo_set_item_val("fb.height", NULL, ROWS);
     sysinfo_set_item_val("fb.address.physical", NULL, VIDEORAM);
+    sysinfo_set_item_val("fb.address.color", NULL, PAGE_COLOR((uintptr_t)
+        videoram));
 
 #ifndef CONFIG_FB
kernel/arch/sparc64/include/interrupt.h
 enum {
-    IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI,
-    IPI_DCACHE_SHOOTDOWN
+    IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI
 };
kernel/arch/sparc64/include/mm/cache.h
 #define KERN_sparc64_CACHE_H_
 
-#ifdef CONFIG_SMP
-extern void dcache_shootdown_start(void);
-extern void dcache_shootdown_finalize(void);
-extern void dcache_shootdown_ipi_recv(void);
-#else /* CONFIG_SMP */
-#define dcache_shootdown_start();
-#define dcache_shootdown_finalize();
-#define dcache_shootdown_ipi_recv();
-#endif /* CONFIG_SMP */
-
 extern void dcache_flush(void);
kernel/arch/sparc64/src/mm/as.c
 #include <macros.h>
 #endif /* CONFIG_TSB */
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-
 /** Architecture dependent address space init. */
…
     dtsb_base_write(tsb_base.value);
 #endif
-#ifdef CONFIG_VIRT_IDX_DCACHE
-    if (as->dcache_flush_on_install) {
-        /*
-         * Some mappings in this address space are illegal address
-         * aliases. Upon their creation, the dcache_flush_on_install
-         * flag was set.
-         *
-         * We are now obliged to flush the D-cache in order to guarantee
-         * that there will be at most one cache line for each address
-         * alias.
-         *
-         * This flush performs a cleanup after another address space in
-         * which the alias might have existed.
-         */
-        dcache_flush();
-    }
-#endif /* CONFIG_VIRT_IDX_DCACHE */
 }
…
     }
 #endif
-#ifdef CONFIG_VIRT_IDX_DCACHE
-    if (as->dcache_flush_on_deinstall) {
-        /*
-         * Some mappings in this address space are illegal address
-         * aliases. Upon their creation, the dcache_flush_on_deinstall
-         * flag was set.
-         *
-         * We are now obliged to flush the D-cache in order to guarantee
-         * that there will be at most one cache line for each address
-         * alias.
-         *
-         * This flush performs a cleanup after this address space. It is
-         * necessary because other address spaces that contain the same
-         * alias are not necessarily aware of the need to carry out the
-         * cache flush. The only address spaces that are aware of it are
-         * those that created the illegal alias.
-         */
-        dcache_flush();
-    }
-#endif /* CONFIG_VIRT_IDX_DCACHE */
 }
kernel/arch/sparc64/src/mm/cache.c
 /**
  * @file
- * @brief D-cache shootdown algorithm.
  */
 
 #include <arch/mm/cache.h>
 
-#ifdef CONFIG_SMP
-
-#include <smp/ipi.h>
-#include <arch/interrupt.h>
-#include <synch/spinlock.h>
-#include <arch.h>
-#include <debug.h>
-
-/**
- * This spinlock is used by the processors to synchronize during the D-cache
- * shootdown.
- */
-SPINLOCK_INITIALIZE(dcachelock);
-
-/** Initialize the D-cache shootdown sequence.
- *
- * Start the shootdown sequence by sending out an IPI and wait until all
- * processors spin on the dcachelock spinlock.
- */
-void dcache_shootdown_start(void)
-{
-    int i;
-
-    CPU->arch.dcache_active = 0;
-    spinlock_lock(&dcachelock);
-
-    ipi_broadcast(IPI_DCACHE_SHOOTDOWN);
-
-busy_wait:
-    for (i = 0; i < config.cpu_count; i++)
-        if (cpus[i].arch.dcache_active)
-            goto busy_wait;
-}
-
-/** Finish the D-cache shootdown sequence. */
-void dcache_shootdown_finalize(void)
-{
-    spinlock_unlock(&dcachelock);
-    CPU->arch.dcache_active = 1;
-}
-
-/** Process the D-cache shootdown IPI. */
-void dcache_shootdown_ipi_recv(void)
-{
-    ASSERT(CPU);
-
-    CPU->arch.dcache_active = 0;
-    spinlock_lock(&dcachelock);
-    spinlock_unlock(&dcachelock);
-
-    dcache_flush();
-
-    CPU->arch.dcache_active = 1;
-}
-
-#endif /* CONFIG_SMP */
-
 /** @}
  */
kernel/arch/sparc64/src/mm/page.c
 for (i = 0; i < bsp_locked_dtlb_entries; i++) {
     dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
-        bsp_locked_dtlb_entry[i].phys_page, bsp_locked_dtlb_entry[i].pagesize_code,
-        true, false);
+        bsp_locked_dtlb_entry[i].phys_page,
+        bsp_locked_dtlb_entry[i].pagesize_code, true,
+        false);
 }
 #endif
…
      * Second, save the information about the mapping for APs.
      */
-    bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page = virtaddr + i*sizemap[order].increment;
-    bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page = physaddr + i*sizemap[order].increment;
-    bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code = sizemap[order].pagesize_code;
+    bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
+        virtaddr + i*sizemap[order].increment;
+    bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
+        physaddr + i*sizemap[order].increment;
+    bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
+        sizemap[order].pagesize_code;
     bsp_locked_dtlb_entries++;
 #endif
kernel/arch/sparc64/src/smp/ipi.c
 #include <config.h>
 #include <mm/tlb.h>
-#include <arch/mm/cache.h>
 #include <arch/interrupt.h>
 #include <arch/trap/interrupt.h>
…
         func = tlb_shootdown_ipi_recv;
         break;
-    case IPI_DCACHE_SHOOTDOWN:
-        func = dcache_shootdown_ipi_recv;
-        break;
     default:
         panic("Unknown IPI (%d).\n", ipi);
kernel/arch/sparc64/src/trap/interrupt.c
 #include <arch.h>
 #include <mm/tlb.h>
-#include <arch/mm/cache.h>
 #include <config.h>
 #include <synch/spinlock.h>
…
     if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
         tlb_shootdown_ipi_recv();
-    } else if (data0 == (uintptr_t) dcache_shootdown_ipi_recv) {
-        dcache_shootdown_ipi_recv();
     }
 #endif
kernel/genarch/src/fb/fb.c
 #include <bitops.h>
 #include <print.h>
+#include <ddi/ddi.h>
 
 #include "helenos.xbm"
+
+static parea_t fb_parea;    /**< Physical memory area for fb. */
 
 SPINLOCK_INITIALIZE(fb_lock);
…
     columns = x / COL_WIDTH;
 
+    fb_parea.pbase = (uintptr_t) addr;
+    fb_parea.vbase = (uintptr_t) fbaddress;
+    fb_parea.frames = SIZE2FRAMES(fbsize);
+    fb_parea.cacheable = false;
+    ddi_parea_register(&fb_parea);
+
     sysinfo_set_item_val("fb", NULL, true);
     sysinfo_set_item_val("fb.kind", NULL, 1);
…
     sysinfo_set_item_val("fb.visual", NULL, visual);
     sysinfo_set_item_val("fb.address.physical", NULL, addr);
+    sysinfo_set_item_val("fb.address.color", NULL, PAGE_COLOR((uintptr_t)
+        fbaddress));
     sysinfo_set_item_val("fb.invert-colors", NULL, invert_colors);
kernel/generic/include/ddi/ddi.h
 #include <typedefs.h>
 
-unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t pages,
-    unative_t flags);
+/** Structure representing contiguous physical memory area. */
+typedef struct {
+    uintptr_t pbase;    /**< Physical base of the area. */
+    uintptr_t vbase;    /**< Virtual base of the area. */
+    count_t frames;     /**< Number of frames in the area. */
+    bool cacheable;     /**< Cacheability. */
+} parea_t;
+
+extern void ddi_init(void);
+extern void ddi_parea_register(parea_t *parea);
+
+extern unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
+    unative_t pages, unative_t flags);
 extern unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg);
 extern unative_t sys_preempt_control(int enable);
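For reference, the registration idiom that the drivers in this changeset (ega.c, fb.c, klog.c, rd.c, clock.c) all follow can be condensed into one sketch. Everything named mydev_* below is hypothetical; the parea_t fields, ddi_parea_register() and PAGE_COLOR() are the interfaces introduced by this changeset:

    #include <ddi/ddi.h>
    #include <sysinfo/sysinfo.h>
    #include <mm/page.h>

    /* Hypothetical device; an illustration of the pattern only. */
    static parea_t mydev_parea;

    void mydev_enable_physmem(uintptr_t pbase, uintptr_t vbase, count_t frames)
    {
        /* Describe the physical memory behind the device... */
        mydev_parea.pbase = pbase;
        mydev_parea.vbase = vbase;
        mydev_parea.frames = frames;
        mydev_parea.cacheable = false;    /* device memory: uncached */

        /* ...enable it for physmem_map() from userspace... */
        ddi_parea_register(&mydev_parea);

        /* ...and publish the physical address plus the page color of
         * the kernel mapping, so userspace can pick a virtual address
         * of the same color. */
        sysinfo_set_item_val("mydev.address.physical", NULL, pbase);
        sysinfo_set_item_val("mydev.address.color", NULL,
            PAGE_COLOR(vbase));
    }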
kernel/generic/include/mm/as.h
     asid_t asid;
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-    bool dcache_flush_on_install;
-    bool dcache_flush_on_deinstall;
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-
     /** Architecture specific content. */
     as_arch_t arch;
…
     /** Data to be used by the backend. */
     mem_backend_data_t backend_data;
-
-    /**
-     * Virtual color of the original address space area that was at the beginning
-     * of the share chain.
-     */
-    int orig_color;
 };
kernel/generic/include/mm/page.h
  * Macro for computing page color.
  */
-#define PAGE_COLOR(va)
+#define PAGE_COLOR(va)  (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1))
 
 /** Page fault access type. */
…
 /** Operations to manipulate page mappings. */
 struct page_mapping_operations {
-    void (* mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame, int flags);
+    void (* mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame, int
+        flags);
     void (* mapping_remove)(as_t *as, uintptr_t page);
     pte_t *(* mapping_find)(as_t *as, uintptr_t page);
…
 extern void page_table_lock(as_t *as, bool lock);
 extern void page_table_unlock(as_t *as, bool unlock);
-extern void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags);
+extern void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int
+    flags);
 extern void page_mapping_remove(as_t *as, uintptr_t page);
 extern pte_t *page_mapping_find(as_t *as, uintptr_t page);
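A worked example of the PAGE_COLOR() macro may help. The concrete values of PAGE_WIDTH and PAGE_COLOR_BITS below are assumptions chosen for illustration (8 KiB pages and a two-color virtually indexed D-cache, the sparc64-like case this changeset targets); only the macro body comes from the header above:

    #include <stdio.h>

    /* Assumed for illustration: 8 KiB pages, one page-color bit. */
    #define PAGE_WIDTH      13
    #define PAGE_COLOR_BITS 1
    #define PAGE_COLOR(va)  (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1))

    int main(void)
    {
        /* With these constants, the color is just bit 13 of the
         * virtual address. */
        printf("%lu\n", PAGE_COLOR(0x40000000UL));    /* prints 0 */
        printf("%lu\n", PAGE_COLOR(0x40002000UL));    /* prints 1 */
        return 0;
    }

Two virtual mappings of one physical frame form an illegal alias exactly when their colors differ: a virtually indexed D-cache could then hold two different cache lines for the same physical data. That is the condition the new checks in ddi.c and as.c below refuse.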
kernel/generic/src/console/klog.c
 #include <ddi/device.h>
 #include <ddi/irq.h>
+#include <ddi/ddi.h>
 #include <ipc/irq.h>
 
+/** Physical memory area used for klog. */
+static parea_t klog_parea;
+
 /*
  * For now, we use 0 as INR.
- * However, on some architectures 0 is the clock interrupt (e.g. amd64 and ia32).
- * It is therefore desirable to have architecture specific definition of KLOG_VIRT_INR
- * in the future.
+ * However, on some architectures 0 is the clock interrupt (e.g. amd64 and
+ * ia32). It is therefore desirable to have architecture specific definition of
+ * KLOG_VIRT_INR in the future.
  */
 #define KLOG_VIRT_INR 0
…
     if (!faddr)
         panic("Cannot allocate page for klog");
-    klog = (char *)PA2KA(faddr);
+    klog = (char *) PA2KA(faddr);
 
     devno_t devno = device_assign_devno();
 
-    sysinfo_set_item_val("klog.faddr", NULL, (unative_t)faddr);
+    klog_parea.pbase = (uintptr_t) faddr;
+    klog_parea.vbase = (uintptr_t) klog;
+    klog_parea.frames = 1 << KLOG_ORDER;
+    klog_parea.cacheable = true;
+    ddi_parea_register(&klog_parea);
+
+    sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr);
+    sysinfo_set_item_val("klog.fcolor", NULL, (unative_t)
+        PAGE_COLOR((uintptr_t) klog));
     sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER);
     sysinfo_set_item_val("klog.devno", NULL, devno);
kernel/generic/src/ddi/ddi.c
 #include <synch/spinlock.h>
 #include <syscall/copy.h>
+#include <adt/btree.h>
 #include <arch.h>
 #include <align.h>
 #include <errno.h>
 
+/** This lock protects the parea_btree. */
+SPINLOCK_INITIALIZE(parea_lock);
+
+/** B+tree with enabled physical memory areas. */
+static btree_t parea_btree;
+
+/** Initialize DDI. */
+void ddi_init(void)
+{
+    btree_create(&parea_btree);
+}
+
+/** Enable piece of physical memory for mapping by physmem_map().
+ *
+ * @param parea Pointer to physical area structure.
+ *
+ * @todo This function doesn't check for overlaps. It depends on the kernel to
+ * create disjunct physical memory areas.
+ */
+void ddi_parea_register(parea_t *parea)
+{
+    ipl_t ipl;
+
+    ipl = interrupts_disable();
+    spinlock_lock(&parea_lock);
+
+    /*
+     * TODO: we should really check for overlaps here.
+     * However, we should be safe because the kernel is pretty sane and
+     * memory of different devices doesn't overlap.
+     */
+    btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
+
+    spinlock_unlock(&parea_lock);
+    interrupts_restore(ipl);
+}
+
 /** Map piece of physical memory into virtual address space of current task.
  *
- * @param pf Physical frame address of the starting frame.
- * @param vp Virtual page address of the starting page.
+ * @param pf Physical address of the starting frame.
+ * @param vp Virtual address of the starting page.
  * @param pages Number of pages to map.
  * @param flags Address space area flags for the mapping.
  *
- * @return 0 on success, EPERM if the caller lacks capabilities to use this syscall,
- *     ENOENT if there is no task matching the specified ID and ENOMEM if
- *     there was a problem in creating address space area.
+ * @return 0 on success, EPERM if the caller lacks capabilities to use this
+ * syscall, ENOENT if there is no task matching the specified ID or the
+ * physical address space is not enabled for mapping and ENOMEM if there
+ * was a problem in creating address space area. ENOTSUP is returned when
+ * an attempt to create an illegal address alias is detected.
  */
 static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
…
     ipl = interrupts_disable();
+
+    /*
+     * Check if the physical memory area is enabled for mapping.
+     * If the architecture supports virtually indexed caches, intercept
+     * attempts to create an illegal address alias.
+     */
+    spinlock_lock(&parea_lock);
+    parea_t *parea;
+    btree_node_t *nodep;
+    parea = btree_search(&parea_btree, (btree_key_t) pf, &nodep);
+    if (!parea || parea->frames < pages || ((flags & AS_AREA_CACHEABLE) &&
+        !parea->cacheable) || (!(flags & AS_AREA_CACHEABLE) &&
+        parea->cacheable)) {
+        /*
+         * This physical memory area cannot be mapped.
+         */
+        spinlock_unlock(&parea_lock);
+        interrupts_restore(ipl);
+        return ENOENT;
+    }
+
+#ifdef CONFIG_VIRT_IDX_DCACHE
+    if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) {
+        /*
+         * Refuse to create an illegal address alias.
+         */
+        spinlock_unlock(&parea_lock);
+        interrupts_restore(ipl);
+        return ENOTSUP;
+    }
+#endif /* CONFIG_VIRT_IDX_DCACHE */
+
+    spinlock_unlock(&parea_lock);
+
     spinlock_lock(&TASK->lock);
…
  * @param size Size of the enabled I/O space..
  *
- * @return 0 on success, EPERM if the caller lacks capabilities to use this syscall,
+ * @return 0 on success, EPERM if the caller lacks capabilities to use this
+ * syscall, ENOENT if there is no task matching the specified ID.
  */
 static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
…
  * @return 0 on success, otherwise it returns error code found in errno.h
  */
-unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t pages,
-    unative_t flags)
-{
-    return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base, FRAME_SIZE),
-        ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE), (count_t) pages,
-        (int) flags);
+unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t
+    pages, unative_t flags)
+{
+    return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
+        FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
+        (count_t) pages, (int) flags);
 }
…
         return (unative_t) rc;
 
-    return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id, (uintptr_t) arg.ioaddr, (size_t) arg.size);
+    return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
+        (uintptr_t) arg.ioaddr, (size_t) arg.size);
 }
 
 /** Disable or enable preemption.
  *
- * @param enable If non-zero, the preemption counter will be decremented, leading to potential
- *     enabling of preemption. Otherwise the preemption counter will be incremented,
- *     preventing preemption from occurring.
+ * @param enable If non-zero, the preemption counter will be decremented,
+ * leading to potential enabling of preemption. Otherwise the preemption
+ * counter will be incremented, preventing preemption from occurring.
  *
  * @return Zero on success or EPERM if callers capabilities are not sufficient.
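The two blocks added above amount to a contract that userspace can observe through the physmem_map() syscall. A minimal sketch of the possible outcomes, with hypothetical caller-side values pf, vp and pages (physmem_map() here stands for whatever userspace wrapper invokes sys_physmem_map(); the error codes and conditions are the ones documented in ddi_physmem_map() above):

    /*
     * Sketch of the caller-visible contract of sys_physmem_map():
     *
     *   0       - pf registered via ddi_parea_register(), enough frames,
     *             cacheability of the flags matches parea->cacheable, and
     *             (under CONFIG_VIRT_IDX_DCACHE) PAGE_COLOR(vp) equals
     *             PAGE_COLOR(parea->vbase)
     *   ENOENT  - pf not registered, too many pages requested, or
     *             cacheability mismatch
     *   ENOTSUP - virtually indexed D-cache and the page colors differ:
     *             the illegal address alias is refused outright
     *   EPERM   - caller lacks the required capability (unchanged here)
     */
    int rc = physmem_map((void *) pf, (void *) vp, pages,
        AS_AREA_READ | AS_AREA_CACHEABLE);

Refusing the alias up front is what makes the D-cache shootdown machinery removed from the sparc64 code earlier in this changeset unnecessary.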
kernel/generic/src/lib/rd.c
 #include <mm/frame.h>
 #include <sysinfo/sysinfo.h>
+#include <ddi/ddi.h>
+
+static parea_t rd_parea;    /**< Physical memory area for rd. */
 
 int init_rd(rd_header * header, size_t size)
 {
     /* Identify RAM disk */
-    if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) || (header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3))
+    if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) ||
+        (header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3))
         return RE_INVALID;
…
     dsize = size - hsize;
 
+    rd_parea.pbase = KA2PA((void *) header + hsize);
+    rd_parea.vbase = (uintptr_t) ((void *) header + hsize);
+    rd_parea.frames = SIZE2FRAMES(dsize);
+    rd_parea.cacheable = true;
+    ddi_parea_register(&rd_parea);
+
     sysinfo_set_item_val("rd", NULL, true);
     sysinfo_set_item_val("rd.size", NULL, dsize);
-    sysinfo_set_item_val("rd.address.physical", NULL, (unative_t) KA2PA((void *) header + hsize));
+    sysinfo_set_item_val("rd.address.physical", NULL, (unative_t)
+        KA2PA((void *) header + hsize));
+    sysinfo_set_item_val("rd.address.color", NULL, (unative_t)
+        PAGE_COLOR((uintptr_t) header + hsize));
 
     return RE_OK;
kernel/generic/src/main/main.c
 #include <console/klog.h>
 #include <smp/smp.h>
+#include <ddi/ddi.h>
 
 /** Global configuration structure. */
…
  * appropriate sizes and addresses.
  */
-uintptr_t hardcoded_load_address = 0;  /**< Virtual address of where the kernel is loaded. */
-size_t hardcoded_ktext_size = 0;       /**< Size of the kernel code in bytes. */
-size_t hardcoded_kdata_size = 0;       /**< Size of the kernel data in bytes. */
-
-uintptr_t stack_safe = 0;              /**< Lowest safe stack virtual address */
+uintptr_t hardcoded_load_address = 0;  /**< Virtual address of where the kernel
+                                        * is loaded. */
+size_t hardcoded_ktext_size = 0;       /**< Size of the kernel code in bytes.
+                                        */
+size_t hardcoded_kdata_size = 0;       /**< Size of the kernel data in bytes.
+                                        */
+uintptr_t stack_safe = 0;              /**< Lowest safe stack virtual address.
+                                        */
 
 void main_bsp(void);
…
     config.memory_size = get_memory_size();
 
-    config.kernel_size = ALIGN_UP(hardcoded_ktext_size + hardcoded_kdata_size, PAGE_SIZE);
+    config.kernel_size = ALIGN_UP(hardcoded_ktext_size +
+        hardcoded_kdata_size, PAGE_SIZE);
     config.stack_size = CONFIG_STACK_SIZE;
…
     count_t i;
     for (i = 0; i < init.cnt; i++) {
-        if (PA_overlaps(config.stack_base, config.stack_size, init.tasks[i].addr, init.tasks[i].size))
-            config.stack_base = ALIGN_UP(init.tasks[i].addr + init.tasks[i].size, config.stack_size);
+        if (PA_overlaps(config.stack_base, config.stack_size,
+            init.tasks[i].addr, init.tasks[i].size))
+            config.stack_base = ALIGN_UP(init.tasks[i].addr +
+                init.tasks[i].size, config.stack_size);
     }
 
     /* Avoid placing stack on top of boot allocations. */
     if (ballocs.size) {
-        if (PA_overlaps(config.stack_base, config.stack_size, ballocs.base, ballocs.size))
-            config.stack_base = ALIGN_UP(ballocs.base + ballocs.size, PAGE_SIZE);
+        if (PA_overlaps(config.stack_base, config.stack_size,
+            ballocs.base, ballocs.size))
+            config.stack_base = ALIGN_UP(ballocs.base +
+                ballocs.size, PAGE_SIZE);
     }
…
     context_save(&ctx);
-    context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base, THREAD_STACK_SIZE);
+    context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,
+        THREAD_STACK_SIZE);
     context_restore(&ctx);
     /* not reached */
…
      */
     arch_pre_mm_init();
-    frame_init();    /* Initialize at least 1 memory segment big enough for slab to work */
+    frame_init();
+    /* Initialize at least 1 memory segment big enough for slab to work. */
     slab_cache_init();
     btree_init();
…
     page_init();
     tlb_init();
+    ddi_init();
     arch_post_mm_init();
 
     version_print();
-    printf("kernel: %.*p hardcoded_ktext_size=%zdK, hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2, config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >> 10);
-    printf("stack: %.*p size=%zdK\n", sizeof(uintptr_t) * 2, config.stack_base, config.stack_size >> 10);
+    printf("kernel: %.*p hardcoded_ktext_size=%zdK, "
+        "hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2,
+        config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >>
+        10);
+    printf("stack: %.*p size=%zdK\n", sizeof(uintptr_t) * 2,
+        config.stack_base, config.stack_size >> 10);
 
     arch_pre_smp_init();
     smp_init();
-
-    slab_enable_cpucache();    /* Slab must be initialized AFTER we know the number of processors */
+    /* Slab must be initialized after we know the number of processors. */
+    slab_enable_cpucache();
 
     printf("config.memory_size=%zdM\n", config.memory_size >> 20);
…
     if (init.cnt > 0) {
         for (i = 0; i < init.cnt; i++)
-            printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, sizeof(uintptr_t) * 2, init.tasks[i].addr, i, init.tasks[i].size);
+            printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i,
+                sizeof(uintptr_t) * 2, init.tasks[i].addr, i,
+                init.tasks[i].size);
     } else
         printf("No init binaries found\n");
…
      * switch to this cpu's private stack prior to waking kmp up.
      */
-    context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE);
+    context_set(&CPU->saved_context, FADDR(main_ap_separated_stack),
+        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     context_restore(&CPU->saved_context);
     /* not reached */
kernel/generic/src/mm/as.c
     as->page_table = page_table_create(flags);
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-    as->dcache_flush_on_install = false;
-    as->dcache_flush_on_deinstall = false;
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-
     return as;
 }
…
     else
         memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
-
-#ifdef CONFIG_VIRT_IDX_DCACHE
-    /*
-     * When the area is being created with the AS_AREA_ATTR_PARTIAL flag, the
-     * orig_color is probably wrong until the flag is reset. In other words, it is
-     * initialized with the color of the area being created and not with the color
-     * of the original address space area at the beginning of the share chain. Of
-     * course, the correct color is set by as_area_share() before the flag is
-     * reset.
-     */
-    a->orig_color = PAGE_COLOR(base);
-#endif /* CONFIG_VIRT_IDX_DCACHE */
 
     btree_create(&a->used_space);
…
  * or ENOMEM if there was a problem in allocating destination address space
  * area. ENOTSUP is returned if the address space area backend does not support
- * sharing.
+ * sharing or if the kernel detects an attempt to create an illegal address
+ * alias.
  */
 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
…
     int src_flags;
     size_t src_size;
-    int src_orig_color;
     as_area_t *src_area, *dst_area;
     share_info_t *sh_info;
…
         return ENOENT;
     }
-
 
     if (!src_area->backend || !src_area->backend->share) {
…
     src_backend = src_area->backend;
     src_backend_data = src_area->backend_data;
-    src_orig_color = src_area->orig_color;
 
     /* Share the cacheable flag from the original mapping */
…
         return EPERM;
     }
+
+#ifdef CONFIG_VIRT_IDX_DCACHE
+    if (!(dst_flags_mask & AS_AREA_EXEC)) {
+        if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
+            /*
+             * Refuse to create an illegal address alias.
+             */
+            mutex_unlock(&src_area->lock);
+            mutex_unlock(&src_as->lock);
+            interrupts_restore(ipl);
+            return ENOTSUP;
+        }
+    }
+#endif /* CONFIG_VIRT_IDX_DCACHE */
 
     /*
…
     dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
     dst_area->sh_info = sh_info;
-    dst_area->orig_color = src_orig_color;
-#ifdef CONFIG_VIRT_IDX_DCACHE
-    if (src_orig_color != PAGE_COLOR(dst_base)) {
-        /*
-         * We have just detected an attempt to create an invalid address
-         * alias. We allow this and set a special flag that tells the
-         * architecture specific code to flush the D-cache when the
-         * offending address space is installed and deinstalled
-         * (cleanup).
-         *
-         * In order for the flags to take effect immediately, we also
-         * perform a global D-cache shootdown.
-         */
-        dcache_shootdown_start();
-        dst_as->dcache_flush_on_install = true;
-        dst_as->dcache_flush_on_deinstall = true;
-        dcache_flush();
-        dcache_shootdown_finalize();
-    }
-#endif /* CONFIG_VIRT_IDX_DCACHE */
     mutex_unlock(&dst_area->lock);
     mutex_unlock(&dst_as->lock);
kernel/generic/src/sysinfo/sysinfo.c
     switch (root->val_type) {
-        case SYSINFO_VAL_UNDEFINED:
-            val = 0;
-            vtype = "UND";
-            break;
-        case SYSINFO_VAL_VAL:
-            val = root->val.val;
-            vtype = "VAL";
-            break;
-        case SYSINFO_VAL_FUNCTION:
-            val = ((sysinfo_val_fn_t) (root->val.fn)) (root);
-            vtype = "FUN";
-            break;
+    case SYSINFO_VAL_UNDEFINED:
+        val = 0;
+        vtype = "UND";
+        break;
+    case SYSINFO_VAL_VAL:
+        val = root->val.val;
+        vtype = "VAL";
+        break;
+    case SYSINFO_VAL_FUNCTION:
+        val = ((sysinfo_val_fn_t) (root->val.fn)) (root);
+        vtype = "FUN";
+        break;
     }
 
-    printf("%s %s val:%d(%x) sub:%s\n", root->name, vtype, val, val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ? "NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ? "TAB" : "FUN"));
+    printf("%s %s val:%d(%x) sub:%s\n", root->name, vtype, val,
+        val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ?
+        "NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ?
+        "TAB" : "FUN"));
 
     if (root->subinfo_type == SYSINFO_SUBINFO_TABLE)
kernel/generic/src/time/clock.c
 #include <sysinfo/sysinfo.h>
 #include <arch/barrier.h>
+#include <mm/frame.h>
+#include <ddi/ddi.h>
+
+/** Physical memory area of the real time clock. */
+static parea_t clock_parea;
 
 /* Pointers to public variables with time */
…
  * information about realtime data. We allocate 1 page with these
  * data and update it periodically.
- *
- *
  */
 void clock_counter_init(void)
 {
     void *faddr;
 
-    faddr = frame_alloc(0, FRAME_ATOMIC);
+    faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
     if (!faddr)
         panic("Cannot allocate page for clock");
 
-    public_time = (struct ptime *)PA2KA(faddr);
+    public_time = (struct ptime *) PA2KA(faddr);
 
     /* TODO: We would need some arch dependent settings here */
…
     public_time->useconds = 0;
 
-    sysinfo_set_item_val("clock.faddr", NULL, (unative_t)faddr);
+    clock_parea.pbase = (uintptr_t) faddr;
+    clock_parea.vbase = (uintptr_t) public_time;
+    clock_parea.frames = 1;
+    clock_parea.cacheable = true;
+    ddi_parea_register(&clock_parea);
+
+    /*
+     * Prepare information for the userspace so that it can successfully
+     * physmem_map() the clock_parea.
+     */
+    sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
+    sysinfo_set_item_val("clock.fcolor", NULL, (unative_t)
+        PAGE_COLOR(clock_parea.vbase));
+    sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
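As the new comment says, the clock.cacheable and clock.fcolor items exist so that userspace can physmem_map() the clock page correctly. A hedged sketch of the consumer side; sysinfo_value(), physmem_map() and pick_page_with_color() stand in for the actual userspace wrappers and are assumptions, not APIs confirmed by this changeset:

    /* Assumed userspace wrappers; names for illustration only. */
    uintptr_t faddr = sysinfo_value("clock.faddr");
    int fcolor = (int) sysinfo_value("clock.fcolor");

    int flags = AS_AREA_READ;
    if (sysinfo_value("clock.cacheable"))
        flags |= AS_AREA_CACHEABLE;    /* must match clock_parea.cacheable */

    /* The chosen address must be page-aligned and, on virtually indexed
     * D-cache machines, satisfy PAGE_COLOR(vaddr) == fcolor, or the
     * kernel answers ENOTSUP. */
    void *vaddr = pick_page_with_color(fcolor);    /* hypothetical helper */

    int rc = physmem_map((void *) faddr, vaddr, 1, flags);

This mirrors the producer-side checks added to ddi_physmem_map() earlier in the changeset.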