Changeset da1bafb in mainline for kernel/genarch/src
- Timestamp:
- 2010-05-24T18:57:31Z (15 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 0095368
- Parents:
- 666f492
- Location:
- kernel/genarch/src
- Files:
-
- 5 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/genarch/src/drivers/ega/ega.c
r666f492 rda1bafb 63 63 64 64 typedef struct { 65 SPINLOCK_DECLARE(lock);65 IRQ_SPINLOCK_DECLARE(lock); 66 66 67 67 uint32_t cursor; … … 71 71 } ega_instance_t; 72 72 73 static void ega_putchar(outdev_t * dev, wchar_t ch, bool silent);74 static void ega_redraw(outdev_t * dev);73 static void ega_putchar(outdev_t *, wchar_t, bool); 74 static void ega_redraw(outdev_t *); 75 75 76 76 static outdev_operations_t egadev_ops = { … … 540 540 ega_instance_t *instance = (ega_instance_t *) dev->data; 541 541 542 ipl_t ipl = interrupts_disable(); 543 spinlock_lock(&instance->lock); 542 irq_spinlock_lock(&instance->lock, true); 544 543 545 544 switch (ch) { … … 564 563 ega_move_cursor(instance, silent); 565 564 566 spinlock_unlock(&instance->lock); 567 interrupts_restore(ipl); 565 irq_spinlock_unlock(&instance->lock, true); 568 566 } 569 567 … … 572 570 ega_instance_t *instance = (ega_instance_t *) dev->data; 573 571 574 ipl_t ipl = interrupts_disable(); 575 spinlock_lock(&instance->lock); 572 irq_spinlock_lock(&instance->lock, true); 576 573 577 574 memcpy(instance->addr, instance->backbuf, EGA_VRAM_SIZE); … … 579 576 ega_show_cursor(instance, silent); 580 577 581 spinlock_unlock(&instance->lock); 582 interrupts_restore(ipl); 578 irq_spinlock_unlock(&instance->lock, true); 583 579 } 584 580 … … 598 594 egadev->data = instance; 599 595 600 spinlock_initialize(&instance->lock, "*ega_lock");596 irq_spinlock_initialize(&instance->lock, "*ega.instance.lock"); 601 597 602 598 instance->base = base; -
kernel/genarch/src/mm/as_ht.c
r666f492 rda1bafb 30 30 * @{ 31 31 */ 32 32 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Address space functions for global page hash table. 36 36 */ 37 37 … … 46 46 #include <synch/mutex.h> 47 47 48 static pte_t *ht_create( int flags);49 static void ht_destroy(pte_t * page_table);48 static pte_t *ht_create(unsigned int); 49 static void ht_destroy(pte_t *); 50 50 51 static void ht_lock(as_t * as, bool lock);52 static void ht_unlock(as_t * as, bool unlock);51 static void ht_lock(as_t *, bool); 52 static void ht_unlock(as_t *, bool); 53 53 54 54 as_operations_t as_ht_operations = { … … 68 68 * 69 69 * @return Returns NULL. 70 * 70 71 */ 71 pte_t *ht_create( int flags)72 pte_t *ht_create(unsigned int flags) 72 73 { 73 74 if (flags & FLAG_AS_KERNEL) { … … 75 76 mutex_initialize(&page_ht_lock, MUTEX_PASSIVE); 76 77 } 78 77 79 return NULL; 78 80 } … … 83 85 * 84 86 * @param page_table This parameter is ignored. 87 * 85 88 */ 86 89 void ht_destroy(pte_t *page_table) … … 94 97 * Interrupts must be disabled. 95 98 * 96 * @param as Address space.99 * @param as Address space. 97 100 * @param lock If false, do not attempt to lock the address space. 101 * 98 102 */ 99 103 void ht_lock(as_t *as, bool lock) … … 101 105 if (lock) 102 106 mutex_lock(&as->lock); 107 103 108 mutex_lock(&page_ht_lock); 104 109 } … … 109 114 * Interrupts must be disabled. 110 115 * 111 * @param as Address space.116 * @param as Address space. 112 117 * @param unlock If false, do not attempt to lock the address space. 118 * 113 119 */ 114 120 void ht_unlock(as_t *as, bool unlock) 115 121 { 116 122 mutex_unlock(&page_ht_lock); 123 117 124 if (unlock) 118 125 mutex_unlock(&as->lock); -
kernel/genarch/src/mm/as_pt.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Address space functions for 4-level hierarchical pagetables. 36 36 */ 37 37 … … 47 47 #include <arch.h> 48 48 49 static pte_t *ptl0_create( int flags);50 static void ptl0_destroy(pte_t * page_table);49 static pte_t *ptl0_create(unsigned int); 50 static void ptl0_destroy(pte_t *); 51 51 52 static void pt_lock(as_t * as, bool lock);53 static void pt_unlock(as_t * as, bool unlock);52 static void pt_lock(as_t *, bool); 53 static void pt_unlock(as_t *, bool); 54 54 55 55 as_operations_t as_pt_operations = { … … 67 67 * 68 68 * @return New PTL0. 69 * 69 70 */ 70 pte_t *ptl0_create( int flags)71 pte_t *ptl0_create(unsigned int flags) 71 72 { 72 pte_t *src_ptl0, *dst_ptl0; 73 ipl_t ipl; 74 int table_size; 75 76 dst_ptl0 = (pte_t *) frame_alloc(PTL0_SIZE, FRAME_KA); 77 table_size = FRAME_SIZE << PTL0_SIZE; 78 79 if (flags & FLAG_AS_KERNEL) { 73 pte_t *dst_ptl0 = (pte_t *) frame_alloc(PTL0_SIZE, FRAME_KA); 74 size_t table_size = FRAME_SIZE << PTL0_SIZE; 75 76 if (flags & FLAG_AS_KERNEL) 80 77 memsetb(dst_ptl0, table_size, 0); 81 } else { 82 uintptr_t src, dst; 83 78 else { 84 79 /* 85 80 * Copy the kernel address space portion to new PTL0. 
81 * 86 82 */ 87 88 ipl = interrupts_disable(); 89 mutex_lock(&AS_KERNEL->lock); 90 src_ptl0 = (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table); 91 92 src = (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)]; 93 dst = (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)]; 94 83 84 ipl_t ipl = interrupts_disable(); 85 mutex_lock(&AS_KERNEL->lock); 86 87 pte_t *src_ptl0 = 88 (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table); 89 90 uintptr_t src = 91 (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)]; 92 uintptr_t dst = 93 (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)]; 94 95 95 memsetb(dst_ptl0, table_size, 0); 96 memcpy((void *) dst, (void *) src, table_size - (src - (uintptr_t) src_ptl0)); 96 memcpy((void *) dst, (void *) src, 97 table_size - (src - (uintptr_t) src_ptl0)); 98 97 99 mutex_unlock(&AS_KERNEL->lock); 98 100 interrupts_restore(ipl); 99 101 } 100 102 101 103 return (pte_t *) KA2PA((uintptr_t) dst_ptl0); 102 104 } … … 107 109 * 108 110 * @param page_table Physical address of PTL0. 111 * 109 112 */ 110 113 void ptl0_destroy(pte_t *page_table) 111 114 { 112 frame_free((uintptr_t) page_table);115 frame_free((uintptr_t) page_table); 113 116 } 114 117 … … 118 121 * Interrupts must be disabled. 119 122 * 120 * @param as Address space.123 * @param as Address space. 121 124 * @param lock If false, do not attempt to lock the address space. 125 * 122 126 */ 123 127 void pt_lock(as_t *as, bool lock) … … 132 136 * Interrupts must be disabled. 133 137 * 134 * @param as Address space.138 * @param as Address space. 135 139 * @param unlock If false, do not attempt to unlock the address space. 140 * 136 141 */ 137 142 void pt_unlock(as_t *as, bool unlock) -
kernel/genarch/src/mm/page_ht.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Virtual Address Translation (VAT) for global page hash table. 36 36 */ 37 37 … … 52 52 #include <align.h> 53 53 54 static size_t hash(unative_t key[]); 55 static bool compare(unative_t key[], size_t keys, link_t *item); 56 static void remove_callback(link_t *item); 57 58 static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, 59 int flags); 60 static void ht_mapping_remove(as_t *as, uintptr_t page); 61 static pte_t *ht_mapping_find(as_t *as, uintptr_t page); 54 static size_t hash(unative_t[]); 55 static bool compare(unative_t[], size_t, link_t *); 56 static void remove_callback(link_t *); 57 58 static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int); 59 static void ht_mapping_remove(as_t *, uintptr_t); 60 static pte_t *ht_mapping_find(as_t *, uintptr_t); 62 61 63 62 /** … … 65 64 * after address space lock and after any address space area 66 65 * locks. 66 * 67 67 */ 68 68 mutex_t page_ht_lock; 69 69 70 /** 71 * Page hash table.70 /** Page hash table. 71 * 72 72 * The page hash table may be accessed only when page_ht_lock is held. 73 * 73 74 */ 74 75 hash_table_t page_ht; … … 93 94 * 94 95 * @return Index into page hash table. 96 * 95 97 */ 96 98 size_t hash(unative_t key[]) … … 98 100 as_t *as = (as_t *) key[KEY_AS]; 99 101 uintptr_t page = (uintptr_t) key[KEY_PAGE]; 100 size_t index;101 102 102 103 /* … … 104 105 * of occurring. Least significant bits of VPN compose the 105 106 * hash index. 106 */ 107 index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1)); 107 * 108 */ 109 size_t index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1)); 108 110 109 111 /* … … 111 113 * similar addresses. Least significant bits compose the 112 114 * hash index. 115 * 113 116 */ 114 117 index |= ((unative_t) as) & (PAGE_HT_ENTRIES - 1); … … 119 122 /** Compare page hash table item with page and/or address space. 120 123 * 121 * @param key Array of one or two keys (i.e. 
page and/or address space).124 * @param key Array of one or two keys (i.e. page and/or address space). 122 125 * @param keys Number of keys passed. 123 126 * @param item Item to compare the keys with. 124 127 * 125 128 * @return true on match, false otherwise. 129 * 126 130 */ 127 131 bool compare(unative_t key[], size_t keys, link_t *item) 128 132 { 129 pte_t *t;130 131 133 ASSERT(item); 132 ASSERT((keys > 0) && (keys <= PAGE_HT_KEYS)); 133 134 ASSERT(keys > 0); 135 ASSERT(keys <= PAGE_HT_KEYS); 136 134 137 /* 135 138 * Convert item to PTE. 136 * /137 t = hash_table_get_instance(item, pte_t, link);138 139 if (keys == PAGE_HT_KEYS) {140 return (key[KEY_AS] == (uintptr_t) t->as) &&141 (key[KEY_PAGE] == t->page);142 } else {143 return (key[KEY_AS] == (uintptr_t) t->as);144 }139 * 140 */ 141 pte_t *pte = hash_table_get_instance(item, pte_t, link); 142 143 if (keys == PAGE_HT_KEYS) 144 return (key[KEY_AS] == (uintptr_t) pte->as) && 145 (key[KEY_PAGE] == pte->page); 146 147 return (key[KEY_AS] == (uintptr_t) pte->as); 145 148 } 146 149 … … 148 151 * 149 152 * @param item Page hash table item being removed. 153 * 150 154 */ 151 155 void remove_callback(link_t *item) 152 156 { 153 pte_t *t;154 155 157 ASSERT(item); 156 158 157 159 /* 158 160 * Convert item to PTE. 159 */ 160 t = hash_table_get_instance(item, pte_t, link); 161 162 free(t); 161 * 162 */ 163 pte_t *pte = hash_table_get_instance(item, pte_t, link); 164 165 free(pte); 163 166 } 164 167 … … 166 169 * 167 170 * Map virtual address page to physical address frame 168 * using flags. 171 * using flags. 169 172 * 170 173 * The page table must be locked and interrupts must be disabled. 171 174 * 172 * @param as Address space to which page belongs.173 * @param page Virtual address of the page to be mapped.175 * @param as Address space to which page belongs. 176 * @param page Virtual address of the page to be mapped. 174 177 * @param frame Physical address of memory frame to which the mapping is done. 
175 178 * @param flags Flags to be used for mapping. 176 */ 177 void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags) 178 { 179 pte_t *t; 179 * 180 */ 181 void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, 182 unsigned int flags) 183 { 180 184 unative_t key[2] = { 181 185 (uintptr_t) as, … … 184 188 185 189 if (!hash_table_find(&page_ht, key)) { 186 t= (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);187 ASSERT( t!= NULL);188 189 t->g = (flags & PAGE_GLOBAL) != 0;190 t->x = (flags & PAGE_EXEC) != 0;191 t->w = (flags & PAGE_WRITE) != 0;192 t->k = !(flags & PAGE_USER);193 t->c = (flags & PAGE_CACHEABLE) != 0;194 t->p = !(flags & PAGE_NOT_PRESENT);195 t->a = false;196 t->d = false;197 198 t->as = as;199 t->page = ALIGN_DOWN(page, PAGE_SIZE);200 t->frame = ALIGN_DOWN(frame, FRAME_SIZE);201 202 hash_table_insert(&page_ht, key, & t->link);190 pte_t *pte = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC); 191 ASSERT(pte != NULL); 192 193 pte->g = (flags & PAGE_GLOBAL) != 0; 194 pte->x = (flags & PAGE_EXEC) != 0; 195 pte->w = (flags & PAGE_WRITE) != 0; 196 pte->k = !(flags & PAGE_USER); 197 pte->c = (flags & PAGE_CACHEABLE) != 0; 198 pte->p = !(flags & PAGE_NOT_PRESENT); 199 pte->a = false; 200 pte->d = false; 201 202 pte->as = as; 203 pte->page = ALIGN_DOWN(page, PAGE_SIZE); 204 pte->frame = ALIGN_DOWN(frame, FRAME_SIZE); 205 206 hash_table_insert(&page_ht, key, &pte->link); 203 207 } 204 208 } … … 212 216 * The page table must be locked and interrupts must be disabled. 213 217 * 214 * @param as Address space to wich page belongs.218 * @param as Address space to wich page belongs. 215 219 * @param page Virtual address of the page to be demapped. 220 * 216 221 */ 217 222 void ht_mapping_remove(as_t *as, uintptr_t page) … … 236 241 * The page table must be locked and interrupts must be disabled. 237 242 * 238 * @param as Address space to wich page belongs.243 * @param as Address space to wich page belongs. 239 244 * @param page Virtual page. 
240 245 * 241 246 * @return NULL if there is no such mapping; requested mapping otherwise. 247 * 242 248 */ 243 249 pte_t *ht_mapping_find(as_t *as, uintptr_t page) 244 250 { 245 link_t *hlp;246 pte_t *t = NULL;247 251 unative_t key[2] = { 248 252 (uintptr_t) as, … … 250 254 }; 251 255 252 hlp= hash_table_find(&page_ht, key);253 if ( hlp)254 t = hash_table_get_instance(hlp, pte_t, link);255 256 return t;256 link_t *cur = hash_table_find(&page_ht, key); 257 if (cur) 258 return hash_table_get_instance(cur, pte_t, link); 259 260 return NULL; 257 261 } 258 262 -
kernel/genarch/src/mm/page_pt.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Virtual Address Translation for hierarchical 4-level page tables. 36 36 */ 37 37 … … 46 46 #include <memstr.h> 47 47 48 static void pt_mapping_insert(as_t * as, uintptr_t page, uintptr_t frame, int flags);49 static void pt_mapping_remove(as_t * as, uintptr_t page);50 static pte_t *pt_mapping_find(as_t * as, uintptr_t page);48 static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int); 49 static void pt_mapping_remove(as_t *, uintptr_t); 50 static pte_t *pt_mapping_find(as_t *, uintptr_t); 51 51 52 52 page_mapping_operations_t pt_mapping_operations = { … … 63 63 * The page table must be locked and interrupts must be disabled. 64 64 * 65 * @param as Address space to wich page belongs.66 * @param page Virtual address of the page to be mapped.65 * @param as Address space to wich page belongs. 66 * @param page Virtual address of the page to be mapped. 67 67 * @param frame Physical address of memory frame to which the mapping is done. 68 68 * @param flags Flags to be used for mapping. 
69 */ 70 void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags) 69 * 70 */ 71 void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, 72 unsigned int flags) 71 73 { 72 pte_t *ptl0, *ptl1, *ptl2, *ptl3; 73 pte_t *newpt; 74 75 ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 76 74 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 75 77 76 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) { 78 newpt = (pte_t *)frame_alloc(PTL1_SIZE, FRAME_KA);77 pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE, FRAME_KA); 79 78 memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0); 80 79 SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt)); 81 80 SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); 82 81 } 83 84 pt l1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));85 82 83 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page))); 84 86 85 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) { 87 newpt = (pte_t *)frame_alloc(PTL2_SIZE, FRAME_KA);86 pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE, FRAME_KA); 88 87 memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0); 89 88 SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt)); 90 89 SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); 91 90 } 92 93 pt l2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));94 91 92 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page))); 93 95 94 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) { 96 newpt = (pte_t *)frame_alloc(PTL3_SIZE, FRAME_KA);95 pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE, FRAME_KA); 97 96 memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0); 98 97 SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt)); 99 98 SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); 100 99 } 101 102 pt l3 = (pte_t *) 
PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));103 100 101 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page))); 102 104 103 SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame); 105 104 SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags); … … 116 115 * The page table must be locked and interrupts must be disabled. 117 116 * 118 * @param as Address space to wich page belongs.117 * @param as Address space to wich page belongs. 119 118 * @param page Virtual address of the page to be demapped. 119 * 120 120 */ 121 121 void pt_mapping_remove(as_t *as, uintptr_t page) 122 122 { 123 pte_t *ptl0, *ptl1, *ptl2, *ptl3;124 bool empty = true;125 int i;126 127 123 /* 128 124 * First, remove the mapping, if it exists. 125 * 129 126 */ 130 131 ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 132 127 128 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 133 129 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) 134 130 return; 135 136 ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page))); 137 131 132 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page))); 138 133 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) 139 134 return; 140 141 ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page))); 142 135 136 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page))); 143 137 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) 144 138 return; 145 146 pt l3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));147 139 140 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page))); 141 148 142 /* Destroy the mapping. Setting to PAGE_NOT_PRESENT is not sufficient. */ 149 143 memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0); 150 144 151 145 /* 152 146 * Second, free all empty tables along the way from PTL3 down to PTL0. 
147 * 153 148 */ 154 149 155 /* check PTL3 */ 150 /* Check PTL3 */ 151 bool empty = true; 152 153 unsigned int i; 156 154 for (i = 0; i < PTL3_ENTRIES; i++) { 157 155 if (PTE_VALID(&ptl3[i])) { … … 160 158 } 161 159 } 160 162 161 if (empty) { 163 162 /* 164 163 * PTL3 is empty. 165 164 * Release the frame and remove PTL3 pointer from preceding table. 165 * 166 166 */ 167 167 frame_free(KA2PA((uintptr_t) ptl3)); 168 if (PTL2_ENTRIES) 169 memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0); 170 else if (PTL1_ENTRIES) 171 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); 172 else 173 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 168 #if (PTL2_ENTRIES != 0) 169 memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0); 170 #elif (PTL1_ENTRIES != 0) 171 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); 172 #else 173 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 174 #endif 174 175 } else { 175 176 /* … … 177 178 * Therefore, there must be a path from PTL0 to PTL3 and 178 179 * thus nothing to free in higher levels. 179 * /180 return;181 }182 183 /* check PTL2, empty is still true */184 if (PTL2_ENTRIES) {185 for (i = 0; i < PTL2_ENTRIES; i++) { 186 if (PTE_VALID(&ptl2[i])) {187 empty = false;188 break;189 }180 * 181 */ 182 return; 183 } 184 185 /* Check PTL2, empty is still true */ 186 #if (PTL2_ENTRIES != 0) 187 for (i = 0; i < PTL2_ENTRIES; i++) { 188 if (PTE_VALID(&ptl2[i])) { 189 empty = false; 190 break; 190 191 } 191 if (empty) { 192 /* 193 * PTL2 is empty. 194 * Release the frame and remove PTL2 pointer from preceding table. 195 */ 196 frame_free(KA2PA((uintptr_t) ptl2)); 197 if (PTL1_ENTRIES) 198 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); 199 else 200 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 192 } 193 194 if (empty) { 195 /* 196 * PTL2 is empty. 197 * Release the frame and remove PTL2 pointer from preceding table. 
198 * 199 */ 200 frame_free(KA2PA((uintptr_t) ptl2)); 201 #if (PTL1_ENTRIES != 0) 202 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); 203 #else 204 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 205 #endif 206 } else { 207 /* 208 * PTL2 is not empty. 209 * Therefore, there must be a path from PTL0 to PTL2 and 210 * thus nothing to free in higher levels. 211 * 212 */ 213 return; 214 } 215 #endif /* PTL2_ENTRIES != 0 */ 216 217 /* check PTL1, empty is still true */ 218 #if (PTL1_ENTRIES != 0) 219 for (i = 0; i < PTL1_ENTRIES; i++) { 220 if (PTE_VALID(&ptl1[i])) { 221 empty = false; 222 break; 201 223 } 202 else { 203 /* 204 * PTL2 is not empty. 205 * Therefore, there must be a path from PTL0 to PTL2 and 206 * thus nothing to free in higher levels. 207 */ 208 return; 209 } 210 } 211 212 /* check PTL1, empty is still true */ 213 if (PTL1_ENTRIES) { 214 for (i = 0; i < PTL1_ENTRIES; i++) { 215 if (PTE_VALID(&ptl1[i])) { 216 empty = false; 217 break; 218 } 219 } 220 if (empty) { 221 /* 222 * PTL1 is empty. 223 * Release the frame and remove PTL1 pointer from preceding table. 224 */ 225 frame_free(KA2PA((uintptr_t) ptl1)); 226 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 227 } 228 } 229 224 } 225 226 if (empty) { 227 /* 228 * PTL1 is empty. 229 * Release the frame and remove PTL1 pointer from preceding table. 230 * 231 */ 232 frame_free(KA2PA((uintptr_t) ptl1)); 233 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 234 } 235 #endif /* PTL1_ENTRIES != 0 */ 230 236 } 231 237 … … 236 242 * The page table must be locked and interrupts must be disabled. 237 243 * 238 * @param as Address space to which page belongs.244 * @param as Address space to which page belongs. 239 245 * @param page Virtual page. 240 246 * 241 * @return NULL if there is no such mapping; entry from PTL3 describing the mapping otherwise. 247 * @return NULL if there is no such mapping; entry from PTL3 describing 248 * the mapping otherwise. 
249 * 242 250 */ 243 251 pte_t *pt_mapping_find(as_t *as, uintptr_t page) 244 252 { 245 pte_t *ptl0, *ptl1, *ptl2, *ptl3; 246 247 ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 248 253 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 249 254 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) 250 255 return NULL; 251 252 ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page))); 253 256 257 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page))); 254 258 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) 255 259 return NULL; 256 257 ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page))); 258 260 261 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page))); 259 262 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) 260 263 return NULL; 261 262 pt l3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));263 264 265 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page))); 266 264 267 return &ptl3[PTL3_INDEX(page)]; 265 268 }
Note: See TracChangeset for help on using the changeset viewer.