Fork us on GitHub Follow us on Facebook Follow us on Twitter

Changeset 59fb782 in mainline


Ignore:
Timestamp:
2013-03-24T19:38:18Z (9 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master
Children:
d965dc3
Parents:
119b46e
Message:

Unify the use of virtual addresses and virtual page addresses in mm code.

  • as_page_fault() accepts the faulting address (if available) and propagates the page-aligned faulting page further along
  • backends' page_fault() handlers now assume a page-aligned faulting page address (asserted via IS_ALIGNED)
  • page_mapping_insert/remove/find() accept arbitrary addresses, but pass only page-aligned page and frame addresses along
  • as_area_create() and as_area_resize() now check that the given address is page-aligned
  • renames of various variables to better fit their purpose (address vs. page)
  • no need to align the addresses in mips32 TLB exception handlers now
Location:
kernel
Files:
9 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/amd64/src/mm/page.c

    r119b46e r59fb782  
    7878void page_fault(unsigned int n, istate_t *istate)
    7979{
    80         uintptr_t page = read_cr2();
     80        uintptr_t badvaddr = read_cr2();
    8181       
    8282        if (istate->error_word & PFERR_CODE_RSVD)
     
    9292                access = PF_ACCESS_READ;
    9393       
    94         as_page_fault(page, access, istate);
     94        (void) as_page_fault(badvaddr, access, istate);
    9595}
    9696
  • kernel/arch/ia32/src/mm/page.c

    r119b46e r59fb782  
    8484void page_fault(unsigned int n __attribute__((unused)), istate_t *istate)
    8585{
    86         uintptr_t page;
     86        uintptr_t badvaddr;
    8787        pf_access_t access;
    8888       
    89         page = read_cr2();
     89        badvaddr = read_cr2();
    9090               
    9191        if (istate->error_word & PFERR_CODE_RSVD)
     
    9797                access = PF_ACCESS_READ;
    9898       
    99         if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
    100                 fault_if_from_uspace(istate, "Page fault: %#x.", page);
    101                 panic_memtrap(istate, access, page, NULL);
    102         }
     99        (void) as_page_fault(badvaddr, access, istate);
    103100}
    104101
  • kernel/arch/mips32/src/mm/tlb.c

    r119b46e r59fb782  
    9494        entry_lo_t lo;
    9595        uintptr_t badvaddr;
    96         uintptr_t page;
    9796        pte_t *pte;
    9897       
    9998        badvaddr = cp0_badvaddr_read();
    100         page = ALIGN_DOWN(badvaddr, PAGE_SIZE);
    101 
    102         pte = page_mapping_find(AS, page, true);
     99
     100        pte = page_mapping_find(AS, badvaddr, true);
    103101        if (pte && pte->p) {
    104102                /*
     
    125123        }
    126124
    127         (void) as_page_fault(page, PF_ACCESS_READ, istate);
     125        (void) as_page_fault(badvaddr, PF_ACCESS_READ, istate);
    128126}
    129127
     
    137135        tlb_index_t index;
    138136        uintptr_t badvaddr;
    139         uintptr_t page;
    140137        pte_t *pte;
    141138
     
    161158
    162159        badvaddr = cp0_badvaddr_read();
    163         page = ALIGN_DOWN(badvaddr, PAGE_SIZE);
    164 
    165         pte = page_mapping_find(AS, page, true);
     160
     161        pte = page_mapping_find(AS, badvaddr, true);
    166162        if (pte && pte->p) {
    167163                /*
     
    189185        }
    190186
    191         (void) as_page_fault(page, PF_ACCESS_READ, istate);
     187        (void) as_page_fault(badvaddr, PF_ACCESS_READ, istate);
    192188}
    193189
     
    201197        tlb_index_t index;
    202198        uintptr_t badvaddr;
    203         uintptr_t page;
    204199        pte_t *pte;
    205200
    206201        badvaddr = cp0_badvaddr_read();
    207         page = ALIGN_DOWN(badvaddr, PAGE_SIZE);
    208202
    209203        /*
     
    227221        }
    228222
    229         pte = page_mapping_find(AS, page, true);
     223        pte = page_mapping_find(AS, badvaddr, true);
    230224        if (pte && pte->p && pte->w) {
    231225                /*
     
    254248        }
    255249
    256         (void) as_page_fault(page, PF_ACCESS_WRITE, istate);
     250        (void) as_page_fault(badvaddr, PF_ACCESS_WRITE, istate);
    257251}
    258252
  • kernel/arch/sparc64/src/mm/sun4u/tlb.c

    r119b46e r59fb782  
    196196void fast_instruction_access_mmu_miss(sysarg_t unused, istate_t *istate)
    197197{
    198         uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    199198        size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
    200199        pte_t *t;
    201200
    202         t = page_mapping_find(AS, page_16k, true);
     201        t = page_mapping_find(AS, istate->tpc, true);
    203202        if (t && PTE_EXECUTABLE(t)) {
    204203                /*
     
    216215                 * handler.
    217216                 */
    218                 as_page_fault(page_16k, PF_ACCESS_EXEC, istate);
     217                as_page_fault(istate->tpc, PF_ACCESS_EXEC, istate);
    219218        }
    220219}
  • kernel/generic/src/mm/as.c

    r119b46e r59fb782  
    544544    mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
    545545{
    546         if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
     546        if ((*base != (uintptr_t) -1) && !IS_ALIGNED(*base, PAGE_SIZE))
    547547                return NULL;
    548548       
     
    688688int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
    689689{
     690        if (!IS_ALIGNED(address, PAGE_SIZE))
     691                return EINVAL;
     692
    690693        mutex_lock(&as->lock);
    691694       
     
    13501353 * Interrupts are assumed disabled.
    13511354 *
    1352  * @param page   Faulting page.
    1353  * @param access Access mode that caused the page fault (i.e.
    1354  *               read/write/exec).
    1355  * @param istate Pointer to the interrupted state.
     1355 * @param address Faulting address.
     1356 * @param access  Access mode that caused the page fault (i.e.
     1357 *                read/write/exec).
     1358 * @param istate  Pointer to the interrupted state.
    13561359 *
    13571360 * @return AS_PF_FAULT on page fault.
     
    13611364 *
    13621365 */
    1363 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
    1364 {
     1366int as_page_fault(uintptr_t address, pf_access_t access, istate_t *istate)
     1367{
     1368        uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);
    13651369        int rc = AS_PF_FAULT;
    13661370
     
    14521456                task_kill_self(true);
    14531457        } else {
    1454                 fault_if_from_uspace(istate, "Page fault: %p.", (void *) page);
    1455                 panic_memtrap(istate, access, page, NULL);
     1458                fault_if_from_uspace(istate, "Page fault: %p.", (void *) address);
     1459                panic_memtrap(istate, access, address, NULL);
    14561460        }
    14571461       
     
    16791683{
    16801684        ASSERT(mutex_locked(&area->lock));
    1681         ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
     1685        ASSERT(IS_ALIGNED(page, PAGE_SIZE));
    16821686        ASSERT(count);
    16831687       
     
    19631967{
    19641968        ASSERT(mutex_locked(&area->lock));
    1965         ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
     1969        ASSERT(IS_ALIGNED(page, PAGE_SIZE));
    19661970        ASSERT(count);
    19671971       
  • kernel/generic/src/mm/backend_anon.c

    r119b46e r59fb782  
    173173 *
    174174 * @param area Pointer to the address space area.
    175  * @param addr Faulting virtual address.
     175 * @param upage Faulting virtual page.
    176176 * @param access Access mode that caused the fault (i.e. read/write/exec).
    177177 *
     
    179179 *     serviced).
    180180 */
    181 int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
    182 {
    183         uintptr_t upage = ALIGN_DOWN(addr, PAGE_SIZE);
     181int anon_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
     182{
    184183        uintptr_t kpage;
    185184        uintptr_t frame;
     
    187186        ASSERT(page_table_locked(AS));
    188187        ASSERT(mutex_locked(&area->lock));
     188        ASSERT(IS_ALIGNED(upage, PAGE_SIZE));
    189189
    190190        if (!as_area_check_access(area, access))
  • kernel/generic/src/mm/backend_elf.c

    r119b46e r59fb782  
    235235 *
    236236 * @param area          Pointer to the address space area.
    237  * @param addr          Faulting virtual address.
     237 * @param upage         Faulting virtual page.
    238238 * @param access        Access mode that caused the fault (i.e.
    239239 *                      read/write/exec).
     
    242242 *                      on success (i.e. serviced).
    243243 */
    244 int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
     244int elf_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
    245245{
    246246        elf_header_t *elf = area->backend_data.elf;
     
    250250        uintptr_t frame;
    251251        uintptr_t kpage;
    252         uintptr_t upage;
    253252        uintptr_t start_anon;
    254253        size_t i;
     
    257256        ASSERT(page_table_locked(AS));
    258257        ASSERT(mutex_locked(&area->lock));
     258        ASSERT(IS_ALIGNED(upage, PAGE_SIZE));
    259259
    260260        if (!as_area_check_access(area, access))
    261261                return AS_PF_FAULT;
    262262       
    263         if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
     263        if (upage < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
    264264                return AS_PF_FAULT;
    265265       
    266         if (addr >= entry->p_vaddr + entry->p_memsz)
     266        if (upage >= entry->p_vaddr + entry->p_memsz)
    267267                return AS_PF_FAULT;
    268268       
    269         i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
     269        i = (upage - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
    270270        base = (uintptr_t)
    271271            (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
    272 
    273         /* Virtual address of faulting page */
    274         upage = ALIGN_DOWN(addr, PAGE_SIZE);
    275272
    276273        /* Virtual address of the end of initialized part of segment */
  • kernel/generic/src/mm/backend_phys.c

    r119b46e r59fb782  
    111111 *
    112112 * @param area Pointer to the address space area.
    113  * @param addr Faulting virtual address.
     113 * @param upage Faulting virtual page.
    114114 * @param access Access mode that caused the fault (i.e. read/write/exec).
    115115 *
     
    117117 * serviced).
    118118 */
    119 int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
     119int phys_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
    120120{
    121121        uintptr_t base = area->backend_data.base;
     
    123123        ASSERT(page_table_locked(AS));
    124124        ASSERT(mutex_locked(&area->lock));
     125        ASSERT(IS_ALIGNED(upage, PAGE_SIZE));
    125126
    126127        if (!as_area_check_access(area, access))
    127128                return AS_PF_FAULT;
    128129
    129         ASSERT(addr - area->base < area->backend_data.frames * FRAME_SIZE);
    130         page_mapping_insert(AS, addr, base + (addr - area->base),
     130        ASSERT(upage - area->base < area->backend_data.frames * FRAME_SIZE);
     131        page_mapping_insert(AS, upage, base + (upage - area->base),
    131132            as_area_get_flags(area));
    132133       
    133         if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
     134        if (!used_space_insert(area, upage, 1))
    134135                panic("Cannot insert used space.");
    135136
  • kernel/generic/src/mm/page.c

    r119b46e r59fb782  
    104104        ASSERT(page_mapping_operations->mapping_insert);
    105105
    106         page_mapping_operations->mapping_insert(as, page, frame, flags);
     106        page_mapping_operations->mapping_insert(as, ALIGN_DOWN(page, PAGE_SIZE),
     107            ALIGN_DOWN(frame, FRAME_SIZE), flags);
    107108       
    108109        /* Repel prefetched accesses to the old mapping. */
     
    127128        ASSERT(page_mapping_operations->mapping_remove);
    128129       
    129         page_mapping_operations->mapping_remove(as, page);
     130        page_mapping_operations->mapping_remove(as,
     131            ALIGN_DOWN(page, PAGE_SIZE));
    130132       
    131133        /* Repel prefetched accesses to the old mapping. */
     
    150152        ASSERT(page_mapping_operations->mapping_find);
    151153       
    152         return page_mapping_operations->mapping_find(as, page, nolock);
     154        return page_mapping_operations->mapping_find(as,
     155            ALIGN_DOWN(page, PAGE_SIZE), nolock);
    153156}
    154157
Note: See TracChangeset for help on using the changeset viewer.