Changeset 2c2d54a in mainline for kernel/generic/src
- Timestamp: 2016-09-02T17:58:05Z (9 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 4c3602c4
- Parents: 4bf0926e (diff), 3233adb (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src
- Files: 2 added, 8 edited
  - ipc/ipc.c (modified) (4 diffs)
  - ipc/ops/pagein.c (added)
  - ipc/sysipc.c (modified) (3 diffs)
  - ipc/sysipc_ops.c (modified) (2 diffs)
  - mm/as.c (modified) (10 diffs)
  - mm/backend_anon.c (modified) (1 diff)
  - mm/backend_elf.c (modified) (3 diffs)
  - mm/backend_user.c (added)
  - mm/page.c (modified) (3 diffs)
  - synch/futex.c (modified) (2 diffs)
kernel/generic/src/ipc/ipc.c
r4bf0926e → r2c2d54a

 	call->forget = false;
 	call->sender = NULL;
+	call->callerbox = &TASK->answerbox;
 	call->buffer = NULL;
 }
…
 	phone->state = IPC_PHONE_FREE;
 	atomic_set(&phone->active_calls, 0);
+}
+
+/** Helper function to facilitate synchronous calls.
+ *
+ * @param phone    Destination kernel phone structure.
+ * @param request  Call structure with request.
+ *
+ * @return EOK on success or a negative error code.
+ *
+ */
+int ipc_call_sync(phone_t *phone, call_t *request)
+{
+	answerbox_t *mybox = slab_alloc(ipc_answerbox_slab, 0);
+	ipc_answerbox_init(mybox, TASK);
+
+	/* We will receive data in a special box. */
+	request->callerbox = mybox;
+
+	int rc = ipc_call(phone, request);
+	if (rc != EOK) {
+		slab_free(ipc_answerbox_slab, mybox);
+		return rc;
+	}
+	// TODO: forget the call if interrupted
+	(void) ipc_wait_for_call(mybox, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
+
+	slab_free(ipc_answerbox_slab, mybox);
+	return EOK;
 }
…
 	spinlock_unlock(&call->forget_lock);
 
-	answerbox_t *callerbox = &call->sender->answerbox;
+	answerbox_t *callerbox = call->callerbox;
 	bool do_lock = ((!selflocked) || (callerbox != &TASK->answerbox));
…
 	ipc_cleanup_call_list(&TASK->answerbox,
 	    &TASK->answerbox.dispatched_calls);
 
 	ipc_forget_all_active_calls();
 	ipc_wait_for_all_answered_calls();
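The new ipc_call_sync() above blocks on a private answerbox allocated from ipc_answerbox_slab instead of the task-wide answerbox, so the answer no longer has to be fished out of TASK->answerbox. A minimal sketch of the calling pattern it enables follows; the wrapper function, the chosen method and the fault_addr argument are illustrative only (the real in-kernel caller is ipc_req_internal() in ipc/sysipc.c below), while IPC_SET_IMETHOD, IPC_SET_ARG1 and IPC_GET_RETVAL are the usual kernel IPC macros.

/*
 * Sketch only: synchronous calling pattern enabled by ipc_call_sync().
 * Assumes the usual kernel headers (ipc/ipc.h, ipc/sysipc.h) are included.
 */
static int example_sync_request(phone_t *phone, uintptr_t fault_addr)
{
	call_t *call = ipc_call_alloc(0);

	/* Illustrative request: ask the pager to page in fault_addr. */
	IPC_SET_IMETHOD(call->data, IPC_M_PAGE_IN);
	IPC_SET_ARG1(call->data, (sysarg_t) fault_addr);

	/* Blocks on a private answerbox until the answer arrives. */
	int rc = ipc_call_sync(phone, call);
	if (rc == EOK) {
		/* The answer has been written back into call->data. */
		rc = (int) IPC_GET_RETVAL(call->data);
	}

	ipc_call_free(call);
	return rc;
}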
kernel/generic/src/ipc/sysipc.c
r4bf0926e → r2c2d54a

 {
 	switch (imethod) {
+	case IPC_M_PAGE_IN:
 	case IPC_M_SHARE_OUT:
 	case IPC_M_SHARE_IN:
…
 	case IPC_M_CONNECT_TO_ME:
 	case IPC_M_CONNECT_ME_TO:
+	case IPC_M_PAGE_IN:
 	case IPC_M_SHARE_OUT:
 	case IPC_M_SHARE_IN:
…
 {
 	return SYSIPC_OP(request_process, call, box);
+}
+
+/** Make a call over IPC and wait for reply.
+ *
+ * @param phoneid       Phone handle for the call.
+ * @param data[inout]   Structure with request/reply data.
+ *
+ * @return EOK on success.
+ * @return ENOENT if there is no such phone handle.
+ *
+ */
+int ipc_req_internal(int phoneid, ipc_data_t *data)
+{
+	phone_t *phone;
+	if (phone_get(phoneid, &phone) != EOK)
+		return ENOENT;
+
+	call_t *call = ipc_call_alloc(0);
+	memcpy(call->data.args, data->args, sizeof(data->args));
+
+	int rc = request_preprocess(call, phone);
+	if (!rc) {
+#ifdef CONFIG_UDEBUG
+		udebug_stoppable_begin();
+#endif
+
+		rc = ipc_call_sync(phone, call);
+
+#ifdef CONFIG_UDEBUG
+		udebug_stoppable_end();
+#endif
+
+		if (rc != EOK)
+			return EINTR;
+
+		process_answer(call);
+	} else
+		IPC_SET_RETVAL(call->data, rc);
+
+	memcpy(data->args, call->data.args, sizeof(data->args));
+	ipc_call_free(call);
+
+	return EOK;
 }
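ipc_req_internal() copies the request arguments into a freshly allocated call, runs request_preprocess(), blocks in ipc_call_sync() and copies the already post-processed answer back into the caller's ipc_data_t. A hedged sketch of a caller is shown below; the wrapper function itself is hypothetical, and only ipc_req_internal(), ipc_data_t and IPC_GET_RETVAL() are real kernel identifiers.

/*
 * Hypothetical wrapper, for illustration only: driving ipc_req_internal()
 * from a syscall-level caller that already holds a kernel-side ipc_data_t.
 */
static sysarg_t illustrative_sync_syscall(int phoneid, ipc_data_t *kdata)
{
	int rc = ipc_req_internal(phoneid, kdata);
	if (rc != EOK)
		return (sysarg_t) rc;	/* e.g. ENOENT for a stale phone handle */

	/* kdata->args now carries the answer, including the return value. */
	return IPC_GET_RETVAL(*kdata);
}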
kernel/generic/src/ipc/sysipc_ops.c
r4bf0926e → r2c2d54a

 sysipc_ops_t ipc_m_connect_to_me_ops;
 sysipc_ops_t ipc_m_connect_me_to_ops;
+sysipc_ops_t ipc_m_page_in_ops;
 sysipc_ops_t ipc_m_share_out_ops;
 sysipc_ops_t ipc_m_share_in_ops;
…
 	[IPC_M_CONNECT_TO_ME] = &ipc_m_connect_to_me_ops,
 	[IPC_M_CONNECT_ME_TO] = &ipc_m_connect_me_to_ops,
+	[IPC_M_PAGE_IN] = &ipc_m_page_in_ops,
 	[IPC_M_SHARE_OUT] = &ipc_m_share_out_ops,
 	[IPC_M_SHARE_IN] = &ipc_m_share_in_ops,
kernel/generic/src/mm/as.c
r4bf0926e → r2c2d54a

  * @param backend_data NULL or a pointer to custom backend data.
  * @param base         Starting virtual address of the area.
- *                     If set to -1, a suitable mappable area is found.
- * @param bound        Lowest address bound if base is set to -1.
+ *                     If set to AS_AREA_ANY, a suitable mappable area is
+ *                     found.
+ * @param bound        Lowest address bound if base is set to AS_AREA_ANY.
  *                     Otherwise ignored.
…
     mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
 {
-	if ((*base != (uintptr_t) -1) && !IS_ALIGNED(*base, PAGE_SIZE))
+	if ((*base != (uintptr_t) AS_AREA_ANY) && !IS_ALIGNED(*base, PAGE_SIZE))
 		return NULL;
…
 	mutex_lock(&as->lock);
 
-	if (*base == (uintptr_t) -1) {
+	if (*base == (uintptr_t) AS_AREA_ANY) {
 		*base = as_get_unmapped_area(as, bound, size, guarded);
 		if (*base == (uintptr_t) -1) {
…
 		for (; i < node_size; i++) {
-			pte_t *pte = page_mapping_find(as,
-			    ptr + P2SZ(i), false);
+			pte_t pte;
+			bool found = page_mapping_find(as,
+			    ptr + P2SZ(i), false, &pte);
 
-			ASSERT(pte);
-			ASSERT(PTE_VALID(pte));
-			ASSERT(PTE_PRESENT(pte));
+			ASSERT(found);
+			ASSERT(PTE_VALID(&pte));
+			ASSERT(PTE_PRESENT(&pte));
 
 			if ((area->backend) &&
…
 				area->backend->frame_free(area,
 				    ptr + P2SZ(i),
-				    PTE_GET_FRAME(pte));
+				    PTE_GET_FRAME(&pte));
…
 		for (size = 0; size < (size_t) node->value[i]; size++) {
-			pte_t *pte = page_mapping_find(as,
-			    ptr + P2SZ(size), false);
+			pte_t pte;
+			bool found = page_mapping_find(as,
+			    ptr + P2SZ(size), false, &pte);
 
-			ASSERT(pte);
-			ASSERT(PTE_VALID(pte));
-			ASSERT(PTE_PRESENT(pte));
+			ASSERT(found);
+			ASSERT(PTE_VALID(&pte));
+			ASSERT(PTE_PRESENT(&pte));
 
 			if ((area->backend) &&
…
 				area->backend->frame_free(area,
 				    ptr + P2SZ(size),
-				    PTE_GET_FRAME(pte));
+				    PTE_GET_FRAME(&pte));
…
 		for (size = 0; size < (size_t) node->value[i]; size++) {
-			pte_t *pte = page_mapping_find(as,
-			    ptr + P2SZ(size), false);
+			pte_t pte;
+			bool found = page_mapping_find(as,
+			    ptr + P2SZ(size), false, &pte);
 
-			ASSERT(pte);
-			ASSERT(PTE_VALID(pte));
-			ASSERT(PTE_PRESENT(pte));
+			ASSERT(found);
+			ASSERT(PTE_VALID(&pte));
+			ASSERT(PTE_PRESENT(&pte));
 
-			old_frame[frame_idx++] = PTE_GET_FRAME(pte);
+			old_frame[frame_idx++] = PTE_GET_FRAME(&pte);
 
 			/* Remove old mapping */
…
 	 * we need to make sure the mapping has not been already inserted.
 	 */
-	pte_t *pte;
-	if ((pte = page_mapping_find(AS, page, false))) {
-		if (PTE_PRESENT(pte)) {
-			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
-			    (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
-			    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
+	pte_t pte;
+	bool found = page_mapping_find(AS, page, false, &pte);
+	if (found) {
+		if (PTE_PRESENT(&pte)) {
+			if (((access == PF_ACCESS_READ) && PTE_READABLE(&pte)) ||
+			    (access == PF_ACCESS_WRITE && PTE_WRITABLE(&pte)) ||
+			    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(&pte))) {
 				page_table_unlock(AS, false);
 				mutex_unlock(&area->lock);
…
 sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
-    uintptr_t bound)
+    uintptr_t bound, as_area_pager_info_t *pager_info)
 {
 	uintptr_t virt = base;
+	mem_backend_t *backend;
+	mem_backend_data_t backend_data;
+
+	if (pager_info == AS_AREA_UNPAGED)
+		backend = &anon_backend;
+	else {
+		backend = &user_backend;
+		if (copy_from_uspace(&backend_data.pager_info, pager_info,
+		    sizeof(as_area_pager_info_t)) != EOK) {
+			return (sysarg_t) AS_MAP_FAILED;
+		}
+	}
 	as_area_t *area = as_area_create(AS, flags, size,
-	    AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
+	    AS_AREA_ATTR_NONE, backend, &backend_data, &virt, bound);
 	if (area == NULL)
-		return (sysarg_t) -1;
+		return (sysarg_t) AS_MAP_FAILED;
 
 	return (sysarg_t) virt;
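sys_as_area_create() now selects user_backend whenever a pager_info structure is supplied and falls back to anon_backend for AS_AREA_UNPAGED areas; separately, the documentation hunk above replaces the magic -1 base with AS_AREA_ANY. A kernel-side sketch of that convention follows; the flag combination is an assumption made for illustration, while the as_area_create() argument order matches the call visible in the diff.

/*
 * Sketch of the AS_AREA_ANY convention documented above: let the kernel pick
 * a suitable base address for an anonymous area. Assumes <mm/as.h>.
 */
static as_area_t *example_create_anon_area(size_t size)
{
	uintptr_t virt = (uintptr_t) AS_AREA_ANY;

	as_area_t *area = as_area_create(AS,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, size,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);

	/* On success, virt now holds the kernel-chosen base of the new area. */
	return area;
}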
kernel/generic/src/mm/backend_anon.c
r4bf0926e → r2c2d54a

 		for (j = 0; j < count; j++) {
-			pte_t *pte;
+			pte_t pte;
+			bool found;
 
 			page_table_lock(area->as, false);
-			pte = page_mapping_find(area->as,
-			    base + P2SZ(j), false);
-			ASSERT(pte && PTE_VALID(pte) &&
-			    PTE_PRESENT(pte));
+			found = page_mapping_find(area->as,
+			    base + P2SZ(j), false, &pte);
+
+			ASSERT(found);
+			ASSERT(PTE_VALID(&pte));
+			ASSERT(PTE_PRESENT(&pte));
+
 			btree_insert(&area->sh_info->pagemap,
 			    (base + P2SZ(j)) - area->base,
-			    (void *) PTE_GET_FRAME(pte), NULL);
+			    (void *) PTE_GET_FRAME(&pte), NULL);
 			page_table_unlock(area->as, false);
 
-			pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+			pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte));
 			frame_reference_add(pfn);
 		}
kernel/generic/src/mm/backend_elf.c
r4bf0926e → r2c2d54a

 		for (j = 0; j < count; j++) {
-			pte_t *pte;
+			pte_t pte;
+			bool found;
 
 			/*
…
 
 			page_table_lock(area->as, false);
-			pte = page_mapping_find(area->as,
-			    base + P2SZ(j), false);
-			ASSERT(pte && PTE_VALID(pte) &&
-			    PTE_PRESENT(pte));
+			found = page_mapping_find(area->as,
+			    base + P2SZ(j), false, &pte);
+
+			ASSERT(found);
+			ASSERT(PTE_VALID(&pte));
+			ASSERT(PTE_PRESENT(&pte));
+
 			btree_insert(&area->sh_info->pagemap,
 			    (base + P2SZ(j)) - area->base,
-			    (void *) PTE_GET_FRAME(pte), NULL);
+			    (void *) PTE_GET_FRAME(&pte), NULL);
 			page_table_unlock(area->as, false);
 
-			pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+			pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte));
 			frame_reference_add(pfn);
 		}
…
 				dirty = true;
 			} else {
-				pte_t *pte = page_mapping_find(AS_KERNEL,
-				    base + i * FRAME_SIZE, true);
-
-				ASSERT(pte);
-				ASSERT(PTE_PRESENT(pte));
-
-				frame = PTE_GET_FRAME(pte);
+				pte_t pte;
+				bool found;
+
+				found = page_mapping_find(AS_KERNEL,
+				    base + i * FRAME_SIZE, true, &pte);
+
+				ASSERT(found);
+				ASSERT(PTE_PRESENT(&pte));
+
+				frame = PTE_GET_FRAME(&pte);
 			}
 		} else if (upage >= start_anon) {
kernel/generic/src/mm/page.c
r4bf0926e → r2c2d54a

 /** Find mapping for virtual page.
  *
- * @param as     Address space to which page belongs.
- * @param page   Virtual page.
- * @param nolock True if the page tables need not be locked.
- *
- * @return NULL if there is no such mapping; requested mapping
- *         otherwise.
- *
- */
-NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page, bool nolock)
+ * @param as         Address space to which page belongs.
+ * @param page       Virtual page.
+ * @param nolock     True if the page tables need not be locked.
+ * @param[out] pte   Structure that will receive a copy of the found PTE.
+ *
+ * @return True if the mapping was found, false otherwise.
+ */
+NO_TRACE bool page_mapping_find(as_t *as, uintptr_t page, bool nolock,
+    pte_t *pte)
 {
 	ASSERT(nolock || page_table_locked(as));
…
 
 	return page_mapping_operations->mapping_find(as,
-	    ALIGN_DOWN(page, PAGE_SIZE), nolock);
+	    ALIGN_DOWN(page, PAGE_SIZE), nolock, pte);
+}
+
+/** Update mapping for virtual page.
+ *
+ * Use only to update accessed and modified/dirty bits.
+ *
+ * @param as     Address space to which page belongs.
+ * @param page   Virtual page.
+ * @param nolock True if the page tables need not be locked.
+ * @param pte    New PTE.
+ */
+NO_TRACE void page_mapping_update(as_t *as, uintptr_t page, bool nolock,
+    pte_t *pte)
+{
+	ASSERT(nolock || page_table_locked(as));
+
+	ASSERT(page_mapping_operations);
+	ASSERT(page_mapping_operations->mapping_find);
+
+	page_mapping_operations->mapping_update(as,
+	    ALIGN_DOWN(page, PAGE_SIZE), nolock, pte);
 }
…
 	page_table_lock(AS, true);
 
-	pte_t *pte = page_mapping_find(AS, virt, false);
-	if ((!PTE_VALID(pte)) || (!PTE_PRESENT(pte))) {
+	pte_t pte;
+	bool found = page_mapping_find(AS, virt, false, &pte);
+	if (!found || !PTE_VALID(&pte) || !PTE_PRESENT(&pte)) {
 		page_table_unlock(AS, true);
 		return ENOENT;
 	}
 
-	*phys = PTE_GET_FRAME(pte) +
+	*phys = PTE_GET_FRAME(&pte) +
 	    (virt - ALIGN_DOWN(virt, PAGE_SIZE));
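With this change, page_mapping_find() no longer hands out a pointer into the live page tables; it copies the PTE into a caller-provided structure, and changes to the accessed/dirty bits are pushed back with the new page_mapping_update(). A sketch of the resulting calling convention follows, using only identifiers that appear in this changeset; the wrapper function itself is illustrative.

/*
 * Sketch of the copy-out calling convention defined above.
 * Assumes <mm/page.h> and <mm/as.h>.
 */
static bool example_inspect_mapping(as_t *as, uintptr_t page)
{
	page_table_lock(as, true);

	pte_t pte;
	bool found = page_mapping_find(as, page, false, &pte);
	if (!found || !PTE_PRESENT(&pte)) {
		page_table_unlock(as, true);
		return false;
	}

	/*
	 * Work with the local copy; if only the accessed/modified bits were
	 * changed, write the copy back as documented for page_mapping_update().
	 */
	page_mapping_update(as, page, false, &pte);

	page_table_unlock(as, true);
	return true;
}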
kernel/generic/src/synch/futex.c
r4bf0926e → r2c2d54a

 	spinlock_lock(&futex_ht_lock);
 
-	bool found = false;
-	pte_t *t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), true);
-
-	if (t && PTE_VALID(t) && PTE_PRESENT(t)) {
-		found = true;
-		*paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE));
+	bool success = false;
+
+	pte_t t;
+	bool found;
+
+	found = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), true, &t);
+	if (found && PTE_VALID(&t) && PTE_PRESENT(&t)) {
+		success = true;
+		*paddr = PTE_GET_FRAME(&t) +
+		    (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE));
 	}
…
 	page_table_unlock(AS, false);
 
-	return found;
+	return success;