Changeset 3bacee1 in mainline for kernel/generic/src
- Timestamp:
- 2018-04-12T16:27:17Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 3cf22f9
- Parents:
- 76d0981d
- git-author:
- Jiri Svoboda <jiri@…> (2018-04-11 19:25:33)
- git-committer:
- Jiri Svoboda <jiri@…> (2018-04-12 16:27:17)
- Location:
- kernel/generic/src
- Files:
- 16 edited
kernel/generic/src/adt/avl.c
r76d0981d → r3bacee1: whitespace-only; three hunks (around lines 188, 310 and 406) reindent these statements without changing them:

    dpc = par->key > key ? &par->lft : &par->rgt;

    static int
    repair(avltree_t *t, avltree_node_t *u, avltree_node_t *v, avltree_node_t *w,
        int *dir, int ro)

    dir = (gpa->lft == node) ? LEFT : RIGHT;
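One pattern recurs through all sixteen files, so it is worth sketching once before the per-file hunks: when an expression breaks across lines, the binary operator (&&, ||, ?, :, =, +) now ends the broken line instead of opening the continuation. A minimal hypothetical illustration (the in_range_* functions are made-up names, not HelenOS code):

    #include <stdbool.h>

    /* Before the rule: the operator opens the continuation line. */
    static bool in_range_old(unsigned base, unsigned count, unsigned x)
    {
        return (base <= x)
            && (x < base + count);
    }

    /* After the rule: the operator ends the line being broken. */
    static bool in_range_new(unsigned base, unsigned count, unsigned x)
    {
        return (base <= x) &&
            (x < base + count);
    }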
kernel/generic/src/adt/cht.c
r76d0981d → r3bacee1: formatting normalization throughout; no functional change. The recoverable hunks group as follows.

- Forward declarations (lines 407-477): continuation arguments reindented; the get_next() declaration now reads (line 460):

      static cht_link_t *get_next(marked_ptr_t link);

- The ?: operator now ends the broken line instead of opening the continuation (lines 582-584):

      marked_ptr_t head_link = set_invalid ?
          make_link(&sentinel, N_INVALID) :
          make_link(&sentinel, N_NORMAL);

- && and || likewise move to the end of the broken line (lines 1083-1084, 1356-1357, 1500-1501, 1918-1919, 1941-1942, 2080-2081, 2487-2488, 2561-2563), e.g.:

      assert(cur == &sentinel || hash <= node_hash(h, cur) ||
          node_hash(h, cur) == h->invalid_hash);

- Broken assignments keep = on the first line (lines 1911-1912, 1937-1938):

      marked_ptr_t ret =
          cas_link(wnd->ppred, wnd->cur, N_NORMAL, wnd->cur, N_JOIN_FOLLOWS);

- An else-if condition and its opening brace are rejoined (line 1643):

      } else if (h->new_b->order < b->order) {

- Cast spacing and the __atomic_compare_exchange_n() call are normalized (lines 2597-2599, 2622, 2692-2700):

      static inline cht_link_t *get_next(marked_ptr_t link)
      {
          return (cht_link_t *)(link & ~N_MARK_MASK);
      }

      const cht_link_t *item1 = (const cht_link_t *) node;

      __atomic_compare_exchange_n((void **)link, &expected, (void *)new, false,
          __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);

- The many remaining hunks only reindent continuation lines of multi-line calls and declarations.
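For context on the _cas_link() hunk above: the concurrent hash table keeps mark bits in the low-order bits of each next pointer, so a single compare-and-swap updates the pointer and its marks together. A minimal sketch of that technique using the GCC/Clang __atomic builtins; marked_t, node_t, MARK_MASK and cas_marked() are hypothetical stand-ins, not the HelenOS definitions:

    #include <stdint.h>

    typedef uintptr_t marked_t;            /* node pointer + mark bits in one word */

    #define MARK_MASK    ((uintptr_t) 3)   /* low bits are free: nodes are aligned */
    #define MARK_DELETED ((uintptr_t) 1)

    typedef struct node {
        marked_t next;
    } node_t;

    static inline marked_t make_marked(node_t *p, uintptr_t mark)
    {
        return (uintptr_t) p | mark;
    }

    static inline node_t *strip_mark(marked_t m)
    {
        return (node_t *) (m & ~MARK_MASK);
    }

    /* Atomically swap *link from cur to new; returns the value actually seen,
     * so a caller compares it with cur to detect a lost race. */
    static inline marked_t cas_marked(marked_t *link, marked_t cur, marked_t new)
    {
        marked_t expected = cur;
        __atomic_compare_exchange_n(link, &expected, new, false,
            __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
        return expected;
    }

On success the returned word equals cur, which mirrors how the cas_link() callers in the hunks above test ret against make_link(...).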
kernel/generic/src/console/cmd.c
r76d0981d → r3bacee1: whitespace-only; continuation lines of several printf() calls (around lines 925, 1090, 1127, 1191 and 1208) are reindented, e.g.:

    printf("Writing %#" PRIx32 " -> %p\n", arg1, (void *) addr);
kernel/generic/src/console/kconsole.c
r76d0981d → r3bacee1: whitespace-only changes to the history[] initializer (line 88), the cast assignment in cmdtab_enum() (line 168) and loop indentation around line 378; the affected lines now read:

    static wchar_t history[KCONSOLE_HISTORY][MAX_CMDLINE] = { };
    link_t **startpos = (link_t **) ctx;
kernel/generic/src/cpu/cpu.c
r76d0981d → r3bacee1: whitespace-only; the cpus allocation and the memsetb() that clears it (lines 67-73) are reindented:

    cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count,
        FRAME_ATOMIC);
    memsetb(cpus, sizeof(cpu_t) * config.cpu_count, 0);
kernel/generic/src/main/main.c
r76d0981d → r3bacee1: whitespace-only; the boot-arguments buffer initializer (line 123) now reads:

    char bargs[CONFIG_BOOT_ARGUMENTS_BUFLEN] = { };
kernel/generic/src/mm/as.c
r76d0981d → r3bacee1: whitespace-only; continuation lines of a page_mapping_find() call (around line 1008) and of the copy_from_uspace() into the pager info (around line 2198) are reindented.
kernel/generic/src/mm/backend_user.c
r76d0981d → r3bacee1: whitespace-only; the IPC page-in request initializer (line 120) now reads:

    ipc_data_t data = { };
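kconsole.c, main.c and backend_user.c above all touch the same = { } pattern. For readers of these hunks: empty-brace initialization was a GNU extension until C23 standardized it, while the portable C99/C11 spelling seeds the first member and zero-fills the rest. A hypothetical illustration (ipc_data_example_t is a made-up stand-in type):

    typedef struct {
        int imethod;
        unsigned long args[6];
    } ipc_data_example_t;            /* hypothetical stand-in for ipc_data_t */

    ipc_data_example_t a = { };      /* empty braces: GNU extension, standardized in C23 */
    ipc_data_example_t b = { 0 };    /* portable C99/C11: zero-initializes every member */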
kernel/generic/src/mm/frame.c
r76d0981d → r3bacee1: operator placement and spacing fixes:

- && now ends the broken line in the zone lookup (lines 200-201):

      if ((zones.info[i].base <= frame) &&
          (zones.info[i].base + zones.info[i].count >= frame + count))

- as does + in the merged-zone size computation (lines 553-554):

      pfn_t cframes = SIZE2FRAMES(zone_conf_size(
          zones.info[z2].base - zones.info[z1].base +
          zones.info[z2].count));

- the cast in an assert is rejoined onto one line (line 706):

      assert(confframe != ADDR2PFN((uintptr_t) NULL));

- and the for loop with an empty condition gets canonical spacing (line 1208):

      for (size_t i = 0; ; i++) {
kernel/generic/src/printf/vsnprintf.c
r76d0981d → r3bacee1: whitespace-only; the function-pointer casts in the printf_spec_t initializer (lines 172-174) are reindented:

    printf_spec_t ps = {
        (int (*) (const char *, size_t, void *)) vsnprintf_str_write,
        (int (*) (const wchar_t *, size_t, void *)) vsnprintf_wstr_write,
        &data
    };
kernel/generic/src/proc/task.c
r76d0981d → r3bacee1: a space is inserted between the PRIu64 macro and the adjacent string literal (line 114):

    printf("[%" PRIu64 "] ", task->taskid);
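The fix works because of adjacent string-literal concatenation: PRIu64 expands to a string literal (such as "llu"), the compiler fuses the neighbouring literals into one format string, and the added space only improves readability. A self-contained illustration with a hypothetical value:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t taskid = 42;   /* hypothetical value */

        /* The compiler fuses "[%", the expansion of PRIu64 and "] \n"
         * into a single format string at compile time. */
        printf("[%" PRIu64 "] \n", taskid);
        return 0;
    }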
kernel/generic/src/proc/thread.c
r76d0981d → r3bacee1:

- the function-pointer parameter of thread_create() loses a stray space after the * (line 340):

      thread_t *thread_create(void (*func)(void *), void *arg, task_t *task,
          thread_flags_t flags, const char *name)

- a LOG() call gets the same PRIu64 spacing fix as task.c (line 526):

      LOG("Cleanup of task %" PRIu64 " completed.", TASK->taskid);

- the deliberately empty infinite loop after the "Not reached" comment puts its null statement on its own line (lines 544-545):

      /* Not reached */
      while (true)
          ;

- plus a reindented return around line 980.
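The while (true) rewrite follows a common C convention: a null loop body gets a lone semicolon on its own line so the emptiness is visibly intentional rather than a typo. A minimal hypothetical sketch of the same shape:

    #include <stdbool.h>

    static volatile bool ready;   /* hypothetical flag, set elsewhere (e.g. by an IRQ) */

    static void spin_until_ready(void)
    {
        while (!ready)
            ;   /* deliberately empty: the loop only polls the flag */
    }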
kernel/generic/src/synch/futex.c
r76d0981d → r3bacee1:

- continuation arguments in the cache-destruction path are reindented (lines 172-174 and 183-185);
- a broken assignment keeps = on the first line (lines 321-322):

      futex_ptr_t *futex_ptr =
          member_to_inst(futex_ptr_link, futex_ptr_t, cht_link);

- and the key casts in the hash-table operations gain a space after the cast (lines 487 and 501):

      return *(uintptr_t *) key;
      uintptr_t uaddr = *(uintptr_t *) key;
kernel/generic/src/synch/workqueue.c
r76d0981d → r3bacee1:

- the split declaration of workq_create() is joined onto one line (line 187):

      struct work_queue *workq_create(const char *name)

- && ends the broken line in the worker-spawn decision (lines 630-631):

      bool need_worker = (active < max_concurrent_workers) &&
          (workq->cur_worker_cnt < max_worker_cnt);

- the multi-line status printf() (lines 862-880) is reindented and its closing parenthesis joined onto the last argument, load_str);
- the remaining hunks (around lines 135, 271, 430, 475, 616, 782, 857, 897, 904, 934) only reindent continuation lines of calls and declarations.
kernel/generic/src/sysinfo/stats.c
r76d0981d → r3bacee1: closing-brace spacing in two initializers (lines 76 and 79):

    static load_t load_exp[LOAD_STEPS] = { 1884, 2014, 2037 };
    static load_t avenrdy[LOAD_STEPS] = { 0, 0, 0 };
kernel/generic/src/udebug/udebug.c
r76d0981d → r3bacee1: whitespace-only hunk (lines 170-174); the two mutex_unlock() calls read the same after the change.