Changeset a35b458 in mainline for kernel/generic/src/adt/cht.c
- Timestamp: 2018-03-02T20:10:49Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f1380b7
- Parents: 3061bc1
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:38:31)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:10:49)
- File: 1 edited
kernel/generic/src/adt/cht.c
diff r3061bc1 → ra35b458

    if (!op || !op->hash || !op->key_hash || !op->equal || !op->key_equal)
        return false;

    size_t min_order = size_to_order(min_size, CHT_MIN_ORDER);
    size_t order = size_to_order(init_size, min_order);

    h->b = alloc_buckets(order, false, can_block);

    if (!h->b)
        return false;

    h->max_load = (max_load == 0) ? CHT_MAX_LOAD : max_load;
    h->min_order = min_order;
…
    atomic_set(&h->item_cnt, 0);
    atomic_set(&h->resize_reqs, 0);

    if (NULL == op->remove_callback) {
        h->op->remove_callback = dummy_remove_callback;
    }

    /*
     * Cached item hashes are stored in item->rcu_link.func. Once the item
     …
     */
    h->invalid_hash = (uintptr_t)h->op->remove_callback;

    /* Ensure the initialization takes place before we start using the table. */
    write_barrier();

    return true;
}
…
        sizeof(cht_buckets_t) + (bucket_cnt - 1) * sizeof(marked_ptr_t);
    cht_buckets_t *b = malloc(bytes, can_block ? 0 : FRAME_ATOMIC);

    if (!b)
        return NULL;

    b->order = order;

    marked_ptr_t head_link = set_invalid
        ? make_link(&sentinel, N_INVALID)
        : make_link(&sentinel, N_NORMAL);

    for (size_t i = 0; i < bucket_cnt; ++i) {
        b->head[i] = head_link;
    }

    return b;
}
…
        if (bucket_cnt <= ((size_t)1 << order))
            return order;

        ++order;
    } while (order < CHT_MAX_ORDER);

    return order;
}
…
{
    cht_destroy_unsafe(h);

    /* You must clear the table of items. Otherwise cht_destroy will leak. */
    assert(atomic_get(&h->item_cnt) == 0);
…
        rcu_barrier();
    }

    /* Wait for all remove_callback()s to complete. */
    rcu_barrier();

    free(h->b);
    h->b = NULL;
…
    /* See docs to cht_find() and cht_find_lazy(). */
    assert(rcu_read_locked());

    size_t hash = calc_key_hash(h, key);

    cht_buckets_t *b = rcu_access(h->b);
    size_t idx = calc_bucket_idx(hash, b->order);
…
     */
    marked_ptr_t head = b->head[idx];

    /* Undergoing a resize - take the slow path. */
    if (N_INVALID == get_mark(head))
        return find_resizing(h, key, hash, head, idx);

    return search_bucket(h, head, key, hash);
}
…
    assert(rcu_read_locked());
    assert(item);

    return find_duplicate(h, item, calc_node_hash(h, item), get_next(item->link));
}
…
        prev = cur->link;
    } while (node_hash(h, cur) < search_hash);

    /*
     * Only search for an item with an equal key if cur is not the sentinel
     …
                return cur;
        }

        cur = get_next(cur->link);
        assert(cur);
    }

    /*
     * In the unlikely case that we have encountered a node whose cached
     …
        goto try_again;
    }

    return NULL;
}
…
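The size_to_order() tail visible above rounds a requested bucket count up to the smallest power-of-two order that can hold it. A standalone sketch of that computation, with the function head completed from its visible call sites (the CHT_MAX_ORDER value here is an assumption; the real constant lives in the header):

#include <stddef.h>
#include <stdio.h>

/* Assumed bound for this sketch; the real constant is defined elsewhere. */
#define CHT_MAX_ORDER (sizeof(size_t) * 8)

/* Smallest order such that bucket_cnt <= 2^order, clamped to the limits. */
static size_t size_to_order(size_t bucket_cnt, size_t min_order)
{
    size_t order = min_order;

    do {
        if (bucket_cnt <= ((size_t)1 << order))
            return order;

        ++order;
    } while (order < CHT_MAX_ORDER);

    return order;
}

int main(void)
{
    /* 1000 requested buckets round up to order 10, ie 1024 buckets. */
    printf("%zu\n", size_to_order(1000, 6));
    return 0;
}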
    assert(N_INVALID == get_mark(old_head));
    assert(h->new_b);

    size_t new_idx = calc_bucket_idx(hash, h->new_b->order);
    marked_ptr_t new_head = h->new_b->head[new_idx];
    marked_ptr_t search_head = new_head;

    /* Growing. */
    if (h->b->order < h->new_b->order) {
…
            new_head = h->new_b->head[grow_idx(old_idx)];
        }

        /* new_head is now the moved bucket, either valid or invalid. */

        /*
         * The old bucket was definitely moved to new_head but the
         …
            }
        }

        return search_bucket(h, search_head, key, hash);
    } else if (h->b->order > h->new_b->order) {
        /* Shrinking. */

        /* Index of the bucket in the old table that was moved. */
        size_t move_src_idx = grow_idx(new_idx);
        marked_ptr_t moved_old_head = h->b->head[move_src_idx];

        /*
         * h->b->head[move_src_idx] had already been moved to new_head
         …
            search_head = moved_old_head;
        }

        cht_link_t *ret = search_bucket(h, search_head, key, hash);

        if (ret)
            return ret;
…
            return search_bucket(h, old_head, key, hash);
        }

        return NULL;
    } else {
…
    bool resizing = false;
    bool inserted = false;

    do {
        walk_mode_t walk_mode = WM_NORMAL;
        bool join_finishing;

        resizing = resizing || (N_NORMAL != get_mark(*phead));

        /* The table is resizing. Get the correct bucket head. */
        if (resizing) {
            upd_resizing_head(h, hash, &phead, &join_finishing, &walk_mode);
        }

        wnd_t wnd = {
            .ppred = phead,
            …
            .last = NULL
        };

        if (!find_wnd_and_gc(h, hash, walk_mode, &wnd, &resizing)) {
            /* Could not GC a node; or detected an unexpected resize. */
            continue;
        }

        if (dup_item && has_duplicate(h, item, hash, wnd.cur, dup_item)) {
            rcu_read_unlock();
            return false;
        }

        inserted = insert_at(item, &wnd, walk_mode, &resizing);
    } while (!inserted);

    rcu_read_unlock();
…
{
    marked_ptr_t ret;

    if (walk_mode == WM_NORMAL) {
        item->link = make_link(wnd->cur, N_NORMAL);
        /* Initialize the item before adding it to a bucket. */
        memory_barrier();

        /* Link a clean/normal predecessor to the item. */
        ret = cas_link(wnd->ppred, wnd->cur, N_NORMAL, item, N_NORMAL);

        if (ret == make_link(wnd->cur, N_NORMAL)) {
            return true;
…
        /* Initialize the item before adding it to a bucket. */
        memory_barrier();

        /* Link the not-deleted predecessor to the item. Move its JF mark. */
        ret = cas_link(wnd->ppred, wnd->cur, jf_mark, item, N_NORMAL);

        return ret == make_link(wnd->cur, jf_mark);
    } else {
…
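insert_at() above publishes a node with a single CAS on the predecessor's link: the item is fully initialized first, a barrier orders those writes, and only then does the CAS make it reachable. A minimal self-contained sketch of that publish-by-CAS step in C11 atomics (node_t and the helper are illustrative, not the kernel's types):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct node {
    int key;
    _Atomic(struct node *) next;
} node_t;

/* Insert item between *ppred and cur; fails if *ppred changed meanwhile. */
static bool insert_between(_Atomic(node_t *) *ppred, node_t *cur, node_t *item)
{
    /* Initialize the item before making it reachable. */
    atomic_store_explicit(&item->next, cur, memory_order_relaxed);

    /* Publish with release semantics; the caller retries from the bucket
     * head on failure, exactly like the loop above. */
    node_t *expected = cur;
    return atomic_compare_exchange_strong_explicit(ppred, &expected, item,
        memory_order_release, memory_order_relaxed);
}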
        /* Initialize the item before adding it to a bucket. */
        memory_barrier();

        mark_t pred_mark = get_mark(*wnd->ppred);
        /* If the predecessor is a join node it may be marked deleted.*/
…
    assert(cur == &sentinel || hash <= node_hash(h, cur)
        || node_hash(h, cur) == h->invalid_hash);

    /* hash < node_hash(h, cur) */
    if (hash != node_hash(h, cur) && h->invalid_hash != node_hash(h, cur))
…
     */
    read_barrier();

    *dup_item = find_duplicate(h, item, hash, cur);
    return NULL != *dup_item;
…
    cht_link_t *cur = start;

try_again:
    assert(cur);
…
    while (node_hash(h, cur) == hash) {
        assert(cur != &sentinel);

        bool deleted = (N_DELETED & get_mark(cur->link));

        /* Skip logically deleted nodes. */
        if (!deleted && h->op->equal(item, cur))
            return cur;

        cur = get_next(cur->link);
        assert(cur);
…
        goto try_again;
    }

    return NULL;
}
…
{
    assert(h);

    size_t hash = calc_key_hash(h, key);
    size_t removed = 0;

    while (remove_pred(h, hash, h->op->key_equal, key))
        ++removed;

    return removed;
}
…
{
    rcu_read_lock();

    bool resizing = false;
    bool deleted = false;
    bool deleted_but_gc = false;

    cht_buckets_t *b = rcu_access(h->b);
    size_t idx = calc_bucket_idx(hash, b->order);
    marked_ptr_t *phead = &b->head[idx];

    do {
        walk_mode_t walk_mode = WM_NORMAL;
        bool join_finishing = false;

        resizing = resizing || (N_NORMAL != get_mark(*phead));

        /* The table is resizing. Get the correct bucket head. */
        if (resizing) {
            upd_resizing_head(h, hash, &phead, &join_finishing, &walk_mode);
        }

        wnd_t wnd = {
            .ppred = phead,
            …
            .last = NULL
        };

        if (!find_wnd_and_gc_pred(
            h, hash, walk_mode, pred, pred_arg, &wnd, &resizing)) {
            …
            continue;
        }

        /*
         * The item lookup is affected by a bucket join but effects of
         …
            continue;
        }

        /* Already deleted, but delete_at() requested one GC pass. */
        if (deleted_but_gc)
            break;

        bool found = (wnd.cur != &sentinel && pred(pred_arg, wnd.cur));

        if (!found) {
            rcu_read_unlock();
            return false;
        }

        deleted = delete_at(h, &wnd, walk_mode, &deleted_but_gc, &resizing);
    } while (!deleted || deleted_but_gc);

    rcu_read_unlock();
    return true;
…
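cht_remove_key() above just reapplies the predicate-based removal until nothing matches, returning how many items it logically deleted. A hypothetical caller, assuming a user structure that embeds cht_link_t the way HelenOS intrusive tables expect:

#include <adt/cht.h>  /* kernel header providing cht_t / cht_link_t */

/* Hypothetical user record embedding the intrusive table link. */
typedef struct {
    cht_link_t link;  /* reclaimed via op->remove_callback, not here */
    int id;
} session_t;

static void forget_sessions(cht_t *table, int id)
{
    /* Removes every item for which op->key_equal(&id, item) holds. */
    size_t removed = cht_remove_key(table, &id);

    (void) removed;
    /* The session_t instances are not freed here: each is handed to
     * op->remove_callback only after an RCU grace period, once no
     * reader can still be traversing it. */
}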
{
    assert(wnd->cur && wnd->cur != &sentinel);

    *deleted_but_gc = false;

    if (!mark_deleted(wnd->cur, walk_mode, resizing)) {
        /* Already deleted, or unexpectedly marked as JOIN/JOIN_FOLLOWS. */
        return false;
    }

    /* Marked deleted. Unlink from the bucket. */

    /* Never unlink join nodes. */
    if (walk_mode == WM_LEAVE_JOIN && (N_JOIN & get_mark(wnd->cur->link)))
        return true;

    cas_order_barrier();

    if (unlink_from_pred(wnd, walk_mode, resizing)) {
        free_later(h, wnd->cur);
…
        *deleted_but_gc = true;
    }

    return true;
}
…
{
    assert(cur && cur != &sentinel);

    /*
     * Btw, we could loop here if the cas fails but let's not complicate
     * things and let's retry from the head of the bucket.
     */

    cht_link_t *next = get_next(cur->link);

    if (walk_mode == WM_NORMAL) {
        /* Only mark clean/normal nodes - JF/JN is used only during resize. */
        marked_ptr_t ret = cas_link(&cur->link, next, N_NORMAL, next, N_DELETED);

        if (ret != make_link(next, N_NORMAL)) {
            *resizing = (N_JOIN | N_JOIN_FOLLOWS) & get_mark(ret);
…
    } else {
        static_assert(N_JOIN == N_JOIN_FOLLOWS, "");

        /* Keep the N_JOIN/N_JOIN_FOLLOWS mark but strip N_DELETED. */
        mark_t cur_mark = get_mark(cur->link) & N_JOIN_FOLLOWS;

        marked_ptr_t ret =
            cas_link(&cur->link, next, cur_mark, next, cur_mark | N_DELETED);

        if (ret != make_link(next, cur_mark))
            return false;
    }

    return true;
}
…
    assert(wnd->cur != &sentinel);
    assert(wnd->cur && (N_DELETED & get_mark(wnd->cur->link)));

    cht_link_t *next = get_next(wnd->cur->link);

    if (walk_mode == WM_LEAVE_JOIN) {
        /* Never try to unlink join nodes. */
…
        /* Succeed only if the predecessor is clean/normal or a join node. */
        mark_t exp_pred_mark = (N_JOIN & pred_mark) ? pred_mark : N_NORMAL;

        marked_ptr_t pred_link = make_link(wnd->cur, exp_pred_mark);
        marked_ptr_t next_link = make_link(next, exp_pred_mark);

        if (pred_link != _cas_link(wnd->ppred, pred_link, next_link))
            return false;
…
        /* Move the JF mark if set. Clear DEL mark. */
        mark_t cur_mark = N_JOIN_FOLLOWS & get_mark(wnd->cur->link);

        /* The predecessor must be clean/normal. */
        marked_ptr_t pred_link = make_link(wnd->cur, N_NORMAL);
        /* Link to cur's successor keeping/copying cur's JF mark. */
        marked_ptr_t next_link = make_link(next, cur_mark);

        marked_ptr_t ret = _cas_link(wnd->ppred, pred_link, next_link);

        if (pred_link != ret) {
            /* If we're not resizing the table there are no JF/JN nodes. */
…
        }
    }

    return true;
}
…
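mark_deleted() and unlink_from_pred() together form the classic two-phase lock-free delete: first CAS a DELETED mark into the victim's own link (logical removal), then CAS the predecessor past it (physical removal); a thread that loses either CAS retries from the bucket head. A minimal sketch of the protocol with one low pointer bit as the mark (types and helpers illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define N_DELETED ((uintptr_t)1)

typedef struct node {
    _Atomic uintptr_t link;   /* successor pointer | mark bit */
    int key;
} node_t;

static inline node_t *get_next(uintptr_t link)
{
    return (node_t *)(link & ~N_DELETED);
}

/* Phase 1: logically delete cur by setting the mark in its own link. */
static bool mark_deleted(node_t *cur)
{
    uintptr_t old = atomic_load(&cur->link) & ~N_DELETED;  /* expect clean */
    return atomic_compare_exchange_strong(&cur->link, &old, old | N_DELETED);
}

/* Phase 2: physically unlink cur by swinging the predecessor's link. */
static bool unlink_from_pred(_Atomic uintptr_t *ppred, node_t *cur)
{
    uintptr_t expected = (uintptr_t)cur;  /* clean link to cur */
    uintptr_t next = (uintptr_t)get_next(atomic_load(&cur->link));
    return atomic_compare_exchange_strong(ppred, &expected, next);
}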
{
    assert(wnd->cur);

    if (wnd->cur == &sentinel)
        return true;

    /*
     * A read barrier is not needed here to bring up the most recent
     …
     * an already deleted node; fail in delete_at(); and retry.
     */

    size_t cur_hash;

try_again:
    cur_hash = node_hash(h, wnd->cur);

    while (cur_hash <= hash) {
        assert(wnd->cur && wnd->cur != &sentinel);

        /* GC any deleted nodes on the way. */
        if (N_DELETED & get_mark(wnd->cur->link)) {
…
            if (cur_hash == hash && pred(pred_arg, wnd->cur))
                return true;

            next_wnd(wnd);
        }

        cur_hash = node_hash(h, wnd->cur);
    }

    if (cur_hash == h->invalid_hash) {
        next_wnd(wnd);
…
        goto try_again;
    }

    /* The searched for node is not in the current bucket. */
    return true;
…
            next_wnd(wnd);
        }

        assert(wnd->cur);
    }

    if (node_hash(h, wnd->cur) == h->invalid_hash) {
        next_wnd(wnd);
…
        wnd->cur = get_next(wnd->cur->link);
    }

    return true;
}
…
     * is visible and if not, make it visible to this cpu.
     */

    /*
     * Resizer ensures h->b->order stays the same for the duration of this
     …
    assert(h->b->order > h->new_b->order);
    assert(wnd->cur);

    /* Either we did not need the joining link or we have already followed it.*/
    if (wnd->cur != &sentinel)
        return true;

    /* We have reached the end of a bucket. */

    if (wnd->last != &sentinel) {
        size_t last_seen_hash = node_hash(h, wnd->last);

        if (last_seen_hash == h->invalid_hash) {
            last_seen_hash = calc_node_hash(h, wnd->last);
        }

        size_t last_old_idx = calc_bucket_idx(last_seen_hash, h->b->order);
        size_t move_src_idx = grow_idx(shrink_idx(last_old_idx));

        /*
         * Last node seen was in the joining bucket - if the searched
         …
            return true;
    }

    /*
     * Reached the end of the bucket but no nodes from the joining bucket
     …
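The code above keeps translating indices between the old and new tables with grow_idx(), grow_to_split_idx() and shrink_idx(). Their definitions are not part of these hunks; the sketch below shows the mapping the call sites imply, where doubling the table appends one low index bit and halving drops it (treat the exact definitions as assumptions):

#include <stdio.h>
#include <stddef.h>

/* Assumed bucket index mapping between a table of order k and order k+1,
 * consistent with the usage grow_idx(shrink_idx(i)) above. */
static size_t grow_idx(size_t idx)          { return idx << 1; }       /* first half of the split */
static size_t grow_to_split_idx(size_t idx) { return (idx << 1) | 1; } /* second half */
static size_t shrink_idx(size_t idx)        { return idx >> 1; }

int main(void)
{
    /* Growing from 4 to 8 buckets: old bucket 3 splits into new 6 and 7. */
    printf("%zu %zu\n", grow_idx(3), grow_to_split_idx(3));  /* 6 7 */

    /* Shrinking merges them back: both map to bucket 3 again. */
    printf("%zu %zu\n", shrink_idx(6), shrink_idx(7));       /* 3 3 */
    return 0;
}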
    size_t old_idx = calc_bucket_idx(hash, b->order);
    size_t new_idx = calc_bucket_idx(hash, h->new_b->order);

    marked_ptr_t *pold_head = &b->head[old_idx];
    marked_ptr_t *pnew_head = &h->new_b->head[new_idx];

    /* In any case, use the bucket in the new table. */
    *phead = pnew_head;
…
        size_t move_dest_idx = grow_idx(old_idx);
        marked_ptr_t *pmoved_head = &h->new_b->head[move_dest_idx];

        /* Complete moving the bucket from the old to the new table. */
        help_head_move(pold_head, pmoved_head);

        /* The hash belongs to the moved bucket. */
        if (move_dest_idx == new_idx) {
…
             * half of the split/old/moved bucket.
             */

            /* The moved bucket has not yet been split. */
            if (N_NORMAL != get_mark(*pnew_head)) {
…
                assert(N_NORMAL == get_mark(*pnew_head));
            }

            *walk_mode = WM_LEAVE_JOIN;
        }
    } else if (h->new_b->order < b->order ) {
        /* Shrinking the table. */

        size_t move_src_idx = grow_idx(new_idx);

        /*
         * Complete moving the bucket from the old to the new table.
         …
         */
        help_head_move(&b->head[move_src_idx], pnew_head);

        /* Hash belongs to the bucket to be joined with the moved bucket. */
        if (move_src_idx != old_idx) {
…
            join_buckets(h, pold_head, pnew_head, split_hash);
        }

        /*
         * The resizer sets pold_head to &sentinel when all cpus are
         …
        *join_finishing = (&sentinel != get_next(*pold_head));
    }

    /* move_head() or join_buckets() makes it so or makes the mark visible.*/
    assert(N_INVALID == get_mark(*pold_head));
…
    /* Head move has to in progress already when calling this func. */
    assert(N_CONST & get_mark(*psrc_head));

    /* Head already moved. */
    if (N_INVALID == get_mark(*psrc_head)) {
…
        complete_head_move(psrc_head, pdest_head);
    }

    assert(!(N_CONST & get_mark(*pdest_head)));
}
…
{
    marked_ptr_t ret, src_link;

    /* Mark src head immutable. */
    do {
        cht_link_t *next = get_next(*psrc_head);
        src_link = make_link(next, N_NORMAL);

        /* Mark the normal/clean src link immutable/const. */
        ret = cas_link(psrc_head, next, N_NORMAL, next, N_CONST);
…
    assert(N_JOIN_FOLLOWS != get_mark(*psrc_head));
    assert(N_CONST & get_mark(*psrc_head));

    cht_link_t *next = get_next(*psrc_head);

…
    assert(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret)));
    cas_order_barrier();

    DBG(ret = )
    cas_link(psrc_head, next, N_CONST, next, N_INVALID);
…
    if (N_NORMAL == get_mark(*pdest_head))
        return;

    /*
     * L == Last node of the first part of the split bucket. That part
     …
     */
    wnd_t wnd;

    rcu_read_lock();

    /* Mark the last node of the first part of the split bucket as JF. */
    mark_join_follows(h, psrc_head, split_hash, &wnd);
    cas_order_barrier();

    /* There are nodes in the dest bucket, ie the second part of the split. */
    if (wnd.cur != &sentinel) {
…
         */
    }

    /* Link the dest head to the second part of the split. */
    DBG(marked_ptr_t ret = )
…
    assert(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret)));
    cas_order_barrier();

    rcu_read_unlock();
}
…
{
    /* See comment in split_bucket(). */

    bool done = false;
…
        wnd->ppred = psrc_head;
        wnd->cur = get_next(*psrc_head);

        /*
         * Find the split window, ie the last node of the first part of
         …
        if (!find_wnd_and_gc(h, split_hash, WM_MOVE_JOIN_FOLLOWS, wnd, &resizing))
            continue;

        /* Must not report that the table is resizing if WM_MOVE_JOIN_FOLLOWS.*/
        assert(!resizing);
…
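start_head_move(), help_head_move() and complete_head_move() above walk a bucket head through NORMAL → CONST → INVALID so that whichever cpu gets there first, resizer or updater, can finish the move idempotently. A compressed sketch of that hand-off on a tagged head word; the mark values and the empty-head convention are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdint.h>

enum { N_NORMAL = 0, N_CONST = 1, N_INVALID = 2, MARK_MASK = 3 };

typedef _Atomic uintptr_t head_t;   /* next pointer | 2-bit mark */

/* Freeze the source head so its bucket list can no longer change. */
static void start_head_move(head_t *src)
{
    uintptr_t old, frozen;
    do {
        old = atomic_load(src) & ~(uintptr_t)MARK_MASK;   /* expect NORMAL */
        frozen = old | N_CONST;
    } while (!atomic_compare_exchange_weak(src, &old, frozen));
}

/* Anyone may finish the move; both CAS steps are no-ops if already done. */
static void help_head_move(head_t *src, head_t *dest)
{
    uintptr_t next = atomic_load(src) & ~(uintptr_t)MARK_MASK;

    /* Publish the list in the destination head. */
    uintptr_t exp_dest = N_INVALID;            /* empty, pre-invalidated head */
    atomic_compare_exchange_strong(dest, &exp_dest, next | N_NORMAL);

    /* Retire the source head. */
    uintptr_t exp_src = next | N_CONST;
    atomic_compare_exchange_strong(src, &exp_src, next | N_INVALID);
}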
{
    /* See comment in split_bucket(). */

    bool done;
    do {
        cht_link_t *next = get_next(join_node->link);
        mark_t mark = get_mark(join_node->link);

        /*
         * May already be marked as deleted, but it won't be unlinked
         …
        marked_ptr_t ret
            = cas_link(&join_node->link, next, mark, next, mark | N_JOIN);

        /* Successfully marked or already marked as a join node. */
        done = (ret == make_link(next, mark))
…
     * [src_head | Inv]-----------> [JN] -> ..
     */

    rcu_read_lock();

    /* Mark src_head immutable - signals updaters that bucket join started. */
    mark_const(psrc_head);
    cas_order_barrier();

    cht_link_t *join_node = get_next(*psrc_head);
…
        mark_join_node(join_node);
        cas_order_barrier();

        link_to_join_node(h, pdest_head, join_node, split_hash);
        cas_order_barrier();
    }

    DBG(marked_ptr_t ret = )
    cas_link(psrc_head, join_node, N_CONST, join_node, N_INVALID);
    assert(ret == make_link(join_node, N_CONST) || (N_INVALID == get_mark(ret)));
    cas_order_barrier();

    rcu_read_unlock();
}
…
            .cur = get_next(*pdest_head)
        };

        bool resizing = false;

        if (!find_wnd_and_gc(h, split_hash, WM_LEAVE_JOIN, &wnd, &resizing))
            continue;

        assert(!resizing);

        if (wnd.cur != &sentinel) {
            /* Must be from the new appended bucket. */
…
            return;
        }

        /* Reached the tail of pdest_head - link it to the join node. */
        marked_ptr_t ret =
            cas_link(wnd.ppred, &sentinel, N_NORMAL, join_node, N_NORMAL);

        done = (ret == make_link(&sentinel, N_NORMAL));
    } while (!done);
…
{
    assert(item != &sentinel);

    /*
     * remove_callback only works as rcu_func_t because rcu_link is the first
     …
     */
    rcu_call(&item->rcu_link, (rcu_func_t)h->op->remove_callback);

    item_removed(h);
}
…
    size_t items = (size_t) atomic_predec(&h->item_cnt);
    size_t bucket_cnt = (1 << h->b->order);

    bool need_shrink = (items == h->max_load * bucket_cnt / 4);
    bool missed_shrink = (items == h->max_load * bucket_cnt / 8);

    if ((need_shrink || missed_shrink) && h->b->order > h->min_order) {
        atomic_count_t resize_reqs = atomic_preinc(&h->resize_reqs);
…
    size_t items = (size_t) atomic_preinc(&h->item_cnt);
    size_t bucket_cnt = (1 << h->b->order);

    bool need_grow = (items == h->max_load * bucket_cnt);
    bool missed_grow = (items == 2 * h->max_load * bucket_cnt);

    if ((need_grow || missed_grow) && h->b->order < CHT_MAX_ORDER) {
        atomic_count_t resize_reqs = atomic_preinc(&h->resize_reqs);
…
{
    cht_t *h = member_to_inst(arg, cht_t, resize_work);

#ifdef CONFIG_DEBUG
    assert(h->b);
…
    if (h->b->order >= CHT_MAX_ORDER)
        return;

    h->new_b = alloc_buckets(h->b->order + 1, true, false);
…
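item_inserted() and item_removed() above trigger a resize with equality tests, so only the thread whose increment or decrement lands exactly on a threshold posts the request, and the missed_* variants act as a safety net when that first trigger raced with a resize already in flight. A tiny numeric sketch of the thresholds (the real CHT_MAX_LOAD value is not visible in these hunks; 2 is an assumption):

#include <stdio.h>
#include <stddef.h>

#define CHT_MAX_LOAD 2  /* assumed; the hunks only show it used as a default */

int main(void)
{
    size_t order = 10;                       /* 1024 buckets */
    size_t bucket_cnt = (size_t)1 << order;
    size_t max_load = CHT_MAX_LOAD;

    printf("grow at         %zu items\n", max_load * bucket_cnt);      /* 2048 */
    printf("missed grow at  %zu items\n", 2 * max_load * bucket_cnt);  /* 4096 */
    printf("shrink at       %zu items\n", max_load * bucket_cnt / 4);  /* 512 */
    printf("missed shrink   %zu items\n", max_load * bucket_cnt / 8);  /* 256 */
    return 0;
}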
    rcu_synchronize();
    size_t old_bucket_cnt = (1 << h->b->order);

    /*
     * Give updaters a chance to help out with the resize. Do the minimum
     …
        start_head_move(&h->b->head[idx]);
    }

    /* Order start_head_move() wrt complete_head_move(). */
    cas_order_barrier();

    /* Complete moving heads and split any buckets not yet split by updaters. */
    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
…
        split_bucket(h, move_dest_head, split_dest_head, split_hash);
    }

    /*
     * Wait for all updaters to notice the new heads. Once everyone sees
     …
     */
    rcu_synchronize();

    /* Clear the JOIN_FOLLOWS mark and remove the link between the split buckets.*/
    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
        size_t new_idx = grow_idx(old_idx);

        cleanup_join_follows(h, &h->new_b->head[new_idx]);
    }
…
     */
    rcu_synchronize();

    /* Clear the JOIN mark and GC any deleted join nodes. */
    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
        size_t new_idx = grow_to_split_idx(old_idx);

        cleanup_join_node(h, &h->new_b->head[new_idx]);
    }
…
    /* Wait for everyone to see that the table is clear of any resize marks. */
    rcu_synchronize();

    cht_buckets_t *old_b = h->b;
    rcu_assign(h->b, h->new_b);
…
    /* Wait for everyone to start using the new table. */
    rcu_synchronize();

    free(old_b);

    /* Not needed; just for increased readability. */
    h->new_b = NULL;
…
    if (h->b->order <= h->min_order)
        return;

    h->new_b = alloc_buckets(h->b->order - 1, true, false);
…
    /* Wait for all readers and updaters to see the initialized new table. */
    rcu_synchronize();

    size_t old_bucket_cnt = (1 << h->b->order);

    /*
     * Give updaters a chance to help out with the resize. Do the minimum
     …
    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
        size_t new_idx = shrink_idx(old_idx);

        /* This bucket should be moved. */
        if (grow_idx(new_idx) == old_idx) {
…
        }
    }

    /* Order start_head_move() wrt to complete_head_move(). */
    cas_order_barrier();

    /* Complete moving heads and join buckets with the moved buckets. */
    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
        size_t new_idx = shrink_idx(old_idx);
        size_t move_src_idx = grow_idx(new_idx);

        /* This bucket should be moved. */
        if (move_src_idx == old_idx) {
…
        }
    }

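grow_table() ends with the pattern visible above: rcu_assign() publishes the new bucket array, one more grace period guarantees no reader still walks the old one, and only then is it freed. Condensed into a sketch using just the calls shown in the hunks (the surrounding cht types are assumed):

static void publish_new_buckets(cht_t *h)
{
    cht_buckets_t *old_b = h->b;

    /* Publish: rcu_assign() issues the write barrier, so readers that
     * see the new pointer also see fully initialized buckets. */
    rcu_assign(h->b, h->new_b);

    /* Wait for every pre-existing RCU reader to finish; after this no
     * cpu can still be traversing old_b. */
    rcu_synchronize();

    free(old_b);
    h->new_b = NULL;  /* not needed; just for increased readability */
}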
    /*
     * Wait for all updaters to notice the new heads. Once everyone sees
     …
     */
    rcu_synchronize();

    /* Let everyone know joins are complete and fully visible. */
    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
        size_t move_src_idx = grow_idx(shrink_idx(old_idx));

        /* Set the invalid joinee head to NULL. */
        if (old_idx != move_src_idx) {
            assert(N_INVALID == get_mark(h->b->head[old_idx]));

            if (&sentinel != get_next(h->b->head[old_idx]))
                h->b->head[old_idx] = make_link(&sentinel, N_INVALID);
        }
    }

    /* todo comment join node vs reset joinee head*/
    rcu_synchronize();

    size_t new_bucket_cnt = (1 << h->new_b->order);

    /* Clear the JOIN mark and GC any deleted join nodes. */
    for (size_t new_idx = 0; new_idx < new_bucket_cnt; ++new_idx) {
…
    /* Wait for everyone to see that the table is clear of any resize marks. */
    rcu_synchronize();

    cht_buckets_t *old_b = h->b;
    rcu_assign(h->b, h->new_b);

    /* Wait for everyone to start using the new table. */
    rcu_synchronize();

    free(old_b);

    /* Not needed; just for increased readability. */
    h->new_b = NULL;
…
    cht_link_t *cur = get_next(*new_head);

    while (cur != &sentinel) {
        /* Clear the join node's JN mark - even if it is marked as deleted. */
…
            break;
        }

        cur = get_next(cur->link);
    }

    rcu_read_unlock();
}
…
    assert(join_node != &sentinel);
    assert(join_node && (N_JOIN & get_mark(join_node->link)));

    bool done;

    /* Clear the JN mark. */
    do {
…
        assert(ret == jn_link || (get_mark(ret) & N_JOIN));
    } while (!done);

    if (!(N_DELETED & get_mark(join_node->link)))
        return;
…
    /* Clear the JOIN mark before trying to unlink the deleted join node.*/
    cas_order_barrier();

    size_t jn_hash = node_hash(h, join_node);
    do {
        bool resizing = false;

        wnd_t wnd = {
            .ppred = new_head,
            .cur = get_next(*new_head)
        };

        done = find_wnd_and_gc_pred(h, jn_hash, WM_NORMAL, same_node_pred,
            join_node, &wnd, &resizing);

        assert(!resizing);
    } while (!done);
…
{
    assert(new_head);

    rcu_read_lock();
…
    };
    marked_ptr_t *cur_link = new_head;

    /*
     * Find the non-deleted node with a JF mark and clear the JF mark.
     …
    while (true) {
        bool is_jf_node = N_JOIN_FOLLOWS & get_mark(*cur_link);

        /* GC any deleted nodes on the way - even deleted JOIN_FOLLOWS. */
        if (N_DELETED & get_mark(*cur_link)) {
…
            marked_ptr_t ret =
                cas_link(cur_link, next, N_JOIN_FOLLOWS, &sentinel, N_NORMAL);

            assert(next == &sentinel
                || ((N_JOIN | N_JOIN_FOLLOWS) & get_mark(ret)));
…
        cur_link = &wnd.cur->link;
    }

    rcu_read_unlock();
}
…
        || item->hash == sentinel.hash
        || item->hash == calc_node_hash(h, item));

    return item->hash;
}
…
{
    marked_ptr_t ptr = (marked_ptr_t) next;

    assert(!(ptr & N_MARK_MASK));
    assert(!((unsigned)mark & ~N_MARK_MASK));

    return ptr | mark;
}
…
     */
    void *expected = (void*)cur;

    /*
     * Use the acquire-release model, although we could probably
     …
     */
    __atomic_compare_exchange_n((void**)link, &expected, (void *)new, false,
        __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);

    return (marked_ptr_t) expected;
}
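make_link() above packs the mark into the low bits of the successor pointer, and _cas_link() swaps the whole tagged word with one __atomic_compare_exchange_n(), so a node's next pointer and its state always change atomically together. A self-contained sketch of that encoding; the two-bit mask and the mark values shown here are illustrative:

#include <assert.h>
#include <stdint.h>

typedef uintptr_t marked_ptr_t;
typedef enum { N_NORMAL = 0, N_DELETED = 1 } mark_t;
#define N_MARK_MASK ((uintptr_t)3)  /* nodes are at least 4-byte aligned */

typedef struct cht_link { marked_ptr_t link; } cht_link_t;

static marked_ptr_t make_link(const cht_link_t *next, mark_t mark)
{
    marked_ptr_t ptr = (marked_ptr_t)next;
    assert(!(ptr & N_MARK_MASK));   /* alignment keeps the low bits free */
    return ptr | mark;
}

static cht_link_t *get_next(marked_ptr_t link)
{
    return (cht_link_t *)(link & ~N_MARK_MASK);
}

static mark_t get_mark(marked_ptr_t link)
{
    return (mark_t)(link & N_MARK_MASK);
}

/* Atomically replace cur with new; returns the previously observed link,
 * which equals cur exactly when the exchange succeeded. */
static marked_ptr_t _cas_link(marked_ptr_t *link, marked_ptr_t cur, marked_ptr_t new)
{
    marked_ptr_t expected = cur;
    __atomic_compare_exchange_n(link, &expected, new, false,
        __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
    return expected;
}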