Changeset a35b458 in mainline for kernel/generic/src/adt
- Timestamp: 2018-03-02T20:10:49Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f1380b7
- Parents: 3061bc1
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:38:31)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:10:49)
- Location: kernel/generic/src/adt
- Files: 6 edited
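Taken together, the per-file hunks below all modify blank lines, which is consistent with a mechanical cleanup stripping trailing whitespace even from otherwise-empty lines. As a minimal illustration only (not part of the changeset and not HelenOS code), a stand-alone C filter with the same effect could look like this:

    #include <stdio.h>
    #include <string.h>

    /*
     * Strip trailing spaces and tabs from every line of stdin,
     * including lines that are otherwise empty. Assumes lines
     * shorter than the buffer.
     */
    int main(void)
    {
        char line[4096];

        while (fgets(line, sizeof(line), stdin) != NULL) {
            size_t len = strlen(line);
            int had_newline = (len > 0 && line[len - 1] == '\n');

            if (had_newline)
                len--;

            /* Trim the trailing whitespace run. */
            while (len > 0 && (line[len - 1] == ' ' || line[len - 1] == '\t'))
                len--;

            line[len] = '\0';
            fputs(line, stdout);
            if (had_newline)
                fputc('\n', stdout);
        }

        return 0;
    }

Running such a filter over a source file (strip_ws < avl.c > avl.c.new) would produce exactly the kind of diff shown below: only rows whose content ends in blanks change.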
kernel/generic/src/adt/avl.c
(diff r3061bc1 → ra35b458)

All hunks in this file touch blank lines only: every modified row carries identical old and new line numbers with empty content, consistent with stripping trailing whitespace from otherwise-empty lines. The surrounding context spans the iterative key search and leftmost-leaf descent, the insert path with its REBALANCE_INSERT_LR/RL macros, avltree_delete with its rotation and repair passes, and the delete-minimum helper. No functional change is visible.
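The descent loop in the context is worth restating on its own. A minimal sketch of the leftmost-leaf walk, with the node type pared down (the real avltree_node_t also carries key, balance and parent fields):

    typedef struct avl_node {
        struct avl_node *lft;
        struct avl_node *rgt;
    } avl_node_t;

    /* Return the node with the smallest key, mirroring the context code. */
    static avl_node_t *find_min(avl_node_t *root)
    {
        avl_node_t *p = root;

        /* An empty tree has no minimum. */
        if (!p)
            return NULL;

        /* The leftmost leaf in the tree holds the smallest key. */
        while (p->lft != NULL)
            p = p->lft;

        return p;
    }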
kernel/generic/src/adt/bitmap.c
(diff r3061bc1 → ra35b458)

Whitespace-only hunks again: each modified row is a blank line. The context covers bit lookup (byte index plus mask), size rounding, the set-range and clear-range routines with their split into leading unaligned bits (lub), aligned middle bits (amb) and trailing bits (tab), bitmap copying, and the next-fit range allocation loop with its constraint check.
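Both range routines in the context decompose a bit run the same way before touching memory. A sketch of that decomposition, assuming byte-sized elements (BITMAP_ELEMENT of 8), as the byte arithmetic in the context suggests:

    #include <stddef.h>

    #define BITMAP_ELEMENT  8  /* assumed: bits per storage element */

    /*
     * Split a run of 'count' bits starting at bit 'start' into leading
     * unaligned bits (lub), aligned middle bits (amb) and the trailing
     * remainder (tab), as the set/clear routines do in the context above.
     */
    static void decompose(size_t start, size_t count,
        size_t *lub, size_t *amb, size_t *tab)
    {
        /* ALIGN_UP(start, BITMAP_ELEMENT) */
        size_t aligned_start = (start + BITMAP_ELEMENT - 1) &
            ~(size_t) (BITMAP_ELEMENT - 1);
        size_t lead = aligned_start - start;

        *lub = (lead < count) ? lead : count;
        *amb = (count > *lub) ? (count - *lub) : 0;
        *tab = *amb % BITMAP_ELEMENT;
    }

For example, start = 5 and count = 20 gives lub = 3 and amb = 17, hence two whole middle bytes and tab = 1 trailing bit.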
kernel/generic/src/adt/btree.c
(diff r3061bc1 → ra35b458)

Whitespace-only hunks throughout the B-tree code. The context covers node initialization and teardown, the in-node helpers (node_insert_key_and_lsubtree/_rsubtree, node_remove_key_and_lsubtree/_rsubtree, find_key_by_subtree), the left and right rotations, node_split and node_combine, the insert and remove paths (_btree_insert, _btree_remove and their public wrappers), the search descent, leaf-list navigation, and the breadth-first print routine.
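The in-node insertion pattern recurs throughout the context (node_insert_key_and_lsubtree and its right-subtree twin). A sketch reduced to keys only, with an illustrative BTREE_MAX_KEYS (the real constant, and the parallel value[] and subtree[] arrays, live outside this excerpt):

    #include <stddef.h>

    #define BTREE_MAX_KEYS  5  /* assumed for illustration */

    typedef unsigned long long btree_key_t;

    typedef struct {
        size_t keys;
        btree_key_t key[BTREE_MAX_KEYS + 1];  /* extra slot used during splits */
    } btree_node_sketch_t;

    /* Insert a key into a sorted node, shifting bigger keys right. */
    static void node_insert_key(btree_node_sketch_t *node, btree_key_t key)
    {
        size_t i;

        for (i = 0; i < node->keys; i++) {
            if (key < node->key[i]) {
                /* Open slot i by shifting the larger keys. */
                for (size_t j = node->keys; j > i; j--)
                    node->key[j] = node->key[j - 1];
                break;
            }
        }

        node->key[i] = key;
        node->keys++;
    }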
kernel/generic/src/adt/cht.c
(diff r3061bc1 → ra35b458)

Whitespace-only hunks across the concurrent hash table. The context covers table setup and destruction (alloc_buckets, size_to_order, cht_destroy_unsafe), the lock-free lookup paths (search_bucket, find_resizing, find_duplicate), insertion (insert_at with duplicate detection), removal (remove_pred, delete_at, mark_deleted, unlink_from_pred), the window-search/GC helpers (find_wnd_and_gc and variants), the resize machinery (upd_resizing_head, head moves, split_bucket, join_buckets, the grow and shrink passes, join-node cleanup), and the marked-pointer primitives (node_hash, make_link, cas_link).
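The marked-pointer primitives near the end of the context (make_link and friends) carry the whole lock-free scheme: node state is packed into the low bits of an aligned successor pointer, so a single compare-and-swap updates the link and its marks together. A sketch assuming a three-bit mask (the actual N_MARK_MASK and the mark values are defined outside this excerpt):

    #include <assert.h>
    #include <stdint.h>

    typedef uintptr_t marked_ptr_t;
    typedef unsigned int mark_t;

    #define N_MARK_MASK  ((uintptr_t) 7)  /* assumed: low three bits hold marks */

    /* Pack an aligned pointer and its marks into one CAS-able word. */
    static marked_ptr_t make_link(const void *next, mark_t mark)
    {
        marked_ptr_t ptr = (marked_ptr_t) next;

        assert(!(ptr & N_MARK_MASK));               /* pointer must be aligned */
        assert(!((uintptr_t) mark & ~N_MARK_MASK)); /* marks must fit the mask */

        return ptr | mark;
    }

    static void *get_next(marked_ptr_t link)
    {
        return (void *) (link & ~N_MARK_MASK);
    }

    static mark_t get_mark(marked_ptr_t link)
    {
        return (mark_t) (link & N_MARK_MASK);
    }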
kernel/generic/src/adt/hash_table.c
(diff r3061bc1 → ra35b458)

Whitespace-only hunks in the chained hash table: creation checks, destruction and clearing (clear_items), insertion and unique insertion with a duplicate scan, key lookup and removal, the apply traversal guarded by apply_ongoing, and the sizing helpers (the 2n + 1 size rounding loop, alloc_table, rehashing on resize).
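The sizing policy is visible in the context: bucket counts start at a minimum and grow by repeated 2n + 1. A sketch with an assumed minimum (the real HT_MIN_BUCKETS is defined elsewhere):

    #include <stddef.h>

    #define HT_MIN_BUCKETS  8  /* assumed for illustration */

    /* Round a requested size up to the next bucket count in the 2n + 1 series. */
    static size_t round_up_size(size_t size)
    {
        size_t rounded_size = HT_MIN_BUCKETS;

        while (rounded_size < size)
            rounded_size = 2 * rounded_size + 1;

        return rounded_size;
    }

An odd modulus is a common motivation for the 2n + 1 step: with an even bucket count, hashes sharing a power-of-two factor would cluster into a subset of the buckets.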
kernel/generic/src/adt/list.c
(diff r3061bc1 → ra35b458)

Whitespace-only hunks in the list helpers: the membership walk, list concatenation by splicing head and tail links, and the element-counting loop over the circular list.
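The counting loop in the context shows the list convention used throughout: a circular doubly linked list whose head is a sentinel, so traversal runs from head.next back around to the head itself. A self-contained sketch of the same walk:

    typedef struct link {
        struct link *prev;
        struct link *next;
    } link_t;

    typedef struct {
        link_t head;  /* sentinel: an empty list points back to itself */
    } list_t;

    /* Count the elements by walking until the sentinel comes around again. */
    static unsigned long list_count(list_t *list)
    {
        unsigned long count = 0;

        for (link_t *cur = list->head.next; cur != &list->head; cur = cur->next)
            count++;

        return count;
    }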