Changeset cefb126 in mainline for kernel/generic/src
- Timestamp:
- 2010-07-02T14:19:30Z (15 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 89c57b6
- Parents:
- fe7abd0 (diff), e3ee9b9 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Location:
- kernel/generic/src
- Files:
-
- 1 deleted
- 24 edited
- 2 moved
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/adt/btree.c
rfe7abd0 rcefb126 33 33 /** 34 34 * @file 35 * @brief 35 * @brief B+tree implementation. 36 36 * 37 37 * This file implements B+tree type and operations. … … 54 54 #include <print.h> 55 55 56 static void btree_destroy_subtree(btree_node_t *root);57 static void _btree_insert(btree_t *t, btree_key_t key, void *value, btree_node_t *rsubtree, btree_node_t *node);58 static void _btree_remove(btree_t *t, btree_key_t key, btree_node_t *node);59 static void node_initialize(btree_node_t *node);60 static void node_insert_key_and_lsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *lsubtree);61 static void node_insert_key_and_rsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree);62 static void node_remove_key_and_lsubtree(btree_node_t *node, btree_key_t key);63 static void node_remove_key_and_rsubtree(btree_node_t *node, btree_key_t key);64 static btree_node_t *node_split(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree, btree_key_t *median);65 static btree_node_t *node_combine(btree_node_t *node);66 static size_t find_key_by_subtree(btree_node_t *node, btree_node_t *subtree, bool right);67 static void rotate_from_right(btree_node_t *lnode, btree_node_t *rnode, size_t idx);68 static void rotate_from_left(btree_node_t *lnode, btree_node_t *rnode, size_t idx);69 static bool try_insert_by_rotation_to_left(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree);70 static bool try_insert_by_rotation_to_right(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree);71 static bool try_rotation_from_left(btree_node_t *rnode);72 static bool try_rotation_from_right(btree_node_t *lnode);73 74 #define ROOT_NODE(n) (!(n)->parent)75 #define INDEX_NODE(n) ((n)->subtree[0] != NULL)76 #define LEAF_NODE(n) ((n)->subtree[0] == NULL)77 78 #define FILL_FACTOR ((BTREE_M-1)/2)79 80 #define MEDIAN_LOW_INDEX(n) (((n)->keys-1)/2)81 #define MEDIAN_HIGH_INDEX(n) ((n)->keys/2)82 #define 
MEDIAN_LOW(n) ((n)->key[MEDIAN_LOW_INDEX((n))]);83 #define MEDIAN_HIGH(n) ((n)->key[MEDIAN_HIGH_INDEX((n))]);84 85 56 static slab_cache_t *btree_node_slab; 57 58 #define ROOT_NODE(n) (!(n)->parent) 59 #define INDEX_NODE(n) ((n)->subtree[0] != NULL) 60 #define LEAF_NODE(n) ((n)->subtree[0] == NULL) 61 62 #define FILL_FACTOR ((BTREE_M - 1) / 2) 63 64 #define MEDIAN_LOW_INDEX(n) (((n)->keys-1) / 2) 65 #define MEDIAN_HIGH_INDEX(n) ((n)->keys / 2) 66 #define MEDIAN_LOW(n) ((n)->key[MEDIAN_LOW_INDEX((n))]); 67 #define MEDIAN_HIGH(n) ((n)->key[MEDIAN_HIGH_INDEX((n))]); 86 68 87 69 /** Initialize B-trees. */ 88 70 void btree_init(void) 89 71 { 90 btree_node_slab = slab_cache_create("btree_node_slab", sizeof(btree_node_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); 72 btree_node_slab = slab_cache_create("btree_node_slab", 73 sizeof(btree_node_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); 74 } 75 76 /** Initialize B-tree node. 77 * 78 * @param node B-tree node. 79 * 80 */ 81 static void node_initialize(btree_node_t *node) 82 { 83 unsigned int i; 84 85 node->keys = 0; 86 87 /* Clean also space for the extra key. */ 88 for (i = 0; i < BTREE_MAX_KEYS + 1; i++) { 89 node->key[i] = 0; 90 node->value[i] = NULL; 91 node->subtree[i] = NULL; 92 } 93 94 node->subtree[i] = NULL; 95 node->parent = NULL; 96 97 link_initialize(&node->leaf_link); 98 link_initialize(&node->bfs_link); 99 node->depth = 0; 91 100 } 92 101 … … 94 103 * 95 104 * @param t B-tree. 105 * 96 106 */ 97 107 void btree_create(btree_t *t) … … 103 113 } 104 114 105 /** Destroy empty B-tree. 
*/106 void btree_destroy(btree_t *t)107 {108 btree_destroy_subtree(t->root);109 }110 111 /** Insert key-value pair into B-tree.112 *113 * @param t B-tree.114 * @param key Key to be inserted.115 * @param value Value to be inserted.116 * @param leaf_node Leaf node where the insertion should begin.117 */118 void btree_insert(btree_t *t, btree_key_t key, void *value, btree_node_t *leaf_node)119 {120 btree_node_t *lnode;121 122 ASSERT(value);123 124 lnode = leaf_node;125 if (!lnode) {126 if (btree_search(t, key, &lnode)) {127 panic("B-tree %p already contains key %" PRIu64 ".", t, key);128 }129 }130 131 _btree_insert(t, key, value, NULL, lnode);132 }133 134 115 /** Destroy subtree rooted in a node. 135 116 * 136 117 * @param root Root of the subtree. 137 */ 138 void btree_destroy_subtree(btree_node_t *root) 118 * 119 */ 120 static void btree_destroy_subtree(btree_node_t *root) 139 121 { 140 122 size_t i; 141 123 142 124 if (root->keys) { 143 125 for (i = 0; i < root->keys + 1; i++) { … … 146 128 } 147 129 } 130 148 131 slab_free(btree_node_slab, root); 149 132 } 150 133 134 /** Destroy empty B-tree. */ 135 void btree_destroy(btree_t *t) 136 { 137 btree_destroy_subtree(t->root); 138 } 139 140 /** Insert key-value-rsubtree triplet into B-tree node. 141 * 142 * It is actually possible to have more keys than BTREE_MAX_KEYS. 143 * This feature is used during splitting the node when the 144 * number of keys is BTREE_MAX_KEYS + 1. Insert by left rotation 145 * also makes use of this feature. 146 * 147 * @param node B-tree node into wich the new key is to be inserted. 148 * @param key The key to be inserted. 149 * @param value Pointer to value to be inserted. 150 * @param rsubtree Pointer to the right subtree. 
151 * 152 */ 153 static void node_insert_key_and_rsubtree(btree_node_t *node, btree_key_t key, 154 void *value, btree_node_t *rsubtree) 155 { 156 size_t i; 157 158 for (i = 0; i < node->keys; i++) { 159 if (key < node->key[i]) { 160 size_t j; 161 162 for (j = node->keys; j > i; j--) { 163 node->key[j] = node->key[j - 1]; 164 node->value[j] = node->value[j - 1]; 165 node->subtree[j + 1] = node->subtree[j]; 166 } 167 168 break; 169 } 170 } 171 172 node->key[i] = key; 173 node->value[i] = value; 174 node->subtree[i + 1] = rsubtree; 175 node->keys++; 176 } 177 178 /** Find key by its left or right subtree. 179 * 180 * @param node B-tree node. 181 * @param subtree Left or right subtree of a key found in node. 182 * @param right If true, subtree is a right subtree. If false, 183 * subtree is a left subtree. 184 * 185 * @return Index of the key associated with the subtree. 186 * 187 */ 188 static size_t find_key_by_subtree(btree_node_t *node, btree_node_t *subtree, 189 bool right) 190 { 191 size_t i; 192 193 for (i = 0; i < node->keys + 1; i++) { 194 if (subtree == node->subtree[i]) 195 return i - (int) (right != false); 196 } 197 198 panic("Node %p does not contain subtree %p.", node, subtree); 199 } 200 201 /** Remove key and its left subtree pointer from B-tree node. 202 * 203 * Remove the key and eliminate gaps in node->key array. 204 * Note that the value pointer and the left subtree pointer 205 * is removed from the node as well. 206 * 207 * @param node B-tree node. 208 * @param key Key to be removed. 
209 * 210 */ 211 static void node_remove_key_and_lsubtree(btree_node_t *node, btree_key_t key) 212 { 213 size_t i; 214 size_t j; 215 216 for (i = 0; i < node->keys; i++) { 217 if (key == node->key[i]) { 218 for (j = i + 1; j < node->keys; j++) { 219 node->key[j - 1] = node->key[j]; 220 node->value[j - 1] = node->value[j]; 221 node->subtree[j - 1] = node->subtree[j]; 222 } 223 224 node->subtree[j - 1] = node->subtree[j]; 225 node->keys--; 226 227 return; 228 } 229 } 230 231 panic("Node %p does not contain key %" PRIu64 ".", node, key); 232 } 233 234 /** Remove key and its right subtree pointer from B-tree node. 235 * 236 * Remove the key and eliminate gaps in node->key array. 237 * Note that the value pointer and the right subtree pointer 238 * is removed from the node as well. 239 * 240 * @param node B-tree node. 241 * @param key Key to be removed. 242 * 243 */ 244 static void node_remove_key_and_rsubtree(btree_node_t *node, btree_key_t key) 245 { 246 size_t i, j; 247 248 for (i = 0; i < node->keys; i++) { 249 if (key == node->key[i]) { 250 for (j = i + 1; j < node->keys; j++) { 251 node->key[j - 1] = node->key[j]; 252 node->value[j - 1] = node->value[j]; 253 node->subtree[j] = node->subtree[j + 1]; 254 } 255 256 node->keys--; 257 return; 258 } 259 } 260 261 panic("Node %p does not contain key %" PRIu64 ".", node, key); 262 } 263 264 /** Insert key-value-lsubtree triplet into B-tree node. 265 * 266 * It is actually possible to have more keys than BTREE_MAX_KEYS. 267 * This feature is used during insert by right rotation. 268 * 269 * @param node B-tree node into wich the new key is to be inserted. 270 * @param key The key to be inserted. 271 * @param value Pointer to value to be inserted. 272 * @param lsubtree Pointer to the left subtree. 
273 * 274 */ 275 static void node_insert_key_and_lsubtree(btree_node_t *node, btree_key_t key, 276 void *value, btree_node_t *lsubtree) 277 { 278 size_t i; 279 280 for (i = 0; i < node->keys; i++) { 281 if (key < node->key[i]) { 282 size_t j; 283 284 for (j = node->keys; j > i; j--) { 285 node->key[j] = node->key[j - 1]; 286 node->value[j] = node->value[j - 1]; 287 node->subtree[j + 1] = node->subtree[j]; 288 } 289 290 node->subtree[j + 1] = node->subtree[j]; 291 break; 292 } 293 } 294 295 node->key[i] = key; 296 node->value[i] = value; 297 node->subtree[i] = lsubtree; 298 299 node->keys++; 300 } 301 302 /** Rotate one key-value-rsubtree triplet from the left sibling to the right sibling. 303 * 304 * The biggest key and its value and right subtree is rotated 305 * from the left node to the right. If the node is an index node, 306 * than the parent node key belonging to the left node takes part 307 * in the rotation. 308 * 309 * @param lnode Left sibling. 310 * @param rnode Right sibling. 311 * @param idx Index of the parent node key that is taking part 312 * in the rotation. 313 * 314 */ 315 static void rotate_from_left(btree_node_t *lnode, btree_node_t *rnode, size_t idx) 316 { 317 btree_key_t key = lnode->key[lnode->keys - 1]; 318 319 if (LEAF_NODE(lnode)) { 320 void *value = lnode->value[lnode->keys - 1]; 321 322 node_remove_key_and_rsubtree(lnode, key); 323 node_insert_key_and_lsubtree(rnode, key, value, NULL); 324 lnode->parent->key[idx] = key; 325 } else { 326 btree_node_t *rsubtree = lnode->subtree[lnode->keys]; 327 328 node_remove_key_and_rsubtree(lnode, key); 329 node_insert_key_and_lsubtree(rnode, lnode->parent->key[idx], NULL, rsubtree); 330 lnode->parent->key[idx] = key; 331 332 /* Fix parent link of the reconnected right subtree. */ 333 rsubtree->parent = rnode; 334 } 335 } 336 337 /** Rotate one key-value-lsubtree triplet from the right sibling to the left sibling. 
338 * 339 * The smallest key and its value and left subtree is rotated 340 * from the right node to the left. If the node is an index node, 341 * than the parent node key belonging to the right node takes part 342 * in the rotation. 343 * 344 * @param lnode Left sibling. 345 * @param rnode Right sibling. 346 * @param idx Index of the parent node key that is taking part 347 * in the rotation. 348 * 349 */ 350 static void rotate_from_right(btree_node_t *lnode, btree_node_t *rnode, size_t idx) 351 { 352 btree_key_t key = rnode->key[0]; 353 354 if (LEAF_NODE(rnode)) { 355 void *value = rnode->value[0]; 356 357 node_remove_key_and_lsubtree(rnode, key); 358 node_insert_key_and_rsubtree(lnode, key, value, NULL); 359 rnode->parent->key[idx] = rnode->key[0]; 360 } else { 361 btree_node_t *lsubtree = rnode->subtree[0]; 362 363 node_remove_key_and_lsubtree(rnode, key); 364 node_insert_key_and_rsubtree(lnode, rnode->parent->key[idx], NULL, lsubtree); 365 rnode->parent->key[idx] = key; 366 367 /* Fix parent link of the reconnected left subtree. */ 368 lsubtree->parent = lnode; 369 } 370 } 371 372 /** Insert key-value-rsubtree triplet and rotate the node to the left, if this operation can be done. 373 * 374 * Left sibling of the node (if it exists) is checked for free space. 375 * If there is free space, the key is inserted and the smallest key of 376 * the node is moved there. The index node which is the parent of both 377 * nodes is fixed. 378 * 379 * @param node B-tree node. 380 * @param inskey Key to be inserted. 381 * @param insvalue Value to be inserted. 382 * @param rsubtree Right subtree of inskey. 383 * 384 * @return True if the rotation was performed, false otherwise. 385 * 386 */ 387 static bool try_insert_by_rotation_to_left(btree_node_t *node, 388 btree_key_t inskey, void *insvalue, btree_node_t *rsubtree) 389 { 390 size_t idx; 391 btree_node_t *lnode; 392 393 /* 394 * If this is root node, the rotation can not be done. 
395 */ 396 if (ROOT_NODE(node)) 397 return false; 398 399 idx = find_key_by_subtree(node->parent, node, true); 400 if ((int) idx == -1) { 401 /* 402 * If this node is the leftmost subtree of its parent, 403 * the rotation can not be done. 404 */ 405 return false; 406 } 407 408 lnode = node->parent->subtree[idx]; 409 if (lnode->keys < BTREE_MAX_KEYS) { 410 /* 411 * The rotaion can be done. The left sibling has free space. 412 */ 413 node_insert_key_and_rsubtree(node, inskey, insvalue, rsubtree); 414 rotate_from_right(lnode, node, idx); 415 return true; 416 } 417 418 return false; 419 } 420 421 /** Insert key-value-rsubtree triplet and rotate the node to the right, if this operation can be done. 422 * 423 * Right sibling of the node (if it exists) is checked for free space. 424 * If there is free space, the key is inserted and the biggest key of 425 * the node is moved there. The index node which is the parent of both 426 * nodes is fixed. 427 * 428 * @param node B-tree node. 429 * @param inskey Key to be inserted. 430 * @param insvalue Value to be inserted. 431 * @param rsubtree Right subtree of inskey. 432 * 433 * @return True if the rotation was performed, false otherwise. 434 * 435 */ 436 static bool try_insert_by_rotation_to_right(btree_node_t *node, 437 btree_key_t inskey, void *insvalue, btree_node_t *rsubtree) 438 { 439 size_t idx; 440 btree_node_t *rnode; 441 442 /* 443 * If this is root node, the rotation can not be done. 444 */ 445 if (ROOT_NODE(node)) 446 return false; 447 448 idx = find_key_by_subtree(node->parent, node, false); 449 if (idx == node->parent->keys) { 450 /* 451 * If this node is the rightmost subtree of its parent, 452 * the rotation can not be done. 453 */ 454 return false; 455 } 456 457 rnode = node->parent->subtree[idx + 1]; 458 if (rnode->keys < BTREE_MAX_KEYS) { 459 /* 460 * The rotaion can be done. The right sibling has free space. 
461 */ 462 node_insert_key_and_rsubtree(node, inskey, insvalue, rsubtree); 463 rotate_from_left(node, rnode, idx); 464 return true; 465 } 466 467 return false; 468 } 469 470 /** Split full B-tree node and insert new key-value-right-subtree triplet. 471 * 472 * This function will split a node and return a pointer to a newly created 473 * node containing keys greater than or equal to the greater of medians 474 * (or median) of the old keys and the newly added key. It will also write 475 * the median key to a memory address supplied by the caller. 476 * 477 * If the node being split is an index node, the median will not be 478 * included in the new node. If the node is a leaf node, 479 * the median will be copied there. 480 * 481 * @param node B-tree node wich is going to be split. 482 * @param key The key to be inserted. 483 * @param value Pointer to the value to be inserted. 484 * @param rsubtree Pointer to the right subtree of the key being added. 485 * @param median Address in memory, where the median key will be stored. 486 * 487 * @return Newly created right sibling of node. 488 * 489 */ 490 static btree_node_t *node_split(btree_node_t *node, btree_key_t key, 491 void *value, btree_node_t *rsubtree, btree_key_t *median) 492 { 493 btree_node_t *rnode; 494 size_t i; 495 size_t j; 496 497 ASSERT(median); 498 ASSERT(node->keys == BTREE_MAX_KEYS); 499 500 /* 501 * Use the extra space to store the extra node. 502 */ 503 node_insert_key_and_rsubtree(node, key, value, rsubtree); 504 505 /* 506 * Compute median of keys. 507 */ 508 *median = MEDIAN_HIGH(node); 509 510 /* 511 * Allocate and initialize new right sibling. 512 */ 513 rnode = (btree_node_t *) slab_alloc(btree_node_slab, 0); 514 node_initialize(rnode); 515 rnode->parent = node->parent; 516 rnode->depth = node->depth; 517 518 /* 519 * Copy big keys, values and subtree pointers to the new right sibling. 520 * If this is an index node, do not copy the median. 
521 */ 522 i = (size_t) INDEX_NODE(node); 523 for (i += MEDIAN_HIGH_INDEX(node), j = 0; i < node->keys; i++, j++) { 524 rnode->key[j] = node->key[i]; 525 rnode->value[j] = node->value[i]; 526 rnode->subtree[j] = node->subtree[i]; 527 528 /* 529 * Fix parent links in subtrees. 530 */ 531 if (rnode->subtree[j]) 532 rnode->subtree[j]->parent = rnode; 533 } 534 535 rnode->subtree[j] = node->subtree[i]; 536 if (rnode->subtree[j]) 537 rnode->subtree[j]->parent = rnode; 538 539 rnode->keys = j; /* Set number of keys of the new node. */ 540 node->keys /= 2; /* Shrink the old node. */ 541 542 return rnode; 543 } 544 151 545 /** Recursively insert into B-tree. 152 546 * 153 * @param t B-tree.154 * @param key Key to be inserted.155 * @param value Value to be inserted.547 * @param t B-tree. 548 * @param key Key to be inserted. 549 * @param value Value to be inserted. 156 550 * @param rsubtree Right subtree of the inserted key. 157 * @param node Start inserting into this node. 158 */ 159 void _btree_insert(btree_t *t, btree_key_t key, void *value, btree_node_t *rsubtree, btree_node_t *node) 551 * @param node Start inserting into this node. 552 * 553 */ 554 static void _btree_insert(btree_t *t, btree_key_t key, void *value, 555 btree_node_t *rsubtree, btree_node_t *node) 160 556 { 161 557 if (node->keys < BTREE_MAX_KEYS) { … … 183 579 * bigger keys (i.e. the new node) into its parent. 184 580 */ 185 581 186 582 rnode = node_split(node, key, value, rsubtree, &median); 187 583 188 584 if (LEAF_NODE(node)) { 189 585 list_prepend(&rnode->leaf_link, &node->leaf_link); … … 202 598 * Left-hand side subtree will be the old root (i.e. node). 203 599 * Right-hand side subtree will be rnode. 204 */ 600 */ 205 601 t->root->subtree[0] = node; 206 602 207 603 t->root->depth = node->depth + 1; 208 604 } 209 605 _btree_insert(t, median, NULL, rnode, node->parent); 210 } 211 212 } 213 214 /** Remove B-tree node. 215 * 216 * @param t B-tree. 
217 * @param key Key to be removed from the B-tree along with its associated value. 218 * @param leaf_node If not NULL, pointer to the leaf node where the key is found. 219 */ 220 void btree_remove(btree_t *t, btree_key_t key, btree_node_t *leaf_node) 606 } 607 } 608 609 /** Insert key-value pair into B-tree. 610 * 611 * @param t B-tree. 612 * @param key Key to be inserted. 613 * @param value Value to be inserted. 614 * @param leaf_node Leaf node where the insertion should begin. 615 * 616 */ 617 void btree_insert(btree_t *t, btree_key_t key, void *value, 618 btree_node_t *leaf_node) 221 619 { 222 620 btree_node_t *lnode; 621 622 ASSERT(value); 223 623 224 624 lnode = leaf_node; 225 625 if (!lnode) { 226 if (!btree_search(t, key, &lnode)) { 227 panic("B-tree %p does not contain key %" PRIu64 ".", t, key); 228 } 229 } 230 231 _btree_remove(t, key, lnode); 626 if (btree_search(t, key, &lnode)) 627 panic("B-tree %p already contains key %" PRIu64 ".", t, key); 628 } 629 630 _btree_insert(t, key, value, NULL, lnode); 631 } 632 633 /** Rotate in a key from the left sibling or from the index node, if this operation can be done. 634 * 635 * @param rnode Node into which to add key from its left sibling 636 * or from the index node. 637 * 638 * @return True if the rotation was performed, false otherwise. 639 * 640 */ 641 static bool try_rotation_from_left(btree_node_t *rnode) 642 { 643 size_t idx; 644 btree_node_t *lnode; 645 646 /* 647 * If this is root node, the rotation can not be done. 648 */ 649 if (ROOT_NODE(rnode)) 650 return false; 651 652 idx = find_key_by_subtree(rnode->parent, rnode, true); 653 if ((int) idx == -1) { 654 /* 655 * If this node is the leftmost subtree of its parent, 656 * the rotation can not be done. 
657 */ 658 return false; 659 } 660 661 lnode = rnode->parent->subtree[idx]; 662 if (lnode->keys > FILL_FACTOR) { 663 rotate_from_left(lnode, rnode, idx); 664 return true; 665 } 666 667 return false; 668 } 669 670 /** Rotate in a key from the right sibling or from the index node, if this operation can be done. 671 * 672 * @param lnode Node into which to add key from its right sibling 673 * or from the index node. 674 * 675 * @return True if the rotation was performed, false otherwise. 676 * 677 */ 678 static bool try_rotation_from_right(btree_node_t *lnode) 679 { 680 size_t idx; 681 btree_node_t *rnode; 682 683 /* 684 * If this is root node, the rotation can not be done. 685 */ 686 if (ROOT_NODE(lnode)) 687 return false; 688 689 idx = find_key_by_subtree(lnode->parent, lnode, false); 690 if (idx == lnode->parent->keys) { 691 /* 692 * If this node is the rightmost subtree of its parent, 693 * the rotation can not be done. 694 */ 695 return false; 696 } 697 698 rnode = lnode->parent->subtree[idx + 1]; 699 if (rnode->keys > FILL_FACTOR) { 700 rotate_from_right(lnode, rnode, idx); 701 return true; 702 } 703 704 return false; 705 } 706 707 /** Combine node with any of its siblings. 708 * 709 * The siblings are required to be below the fill factor. 710 * 711 * @param node Node to combine with one of its siblings. 712 * 713 * @return Pointer to the rightmost of the two nodes. 714 * 715 */ 716 static btree_node_t *node_combine(btree_node_t *node) 717 { 718 size_t idx; 719 btree_node_t *rnode; 720 size_t i; 721 722 ASSERT(!ROOT_NODE(node)); 723 724 idx = find_key_by_subtree(node->parent, node, false); 725 if (idx == node->parent->keys) { 726 /* 727 * Rightmost subtree of its parent, combine with the left sibling. 728 */ 729 idx--; 730 rnode = node; 731 node = node->parent->subtree[idx]; 732 } else 733 rnode = node->parent->subtree[idx + 1]; 734 735 /* Index nodes need to insert parent node key in between left and right node. 
*/ 736 if (INDEX_NODE(node)) 737 node->key[node->keys++] = node->parent->key[idx]; 738 739 /* Copy the key-value-subtree triplets from the right node. */ 740 for (i = 0; i < rnode->keys; i++) { 741 node->key[node->keys + i] = rnode->key[i]; 742 node->value[node->keys + i] = rnode->value[i]; 743 744 if (INDEX_NODE(node)) { 745 node->subtree[node->keys + i] = rnode->subtree[i]; 746 rnode->subtree[i]->parent = node; 747 } 748 } 749 750 if (INDEX_NODE(node)) { 751 node->subtree[node->keys + i] = rnode->subtree[i]; 752 rnode->subtree[i]->parent = node; 753 } 754 755 node->keys += rnode->keys; 756 return rnode; 232 757 } 233 758 234 759 /** Recursively remove B-tree node. 235 760 * 236 * @param t B-tree.237 * @param key Key to be removed from the B-tree along with its associated value.761 * @param t B-tree. 762 * @param key Key to be removed from the B-tree along with its associated value. 238 763 * @param node Node where the key being removed resides. 239 */ 240 void _btree_remove(btree_t *t, btree_key_t key, btree_node_t *node) 764 * 765 */ 766 static void _btree_remove(btree_t *t, btree_key_t key, btree_node_t *node) 241 767 { 242 768 if (ROOT_NODE(node)) { 243 if ( node->keys == 1 && node->subtree[0]) {769 if ((node->keys == 1) && (node->subtree[0])) { 244 770 /* 245 771 * Free the current root and set new root. … … 257 783 node_remove_key_and_rsubtree(node, key); 258 784 } 785 259 786 return; 260 787 } … … 271 798 if (node->keys > FILL_FACTOR) { 272 799 size_t i; 273 800 274 801 /* 275 802 * The key can be immediatelly removed. … … 280 807 */ 281 808 node_remove_key_and_rsubtree(node, key); 809 282 810 for (i = 0; i < node->parent->keys; i++) { 283 811 if (node->parent->key[i] == key) 284 812 node->parent->key[i] = node->key[0]; 285 813 } 286 287 814 } else { 288 815 size_t idx; 289 btree_node_t *rnode, *parent; 290 816 btree_node_t *rnode; 817 btree_node_t *parent; 818 291 819 /* 292 820 * The node is below the fill factor as well as its left and right sibling. 
… … 298 826 node_remove_key_and_rsubtree(node, key); 299 827 rnode = node_combine(node); 828 300 829 if (LEAF_NODE(rnode)) 301 830 list_remove(&rnode->leaf_link); 831 302 832 idx = find_key_by_subtree(parent, rnode, true); 303 833 ASSERT((int) idx != -1); … … 307 837 } 308 838 839 /** Remove B-tree node. 840 * 841 * @param t B-tree. 842 * @param key Key to be removed from the B-tree along 843 * with its associated value. 844 * @param leaf_node If not NULL, pointer to the leaf node where 845 * the key is found. 846 * 847 */ 848 void btree_remove(btree_t *t, btree_key_t key, btree_node_t *leaf_node) 849 { 850 btree_node_t *lnode; 851 852 lnode = leaf_node; 853 if (!lnode) { 854 if (!btree_search(t, key, &lnode)) 855 panic("B-tree %p does not contain key %" PRIu64 ".", t, key); 856 } 857 858 _btree_remove(t, key, lnode); 859 } 860 309 861 /** Search key in a B-tree. 310 862 * 311 * @param t B-tree.312 * @param key Key to be searched.863 * @param t B-tree. 864 * @param key Key to be searched. 313 865 * @param leaf_node Address where to put pointer to visited leaf node. 314 866 * 315 867 * @return Pointer to value or NULL if there is no such key. 868 * 316 869 */ 317 870 void *btree_search(btree_t *t, btree_key_t key, btree_node_t **leaf_node) … … 323 876 */ 324 877 for (cur = t->root; cur; cur = next) { 325 326 /* Last iteration will set this with proper leaf node address. */ 878 /* 879 * Last iteration will set this with proper 880 * leaf node address. 881 */ 327 882 *leaf_node = cur; 328 883 … … 337 892 void *val; 338 893 size_t i; 339 894 340 895 /* 341 896 * Now if the key is smaller than cur->key[i] … … 347 902 next = cur->subtree[i]; 348 903 val = cur->value[i - 1]; 349 904 350 905 if (LEAF_NODE(cur)) 351 906 return key == cur->key[i - 1] ? val : NULL; 352 907 353 908 goto descend; 354 } 909 } 355 910 } 356 911 357 912 /* 358 * Last possibility is that the key is in the rightmost subtree. 913 * Last possibility is that the key is 914 * in the rightmost subtree. 
359 915 */ 360 916 next = cur->subtree[i]; 361 917 val = cur->value[i - 1]; 918 362 919 if (LEAF_NODE(cur)) 363 920 return key == cur->key[i - 1] ? val : NULL; 364 921 } 365 922 descend: 366 367 } 368 923 ; 924 } 925 369 926 /* 370 * The key was not found in the *leaf_node and is smaller than any of its keys. 927 * The key was not found in the *leaf_node and 928 * is smaller than any of its keys. 371 929 */ 372 930 return NULL; … … 375 933 /** Return pointer to B-tree leaf node's left neighbour. 376 934 * 377 * @param t B-tree.935 * @param t B-tree. 378 936 * @param node Node whose left neighbour will be returned. 379 937 * 380 * @return Left neighbour of the node or NULL if the node does not have the left neighbour. 938 * @return Left neighbour of the node or NULL if the node 939 * does not have the left neighbour. 940 * 381 941 */ 382 942 btree_node_t *btree_leaf_node_left_neighbour(btree_t *t, btree_node_t *node) 383 943 { 384 944 ASSERT(LEAF_NODE(node)); 945 385 946 if (node->leaf_link.prev != &t->leaf_head) 386 947 return list_get_instance(node->leaf_link.prev, btree_node_t, leaf_link); … … 391 952 /** Return pointer to B-tree leaf node's right neighbour. 392 953 * 393 * @param t B-tree.954 * @param t B-tree. 394 955 * @param node Node whose right neighbour will be returned. 395 956 * 396 * @return Right neighbour of the node or NULL if the node does not have the right neighbour. 957 * @return Right neighbour of the node or NULL if the node 958 * does not have the right neighbour. 
959 * 397 960 */ 398 961 btree_node_t *btree_leaf_node_right_neighbour(btree_t *t, btree_node_t *node) 399 962 { 400 963 ASSERT(LEAF_NODE(node)); 964 401 965 if (node->leaf_link.next != &t->leaf_head) 402 966 return list_get_instance(node->leaf_link.next, btree_node_t, leaf_link); … … 405 969 } 406 970 407 /** Initialize B-tree node.408 *409 * @param node B-tree node.410 */411 void node_initialize(btree_node_t *node)412 {413 int i;414 415 node->keys = 0;416 417 /* Clean also space for the extra key. */418 for (i = 0; i < BTREE_MAX_KEYS + 1; i++) {419 node->key[i] = 0;420 node->value[i] = NULL;421 node->subtree[i] = NULL;422 }423 node->subtree[i] = NULL;424 425 node->parent = NULL;426 427 link_initialize(&node->leaf_link);428 429 link_initialize(&node->bfs_link);430 node->depth = 0;431 }432 433 /** Insert key-value-lsubtree triplet into B-tree node.434 *435 * It is actually possible to have more keys than BTREE_MAX_KEYS.436 * This feature is used during insert by right rotation.437 *438 * @param node B-tree node into wich the new key is to be inserted.439 * @param key The key to be inserted.440 * @param value Pointer to value to be inserted.441 * @param lsubtree Pointer to the left subtree.442 */443 void node_insert_key_and_lsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *lsubtree)444 {445 size_t i;446 447 for (i = 0; i < node->keys; i++) {448 if (key < node->key[i]) {449 size_t j;450 451 for (j = node->keys; j > i; j--) {452 node->key[j] = node->key[j - 1];453 node->value[j] = node->value[j - 1];454 node->subtree[j + 1] = node->subtree[j];455 }456 node->subtree[j + 1] = node->subtree[j];457 break;458 }459 }460 node->key[i] = key;461 node->value[i] = value;462 node->subtree[i] = lsubtree;463 464 node->keys++;465 }466 467 /** Insert key-value-rsubtree triplet into B-tree node.468 *469 * It is actually possible to have more keys than BTREE_MAX_KEYS.470 * This feature is used during splitting the node when the471 * number of keys is 
BTREE_MAX_KEYS + 1. Insert by left rotation472 * also makes use of this feature.473 *474 * @param node B-tree node into wich the new key is to be inserted.475 * @param key The key to be inserted.476 * @param value Pointer to value to be inserted.477 * @param rsubtree Pointer to the right subtree.478 */479 void node_insert_key_and_rsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree)480 {481 size_t i;482 483 for (i = 0; i < node->keys; i++) {484 if (key < node->key[i]) {485 size_t j;486 487 for (j = node->keys; j > i; j--) {488 node->key[j] = node->key[j - 1];489 node->value[j] = node->value[j - 1];490 node->subtree[j + 1] = node->subtree[j];491 }492 break;493 }494 }495 node->key[i] = key;496 node->value[i] = value;497 node->subtree[i + 1] = rsubtree;498 499 node->keys++;500 }501 502 /** Remove key and its left subtree pointer from B-tree node.503 *504 * Remove the key and eliminate gaps in node->key array.505 * Note that the value pointer and the left subtree pointer506 * is removed from the node as well.507 *508 * @param node B-tree node.509 * @param key Key to be removed.510 */511 void node_remove_key_and_lsubtree(btree_node_t *node, btree_key_t key)512 {513 size_t i, j;514 515 for (i = 0; i < node->keys; i++) {516 if (key == node->key[i]) {517 for (j = i + 1; j < node->keys; j++) {518 node->key[j - 1] = node->key[j];519 node->value[j - 1] = node->value[j];520 node->subtree[j - 1] = node->subtree[j];521 }522 node->subtree[j - 1] = node->subtree[j];523 node->keys--;524 return;525 }526 }527 panic("Node %p does not contain key %" PRIu64 ".", node, key);528 }529 530 /** Remove key and its right subtree pointer from B-tree node.531 *532 * Remove the key and eliminate gaps in node->key array.533 * Note that the value pointer and the right subtree pointer534 * is removed from the node as well.535 *536 * @param node B-tree node.537 * @param key Key to be removed.538 */539 void node_remove_key_and_rsubtree(btree_node_t *node, btree_key_t 
key)540 {541 size_t i, j;542 543 for (i = 0; i < node->keys; i++) {544 if (key == node->key[i]) {545 for (j = i + 1; j < node->keys; j++) {546 node->key[j - 1] = node->key[j];547 node->value[j - 1] = node->value[j];548 node->subtree[j] = node->subtree[j + 1];549 }550 node->keys--;551 return;552 }553 }554 panic("Node %p does not contain key %" PRIu64 ".", node, key);555 }556 557 /** Split full B-tree node and insert new key-value-right-subtree triplet.558 *559 * This function will split a node and return a pointer to a newly created560 * node containing keys greater than or equal to the greater of medians561 * (or median) of the old keys and the newly added key. It will also write562 * the median key to a memory address supplied by the caller.563 *564 * If the node being split is an index node, the median will not be565 * included in the new node. If the node is a leaf node,566 * the median will be copied there.567 *568 * @param node B-tree node wich is going to be split.569 * @param key The key to be inserted.570 * @param value Pointer to the value to be inserted.571 * @param rsubtree Pointer to the right subtree of the key being added.572 * @param median Address in memory, where the median key will be stored.573 *574 * @return Newly created right sibling of node.575 */576 btree_node_t *node_split(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree, btree_key_t *median)577 {578 btree_node_t *rnode;579 size_t i, j;580 581 ASSERT(median);582 ASSERT(node->keys == BTREE_MAX_KEYS);583 584 /*585 * Use the extra space to store the extra node.586 */587 node_insert_key_and_rsubtree(node, key, value, rsubtree);588 589 /*590 * Compute median of keys.591 */592 *median = MEDIAN_HIGH(node);593 594 /*595 * Allocate and initialize new right sibling.596 */597 rnode = (btree_node_t *) slab_alloc(btree_node_slab, 0);598 node_initialize(rnode);599 rnode->parent = node->parent;600 rnode->depth = node->depth;601 602 /*603 * Copy big keys, values and subtree pointers 
to the new right sibling.604 * If this is an index node, do not copy the median.605 */606 i = (size_t) INDEX_NODE(node);607 for (i += MEDIAN_HIGH_INDEX(node), j = 0; i < node->keys; i++, j++) {608 rnode->key[j] = node->key[i];609 rnode->value[j] = node->value[i];610 rnode->subtree[j] = node->subtree[i];611 612 /*613 * Fix parent links in subtrees.614 */615 if (rnode->subtree[j])616 rnode->subtree[j]->parent = rnode;617 618 }619 rnode->subtree[j] = node->subtree[i];620 if (rnode->subtree[j])621 rnode->subtree[j]->parent = rnode;622 623 rnode->keys = j; /* Set number of keys of the new node. */624 node->keys /= 2; /* Shrink the old node. */625 626 return rnode;627 }628 629 /** Combine node with any of its siblings.630 *631 * The siblings are required to be below the fill factor.632 *633 * @param node Node to combine with one of its siblings.634 *635 * @return Pointer to the rightmost of the two nodes.636 */637 btree_node_t *node_combine(btree_node_t *node)638 {639 size_t idx;640 btree_node_t *rnode;641 size_t i;642 643 ASSERT(!ROOT_NODE(node));644 645 idx = find_key_by_subtree(node->parent, node, false);646 if (idx == node->parent->keys) {647 /*648 * Rightmost subtree of its parent, combine with the left sibling.649 */650 idx--;651 rnode = node;652 node = node->parent->subtree[idx];653 } else {654 rnode = node->parent->subtree[idx + 1];655 }656 657 /* Index nodes need to insert parent node key in between left and right node. */658 if (INDEX_NODE(node))659 node->key[node->keys++] = node->parent->key[idx];660 661 /* Copy the key-value-subtree triplets from the right node. 
*/662 for (i = 0; i < rnode->keys; i++) {663 node->key[node->keys + i] = rnode->key[i];664 node->value[node->keys + i] = rnode->value[i];665 if (INDEX_NODE(node)) {666 node->subtree[node->keys + i] = rnode->subtree[i];667 rnode->subtree[i]->parent = node;668 }669 }670 if (INDEX_NODE(node)) {671 node->subtree[node->keys + i] = rnode->subtree[i];672 rnode->subtree[i]->parent = node;673 }674 675 node->keys += rnode->keys;676 677 return rnode;678 }679 680 /** Find key by its left or right subtree.681 *682 * @param node B-tree node.683 * @param subtree Left or right subtree of a key found in node.684 * @param right If true, subtree is a right subtree. If false, subtree is a left subtree.685 *686 * @return Index of the key associated with the subtree.687 */688 size_t find_key_by_subtree(btree_node_t *node, btree_node_t *subtree, bool right)689 {690 size_t i;691 692 for (i = 0; i < node->keys + 1; i++) {693 if (subtree == node->subtree[i])694 return i - (int) (right != false);695 }696 panic("Node %p does not contain subtree %p.", node, subtree);697 }698 699 /** Rotate one key-value-rsubtree triplet from the left sibling to the right sibling.700 *701 * The biggest key and its value and right subtree is rotated from the left node702 * to the right. 
If the node is an index node, than the parent node key belonging to703 * the left node takes part in the rotation.704 *705 * @param lnode Left sibling.706 * @param rnode Right sibling.707 * @param idx Index of the parent node key that is taking part in the rotation.708 */709 void rotate_from_left(btree_node_t *lnode, btree_node_t *rnode, size_t idx)710 {711 btree_key_t key;712 713 key = lnode->key[lnode->keys - 1];714 715 if (LEAF_NODE(lnode)) {716 void *value;717 718 value = lnode->value[lnode->keys - 1];719 node_remove_key_and_rsubtree(lnode, key);720 node_insert_key_and_lsubtree(rnode, key, value, NULL);721 lnode->parent->key[idx] = key;722 } else {723 btree_node_t *rsubtree;724 725 rsubtree = lnode->subtree[lnode->keys];726 node_remove_key_and_rsubtree(lnode, key);727 node_insert_key_and_lsubtree(rnode, lnode->parent->key[idx], NULL, rsubtree);728 lnode->parent->key[idx] = key;729 730 /* Fix parent link of the reconnected right subtree. */731 rsubtree->parent = rnode;732 }733 734 }735 736 /** Rotate one key-value-lsubtree triplet from the right sibling to the left sibling.737 *738 * The smallest key and its value and left subtree is rotated from the right node739 * to the left. 
If the node is an index node, than the parent node key belonging to740 * the right node takes part in the rotation.741 *742 * @param lnode Left sibling.743 * @param rnode Right sibling.744 * @param idx Index of the parent node key that is taking part in the rotation.745 */746 void rotate_from_right(btree_node_t *lnode, btree_node_t *rnode, size_t idx)747 {748 btree_key_t key;749 750 key = rnode->key[0];751 752 if (LEAF_NODE(rnode)) {753 void *value;754 755 value = rnode->value[0];756 node_remove_key_and_lsubtree(rnode, key);757 node_insert_key_and_rsubtree(lnode, key, value, NULL);758 rnode->parent->key[idx] = rnode->key[0];759 } else {760 btree_node_t *lsubtree;761 762 lsubtree = rnode->subtree[0];763 node_remove_key_and_lsubtree(rnode, key);764 node_insert_key_and_rsubtree(lnode, rnode->parent->key[idx], NULL, lsubtree);765 rnode->parent->key[idx] = key;766 767 /* Fix parent link of the reconnected left subtree. */768 lsubtree->parent = lnode;769 }770 771 }772 773 /** Insert key-value-rsubtree triplet and rotate the node to the left, if this operation can be done.774 *775 * Left sibling of the node (if it exists) is checked for free space.776 * If there is free space, the key is inserted and the smallest key of777 * the node is moved there. 
The index node which is the parent of both778 * nodes is fixed.779 *780 * @param node B-tree node.781 * @param inskey Key to be inserted.782 * @param insvalue Value to be inserted.783 * @param rsubtree Right subtree of inskey.784 *785 * @return True if the rotation was performed, false otherwise.786 */787 bool try_insert_by_rotation_to_left(btree_node_t *node, btree_key_t inskey, void *insvalue, btree_node_t *rsubtree)788 {789 size_t idx;790 btree_node_t *lnode;791 792 /*793 * If this is root node, the rotation can not be done.794 */795 if (ROOT_NODE(node))796 return false;797 798 idx = find_key_by_subtree(node->parent, node, true);799 if ((int) idx == -1) {800 /*801 * If this node is the leftmost subtree of its parent,802 * the rotation can not be done.803 */804 return false;805 }806 807 lnode = node->parent->subtree[idx];808 if (lnode->keys < BTREE_MAX_KEYS) {809 /*810 * The rotaion can be done. The left sibling has free space.811 */812 node_insert_key_and_rsubtree(node, inskey, insvalue, rsubtree);813 rotate_from_right(lnode, node, idx);814 return true;815 }816 817 return false;818 }819 820 /** Insert key-value-rsubtree triplet and rotate the node to the right, if this operation can be done.821 *822 * Right sibling of the node (if it exists) is checked for free space.823 * If there is free space, the key is inserted and the biggest key of824 * the node is moved there. 
The index node which is the parent of both825 * nodes is fixed.826 *827 * @param node B-tree node.828 * @param inskey Key to be inserted.829 * @param insvalue Value to be inserted.830 * @param rsubtree Right subtree of inskey.831 *832 * @return True if the rotation was performed, false otherwise.833 */834 bool try_insert_by_rotation_to_right(btree_node_t *node, btree_key_t inskey, void *insvalue, btree_node_t *rsubtree)835 {836 size_t idx;837 btree_node_t *rnode;838 839 /*840 * If this is root node, the rotation can not be done.841 */842 if (ROOT_NODE(node))843 return false;844 845 idx = find_key_by_subtree(node->parent, node, false);846 if (idx == node->parent->keys) {847 /*848 * If this node is the rightmost subtree of its parent,849 * the rotation can not be done.850 */851 return false;852 }853 854 rnode = node->parent->subtree[idx + 1];855 if (rnode->keys < BTREE_MAX_KEYS) {856 /*857 * The rotaion can be done. The right sibling has free space.858 */859 node_insert_key_and_rsubtree(node, inskey, insvalue, rsubtree);860 rotate_from_left(node, rnode, idx);861 return true;862 }863 864 return false;865 }866 867 /** Rotate in a key from the left sibling or from the index node, if this operation can be done.868 *869 * @param rnode Node into which to add key from its left sibling or from the index node.870 *871 * @return True if the rotation was performed, false otherwise.872 */873 bool try_rotation_from_left(btree_node_t *rnode)874 {875 size_t idx;876 btree_node_t *lnode;877 878 /*879 * If this is root node, the rotation can not be done.880 */881 if (ROOT_NODE(rnode))882 return false;883 884 idx = find_key_by_subtree(rnode->parent, rnode, true);885 if ((int) idx == -1) {886 /*887 * If this node is the leftmost subtree of its parent,888 * the rotation can not be done.889 */890 return false;891 }892 893 lnode = rnode->parent->subtree[idx];894 if (lnode->keys > FILL_FACTOR) {895 rotate_from_left(lnode, rnode, idx);896 return true;897 }898 899 return false;900 }901 902 
/** Rotate in a key from the right sibling or from the index node, if this operation can be done.903 *904 * @param lnode Node into which to add key from its right sibling or from the index node.905 *906 * @return True if the rotation was performed, false otherwise.907 */908 bool try_rotation_from_right(btree_node_t *lnode)909 {910 size_t idx;911 btree_node_t *rnode;912 913 /*914 * If this is root node, the rotation can not be done.915 */916 if (ROOT_NODE(lnode))917 return false;918 919 idx = find_key_by_subtree(lnode->parent, lnode, false);920 if (idx == lnode->parent->keys) {921 /*922 * If this node is the rightmost subtree of its parent,923 * the rotation can not be done.924 */925 return false;926 }927 928 rnode = lnode->parent->subtree[idx + 1];929 if (rnode->keys > FILL_FACTOR) {930 rotate_from_right(lnode, rnode, idx);931 return true;932 }933 934 return false;935 }936 937 971 /** Print B-tree. 938 972 * 939 973 * @param t Print out B-tree. 974 * 940 975 */ 941 976 void btree_print(btree_t *t) … … 944 979 int depth = t->root->depth; 945 980 link_t head, *cur; 946 981 947 982 printf("Printing B-tree:\n"); 948 983 list_initialize(&head); 949 984 list_append(&t->root->bfs_link, &head); 950 985 951 986 /* 952 987 * Use BFS search to print out the tree. 953 988 * Levels are distinguished from one another by node->depth. 954 */ 989 */ 955 990 while (!list_empty(&head)) { 956 991 link_t *hlp; … … 968 1003 depth = node->depth; 969 1004 } 970 1005 971 1006 printf("("); 1007 972 1008 for (i = 0; i < node->keys; i++) { 973 1009 printf("%" PRIu64 "%s", node->key[i], i < node->keys - 1 ? 
"," : ""); … … 976 1012 } 977 1013 } 978 if (node->depth && node->subtree[i]) { 1014 1015 if (node->depth && node->subtree[i]) 979 1016 list_append(&node->subtree[i]->bfs_link, &head); 980 }1017 981 1018 printf(")"); 982 1019 } 1020 983 1021 printf("\n"); 984 1022 … … 990 1028 991 1029 ASSERT(node); 992 1030 993 1031 printf("("); 1032 994 1033 for (i = 0; i < node->keys; i++) 995 1034 printf("%" PRIu64 "%s", node->key[i], i < node->keys - 1 ? "," : ""); 1035 996 1036 printf(")"); 997 1037 } 1038 998 1039 printf("\n"); 999 1040 } -
kernel/generic/src/console/cmd.c
rfe7abd0 rcefb126 108 108 109 109 #ifdef CONFIG_TEST 110 static int cmd_tests(cmd_arg_t *argv);111 static cmd_info_t tests_info = {112 .name = "tests",113 .description = "Print available kernel tests.",114 .func = cmd_tests,115 .argc = 0116 };117 118 110 static char test_buf[MAX_CMDLINE + 1]; 119 111 static int cmd_test(cmd_arg_t *argv); 120 112 static cmd_arg_t test_argv[] = { 121 113 { 122 .type = ARG_TYPE_STRING ,114 .type = ARG_TYPE_STRING_OPTIONAL, 123 115 .buffer = test_buf, 124 116 .len = sizeof(test_buf) … … 127 119 static cmd_info_t test_info = { 128 120 .name = "test", 129 .description = " Run kerneltest.",121 .description = "Print list of kernel tests or run a test.", 130 122 .func = cmd_test, 131 123 .argc = 1, … … 509 501 &zone_info, 510 502 #ifdef CONFIG_TEST 511 &tests_info,512 503 &test_info, 513 504 &bench_info, … … 1066 1057 1067 1058 #ifdef CONFIG_TEST 1068 /** Command for printing kernel tests list.1069 *1070 * @param argv Ignored.1071 *1072 * return Always 1.1073 */1074 int cmd_tests(cmd_arg_t *argv)1075 {1076 size_t len = 0;1077 test_t *test;1078 for (test = tests; test->name != NULL; test++) {1079 if (str_length(test->name) > len)1080 len = str_length(test->name);1081 }1082 1083 for (test = tests; test->name != NULL; test++)1084 printf("%-*s %s%s\n", len, test->name, test->desc, (test->safe ? "" : " (unsafe)"));1085 1086 printf("%-*s Run all safe tests\n", len, "*");1087 return 1;1088 }1089 1090 1059 static bool run_test(const test_t *test) 1091 1060 { … … 1193 1162 } 1194 1163 1195 /** Command for returning kernel tests 1164 static void list_tests(void) 1165 { 1166 size_t len = 0; 1167 test_t *test; 1168 1169 for (test = tests; test->name != NULL; test++) { 1170 if (str_length(test->name) > len) 1171 len = str_length(test->name); 1172 } 1173 1174 for (test = tests; test->name != NULL; test++) 1175 printf("%-*s %s%s\n", len, test->name, test->desc, (test->safe ? 
"" : " (unsafe)")); 1176 1177 printf("%-*s Run all safe tests\n", len, "*"); 1178 } 1179 1180 /** Command for listing and running kernel tests 1196 1181 * 1197 1182 * @param argv Argument vector. 1198 1183 * 1199 1184 * return Always 1. 1185 * 1200 1186 */ 1201 1187 int cmd_test(cmd_arg_t *argv) … … 1211 1197 } 1212 1198 } 1213 } else {1199 } else if (str_cmp((char *) argv->buffer, "") != 0) { 1214 1200 bool fnd = false; 1215 1201 … … 1224 1210 if (!fnd) 1225 1211 printf("Unknown test\n"); 1226 } 1212 } else 1213 list_tests(); 1227 1214 1228 1215 return 1; -
kernel/generic/src/console/console.c
rfe7abd0 rcefb126 294 294 stdout->op->write(stdout, ch, silent); 295 295 else { 296 /* The character is just in the kernel log */ 296 /* 297 * No standard output routine defined yet. 298 * The character is still stored in the kernel log 299 * for possible future output. 300 * 301 * The early_putchar() function is used to output 302 * the character for low-level debugging purposes. 303 * Note that the early_putc() function might be 304 * a no-op on certain hardware configurations. 305 * 306 */ 307 early_putchar(ch); 308 297 309 if (klog_stored < klog_len) 298 310 klog_stored++; -
kernel/generic/src/cpu/cpu.c
rfe7abd0 rcefb126 49 49 #include <print.h> 50 50 #include <sysinfo/sysinfo.h> 51 #include <arch/cycle.h> 51 52 52 53 cpu_t *cpus; … … 94 95 CPU->tlb_active = true; 95 96 97 CPU->idle = false; 98 CPU->last_cycle = get_cycle(); 99 CPU->idle_cycles = 0; 100 CPU->busy_cycles = 0; 101 96 102 cpu_identify(); 97 103 cpu_arch_init(); … … 113 119 /** @} 114 120 */ 115 -
kernel/generic/src/debug/debug.c
rfe7abd0 rcefb126 1 1 /* 2 * Copyright (c) 20 01-2004 Jakub Jermar2 * Copyright (c) 2010 Martin Decky 3 3 * All rights reserved. 4 4 * … … 27 27 */ 28 28 29 #include <test.h> 30 #include <arch.h> 31 #include <atomic.h> 29 /** @addtogroup genericdebug 30 * @{ 31 */ 32 33 /** 34 * @file 35 * @brief Kernel instrumentation functions. 36 */ 37 38 #ifdef CONFIG_TRACE 39 40 #include <debug.h> 41 #include <symtab.h> 42 #include <errno.h> 32 43 #include <print.h> 33 #include <proc/thread.h>34 44 35 #include <synch/rwlock.h> 36 37 #define READERS 50 38 #define WRITERS 50 39 40 static rwlock_t rwlock; 41 42 static void writer(void *arg) 45 void __cyg_profile_func_enter(void *fn, void *call_site) 43 46 { 44 TPRINTF("Trying to lock rwlock for writing....\n");47 const char *fn_sym = symtab_fmt_name_lookup((uintptr_t) fn); 45 48 46 rwlock_write_lock(&rwlock);47 rwlock_write_unlock(&rwlock);49 const char *call_site_sym; 50 uintptr_t call_site_off; 48 51 49 TPRINTF("Trying to lock rwlock for reading....\n"); 50 51 rwlock_read_lock(&rwlock); 52 rwlock_read_unlock(&rwlock); 52 if (symtab_name_lookup((uintptr_t) call_site, &call_site_sym, 53 &call_site_off) == EOK) 54 printf("%s+%" PRIp "->%s\n", call_site_sym, call_site_off, 55 fn_sym); 56 else 57 printf("->%s\n", fn_sym); 53 58 } 54 59 55 const char *test_rwlock2(void)60 void __cyg_profile_func_exit(void *fn, void *call_site) 56 61 { 57 thread_t *thrd;62 const char *fn_sym = symtab_fmt_name_lookup((uintptr_t) fn); 58 63 59 rwlock_initialize(&rwlock); 64 const char *call_site_sym; 65 uintptr_t call_site_off; 60 66 61 rwlock_read_lock(&rwlock); 62 rwlock_read_lock(&rwlock); 63 rwlock_read_lock(&rwlock); 64 rwlock_read_lock(&rwlock); 65 66 thrd = thread_create(writer, NULL, TASK, 0, "writer", false); 67 if (thrd) 68 thread_ready(thrd); 67 if (symtab_name_lookup((uintptr_t) call_site, &call_site_sym, 68 &call_site_off) == EOK) 69 printf("%s+%" PRIp "<-%s\n", call_site_sym, call_site_off, 70 fn_sym); 69 71 else 70 return "Could not 
create thread"; 71 72 thread_sleep(1); 73 74 rwlock_read_unlock(&rwlock); 75 rwlock_read_unlock(&rwlock); 76 rwlock_read_unlock(&rwlock); 77 rwlock_read_unlock(&rwlock); 78 79 thread_join(thrd); 80 thread_detach(thrd); 81 82 return NULL; 72 printf("<-%s\n", fn_sym); 83 73 } 74 75 #endif /* CONFIG_TRACE */ 76 77 /** @} 78 */ -
kernel/generic/src/debug/panic.c
rfe7abd0 rcefb126 1 1 /* 2 * Copyright (c) 20 06 Josef Cejka2 * Copyright (c) 2010 Jakub Jermar 3 3 * All rights reserved. 4 4 * … … 27 27 */ 28 28 29 /** @addtogroup libc29 /** @addtogroup genericdebug 30 30 * @{ 31 31 */ … … 33 33 */ 34 34 35 #ifndef LIBC_LIMITS_H_ 36 #define LIBC_LIMITS_H_ 35 #include <panic.h> 36 #include <print.h> 37 #include <stacktrace.h> 38 #include <func.h> 39 #include <typedefs.h> 40 #include <mm/as.h> 41 #include <stdarg.h> 42 #include <interrupt.h> 37 43 38 #include <stdint.h> 39 #include <libarch/limits.h> 44 void panic_common(panic_category_t cat, istate_t *istate, int access, 45 uintptr_t address, const char *fmt, ...) 46 { 47 va_list args; 40 48 41 /* char */ 42 #define SCHAR_MIN MIN_INT8 43 #define SCHAR_MAX MAX_INT8 44 #define UCHAR_MIN MIN_UINT8 45 #define UCHAR_MAX MAX_UINT8 49 silent = false; 46 50 47 #ifdef __CHAR_UNSIGNED__ 48 #define CHAR_MIN UCHAR_MIN 49 #define CHAR_MAX UCHAR_MAX 50 #else 51 #define CHAR_MIN SCHAR_MIN 52 #define CHAR_MAX SCHAR_MAX 53 #endif 51 printf("\nKERNEL PANIC "); 52 if (CPU) 53 printf("ON cpu%d ", CPU->id); 54 printf("DUE TO "); 54 55 55 /* short int */ 56 #define SHRT_MIN MIN_INT16 57 #define SHRT_MAX MAX_INT16 58 #define USHRT_MIN MIN_UINT16 59 #define USHRT_MAX MAX_UINT16 56 va_start(args, fmt); 57 if (cat == PANIC_ASSERT) { 58 printf("A FAILED ASSERTION:\n"); 59 vprintf(fmt, args); 60 printf("\n"); 61 } else if (cat == PANIC_BADTRAP) { 62 printf("BAD TRAP %ld.\n", address); 63 } else if (cat == PANIC_MEMTRAP) { 64 printf("A BAD MEMORY ACCESS WHILE "); 65 if (access == PF_ACCESS_READ) 66 printf("LOADING FROM"); 67 else if (access == PF_ACCESS_WRITE) 68 printf("STORING TO"); 69 else if (access == PF_ACCESS_EXEC) 70 printf("BRANCHING TO"); 71 else 72 printf("REFERENCING"); 73 printf(" ADDRESS %p.\n", address); 74 } else { 75 printf("THE FOLLOWING REASON:\n"); 76 vprintf(fmt, args); 77 } 78 va_end(args); 60 79 61 /* int */ 62 #define INT_MIN MIN_INT32 63 #define INT_MAX MAX_INT32 64 #define UINT_MIN 
MIN_UINT32 65 #define UINT_MAX MAX_UINT32 80 printf("\n"); 66 81 67 /* long long int */ 68 #define LLONG_MIN MIN_INT64 69 #define LLONG_MAX MAX_INT64 70 #define ULLONG_MIN MIN_UINT64 71 #define ULLONG_MAX MAX_UINT64 82 if (istate) { 83 istate_decode(istate); 84 printf("\n"); 85 } 72 86 73 /* off64_t */ 74 #define OFF64_MIN MIN_INT64 75 #define OFF64_MAX MAX_INT64 76 77 /* aoff64_t */ 78 #define AOFF64_MIN MIN_UINT64 79 #define AOFF64_MAX MAX_UINT64 80 81 #endif 87 stack_trace(); 88 halt(); 89 } 82 90 83 91 /** @} -
kernel/generic/src/debug/stacktrace.c
rfe7abd0 rcefb126 37 37 #include <typedefs.h> 38 38 #include <symtab.h> 39 #include <print.h> 39 40 40 41 #define STACK_FRAMES_MAX 20 … … 51 52 ops->symbol_resolve(pc, &symbol, &offset)) { 52 53 if (offset) 53 printf("%p: %s+% p()\n", fp, symbol, offset);54 printf("%p: %s+%" PRIp "()\n", fp, symbol, offset); 54 55 else 55 56 printf("%p: %s()\n", fp, symbol); -
kernel/generic/src/debug/symtab.c
rfe7abd0 rcefb126 46 46 /** Get name of a symbol that seems most likely to correspond to address. 47 47 * 48 * @param addr 49 * @param name 50 * @param offset 48 * @param addr Address. 49 * @param name Place to store pointer to the symbol name. 50 * @param offset Place to store offset from the symbol address. 51 51 * 52 52 * @return Zero on success or negative error code, ENOENT if not found, … … 83 83 /** Lookup symbol by address and format for display. 84 84 * 85 * Returns name of closest corresponding symbol, "Not found" if none exists 86 * or "N/A" if no symbol information is available. 85 * Returns name of closest corresponding symbol, 86 * "unknown" if none exists and "N/A" if no symbol 87 * information is available. 87 88 * 88 89 * @param addr Address. … … 101 102 return name; 102 103 case ENOENT: 103 return " Not found";104 return "unknown"; 104 105 default: 105 106 return "N/A"; -
kernel/generic/src/interrupt/interrupt.c
rfe7abd0 rcefb126 99 99 void exc_dispatch(unsigned int n, istate_t *istate) 100 100 { 101 ASSERT(CPU); 102 101 103 #if (IVT_ITEMS > 0) 102 104 ASSERT(n < IVT_ITEMS); … … 110 112 } 111 113 114 /* Account CPU usage if it has waked up from sleep */ 115 irq_spinlock_lock(&CPU->lock, false); 116 if (CPU->idle) { 117 uint64_t now = get_cycle(); 118 CPU->idle_cycles += now - CPU->last_cycle; 119 CPU->last_cycle = now; 120 CPU->idle = false; 121 } 122 irq_spinlock_unlock(&CPU->lock, false); 123 112 124 uint64_t begin_cycle = get_cycle(); 113 125 … … 131 143 uint64_t end_cycle = get_cycle(); 132 144 145 irq_spinlock_lock(&exctbl_lock, false); 133 146 exc_table[n].cycles += end_cycle - begin_cycle; 134 147 exc_table[n].count++; 148 irq_spinlock_unlock(&exctbl_lock, false); 135 149 136 150 /* Do not charge THREAD for exception cycles */ -
kernel/generic/src/ipc/sysipc.c
rfe7abd0 rcefb126 1150 1150 return (unative_t) rc; 1151 1151 1152 LOG("sys_ipc_connect_kbox(%" PRIu64 ") \n", taskid_arg.value);1152 LOG("sys_ipc_connect_kbox(%" PRIu64 ")", taskid_arg.value); 1153 1153 1154 1154 return ipc_connect_kbox(taskid_arg.value); -
kernel/generic/src/lib/sort.c
rfe7abd0 rcefb126 27 27 */ 28 28 29 /** @addtogroup generic 29 /** @addtogroup generic 30 30 * @{ 31 31 */ … … 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Sorting functions. 36 36 * 37 37 * This files contains functions implementing several sorting 38 * algorithms (e.g. quick sort and bubble sort). 39 */ 40 38 * algorithms (e.g. quick sort and gnome sort). 39 * 40 */ 41 41 42 #include <mm/slab.h> 42 43 #include <memstr.h> 43 44 #include <sort.h> 44 #include <panic.h> 45 46 #define EBUFSIZE 32 47 48 void _qsort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b), void *tmp, void *pivot); 49 void _bubblesort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b), void *slot); 50 51 /** Quicksort wrapper 52 * 53 * This is only a wrapper that takes care of memory allocations for storing 54 * the pivot and temporary elements for generic quicksort algorithm. 55 * 56 * This function _can_ sleep 57 * 58 * @param data Pointer to data to be sorted. 59 * @param n Number of elements to be sorted. 60 * @param e_size Size of one element. 61 * @param cmp Comparator function. 62 * 63 */ 64 void qsort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b)) 65 { 66 uint8_t buf_tmp[EBUFSIZE]; 67 uint8_t buf_pivot[EBUFSIZE]; 68 void * tmp = buf_tmp; 69 void * pivot = buf_pivot; 70 71 if (e_size > EBUFSIZE) { 72 pivot = (void *) malloc(e_size, 0); 73 tmp = (void *) malloc(e_size, 0); 45 46 /** Immediate buffer size. 47 * 48 * For small buffer sizes avoid doing malloc() 49 * and use the stack. 50 * 51 */ 52 #define IBUF_SIZE 32 53 54 /** Array accessor. 55 * 56 */ 57 #define INDEX(buf, i, elem_size) ((buf) + (i) * (elem_size)) 58 59 /** Gnome sort 60 * 61 * Apply generic gnome sort algorithm on supplied data, 62 * using pre-allocated buffer. 63 * 64 * @param data Pointer to data to be sorted. 65 * @param cnt Number of elements to be sorted. 66 * @param elem_size Size of one element. 67 * @param cmp Comparator function. 
68 * @param arg 3rd argument passed to cmp. 69 * @param slot Pointer to scratch memory buffer 70 * elem_size bytes long. 71 * 72 */ 73 static void _gsort(void *data, size_t cnt, size_t elem_size, sort_cmp_t cmp, 74 void *arg, void *slot) 75 { 76 size_t i = 0; 77 78 while (i < cnt) { 79 if ((i > 0) && 80 (cmp(INDEX(data, i, elem_size), 81 INDEX(data, i - 1, elem_size), arg) == -1)) { 82 memcpy(slot, INDEX(data, i, elem_size), elem_size); 83 memcpy(INDEX(data, i, elem_size), INDEX(data, i - 1, elem_size), 84 elem_size); 85 memcpy(INDEX(data, i - 1, elem_size), slot, elem_size); 86 i--; 87 } else 88 i++; 74 89 } 75 76 _qsort(data, n, e_size, cmp, tmp, pivot);77 78 if (e_size > EBUFSIZE) {79 free(tmp);80 free(pivot);81 }82 90 } 83 91 84 92 /** Quicksort 85 93 * 86 * Apply generic quicksort algorithm on supplied data, using pre-allocated buffers. 87 * 88 * @param data Pointer to data to be sorted. 89 * @param n Number of elements to be sorted. 90 * @param e_size Size of one element. 91 * @param cmp Comparator function. 92 * @param tmp Pointer to scratch memory buffer e_size bytes long. 93 * @param pivot Pointer to scratch memory buffer e_size bytes long. 94 * 95 */ 96 void _qsort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b), void *tmp, void *pivot) 97 { 98 if (n > 4) { 99 unsigned int i = 0, j = n - 1; 100 101 memcpy(pivot, data, e_size); 102 103 while (1) { 104 while ((cmp(data + i * e_size, pivot) < 0) && (i < n)) 94 * Apply generic quicksort algorithm on supplied data, 95 * using pre-allocated buffers. 96 * 97 * @param data Pointer to data to be sorted. 98 * @param cnt Number of elements to be sorted. 99 * @param elem_size Size of one element. 100 * @param cmp Comparator function. 101 * @param arg 3rd argument passed to cmp. 102 * @param slot Pointer to scratch memory buffer 103 * elem_size bytes long. 104 * @param pivot Pointer to scratch memory buffer 105 * elem_size bytes long. 
106 * 107 */ 108 static void _qsort(void *data, size_t cnt, size_t elem_size, sort_cmp_t cmp, 109 void *arg, void *slot, void *pivot) 110 { 111 if (cnt > 4) { 112 size_t i = 0; 113 size_t j = cnt - 1; 114 115 memcpy(pivot, data, elem_size); 116 117 while (true) { 118 while ((cmp(INDEX(data, i, elem_size), pivot, arg) < 0) && (i < cnt)) 105 119 i++; 106 while ((cmp(data + j * e_size, pivot) >= 0) && (j > 0)) 120 121 while ((cmp(INDEX(data, j, elem_size), pivot, arg) >= 0) && (j > 0)) 107 122 j--; 108 123 109 124 if (i < j) { 110 memcpy(tmp, data + i * e_size, e_size); 111 memcpy(data + i * e_size, data + j * e_size, e_size); 112 memcpy(data + j * e_size, tmp, e_size); 113 } else { 125 memcpy(slot, INDEX(data, i, elem_size), elem_size); 126 memcpy(INDEX(data, i, elem_size), INDEX(data, j, elem_size), 127 elem_size); 128 memcpy(INDEX(data, j, elem_size), slot, elem_size); 129 } else 114 130 break; 115 }116 131 } 117 118 _qsort(data, j + 1, e_size, cmp, tmp, pivot); 119 _qsort(data + (j + 1) * e_size, n - j - 1, e_size, cmp, tmp, pivot); 132 133 _qsort(data, j + 1, elem_size, cmp, arg, slot, pivot); 134 _qsort(INDEX(data, j + 1, elem_size), cnt - j - 1, elem_size, 135 cmp, arg, slot, pivot); 136 } else 137 _gsort(data, cnt, elem_size, cmp, arg, slot); 138 } 139 140 /** Gnome sort wrapper 141 * 142 * This is only a wrapper that takes care of memory 143 * allocations for storing the slot element for generic 144 * gnome sort algorithm. 145 * 146 * This function can sleep. 147 * 148 * @param data Pointer to data to be sorted. 149 * @param cnt Number of elements to be sorted. 150 * @param elem_size Size of one element. 151 * @param cmp Comparator function. 152 * @param arg 3rd argument passed to cmp. 153 * 154 * @return True if sorting succeeded. 
155 * 156 */ 157 bool gsort(void *data, size_t cnt, size_t elem_size, sort_cmp_t cmp, void *arg) 158 { 159 uint8_t ibuf_slot[IBUF_SIZE]; 160 void *slot; 161 162 if (elem_size > IBUF_SIZE) { 163 slot = (void *) malloc(elem_size, 0); 164 if (!slot) 165 return false; 166 } else 167 slot = (void *) ibuf_slot; 168 169 _gsort(data, cnt, elem_size, cmp, arg, slot); 170 171 if (elem_size > IBUF_SIZE) 172 free(slot); 173 174 return true; 175 } 176 177 /** Quicksort wrapper 178 * 179 * This is only a wrapper that takes care of memory 180 * allocations for storing the pivot and temporary elements 181 * for generic quicksort algorithm. 182 * 183 * This function can sleep. 184 * 185 * @param data Pointer to data to be sorted. 186 * @param cnt Number of elements to be sorted. 187 * @param elem_size Size of one element. 188 * @param cmp Comparator function. 189 * @param arg 3rd argument passed to cmp. 190 * 191 * @return True if sorting succeeded. 192 * 193 */ 194 bool qsort(void *data, size_t cnt, size_t elem_size, sort_cmp_t cmp, void *arg) 195 { 196 uint8_t ibuf_slot[IBUF_SIZE]; 197 uint8_t ibuf_pivot[IBUF_SIZE]; 198 void *slot; 199 void *pivot; 200 201 if (elem_size > IBUF_SIZE) { 202 slot = (void *) malloc(elem_size, 0); 203 if (!slot) 204 return false; 205 206 pivot = (void *) malloc(elem_size, 0); 207 if (!pivot) { 208 free(slot); 209 return false; 210 } 120 211 } else { 121 _bubblesort(data, n, e_size, cmp, tmp); 212 slot = (void *) ibuf_slot; 213 pivot = (void *) ibuf_pivot; 122 214 } 123 } 124 125 /** Bubblesort wrapper 126 * 127 * This is only a wrapper that takes care of memory allocation for storing 128 * the slot element for generic bubblesort algorithm. 129 * 130 * @param data Pointer to data to be sorted. 131 * @param n Number of elements to be sorted. 132 * @param e_size Size of one element. 133 * @param cmp Comparator function. 
134 * 135 */ 136 void bubblesort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b)) 137 { 138 uint8_t buf_slot[EBUFSIZE]; 139 void * slot = buf_slot; 140 141 if (e_size > EBUFSIZE) { 142 slot = (void *) malloc(e_size, 0); 143 } 144 145 _bubblesort(data, n, e_size, cmp, slot); 146 147 if (e_size > EBUFSIZE) { 215 216 _qsort(data, cnt, elem_size, cmp, arg, slot, pivot); 217 218 if (elem_size > IBUF_SIZE) { 219 free(pivot); 148 220 free(slot); 149 221 } 150 } 151 152 /** Bubblesort 153 * 154 * Apply generic bubblesort algorithm on supplied data, using pre-allocated buffer. 155 * 156 * @param data Pointer to data to be sorted. 157 * @param n Number of elements to be sorted. 158 * @param e_size Size of one element. 159 * @param cmp Comparator function. 160 * @param slot Pointer to scratch memory buffer e_size bytes long. 161 * 162 */ 163 void _bubblesort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b), void *slot) 164 { 165 bool done = false; 166 void * p; 167 168 while (!done) { 169 done = true; 170 for (p = data; p < data + e_size * (n - 1); p = p + e_size) { 171 if (cmp(p, p + e_size) == 1) { 172 memcpy(slot, p, e_size); 173 memcpy(p, p + e_size, e_size); 174 memcpy(p + e_size, slot, e_size); 175 done = false; 176 } 177 } 178 } 179 180 } 181 182 /* 183 * Comparator returns 1 if a > b, 0 if a == b, -1 if a < b 184 */ 185 int int_cmp(void * a, void * b) 186 { 187 return (* (int *) a > * (int*)b) ? 1 : (*(int *)a < * (int *)b) ? -1 : 0; 188 } 189 190 int uint8_t_cmp(void * a, void * b) 191 { 192 return (* (uint8_t *) a > * (uint8_t *)b) ? 1 : (*(uint8_t *)a < * (uint8_t *)b) ? -1 : 0; 193 } 194 195 int uint16_t_cmp(void * a, void * b) 196 { 197 return (* (uint16_t *) a > * (uint16_t *)b) ? 1 : (*(uint16_t *)a < * (uint16_t *)b) ? -1 : 0; 198 } 199 200 int uint32_t_cmp(void * a, void * b) 201 { 202 return (* (uint32_t *) a > * (uint32_t *)b) ? 1 : (*(uint32_t *)a < * (uint32_t *)b) ? 
-1 : 0; 222 223 return true; 203 224 } 204 225 -
kernel/generic/src/main/kinit.c
rfe7abd0 rcefb126 107 107 if (config.cpu_count > 1) { 108 108 waitq_initialize(&ap_completion_wq); 109 109 110 /* 110 111 * Create the kmp thread and wait for its completion. … … 124 125 thread_join(thread); 125 126 thread_detach(thread); 126 } 127 128 if (config.cpu_count > 1) { 127 128 /* 129 * For each CPU, create its load balancing thread. 130 */ 129 131 size_t i; 130 132 131 /*132 * For each CPU, create its load balancing thread.133 */134 133 for (i = 0; i < config.cpu_count; i++) { 135 134 thread = thread_create(kcpulb, NULL, TASK, THREAD_FLAG_WIRED, "kcpulb", true); … … 181 180 if (init.tasks[i].addr % FRAME_SIZE) { 182 181 printf("init[%" PRIs "].addr is not frame aligned\n", i); 182 programs[i].task = NULL; 183 183 continue; 184 184 } -
kernel/generic/src/main/main.c
rfe7abd0 rcefb126 104 104 105 105 /** Lowest safe stack virtual address. */ 106 uintptr_t stack_safe = 0; 106 uintptr_t stack_safe = 0; 107 107 108 108 /* … … 113 113 */ 114 114 static void main_bsp_separated_stack(void); 115 115 116 #ifdef CONFIG_SMP 116 117 static void main_ap_separated_stack(void); 117 118 #endif 118 119 119 #define CONFIG_STACK_SIZE 120 #define CONFIG_STACK_SIZE ((1 << STACK_FRAMES) * STACK_SIZE) 120 121 121 122 /** Main kernel routine for bootstrap CPU. … … 130 131 * 131 132 */ 132 void main_bsp(void)133 void __attribute__((no_instrument_function)) main_bsp(void) 133 134 { 134 135 config.cpu_count = 1; … … 151 152 init.tasks[i].size, config.stack_size); 152 153 } 153 154 154 155 /* Avoid placing stack on top of boot allocations. */ 155 156 if (ballocs.size) { … … 170 171 } 171 172 172 173 173 /** Main kernel routine for bootstrap CPU using new stack. 174 174 * … … 176 176 * 177 177 */ 178 void main_bsp_separated_stack(void) 178 void main_bsp_separated_stack(void) 179 179 { 180 180 /* Keep this the first thing. */ … … 183 183 version_print(); 184 184 185 LOG("\nconfig.base=% #" PRIp "config.kernel_size=%" PRIs186 "\nconfig.stack_base=% #" PRIp "config.stack_size=%" PRIs,185 LOG("\nconfig.base=%p config.kernel_size=%" PRIs 186 "\nconfig.stack_base=%p config.stack_size=%" PRIs, 187 187 config.base, config.kernel_size, config.stack_base, 188 188 config.stack_size); … … 194 194 * commands. 195 195 */ 196 LOG_EXEC(kconsole_init());196 kconsole_init(); 197 197 #endif 198 198 … … 201 201 * starts adding its own handlers 202 202 */ 203 LOG_EXEC(exc_init());203 exc_init(); 204 204 205 205 /* 206 206 * Memory management subsystems initialization. 207 207 */ 208 LOG_EXEC(arch_pre_mm_init());209 LOG_EXEC(frame_init());208 arch_pre_mm_init(); 209 frame_init(); 210 210 211 211 /* Initialize at least 1 memory segment big enough for slab to work. 
*/ 212 LOG_EXEC(slab_cache_init());213 LOG_EXEC(sysinfo_init());214 LOG_EXEC(btree_init());215 LOG_EXEC(as_init());216 LOG_EXEC(page_init());217 LOG_EXEC(tlb_init());218 LOG_EXEC(ddi_init());219 LOG_EXEC(tasklet_init());220 LOG_EXEC(arch_post_mm_init());221 LOG_EXEC(arch_pre_smp_init());222 LOG_EXEC(smp_init());212 slab_cache_init(); 213 sysinfo_init(); 214 btree_init(); 215 as_init(); 216 page_init(); 217 tlb_init(); 218 ddi_init(); 219 tasklet_init(); 220 arch_post_mm_init(); 221 arch_pre_smp_init(); 222 smp_init(); 223 223 224 224 /* Slab must be initialized after we know the number of processors. */ 225 LOG_EXEC(slab_enable_cpucache());225 slab_enable_cpucache(); 226 226 227 227 printf("Detected %" PRIs " CPU(s), %" PRIu64" MiB free memory\n", 228 228 config.cpu_count, SIZE2MB(zones_total_size())); 229 230 LOG_EXEC(cpu_init());231 232 LOG_EXEC(calibrate_delay_loop());233 LOG_EXEC(clock_counter_init());234 LOG_EXEC(timeout_init());235 LOG_EXEC(scheduler_init());236 LOG_EXEC(task_init());237 LOG_EXEC(thread_init());238 LOG_EXEC(futex_init());229 230 cpu_init(); 231 232 calibrate_delay_loop(); 233 clock_counter_init(); 234 timeout_init(); 235 scheduler_init(); 236 task_init(); 237 thread_init(); 238 futex_init(); 239 239 240 240 if (init.cnt > 0) { 241 241 size_t i; 242 242 for (i = 0; i < init.cnt; i++) 243 LOG("init[%" PRIs "].addr=% #" PRIp ", init[%" PRIs244 "].size=% #" PRIs, i, init.tasks[i].addr, i,243 LOG("init[%" PRIs "].addr=%p, init[%" PRIs 244 "].size=%" PRIs, i, init.tasks[i].addr, i, 245 245 init.tasks[i].size); 246 246 } else 247 247 printf("No init binaries found.\n"); 248 248 249 LOG_EXEC(ipc_init());250 LOG_EXEC(event_init());251 LOG_EXEC(klog_init());252 LOG_EXEC(stats_init());249 ipc_init(); 250 event_init(); 251 klog_init(); 252 stats_init(); 253 253 254 254 /* … … 266 266 if (!kinit_thread) 267 267 panic("Cannot create kinit thread."); 268 LOG_EXEC(thread_ready(kinit_thread));268 thread_ready(kinit_thread); 269 269 270 270 /* … … 276 276 } 
277 277 278 279 278 #ifdef CONFIG_SMP 279 280 280 /** Main kernel routine for application CPUs. 281 281 * … … 296 296 */ 297 297 config.cpu_active++; 298 298 299 299 /* 300 300 * The THE structure is well defined because ctx.sp is used as stack. … … 311 311 calibrate_delay_loop(); 312 312 arch_post_cpu_init(); 313 313 314 314 the_copy(THE, (the_t *) CPU->stack); 315 315 316 316 /* 317 317 * If we woke kmp up before we left the kernel stack, we could … … 326 326 } 327 327 328 329 328 /** Main kernel routine for application CPUs using new stack. 330 329 * … … 338 337 */ 339 338 timeout_init(); 340 339 341 340 waitq_wakeup(&ap_completion_wq, WAKEUP_FIRST); 342 341 scheduler(); 343 342 /* not reached */ 344 343 } 344 345 345 #endif /* CONFIG_SMP */ 346 346 -
kernel/generic/src/mm/as.c
rfe7abd0 rcefb126 116 116 as_t *AS_KERNEL = NULL; 117 117 118 static unsigned int area_flags_to_page_flags(unsigned int);119 static as_area_t *find_area_and_lock(as_t *, uintptr_t);120 static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *);121 static void sh_info_remove_reference(share_info_t *);122 123 118 static int as_constructor(void *obj, unsigned int flags) 124 119 { … … 239 234 240 235 spinlock_unlock(&asidlock); 236 interrupts_restore(ipl); 237 241 238 242 239 /* … … 266 263 #endif 267 264 268 interrupts_restore(ipl);269 270 265 slab_free(as_slab, as); 271 266 } … … 296 291 if (atomic_predec(&as->refcount) == 0) 297 292 as_destroy(as); 293 } 294 295 /** Check area conflicts with other areas. 296 * 297 * @param as Address space. 298 * @param va Starting virtual address of the area being tested. 299 * @param size Size of the area being tested. 300 * @param avoid_area Do not touch this area. 301 * 302 * @return True if there is no conflict, false otherwise. 303 * 304 */ 305 static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, 306 as_area_t *avoid_area) 307 { 308 ASSERT(mutex_locked(&as->lock)); 309 310 /* 311 * We don't want any area to have conflicts with NULL page. 312 * 313 */ 314 if (overlaps(va, size, NULL, PAGE_SIZE)) 315 return false; 316 317 /* 318 * The leaf node is found in O(log n), where n is proportional to 319 * the number of address space areas belonging to as. 320 * The check for conflicts is then attempted on the rightmost 321 * record in the left neighbour, the leftmost record in the right 322 * neighbour and all records in the leaf node itself. 323 * 324 */ 325 btree_node_t *leaf; 326 as_area_t *area = 327 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 328 if (area) { 329 if (area != avoid_area) 330 return false; 331 } 332 333 /* First, check the two border cases. 
*/ 334 btree_node_t *node = 335 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 336 if (node) { 337 area = (as_area_t *) node->value[node->keys - 1]; 338 339 mutex_lock(&area->lock); 340 341 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 342 mutex_unlock(&area->lock); 343 return false; 344 } 345 346 mutex_unlock(&area->lock); 347 } 348 349 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf); 350 if (node) { 351 area = (as_area_t *) node->value[0]; 352 353 mutex_lock(&area->lock); 354 355 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 356 mutex_unlock(&area->lock); 357 return false; 358 } 359 360 mutex_unlock(&area->lock); 361 } 362 363 /* Second, check the leaf node. */ 364 btree_key_t i; 365 for (i = 0; i < leaf->keys; i++) { 366 area = (as_area_t *) leaf->value[i]; 367 368 if (area == avoid_area) 369 continue; 370 371 mutex_lock(&area->lock); 372 373 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 374 mutex_unlock(&area->lock); 375 return false; 376 } 377 378 mutex_unlock(&area->lock); 379 } 380 381 /* 382 * So far, the area does not conflict with other areas. 383 * Check if it doesn't conflict with kernel address space. 384 * 385 */ 386 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 387 return !overlaps(va, size, 388 KERNEL_ADDRESS_SPACE_START, 389 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 390 } 391 392 return true; 298 393 } 299 394 … … 327 422 return NULL; 328 423 329 ipl_t ipl = interrupts_disable();330 424 mutex_lock(&as->lock); 331 425 332 426 if (!check_area_conflicts(as, base, size, NULL)) { 333 427 mutex_unlock(&as->lock); 334 interrupts_restore(ipl);335 428 return NULL; 336 429 } … … 357 450 358 451 mutex_unlock(&as->lock); 359 interrupts_restore(ipl);360 452 361 453 return area; 454 } 455 456 /** Find address space area and lock it. 457 * 458 * @param as Address space. 459 * @param va Virtual address. 
460 * 461 * @return Locked address space area containing va on success or 462 * NULL on failure. 463 * 464 */ 465 static as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 466 { 467 ASSERT(mutex_locked(&as->lock)); 468 469 btree_node_t *leaf; 470 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 471 if (area) { 472 /* va is the base address of an address space area */ 473 mutex_lock(&area->lock); 474 return area; 475 } 476 477 /* 478 * Search the leaf node and the righmost record of its left neighbour 479 * to find out whether this is a miss or va belongs to an address 480 * space area found there. 481 * 482 */ 483 484 /* First, search the leaf node itself. */ 485 btree_key_t i; 486 487 for (i = 0; i < leaf->keys; i++) { 488 area = (as_area_t *) leaf->value[i]; 489 490 mutex_lock(&area->lock); 491 492 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE)) 493 return area; 494 495 mutex_unlock(&area->lock); 496 } 497 498 /* 499 * Second, locate the left neighbour and test its last record. 500 * Because of its position in the B+tree, it must have base < va. 
501 * 502 */ 503 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 504 if (lnode) { 505 area = (as_area_t *) lnode->value[lnode->keys - 1]; 506 507 mutex_lock(&area->lock); 508 509 if (va < area->base + area->pages * PAGE_SIZE) 510 return area; 511 512 mutex_unlock(&area->lock); 513 } 514 515 return NULL; 362 516 } 363 517 … … 376 530 int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags) 377 531 { 378 ipl_t ipl = interrupts_disable();379 532 mutex_lock(&as->lock); 380 533 … … 386 539 if (!area) { 387 540 mutex_unlock(&as->lock); 388 interrupts_restore(ipl);389 541 return ENOENT; 390 542 } … … 398 550 mutex_unlock(&area->lock); 399 551 mutex_unlock(&as->lock); 400 interrupts_restore(ipl);401 552 return ENOTSUP; 402 553 } … … 410 561 mutex_unlock(&area->lock); 411 562 mutex_unlock(&as->lock); 412 interrupts_restore(ipl);413 563 return ENOTSUP; 414 564 } … … 422 572 mutex_unlock(&area->lock); 423 573 mutex_unlock(&as->lock); 424 interrupts_restore(ipl);425 574 return EPERM; 426 575 } … … 441 590 * 442 591 */ 443 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base +444 pages * PAGE_SIZE, area->pages - pages);592 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, 593 area->base + pages * PAGE_SIZE, area->pages - pages); 445 594 446 595 /* … … 536 685 as_invalidate_translation_cache(as, area->base + 537 686 pages * PAGE_SIZE, area->pages - pages); 538 tlb_shootdown_finalize( );687 tlb_shootdown_finalize(ipl); 539 688 540 689 page_table_unlock(as, false); … … 549 698 mutex_unlock(&area->lock); 550 699 mutex_unlock(&as->lock); 551 interrupts_restore(ipl);552 700 return EADDRNOTAVAIL; 553 701 } … … 558 706 mutex_unlock(&area->lock); 559 707 mutex_unlock(&as->lock); 560 interrupts_restore(ipl);561 708 562 709 return 0; 710 } 711 712 /** Remove reference to address space area share info. 713 * 714 * If the reference count drops to 0, the sh_info is deallocated. 
715 * 716 * @param sh_info Pointer to address space area share info. 717 * 718 */ 719 static void sh_info_remove_reference(share_info_t *sh_info) 720 { 721 bool dealloc = false; 722 723 mutex_lock(&sh_info->lock); 724 ASSERT(sh_info->refcount); 725 726 if (--sh_info->refcount == 0) { 727 dealloc = true; 728 link_t *cur; 729 730 /* 731 * Now walk carefully the pagemap B+tree and free/remove 732 * reference from all frames found there. 733 */ 734 for (cur = sh_info->pagemap.leaf_head.next; 735 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 736 btree_node_t *node 737 = list_get_instance(cur, btree_node_t, leaf_link); 738 btree_key_t i; 739 740 for (i = 0; i < node->keys; i++) 741 frame_free((uintptr_t) node->value[i]); 742 } 743 744 } 745 mutex_unlock(&sh_info->lock); 746 747 if (dealloc) { 748 btree_destroy(&sh_info->pagemap); 749 free(sh_info); 750 } 563 751 } 564 752 … … 573 761 int as_area_destroy(as_t *as, uintptr_t address) 574 762 { 575 ipl_t ipl = interrupts_disable();576 763 mutex_lock(&as->lock); 577 764 … … 579 766 if (!area) { 580 767 mutex_unlock(&as->lock); 581 interrupts_restore(ipl);582 768 return ENOENT; 583 769 } … … 590 776 * Start TLB shootdown sequence. 
591 777 */ 592 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 778 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, 779 area->pages); 593 780 594 781 /* … … 637 824 */ 638 825 as_invalidate_translation_cache(as, area->base, area->pages); 639 tlb_shootdown_finalize( );826 tlb_shootdown_finalize(ipl); 640 827 641 828 page_table_unlock(as, false); … … 659 846 660 847 mutex_unlock(&as->lock); 661 interrupts_restore(ipl);662 848 return 0; 663 849 } … … 690 876 as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask) 691 877 { 692 ipl_t ipl = interrupts_disable();693 878 mutex_lock(&src_as->lock); 694 879 as_area_t *src_area = find_area_and_lock(src_as, src_base); … … 699 884 */ 700 885 mutex_unlock(&src_as->lock); 701 interrupts_restore(ipl);702 886 return ENOENT; 703 887 } … … 711 895 mutex_unlock(&src_area->lock); 712 896 mutex_unlock(&src_as->lock); 713 interrupts_restore(ipl);714 897 return ENOTSUP; 715 898 } … … 728 911 mutex_unlock(&src_area->lock); 729 912 mutex_unlock(&src_as->lock); 730 interrupts_restore(ipl);731 913 return EPERM; 732 914 } … … 777 959 sh_info_remove_reference(sh_info); 778 960 779 interrupts_restore(ipl);780 961 return ENOMEM; 781 962 } … … 794 975 mutex_unlock(&dst_as->lock); 795 976 796 interrupts_restore(ipl);797 798 977 return 0; 799 978 } … … 816 995 }; 817 996 818 ASSERT(interrupts_disabled());819 997 ASSERT(mutex_locked(&area->lock)); 820 998 … … 823 1001 824 1002 return true; 1003 } 1004 1005 /** Convert address space area flags to page flags. 1006 * 1007 * @param aflags Flags of some address space area. 1008 * 1009 * @return Flags to be passed to page_mapping_insert(). 
1010 * 1011 */ 1012 static unsigned int area_flags_to_page_flags(unsigned int aflags) 1013 { 1014 unsigned int flags = PAGE_USER | PAGE_PRESENT; 1015 1016 if (aflags & AS_AREA_READ) 1017 flags |= PAGE_READ; 1018 1019 if (aflags & AS_AREA_WRITE) 1020 flags |= PAGE_WRITE; 1021 1022 if (aflags & AS_AREA_EXEC) 1023 flags |= PAGE_EXEC; 1024 1025 if (aflags & AS_AREA_CACHEABLE) 1026 flags |= PAGE_CACHEABLE; 1027 1028 return flags; 825 1029 } 826 1030 … … 844 1048 unsigned int page_flags = area_flags_to_page_flags(flags); 845 1049 846 ipl_t ipl = interrupts_disable();847 1050 mutex_lock(&as->lock); 848 1051 … … 850 1053 if (!area) { 851 1054 mutex_unlock(&as->lock); 852 interrupts_restore(ipl);853 1055 return ENOENT; 854 1056 } … … 859 1061 mutex_unlock(&area->lock); 860 1062 mutex_unlock(&as->lock); 861 interrupts_restore(ipl);862 1063 return ENOTSUP; 863 1064 } … … 889 1090 * 890 1091 */ 891 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 1092 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, 1093 area->pages); 892 1094 893 1095 /* … … 936 1138 */ 937 1139 as_invalidate_translation_cache(as, area->base, area->pages); 938 tlb_shootdown_finalize( );1140 tlb_shootdown_finalize(ipl); 939 1141 940 1142 page_table_unlock(as, false); … … 978 1180 mutex_unlock(&area->lock); 979 1181 mutex_unlock(&as->lock); 980 interrupts_restore(ipl);981 1182 982 1183 return 0; … … 1184 1385 } 1185 1386 1186 /** Convert address space area flags to page flags. 1187 * 1188 * @param aflags Flags of some address space area. 1189 * 1190 * @return Flags to be passed to page_mapping_insert(). 
1191 * 1192 */ 1193 unsigned int area_flags_to_page_flags(unsigned int aflags) 1194 { 1195 unsigned int flags = PAGE_USER | PAGE_PRESENT; 1196 1197 if (aflags & AS_AREA_READ) 1198 flags |= PAGE_READ; 1199 1200 if (aflags & AS_AREA_WRITE) 1201 flags |= PAGE_WRITE; 1202 1203 if (aflags & AS_AREA_EXEC) 1204 flags |= PAGE_EXEC; 1205 1206 if (aflags & AS_AREA_CACHEABLE) 1207 flags |= PAGE_CACHEABLE; 1208 1209 return flags; 1210 } 1387 1211 1388 1212 1389 /** Compute flags for virtual address translation subsytem. … … 1219 1396 unsigned int as_area_get_flags(as_area_t *area) 1220 1397 { 1221 ASSERT(interrupts_disabled());1222 1398 ASSERT(mutex_locked(&area->lock)); 1223 1399 … … 1296 1472 /** Test whether page tables are locked. 1297 1473 * 1298 * @param as 1299 * 1300 * @return 1301 * 1474 * @param as Address space where the page tables belong. 1475 * 1476 * @return True if the page tables belonging to the address soace 1477 * are locked, otherwise false. 1302 1478 */ 1303 1479 bool page_table_locked(as_t *as) … … 1309 1485 } 1310 1486 1311 1312 /** Find address space area and lock it.1313 *1314 * @param as Address space.1315 * @param va Virtual address.1316 *1317 * @return Locked address space area containing va on success or1318 * NULL on failure.1319 *1320 */1321 as_area_t *find_area_and_lock(as_t *as, uintptr_t va)1322 {1323 ASSERT(interrupts_disabled());1324 ASSERT(mutex_locked(&as->lock));1325 1326 btree_node_t *leaf;1327 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);1328 if (area) {1329 /* va is the base address of an address space area */1330 mutex_lock(&area->lock);1331 return area;1332 }1333 1334 /*1335 * Search the leaf node and the righmost record of its left neighbour1336 * to find out whether this is a miss or va belongs to an address1337 * space area found there.1338 *1339 */1340 1341 /* First, search the leaf node itself. 
*/1342 btree_key_t i;1343 1344 for (i = 0; i < leaf->keys; i++) {1345 area = (as_area_t *) leaf->value[i];1346 1347 mutex_lock(&area->lock);1348 1349 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE))1350 return area;1351 1352 mutex_unlock(&area->lock);1353 }1354 1355 /*1356 * Second, locate the left neighbour and test its last record.1357 * Because of its position in the B+tree, it must have base < va.1358 *1359 */1360 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);1361 if (lnode) {1362 area = (as_area_t *) lnode->value[lnode->keys - 1];1363 1364 mutex_lock(&area->lock);1365 1366 if (va < area->base + area->pages * PAGE_SIZE)1367 return area;1368 1369 mutex_unlock(&area->lock);1370 }1371 1372 return NULL;1373 }1374 1375 /** Check area conflicts with other areas.1376 *1377 * @param as Address space.1378 * @param va Starting virtual address of the area being tested.1379 * @param size Size of the area being tested.1380 * @param avoid_area Do not touch this area.1381 *1382 * @return True if there is no conflict, false otherwise.1383 *1384 */1385 bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,1386 as_area_t *avoid_area)1387 {1388 ASSERT(interrupts_disabled());1389 ASSERT(mutex_locked(&as->lock));1390 1391 /*1392 * We don't want any area to have conflicts with NULL page.1393 *1394 */1395 if (overlaps(va, size, NULL, PAGE_SIZE))1396 return false;1397 1398 /*1399 * The leaf node is found in O(log n), where n is proportional to1400 * the number of address space areas belonging to as.1401 * The check for conflicts is then attempted on the rightmost1402 * record in the left neighbour, the leftmost record in the right1403 * neighbour and all records in the leaf node itself.1404 *1405 */1406 btree_node_t *leaf;1407 as_area_t *area =1408 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);1409 if (area) {1410 if (area != avoid_area)1411 return false;1412 }1413 1414 /* First, check the two border cases. 
*/1415 btree_node_t *node =1416 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);1417 if (node) {1418 area = (as_area_t *) node->value[node->keys - 1];1419 1420 mutex_lock(&area->lock);1421 1422 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {1423 mutex_unlock(&area->lock);1424 return false;1425 }1426 1427 mutex_unlock(&area->lock);1428 }1429 1430 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);1431 if (node) {1432 area = (as_area_t *) node->value[0];1433 1434 mutex_lock(&area->lock);1435 1436 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {1437 mutex_unlock(&area->lock);1438 return false;1439 }1440 1441 mutex_unlock(&area->lock);1442 }1443 1444 /* Second, check the leaf node. */1445 btree_key_t i;1446 for (i = 0; i < leaf->keys; i++) {1447 area = (as_area_t *) leaf->value[i];1448 1449 if (area == avoid_area)1450 continue;1451 1452 mutex_lock(&area->lock);1453 1454 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {1455 mutex_unlock(&area->lock);1456 return false;1457 }1458 1459 mutex_unlock(&area->lock);1460 }1461 1462 /*1463 * So far, the area does not conflict with other areas.1464 * Check if it doesn't conflict with kernel address space.1465 *1466 */1467 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {1468 return !overlaps(va, size,1469 KERNEL_ADDRESS_SPACE_START,1470 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);1471 }1472 1473 return true;1474 }1475 1476 1487 /** Return size of the address space area with given base. 
1477 1488 * … … 1486 1497 size_t size; 1487 1498 1488 ipl_t ipl = interrupts_disable();1489 1499 page_table_lock(AS, true); 1490 1500 as_area_t *src_area = find_area_and_lock(AS, base); … … 1497 1507 1498 1508 page_table_unlock(AS, true); 1499 interrupts_restore(ipl);1500 1509 return size; 1501 1510 } … … 1988 1997 } 1989 1998 1990 /** Remove reference to address space area share info.1991 *1992 * If the reference count drops to 0, the sh_info is deallocated.1993 *1994 * @param sh_info Pointer to address space area share info.1995 *1996 */1997 void sh_info_remove_reference(share_info_t *sh_info)1998 {1999 bool dealloc = false;2000 2001 mutex_lock(&sh_info->lock);2002 ASSERT(sh_info->refcount);2003 2004 if (--sh_info->refcount == 0) {2005 dealloc = true;2006 link_t *cur;2007 2008 /*2009 * Now walk carefully the pagemap B+tree and free/remove2010 * reference from all frames found there.2011 */2012 for (cur = sh_info->pagemap.leaf_head.next;2013 cur != &sh_info->pagemap.leaf_head; cur = cur->next) {2014 btree_node_t *node2015 = list_get_instance(cur, btree_node_t, leaf_link);2016 btree_key_t i;2017 2018 for (i = 0; i < node->keys; i++)2019 frame_free((uintptr_t) node->value[i]);2020 }2021 2022 }2023 mutex_unlock(&sh_info->lock);2024 2025 if (dealloc) {2026 btree_destroy(&sh_info->pagemap);2027 free(sh_info);2028 }2029 }2030 2031 1999 /* 2032 2000 * Address space related syscalls. … … 2070 2038 void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize) 2071 2039 { 2072 ipl_t ipl = interrupts_disable();2073 2040 mutex_lock(&as->lock); 2074 2041 … … 2114 2081 2115 2082 mutex_unlock(&as->lock); 2116 interrupts_restore(ipl);2117 2083 2118 2084 *obuf = info; … … 2127 2093 void as_print(as_t *as) 2128 2094 { 2129 ipl_t ipl = interrupts_disable();2130 2095 mutex_lock(&as->lock); 2131 2096 … … 2150 2115 2151 2116 mutex_unlock(&as->lock); 2152 interrupts_restore(ipl);2153 2117 } 2154 2118 -
kernel/generic/src/mm/frame.c
rfe7abd0 rcefb126 1234 1234 { 1235 1235 #ifdef __32_BITS__ 1236 printf("# base address frames flags free frames busy frames\n"); 1237 printf("-- ------------ ------------ -------- ------------ ------------\n"); 1236 printf("[nr] [base addr] [frames ] [flags ] [free frames ] [busy frames ]\n"); 1238 1237 #endif 1239 1238 1240 1239 #ifdef __64_BITS__ 1241 printf("# base address frames flags free frames busy frames\n"); 1242 printf("-- -------------------- ------------ -------- ------------ ------------\n"); 1240 printf("[nr] [base address ] [frames ] [flags ] [free frames ] [busy frames ]\n"); 1243 1241 #endif 1244 1242 … … 1273 1271 bool available = zone_flags_available(flags); 1274 1272 1275 printf("%- 2" PRIs, i);1273 printf("%-4" PRIs, i); 1276 1274 1277 1275 #ifdef __32_BITS__ 1278 printf(" 1276 printf(" %10p", base); 1279 1277 #endif 1280 1278 1281 1279 #ifdef __64_BITS__ 1282 printf(" 1280 printf(" %18p", base); 1283 1281 #endif 1284 1282 … … 1289 1287 1290 1288 if (available) 1291 printf("%1 2" PRIs " %12" PRIs,1289 printf("%14" PRIs " %14" PRIs, 1292 1290 free_count, busy_count); 1293 1291 -
kernel/generic/src/mm/page.c
rfe7abd0 rcefb126 38 38 * mappings between virtual addresses and physical addresses. 39 39 * Functions here are mere wrappers that call the real implementation. 40 * They however, define the single interface. 40 * They however, define the single interface. 41 41 * 42 42 */ … … 118 118 unsigned int flags) 119 119 { 120 ASSERT(interrupts_disabled());121 120 ASSERT(page_table_locked(as)); 122 121 … … 142 141 void page_mapping_remove(as_t *as, uintptr_t page) 143 142 { 144 ASSERT(interrupts_disabled());145 143 ASSERT(page_table_locked(as)); 146 144 … … 167 165 pte_t *page_mapping_find(as_t *as, uintptr_t page) 168 166 { 169 ASSERT(interrupts_disabled());170 167 ASSERT(page_table_locked(as)); 171 168 -
kernel/generic/src/mm/slab.c
rfe7abd0 rcefb126 829 829 void slab_print_list(void) 830 830 { 831 printf("slab name size pages obj/pg slabs cached allocated" 832 " ctl\n"); 833 printf("---------------- -------- ------ -------- ------ ------ ---------" 834 " ---\n"); 831 printf("[slab name ] [size ] [pages ] [obj/pg] [slabs ]" 832 " [cached] [alloc ] [ctl]\n"); 835 833 836 834 size_t skip = 0; … … 887 885 irq_spinlock_unlock(&slab_cache_lock, true); 888 886 889 printf("%-1 6s %8" PRIs " %6u %8" PRIs " %6ld %6ld %9ld %-3s\n",887 printf("%-18s %8" PRIs " %8u %8" PRIs " %8ld %8ld %8ld %-5s\n", 890 888 name, size, (1 << order), objects, allocated_slabs, 891 889 cached_objs, allocated_objs, -
kernel/generic/src/mm/tlb.c
rfe7abd0 rcefb126 73 73 * to all other processors. 74 74 * 75 * This function must be called with interrupts disabled. 76 * 77 * @param type Type describing scope of shootdown. 78 * @param asid Address space, if required by type. 79 * @param page Virtual page address, if required by type. 75 * @param type Type describing scope of shootdown. 76 * @param asid Address space, if required by type. 77 * @param page Virtual page address, if required by type. 80 78 * @param count Number of pages, if required by type. 81 79 * 80 * @return The interrupt priority level as it existed prior to this call. 81 * 82 82 */ 83 voidtlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,83 ipl_t tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, 84 84 uintptr_t page, size_t count) 85 85 { 86 ipl_t ipl = interrupts_disable(); 86 87 CPU->tlb_active = false; 87 88 irq_spinlock_lock(&tlblock, false); … … 89 90 size_t i; 90 91 for (i = 0; i < config.cpu_count; i++) { 91 cpu_t *cpu;92 93 92 if (i == CPU->id) 94 93 continue; 95 94 96 cpu = &cpus[i]; 95 cpu_t *cpu = &cpus[i]; 96 97 97 irq_spinlock_lock(&cpu->lock, false); 98 98 if (cpu->tlb_messages_count == TLB_MESSAGE_QUEUE_LEN) { … … 122 122 123 123 busy_wait: 124 for (i = 0; i < config.cpu_count; i++) 124 for (i = 0; i < config.cpu_count; i++) { 125 125 if (cpus[i].tlb_active) 126 126 goto busy_wait; 127 } 128 129 return ipl; 127 130 } 128 131 129 132 /** Finish TLB shootdown sequence. 130 133 * 134 * @param ipl Previous interrupt priority level. 135 * 131 136 */ 132 void tlb_shootdown_finalize( void)137 void tlb_shootdown_finalize(ipl_t ipl) 133 138 { 134 139 irq_spinlock_unlock(&tlblock, false); 135 140 CPU->tlb_active = true; 141 interrupts_restore(ipl); 136 142 } 137 143 -
kernel/generic/src/proc/program.c
rfe7abd0 rcefb126 145 145 146 146 program_loader = image_addr; 147 LOG("Registered program loader at 0x%" PRIp "\n",147 LOG("Registered program loader at 0x%" PRIp, 148 148 image_addr); 149 149 -
kernel/generic/src/proc/scheduler.c
rfe7abd0 rcefb126 193 193 * This improves energy saving and hyperthreading. 194 194 */ 195 196 /* Mark CPU as it was idle this clock tick */197 195 irq_spinlock_lock(&CPU->lock, false); 198 196 CPU->idle = true; 199 197 irq_spinlock_unlock(&CPU->lock, false); 200 201 198 interrupts_enable(); 199 202 200 /* 203 201 * An interrupt might occur right now and wake up a thread. … … 386 384 as_t *old_as = AS; 387 385 388 ASSERT( !THREAD || irq_spinlock_locked(&THREAD->lock));386 ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock))); 389 387 ASSERT(CPU != NULL); 390 388 … … 457 455 irq_spinlock_unlock(&THREAD->sleep_queue->lock, false); 458 456 459 /*460 * Check for possible requests for out-of-context461 * invocation.462 *463 */464 if (THREAD->call_me) {465 THREAD->call_me(THREAD->call_me_with);466 THREAD->call_me = NULL;467 THREAD->call_me_with = NULL;468 }469 470 457 irq_spinlock_unlock(&THREAD->lock, false); 471 472 458 break; 473 459 -
kernel/generic/src/proc/the.c
rfe7abd0 rcefb126 33 33 /** 34 34 * @file 35 * @brief 35 * @brief THE structure functions. 36 36 * 37 37 * This file contains functions to manage the THE structure. … … 43 43 44 44 #include <arch.h> 45 46 45 47 46 /** Initialize THE structure -
kernel/generic/src/proc/thread.c
rfe7abd0 rcefb126 48 48 #include <synch/spinlock.h> 49 49 #include <synch/waitq.h> 50 #include <synch/rwlock.h>51 50 #include <cpu.h> 52 51 #include <str.h> … … 329 328 thread->flags = flags; 330 329 thread->state = Entering; 331 thread->call_me = NULL;332 thread->call_me_with = NULL;333 330 334 331 timeout_initialize(&thread->sleep_timeout); … … 343 340 thread->detached = false; 344 341 waitq_initialize(&thread->join_wq); 345 346 thread->rwlock_holder_type = RWLOCK_NONE;347 342 348 343 thread->task = task; … … 583 578 584 579 (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING); 585 }586 587 /** Register thread out-of-context invocation588 *589 * Register a function and its argument to be executed590 * on next context switch to the current thread. Must591 * be called with interrupts disabled.592 *593 * @param call_me Out-of-context function.594 * @param call_me_with Out-of-context function argument.595 *596 */597 void thread_register_call_me(void (* call_me)(void *), void *call_me_with)598 {599 irq_spinlock_lock(&THREAD->lock, false);600 THREAD->call_me = call_me;601 THREAD->call_me_with = call_me_with;602 irq_spinlock_unlock(&THREAD->lock, false);603 580 } 604 581 -
kernel/generic/src/smp/ipi.c
rfe7abd0 rcefb126 47 47 * @param ipi Message to broadcast. 48 48 * 49 * @bug The decision whether to actually send the IPI must be based50 * on a different criterion. The current version has51 * problems when some of the detected CPUs are marked52 * disabled in machine configuration.53 49 */ 54 50 void ipi_broadcast(int ipi) … … 60 56 */ 61 57 62 if ( (config.cpu_active > 1) && (config.cpu_active == config.cpu_count))58 if (config.cpu_count > 1) 63 59 ipi_broadcast_arch(ipi); 64 60 } -
kernel/generic/src/synch/futex.c
rfe7abd0 rcefb126 37 37 38 38 #include <synch/futex.h> 39 #include <synch/ rwlock.h>39 #include <synch/mutex.h> 40 40 #include <synch/spinlock.h> 41 41 #include <synch/synch.h> … … 65 65 66 66 /** 67 * Read-write lockprotecting global futex hash table.67 * Mutex protecting global futex hash table. 68 68 * It is also used to serialize access to all futex_t structures. 69 69 * Must be acquired before the task futex B+tree lock. 70 70 */ 71 static rwlock_t futex_ht_lock;71 static mutex_t futex_ht_lock; 72 72 73 73 /** Futex hash table. */ … … 84 84 void futex_init(void) 85 85 { 86 rwlock_initialize(&futex_ht_lock);86 mutex_initialize(&futex_ht_lock, MUTEX_PASSIVE); 87 87 hash_table_create(&futex_ht, FUTEX_HT_SIZE, 1, &futex_ht_ops); 88 88 } … … 113 113 uintptr_t paddr; 114 114 pte_t *t; 115 ipl_t ipl;116 115 int rc; 117 116 118 ipl = interrupts_disable();119 120 117 /* 121 118 * Find physical address of futex counter. … … 125 122 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 126 123 page_table_unlock(AS, true); 127 interrupts_restore(ipl);128 124 return (unative_t) ENOENT; 129 125 } … … 131 127 page_table_unlock(AS, true); 132 128 133 interrupts_restore(ipl);134 135 129 futex = futex_find(paddr); 136 130 … … 156 150 uintptr_t paddr; 157 151 pte_t *t; 158 ipl_t ipl;159 160 ipl = interrupts_disable();161 152 162 153 /* … … 167 158 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 168 159 page_table_unlock(AS, true); 169 interrupts_restore(ipl);170 160 return (unative_t) ENOENT; 171 161 } … … 173 163 page_table_unlock(AS, true); 174 164 175 interrupts_restore(ipl);176 177 165 futex = futex_find(paddr); 178 166 … … 200 188 * or allocate new one if it does not exist already. 201 189 */ 202 rwlock_read_lock(&futex_ht_lock);190 mutex_lock(&futex_ht_lock); 203 191 item = hash_table_find(&futex_ht, &paddr); 204 192 if (item) { … … 212 200 /* 213 201 * The futex is new to the current task. 
214 * However, we only have read access.215 * Gain write access and try again.202 * Upgrade its reference count and put it to the 203 * current task's B+tree of known futexes. 216 204 */ 217 mutex_unlock(&TASK->futexes_lock);218 goto gain_write_access;205 futex->refcount++; 206 btree_insert(&TASK->futexes, paddr, futex, leaf); 219 207 } 220 208 mutex_unlock(&TASK->futexes_lock); 221 222 rwlock_read_unlock(&futex_ht_lock);223 209 } else { 224 gain_write_access: 210 futex = (futex_t *) malloc(sizeof(futex_t), 0); 211 futex_initialize(futex); 212 futex->paddr = paddr; 213 hash_table_insert(&futex_ht, &paddr, &futex->ht_link); 214 225 215 /* 226 * Upgrade to writer is not currently supported,227 * therefore, it is necessary to release the read lock228 * and reacquire it as a writer.216 * This is the first task referencing the futex. 217 * It can be directly inserted into its 218 * B+tree of known futexes. 229 219 */ 230 rwlock_read_unlock(&futex_ht_lock); 231 232 rwlock_write_lock(&futex_ht_lock); 233 /* 234 * Avoid possible race condition by searching 235 * the hash table once again with write access. 236 */ 237 item = hash_table_find(&futex_ht, &paddr); 238 if (item) { 239 futex = hash_table_get_instance(item, futex_t, ht_link); 240 241 /* 242 * See if this futex is known to the current task. 243 */ 244 mutex_lock(&TASK->futexes_lock); 245 if (!btree_search(&TASK->futexes, paddr, &leaf)) { 246 /* 247 * The futex is new to the current task. 248 * Upgrade its reference count and put it to the 249 * current task's B+tree of known futexes. 250 */ 251 futex->refcount++; 252 btree_insert(&TASK->futexes, paddr, futex, 253 leaf); 254 } 255 mutex_unlock(&TASK->futexes_lock); 256 257 rwlock_write_unlock(&futex_ht_lock); 258 } else { 259 futex = (futex_t *) malloc(sizeof(futex_t), 0); 260 futex_initialize(futex); 261 futex->paddr = paddr; 262 hash_table_insert(&futex_ht, &paddr, &futex->ht_link); 263 264 /* 265 * This is the first task referencing the futex. 
266 * It can be directly inserted into its 267 * B+tree of known futexes. 268 */ 269 mutex_lock(&TASK->futexes_lock); 270 btree_insert(&TASK->futexes, paddr, futex, NULL); 271 mutex_unlock(&TASK->futexes_lock); 272 273 rwlock_write_unlock(&futex_ht_lock); 274 } 220 mutex_lock(&TASK->futexes_lock); 221 btree_insert(&TASK->futexes, paddr, futex, NULL); 222 mutex_unlock(&TASK->futexes_lock); 223 275 224 } 225 mutex_unlock(&futex_ht_lock); 276 226 277 227 return futex; … … 324 274 link_t *cur; 325 275 326 rwlock_write_lock(&futex_ht_lock);276 mutex_lock(&futex_ht_lock); 327 277 mutex_lock(&TASK->futexes_lock); 328 278 … … 344 294 345 295 mutex_unlock(&TASK->futexes_lock); 346 rwlock_write_unlock(&futex_ht_lock);296 mutex_unlock(&futex_ht_lock); 347 297 } 348 298 -
kernel/generic/src/sysinfo/stats.c
rfe7abd0 rcefb126 124 124 stats_cpus[i].active = cpus[i].active; 125 125 stats_cpus[i].frequency_mhz = cpus[i].frequency_mhz; 126 stats_cpus[i].busy_ ticks = cpus[i].busy_ticks;127 stats_cpus[i].idle_ ticks = cpus[i].idle_ticks;126 stats_cpus[i].busy_cycles = cpus[i].busy_cycles; 127 stats_cpus[i].idle_cycles = cpus[i].idle_cycles; 128 128 129 129 irq_spinlock_unlock(&cpus[i].lock, true); -
kernel/generic/src/time/clock.c
rfe7abd0 rcefb126 57 57 #include <mm/frame.h> 58 58 #include <ddi/ddi.h> 59 #include <arch/cycle.h> 59 60 60 61 /* Pointer to variable with uptime */ … … 125 126 } 126 127 128 static void cpu_update_accounting(void) 129 { 130 irq_spinlock_lock(&CPU->lock, false); 131 uint64_t now = get_cycle(); 132 CPU->busy_cycles += now - CPU->last_cycle; 133 CPU->last_cycle = now; 134 irq_spinlock_unlock(&CPU->lock, false); 135 } 136 127 137 /** Clock routine 128 138 * … … 136 146 size_t missed_clock_ticks = CPU->missed_clock_ticks; 137 147 138 /* Account lost ticks to CPU usage */ 139 if (CPU->idle) 140 CPU->idle_ticks += missed_clock_ticks + 1; 141 else 142 CPU->busy_ticks += missed_clock_ticks + 1; 143 144 CPU->idle = false; 148 /* Account CPU usage */ 149 cpu_update_accounting(); 145 150 146 151 /* … … 151 156 size_t i; 152 157 for (i = 0; i <= missed_clock_ticks; i++) { 158 /* Update counters and accounting */ 153 159 clock_update_counters(); 160 cpu_update_accounting(); 161 154 162 irq_spinlock_lock(&CPU->timeoutlock, false); 155 163
Note:
See TracChangeset
for help on using the changeset viewer.