Changeset 99c2c69e in mainline for kernel/generic/src
- Timestamp: 2013-09-13T00:36:30Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 67fbd5e
- Parents: 7f84430 (diff), 11d41be5 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src
- Files: 1 deleted, 28 edited
kernel/generic/src/adt/bitmap.c
r7f84430 r99c2c69e 35 35 * 36 36 * This file implements bitmap ADT and provides functions for 37 * setting and clearing ranges of bits. 37 * setting and clearing ranges of bits and for finding ranges 38 * of unset bits. 38 39 */ 39 40 … … 44 45 #include <macros.h> 45 46 46 #define ALL_ONES 0xff 47 #define ALL_ZEROES 0x00 47 #define ALL_ONES 0xff 48 #define ALL_ZEROES 0x00 49 50 /** Unchecked version of bitmap_get() 51 * 52 * This version of bitmap_get() does not do any boundary checks. 53 * 54 * @param bitmap Bitmap to access. 55 * @param element Element to access. 56 * 57 * @return Bit value of the element in the bitmap. 58 * 59 */ 60 static unsigned int bitmap_get_fast(bitmap_t *bitmap, size_t element) 61 { 62 size_t byte = element / BITMAP_ELEMENT; 63 uint8_t mask = 1 << (element & BITMAP_REMAINER); 64 65 return !!((bitmap->bits)[byte] & mask); 66 } 67 68 /** Get bitmap size 69 * 70 * Return the size (in bytes) required for the bitmap. 71 * 72 * @param elements Number bits stored in bitmap. 73 * 74 * @return Size (in bytes) required for the bitmap. 75 * 76 */ 77 size_t bitmap_size(size_t elements) 78 { 79 size_t size = elements / BITMAP_ELEMENT; 80 81 if ((elements % BITMAP_ELEMENT) != 0) 82 size++; 83 84 return size; 85 } 48 86 49 87 /** Initialize bitmap. … … 51 89 * No portion of the bitmap is set or cleared by this function. 52 90 * 53 * @param bitmap Bitmap structure. 54 * @param map Address of the memory used to hold the map. 55 * @param bits Number of bits stored in bitmap. 56 */ 57 void bitmap_initialize(bitmap_t *bitmap, uint8_t *map, size_t bits) 58 { 59 bitmap->map = map; 60 bitmap->bits = bits; 91 * @param bitmap Bitmap structure. 92 * @param elements Number of bits stored in bitmap. 93 * @param data Address of the memory used to hold the map. 94 * The optional 2nd level bitmap follows the 1st 95 * level bitmap. 96 * 97 */ 98 void bitmap_initialize(bitmap_t *bitmap, size_t elements, void *data) 99 { 100 bitmap->elements = elements; 101 bitmap->bits = (uint8_t *) data; 102 bitmap->next_fit = 0; 61 103 } 62 104 63 105 /** Set range of bits. 64 106 * 65 * @param bitmap Bitmap structure. 66 * @param start Starting bit. 67 * @param bits Number of bits to set. 68 */ 69 void bitmap_set_range(bitmap_t *bitmap, size_t start, size_t bits) 70 { 71 size_t i = 0; 72 size_t aligned_start; 73 size_t lub; /* leading unaligned bits */ 74 size_t amb; /* aligned middle bits */ 75 size_t tab; /* trailing aligned bits */ 76 77 ASSERT(start + bits <= bitmap->bits); 78 79 aligned_start = ALIGN_UP(start, 8); 80 lub = min(aligned_start - start, bits); 81 amb = bits > lub ? bits - lub : 0; 82 tab = amb % 8; 83 84 if (!bits) 107 * @param bitmap Bitmap structure. 108 * @param start Starting bit. 109 * @param count Number of bits to set. 110 * 111 */ 112 void bitmap_set_range(bitmap_t *bitmap, size_t start, size_t count) 113 { 114 ASSERT(start + count <= bitmap->elements); 115 116 if (count == 0) 85 117 return; 86 87 if (start + bits < aligned_start) { 118 119 size_t start_byte = start / BITMAP_ELEMENT; 120 size_t aligned_start = ALIGN_UP(start, BITMAP_ELEMENT); 121 122 /* Leading unaligned bits */ 123 size_t lub = min(aligned_start - start, count); 124 125 /* Aligned middle bits */ 126 size_t amb = (count > lub) ? (count - lub) : 0; 127 128 /* Trailing aligned bits */ 129 size_t tab = amb % BITMAP_ELEMENT; 130 131 if (start + count < aligned_start) { 88 132 /* Set bits in the middle of byte. 
*/ 89 bitmap->map[start / 8] |= ((1 << lub) - 1) << (start & 7); 133 bitmap->bits[start_byte] |= 134 ((1 << lub) - 1) << (start & BITMAP_REMAINER); 90 135 return; 91 136 } … … 93 138 if (lub) { 94 139 /* Make sure to set any leading unaligned bits. */ 95 bitmap->map[start / 8] |= ~((1 << (8 - lub)) - 1); 96 } 97 for (i = 0; i < amb / 8; i++) { 140 bitmap->bits[start_byte] |= 141 ~((1 << (BITMAP_ELEMENT - lub)) - 1); 142 } 143 144 size_t i; 145 146 for (i = 0; i < amb / BITMAP_ELEMENT; i++) { 98 147 /* The middle bits can be set byte by byte. */ 99 bitmap->map[aligned_start / 8 + i] = ALL_ONES; 100 } 148 bitmap->bits[aligned_start / BITMAP_ELEMENT + i] = 149 ALL_ONES; 150 } 151 101 152 if (tab) { 102 153 /* Make sure to set any trailing aligned bits. */ 103 bitmap-> map[aligned_start / 8 + i] |= (1 << tab) - 1;104 }105 154 bitmap->bits[aligned_start / BITMAP_ELEMENT + i] |= 155 (1 << tab) - 1; 156 } 106 157 } 107 158 108 159 /** Clear range of bits. 109 160 * 110 * @param bitmap Bitmap structure. 111 * @param start Starting bit. 112 * @param bits Number of bits to clear. 113 */ 114 void bitmap_clear_range(bitmap_t *bitmap, size_t start, size_t bits) 115 { 116 size_t i = 0; 117 size_t aligned_start; 118 size_t lub; /* leading unaligned bits */ 119 size_t amb; /* aligned middle bits */ 120 size_t tab; /* trailing aligned bits */ 121 122 ASSERT(start + bits <= bitmap->bits); 123 124 aligned_start = ALIGN_UP(start, 8); 125 lub = min(aligned_start - start, bits); 126 amb = bits > lub ? bits - lub : 0; 127 tab = amb % 8; 128 129 if (!bits) 161 * @param bitmap Bitmap structure. 162 * @param start Starting bit. 163 * @param count Number of bits to clear. 164 * 165 */ 166 void bitmap_clear_range(bitmap_t *bitmap, size_t start, size_t count) 167 { 168 ASSERT(start + count <= bitmap->elements); 169 170 if (count == 0) 130 171 return; 131 132 if (start + bits < aligned_start) { 172 173 size_t start_byte = start / BITMAP_ELEMENT; 174 size_t aligned_start = ALIGN_UP(start, BITMAP_ELEMENT); 175 176 /* Leading unaligned bits */ 177 size_t lub = min(aligned_start - start, count); 178 179 /* Aligned middle bits */ 180 size_t amb = (count > lub) ? (count - lub) : 0; 181 182 /* Trailing aligned bits */ 183 size_t tab = amb % BITMAP_ELEMENT; 184 185 if (start + count < aligned_start) { 133 186 /* Set bits in the middle of byte */ 134 bitmap->map[start / 8] &= ~(((1 << lub) - 1) << (start & 7)); 187 bitmap->bits[start_byte] &= 188 ~(((1 << lub) - 1) << (start & BITMAP_REMAINER)); 135 189 return; 136 190 } 137 191 138 192 if (lub) { 139 193 /* Make sure to clear any leading unaligned bits. */ 140 bitmap->map[start / 8] &= (1 << (8 - lub)) - 1; 141 } 142 for (i = 0; i < amb / 8; i++) { 194 bitmap->bits[start_byte] &= 195 (1 << (BITMAP_ELEMENT - lub)) - 1; 196 } 197 198 size_t i; 199 200 for (i = 0; i < amb / BITMAP_ELEMENT; i++) { 143 201 /* The middle bits can be cleared byte by byte. */ 144 bitmap->map[aligned_start / 8 + i] = ALL_ZEROES; 145 } 202 bitmap->bits[aligned_start / BITMAP_ELEMENT + i] = 203 ALL_ZEROES; 204 } 205 146 206 if (tab) { 147 207 /* Make sure to clear any trailing aligned bits. */ 148 bitmap->map[aligned_start / 8 + i] &= ~((1 << tab) - 1); 149 } 150 208 bitmap->bits[aligned_start / BITMAP_ELEMENT + i] &= 209 ~((1 << tab) - 1); 210 } 211 212 bitmap->next_fit = start_byte; 151 213 } 152 214 153 215 /** Copy portion of one bitmap into another bitmap. 154 216 * 155 * @param dst Destination bitmap. 156 * @param src Source bitmap. 157 * @param bits Number of bits to copy. 
158 */ 159 void bitmap_copy(bitmap_t *dst, bitmap_t *src, size_t bits) 160 { 217 * @param dst Destination bitmap. 218 * @param src Source bitmap. 219 * @param count Number of bits to copy. 220 * 221 */ 222 void bitmap_copy(bitmap_t *dst, bitmap_t *src, size_t count) 223 { 224 ASSERT(count <= dst->elements); 225 ASSERT(count <= src->elements); 226 161 227 size_t i; 162 228 163 ASSERT(bits <= dst->bits); 164 ASSERT(bits <= src->bits); 165 166 for (i = 0; i < bits / 8; i++) 167 dst->map[i] = src->map[i]; 168 169 if (bits % 8) { 170 bitmap_clear_range(dst, i * 8, bits % 8); 171 dst->map[i] |= src->map[i] & ((1 << (bits % 8)) - 1); 172 } 229 for (i = 0; i < count / BITMAP_ELEMENT; i++) 230 dst->bits[i] = src->bits[i]; 231 232 if (count % BITMAP_ELEMENT) { 233 bitmap_clear_range(dst, i * BITMAP_ELEMENT, 234 count % BITMAP_ELEMENT); 235 dst->bits[i] |= src->bits[i] & 236 ((1 << (count % BITMAP_ELEMENT)) - 1); 237 } 238 } 239 240 static int constraint_satisfy(size_t index, size_t base, size_t constraint) 241 { 242 return (((base + index) & constraint) == 0); 243 } 244 245 /** Find a continuous zero bit range 246 * 247 * Find a continuous zero bit range in the bitmap. The address 248 * computed as the sum of the index of the first zero bit and 249 * the base argument needs to be compliant with the constraint 250 * (those bits that are set in the constraint cannot be set in 251 * the address). 252 * 253 * If the index argument is non-NULL, the continuous zero range 254 * is set and the index of the first bit is stored to index. 255 * Otherwise the bitmap stays untouched. 256 * 257 * @param bitmap Bitmap structure. 258 * @param count Number of continuous zero bits to find. 259 * @param base Address of the first bit in the bitmap. 260 * @param prefered Prefered address to start searching from. 261 * @param constraint Constraint for the address of the first zero bit. 262 * @param index Place to store the index of the first zero 263 * bit. Can be NULL (in which case the bitmap 264 * is not modified). 265 * 266 * @return Non-zero if a continuous range of zero bits satisfying 267 * the constraint has been found. 268 * @return Zero otherwise. 269 * 270 */ 271 int bitmap_allocate_range(bitmap_t *bitmap, size_t count, size_t base, 272 size_t prefered, size_t constraint, size_t *index) 273 { 274 if (count == 0) 275 return false; 276 277 size_t size = bitmap_size(bitmap->elements); 278 size_t next_fit = bitmap->next_fit; 279 280 /* 281 * Adjust the next-fit value according to the address 282 * the caller prefers to start the search at. 
283 */ 284 if ((prefered > base) && (prefered < base + bitmap->elements)) { 285 size_t prefered_fit = (prefered - base) / BITMAP_ELEMENT; 286 287 if (prefered_fit > next_fit) 288 next_fit = prefered_fit; 289 } 290 291 for (size_t pos = 0; pos < size; pos++) { 292 size_t byte = (next_fit + pos) % size; 293 294 /* Skip if the current byte has all bits set */ 295 if (bitmap->bits[byte] == ALL_ONES) 296 continue; 297 298 size_t byte_bit = byte * BITMAP_ELEMENT; 299 300 for (size_t bit = 0; bit < BITMAP_ELEMENT; bit++) { 301 size_t i = byte_bit + bit; 302 303 if (i >= bitmap->elements) 304 break; 305 306 if (!constraint_satisfy(i, base, constraint)) 307 continue; 308 309 if (!bitmap_get_fast(bitmap, i)) { 310 size_t continuous = 1; 311 312 for (size_t j = 1; j < count; j++) { 313 if ((i + j < bitmap->elements) && 314 (!bitmap_get_fast(bitmap, i + j))) 315 continuous++; 316 else 317 break; 318 } 319 320 if (continuous == count) { 321 if (index != NULL) { 322 bitmap_set_range(bitmap, i, count); 323 bitmap->next_fit = i / BITMAP_ELEMENT; 324 *index = i; 325 } 326 327 return true; 328 } else 329 i += continuous; 330 } 331 } 332 } 333 334 return false; 173 335 } 174 336 -
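The hunk above replaces the byte-oriented bitmap ADT with one that records its element count, keeps a next-fit hint, and can search for constrained runs of zero bits. A minimal sketch of how the reworked API fits together, based only on the signatures visible in this diff (the element count and the static backing buffer are illustrative assumptions):

```c
#include <adt/bitmap.h>

#define EXAMPLE_ELEMENTS  1024

/* Backing store; bitmap_size(EXAMPLE_ELEMENTS) bytes is the exact
 * requirement, a worst-case static buffer is used here. */
static uint8_t example_data[EXAMPLE_ELEMENTS / 8 + 1];

static void bitmap_example(void)
{
	bitmap_t bitmap;

	/* New argument order: element count first, then the data area. */
	bitmap_initialize(&bitmap, EXAMPLE_ELEMENTS, example_data);
	bitmap_clear_range(&bitmap, 0, EXAMPLE_ELEMENTS);

	/* Mark bits 10..19 as used. */
	bitmap_set_range(&bitmap, 10, 10);

	/*
	 * Find 16 continuous zero bits whose position (offset by the
	 * base argument, here 0) has none of the constraint bits set,
	 * i.e. a 16-bit-aligned range. A non-NULL index pointer makes
	 * the function also set the range it found.
	 */
	size_t index;
	if (bitmap_allocate_range(&bitmap, 16, 0, 0, 0x0f, &index))
		bitmap_clear_range(&bitmap, index, 16);
}
```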
kernel/generic/src/adt/btree.c
r7f84430 r99c2c69e 1031 1031 1032 1032 printf("Printing list of leaves:\n"); 1033 list_foreach(t->leaf_list, cur) { 1034 btree_node_t *node; 1035 1036 node = list_get_instance(cur, btree_node_t, leaf_link); 1037 1033 list_foreach(t->leaf_list, leaf_link, btree_node_t, node) { 1038 1034 ASSERT(node); 1039 1035 -
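This hunk is the first of many in this merge (cmd.c, console.c, kconsole.c, ipc.c, ipcrsc.c, ra.c, as.c and backend_anon.c below follow the same pattern) that switch to a four-argument list_foreach taking the link member name, the instance type, and a typed iterator variable, so the explicit list_get_instance() call disappears. Before/after, using the names from this hunk:

```c
/* Old pattern: iterate raw links, recover the instance by hand. */
list_foreach(t->leaf_list, cur) {
	btree_node_t *node =
	    list_get_instance(cur, btree_node_t, leaf_link);
	ASSERT(node);
	/* ... */
}

/* New pattern: the macro declares the typed iterator itself. */
list_foreach(t->leaf_list, leaf_link, btree_node_t, node) {
	ASSERT(node);
	/* ... */
}
```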
kernel/generic/src/adt/hash_table.c
r7f84430 r99c2c69e 117 117 ASSERT(chain < h->entries); 118 118 119 list_foreach(h->entry[chain], cur) { 119 link_t *cur = list_first(&h->entry[chain]); 120 while (cur != NULL) { 120 121 if (h->op->compare(key, h->max_keys, cur)) { 121 122 /* … … 124 125 return cur; 125 126 } 127 cur = list_next(cur, &h->entry[chain]); 126 128 } 127 129 -
kernel/generic/src/adt/list.c
r7f84430 r99c2c69e 101 101 unsigned int count = 0; 102 102 103 list_foreach(*list, link) { 103 link_t *link = list_first(list); 104 while (link != NULL) { 104 105 count++; 106 link = list_next(link, list); 105 107 } 106 108 -
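Where the typed macro does not fit because the code works with raw links (the hash table compares link_t pointers, list_count() merely counts them), the merge switches to an explicit cursor instead. Both helpers return NULL past the end of the list, which is what terminates the loop:

```c
/* Explicit cursor iteration as introduced by this changeset. */
link_t *cur = list_first(&h->entry[chain]);
while (cur != NULL) {
	if (h->op->compare(key, h->max_keys, cur))
		return cur;

	cur = list_next(cur, &h->entry[chain]);
}
```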
kernel/generic/src/console/cmd.c
r7f84430 r99c2c69e 656 656 657 657 size_t len = 0; 658 list_foreach(cmd_list, cur) { 659 cmd_info_t *hlp; 660 hlp = list_get_instance(cur, cmd_info_t, link); 661 658 list_foreach(cmd_list, link, cmd_info_t, hlp) { 662 659 spinlock_lock(&hlp->lock); 663 660 if (str_length(hlp->name) > len) … … 672 669 } 673 670 674 list_foreach(cmd_list, cur) { 675 cmd_info_t *hlp; 676 hlp = list_get_instance(cur, cmd_info_t, link); 677 671 list_foreach(cmd_list, link, cmd_info_t, hlp) { 678 672 spinlock_lock(&hlp->lock); 679 673 printf("%-*s %s\n", _len, hlp->name, hlp->description); … … 912 906 spinlock_lock(&cmd_lock); 913 907 914 list_foreach(cmd_list, cur) { 915 cmd_info_t *hlp; 916 917 hlp = list_get_instance(cur, cmd_info_t, link); 908 list_foreach(cmd_list, link, cmd_info_t, hlp) { 918 909 spinlock_lock(&hlp->lock); 919 910 -
kernel/generic/src/console/console.c
r7f84430 r99c2c69e 125 125 static void stdout_write(outdev_t *dev, wchar_t ch) 126 126 { 127 list_foreach(dev->list, cur) { 128 outdev_t *sink = list_get_instance(cur, outdev_t, link); 127 list_foreach(dev->list, link, outdev_t, sink) { 129 128 if ((sink) && (sink->op->write)) 130 129 sink->op->write(sink, ch); … … 134 133 static void stdout_redraw(outdev_t *dev) 135 134 { 136 list_foreach(dev->list, cur) { 137 outdev_t *sink = list_get_instance(cur, outdev_t, link); 135 list_foreach(dev->list, link, outdev_t, sink) { 138 136 if ((sink) && (sink->op->redraw)) 139 137 sink->op->redraw(sink); -
kernel/generic/src/console/kconsole.c
r7f84430 r99c2c69e 53 53 #include <func.h> 54 54 #include <str.h> 55 #include <macros.h>56 55 #include <sysinfo/sysinfo.h> 57 56 #include <ddi/device.h> … … 119 118 * Make sure the command is not already listed. 120 119 */ 121 list_foreach(cmd_list, cur) { 122 cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link); 123 120 list_foreach(cmd_list, link, cmd_info_t, hlp) { 124 121 if (hlp == cmd) { 125 122 /* The command is already there. */ … … 613 610 cmd_info_t *cmd = NULL; 614 611 615 list_foreach(cmd_list, cur) { 616 cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link); 612 list_foreach(cmd_list, link, cmd_info_t, hlp) { 617 613 spinlock_lock(&hlp->lock); 618 614 -
kernel/generic/src/cpu/cpu.c
r7f84430 r99c2c69e 73 73 size_t i; 74 74 for (i = 0; i < config.cpu_count; i++) { 75 cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES, 76 FRAME_LOWMEM | FRAME_KA | FRAME_ATOMIC); 75 uintptr_t stack_phys = frame_alloc(STACK_FRAMES, 76 FRAME_LOWMEM | FRAME_ATOMIC, STACK_SIZE - 1); 77 if (!stack_phys) 78 panic("Cannot allocate CPU stack."); 79 80 cpus[i].stack = (uint8_t *) PA2KA(stack_phys); 77 81 cpus[i].id = i; 78 82 79 83 irq_spinlock_initialize(&cpus[i].lock, "cpus[].lock"); 80 84 81 unsigned int j; 82 for (j = 0; j < RQ_COUNT; j++) { 85 for (unsigned int j = 0; j < RQ_COUNT; j++) { 83 86 irq_spinlock_initialize(&cpus[i].rq[j].lock, "cpus[].rq[].lock"); 84 87 list_initialize(&cpus[i].rq[j].rq); -
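The CPU stack allocation shows the new frame allocator calling convention: frame_alloc() takes a frame count rather than a power-of-two order, plus a constraint on the physical address bits of the result; it returns a plain physical address, and FRAME_KA is gone, so callers translate to a kernel address with PA2KA() themselves. An annotated version of the idiom from this hunk:

```c
/*
 * Count + constraint, physical result, explicit translation.
 * The constraint STACK_SIZE - 1 forbids those address bits in
 * the first allocated frame, i.e. it requests STACK_SIZE
 * alignment.
 */
uintptr_t stack_phys = frame_alloc(STACK_FRAMES,
    FRAME_LOWMEM | FRAME_ATOMIC, STACK_SIZE - 1);
if (!stack_phys)
	panic("Cannot allocate CPU stack.");

uint8_t *stack = (uint8_t *) PA2KA(stack_phys);
```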
kernel/generic/src/ddi/ddi.c
r7f84430 r99c2c69e 314 314 315 315 NO_TRACE static int dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags, 316 unsigned int flags, void **phys)316 unsigned int flags, uintptr_t *phys) 317 317 { 318 318 ASSERT(TASK); … … 322 322 } 323 323 324 NO_TRACE static int dmamem_map_anonymous(size_t size, unsigned int map_flags, 325 unsigned int flags, void **phys, uintptr_t *virt, uintptr_t bound) 324 NO_TRACE static int dmamem_map_anonymous(size_t size, uintptr_t constraint, 325 unsigned int map_flags, unsigned int flags, uintptr_t *phys, 326 uintptr_t *virt, uintptr_t bound) 326 327 { 327 328 ASSERT(TASK); 328 329 329 size_t pages = SIZE2FRAMES(size); 330 uint8_t order; 331 332 /* We need the 2^order >= pages */ 333 if (pages == 1) 334 order = 0; 335 else 336 order = fnzb(pages - 1) + 1; 337 338 *phys = frame_alloc_noreserve(order, FRAME_DMA); 339 if (*phys == NULL) 330 size_t frames = SIZE2FRAMES(size); 331 *phys = frame_alloc(frames, FRAME_NO_RESERVE, constraint); 332 if (*phys == 0) 340 333 return ENOMEM; 341 334 342 335 mem_backend_data_t backend_data; 343 backend_data.base = (uintptr_t)*phys;344 backend_data.frames = pages;336 backend_data.base = *phys; 337 backend_data.frames = frames; 345 338 346 339 if (!as_area_create(TASK->as, map_flags, size, 347 340 AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) { 348 frame_free_noreserve( (uintptr_t) *phys);341 frame_free_noreserve(*phys, frames); 349 342 return ENOMEM; 350 343 } … … 361 354 NO_TRACE static int dmamem_unmap_anonymous(uintptr_t virt) 362 355 { 363 // TODO: This is an ugly hack 364 as_t *as = TASK->as; 365 366 mutex_lock(&as->lock); 367 as_area_t *area = find_locked_area(as, virt); 368 if (!area) { 369 mutex_unlock(&as->lock); 370 return ENOENT; 371 } 372 frame_free_noreserve(area->backend_data.base); 373 area->backend_data.base = 0; 374 area->backend_data.frames = 0; 375 mutex_unlock(&area->lock); 376 mutex_unlock(&as->lock); 377 378 return as_area_destroy(as, virt); 356 // TODO: implement unlocking & unmap 357 return EOK; 379 358 } 380 359 … … 387 366 */ 388 367 389 void *phys;368 uintptr_t phys; 390 369 int rc = dmamem_map((uintptr_t) virt_ptr, size, map_flags, 391 370 flags, &phys); … … 404 383 */ 405 384 406 void *phys; 385 uintptr_t constraint; 386 int rc = copy_from_uspace(&constraint, phys_ptr, 387 sizeof(constraint)); 388 if (rc != EOK) 389 return rc; 390 391 uintptr_t phys; 407 392 uintptr_t virt = (uintptr_t) -1; 408 int rc = dmamem_map_anonymous(size, map_flags, flags,393 rc = dmamem_map_anonymous(size, constraint, map_flags, flags, 409 394 &phys, &virt, bound); 410 395 if (rc != EOK) -
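dmamem_map_anonymous() now receives the physical-address constraint from user space (copied in by the syscall wrapper) and passes it straight to the allocator; the old order computation via fnzb() is gone because counts no longer need rounding up to powers of two. Condensed from this hunk, with the surrounding address-space setup elided:

```c
/* Core of the new anonymous DMA allocation path (condensed). */
size_t frames = SIZE2FRAMES(size);

/* The caller-supplied constraint limits the physical placement. */
uintptr_t phys = frame_alloc(frames, FRAME_NO_RESERVE, constraint);
if (phys == 0)
	return ENOMEM;

/* ... as_area_create() maps it; on failure the frame count must
 * now be handed back to the allocator along with the address ... */
frame_free_noreserve(phys, frames);
```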
kernel/generic/src/debug/stacktrace.c
r7f84430 r99c2c69e 39 39 #include <print.h> 40 40 41 #define STACK_FRAMES_MAX 41 #define STACK_FRAMES_MAX 20 42 42 43 43 void stack_trace_ctx(stack_trace_ops_t *ops, stack_trace_context_t *ctx) … … 49 49 uintptr_t pc; 50 50 51 while ( cnt++ < STACK_FRAMES_MAX&&52 ops->stack_trace_context_validate(ctx)) {51 while ((cnt++ < STACK_FRAMES_MAX) && 52 (ops->stack_trace_context_validate(ctx))) { 53 53 if (ops->symbol_resolve && 54 54 ops->symbol_resolve(ctx->pc, &symbol, &offset)) { -
kernel/generic/src/ipc/ipc.c
r7f84430 r99c2c69e 774 774 static void ipc_print_call_list(list_t *list) 775 775 { 776 list_foreach(*list, cur) { 777 call_t *call = list_get_instance(cur, call_t, ab_link); 778 776 list_foreach(*list, ab_link, call_t, call) { 779 777 #ifdef __32_BITS__ 780 778 printf("%10p ", call); -
kernel/generic/src/ipc/ipcrsc.c
r7f84430 r99c2c69e 151 151 irq_spinlock_lock(&TASK->answerbox.lock, true); 152 152 153 list_foreach(TASK->answerbox.dispatched_calls, lst) { 154 call_t *call = list_get_instance(lst, call_t, ab_link); 153 list_foreach(TASK->answerbox.dispatched_calls, ab_link, call_t, call) { 155 154 if ((sysarg_t) call == callid) { 156 155 result = call; -
kernel/generic/src/lib/ra.c
r7f84430 r99c2c69e 391 391 392 392 irq_spinlock_lock(&arena->lock, true); 393 list_foreach(arena->spans, cur) { 394 ra_span_t *span = list_get_instance(cur, ra_span_t, span_link); 395 393 list_foreach(arena->spans, span_link, ra_span_t, span) { 396 394 base = ra_span_alloc(span, size, alignment); 397 395 if (base) … … 407 405 { 408 406 irq_spinlock_lock(&arena->lock, true); 409 list_foreach(arena->spans, cur) { 410 ra_span_t *span = list_get_instance(cur, ra_span_t, span_link); 411 407 list_foreach(arena->spans, span_link, ra_span_t, span) { 412 408 if (iswithin(span->base, span->size, base, size)) { 413 409 ra_span_free(span, base, size); -
kernel/generic/src/mm/as.c
r7f84430 r99c2c69e 488 488 489 489 /* Eventually check the addresses behind each area */ 490 list_foreach(as->as_area_btree.leaf_list, cur) { 491 btree_node_t *node = 492 list_get_instance(cur, btree_node_t, leaf_link); 490 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) { 493 491 494 492 for (btree_key_t i = 0; i < node->keys; i++) { … … 672 670 673 671 return NULL; 674 }675 676 /** UGLY! UGLY! UGLY! */677 // TODO: REMOVE ASAP!678 as_area_t * find_locked_area(as_t *as, uintptr_t va)679 {680 return find_area_and_lock(as, va);681 672 } 682 673 … … 911 902 * reference from all frames found there. 912 903 */ 913 list_foreach(sh_info->pagemap.leaf_list, cur) { 914 btree_node_t *node 915 = list_get_instance(cur, btree_node_t, leaf_link); 904 list_foreach(sh_info->pagemap.leaf_list, leaf_link, 905 btree_node_t, node) { 916 906 btree_key_t i; 917 907 918 908 for (i = 0; i < node->keys; i++) 919 frame_free((uintptr_t) node->value[i] );909 frame_free((uintptr_t) node->value[i], 1); 920 910 } 921 911 … … 963 953 * Visit only the pages mapped by used_space B+tree. 964 954 */ 965 list_foreach(area->used_space.leaf_list, cur) {966 btree_node_t *node;955 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 956 node) { 967 957 btree_key_t i; 968 958 969 node = list_get_instance(cur, btree_node_t, leaf_link);970 959 for (i = 0; i < node->keys; i++) { 971 960 uintptr_t ptr = node->key[i]; … … 1245 1234 size_t used_pages = 0; 1246 1235 1247 list_foreach(area->used_space.leaf_list, cur) { 1248 btree_node_t *node 1249 = list_get_instance(cur, btree_node_t, leaf_link); 1236 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 1237 node) { 1250 1238 btree_key_t i; 1251 1239 … … 1271 1259 size_t frame_idx = 0; 1272 1260 1273 list_foreach(area->used_space.leaf_list, cur) { 1274 btree_node_t *node = list_get_instance(cur, btree_node_t, 1275 leaf_link); 1261 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 1262 node) { 1276 1263 btree_key_t i; 1277 1264 … … 1323 1310 frame_idx = 0; 1324 1311 1325 list_foreach(area->used_space.leaf_list, cur) { 1326 btree_node_t *node 1327 = list_get_instance(cur, btree_node_t, leaf_link); 1312 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 1313 node) { 1328 1314 btree_key_t i; 1329 1315 … … 2189 2175 size_t area_cnt = 0; 2190 2176 2191 list_foreach(as->as_area_btree.leaf_list, cur) { 2192 btree_node_t *node = 2193 list_get_instance(cur, btree_node_t, leaf_link); 2177 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, 2178 node) { 2194 2179 area_cnt += node->keys; 2195 2180 } … … 2202 2187 size_t area_idx = 0; 2203 2188 2204 list_foreach(as->as_area_btree.leaf_list, cur) { 2205 btree_node_t *node = 2206 list_get_instance(cur, btree_node_t, leaf_link); 2189 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, 2190 node) { 2207 2191 btree_key_t i; 2208 2192 … … 2238 2222 2239 2223 /* Print out info about address space areas */ 2240 list_foreach(as->as_area_btree.leaf_list, cur) { 2241 btree_node_t *node 2242 = list_get_instance(cur, btree_node_t, leaf_link); 2224 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, 2225 node) { 2243 2226 btree_key_t i; 2244 2227 -
kernel/generic/src/mm/backend_anon.c
r7f84430 r99c2c69e 118 118 */ 119 119 mutex_lock(&area->sh_info->lock); 120 list_foreach(area->used_space.leaf_list, cur) {121 btree_node_t *node;120 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 121 node) { 122 122 unsigned int i; 123 123 124 node = list_get_instance(cur, btree_node_t, leaf_link);125 124 for (i = 0; i < node->keys; i++) { 126 125 uintptr_t base = node->key[i]; … … 295 294 * the normal unreserving frame_free(). 296 295 */ 297 frame_free(frame );296 frame_free(frame, 1); 298 297 } else { 299 298 /* … … 302 301 * manipulate the reserve or it would be given back twice. 303 302 */ 304 frame_free_noreserve(frame );303 frame_free_noreserve(frame, 1); 305 304 } 306 305 } -
kernel/generic/src/mm/backend_elf.c
r7f84430 r99c2c69e 429 429 * data. 430 430 */ 431 frame_free_noreserve(frame );431 frame_free_noreserve(frame, 1); 432 432 } 433 433 } else { … … 437 437 * anonymous). In any case, a frame needs to be freed. 438 438 */ 439 frame_free_noreserve(frame );439 frame_free_noreserve(frame, 1); 440 440 } 441 441 } -
kernel/generic/src/mm/frame.c
r7f84430 r99c2c69e 38 38 * 39 39 * This file contains the physical frame allocator and memory zone management. 40 * The frame allocator is built on top of the buddy allocator. 41 * 42 * @see buddy.c 40 * The frame allocator is built on top of the two-level bitmap structure. 41 * 43 42 */ 44 43 … … 92 91 } 93 92 94 NO_TRACE static inline size_t make_frame_index(zone_t *zone, frame_t *frame)95 {96 return (frame - zone->frames);97 }98 99 93 /** Initialize frame structure. 100 94 * … … 104 98 NO_TRACE static void frame_initialize(frame_t *frame) 105 99 { 106 frame->refcount = 1;107 frame-> buddy_order = 0;100 frame->refcount = 0; 101 frame->parent = NULL; 108 102 } 109 103 … … 161 155 162 156 /* Move other zones up */ 163 size_t j; 164 for (j = zones.count; j > i; j--) { 157 for (size_t j = zones.count; j > i; j--) 165 158 zones.info[j] = zones.info[j - 1]; 166 if (zones.info[j].buddy_system != NULL)167 zones.info[j].buddy_system->data =168 (void *) &zones.info[j];169 }170 159 171 160 zones.count++; … … 237 226 } 238 227 239 /** @return True if zone can allocate specified order */ 240 NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order) 241 { 228 /** @return True if zone can allocate specified number of frames */ 229 NO_TRACE static bool zone_can_alloc(zone_t *zone, size_t count, 230 pfn_t constraint) 231 { 232 /* 233 * The function bitmap_allocate_range() does not modify 234 * the bitmap if the last argument is NULL. 235 */ 236 242 237 return ((zone->flags & ZONE_AVAILABLE) && 243 buddy_system_can_alloc(zone->buddy_system, order)); 244 } 245 246 /** Find a zone that can allocate order frames. 238 bitmap_allocate_range(&zone->bitmap, count, zone->base, 239 FRAME_LOWPRIO, constraint, NULL)); 240 } 241 242 /** Find a zone that can allocate specified number of frames 243 * 244 * This function searches among all zones. Assume interrupts are 245 * disabled and zones lock is locked. 246 * 247 * @param count Number of free frames we are trying to find. 248 * @param flags Required flags of the zone. 249 * @param constraint Indication of bits that cannot be set in the 250 * physical frame number of the first allocated frame. 251 * @param hint Preferred zone. 252 * 253 * @return Zone that can allocate specified number of frames. 254 * @return -1 if no zone can satisfy the request. 255 * 256 */ 257 NO_TRACE static size_t find_free_zone_all(size_t count, zone_flags_t flags, 258 pfn_t constraint, size_t hint) 259 { 260 for (size_t pos = 0; pos < zones.count; pos++) { 261 size_t i = (pos + hint) % zones.count; 262 263 /* Check whether the zone meets the search criteria. */ 264 if (!ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) 265 continue; 266 267 /* Check if the zone can satisfy the allocation request. */ 268 if (zone_can_alloc(&zones.info[i], count, constraint)) 269 return i; 270 } 271 272 return (size_t) -1; 273 } 274 275 /** Check if frame range priority memory 276 * 277 * @param pfn Starting frame. 278 * @param count Number of frames. 279 * 280 * @return True if the range contains only priority memory. 281 * 282 */ 283 NO_TRACE static bool is_high_priority(pfn_t base, size_t count) 284 { 285 return (base + count <= FRAME_LOWPRIO); 286 } 287 288 /** Find a zone that can allocate specified number of frames 289 * 290 * This function ignores zones that contain only high-priority 291 * memory. Assume interrupts are disabled and zones lock is locked. 292 * 293 * @param count Number of free frames we are trying to find. 294 * @param flags Required flags of the zone. 
295 * @param constraint Indication of bits that cannot be set in the 296 * physical frame number of the first allocated frame. 297 * @param hint Preferred zone. 298 * 299 * @return Zone that can allocate specified number of frames. 300 * @return -1 if no low-priority zone can satisfy the request. 301 * 302 */ 303 NO_TRACE static size_t find_free_zone_lowprio(size_t count, zone_flags_t flags, 304 pfn_t constraint, size_t hint) 305 { 306 for (size_t pos = 0; pos < zones.count; pos++) { 307 size_t i = (pos + hint) % zones.count; 308 309 /* Skip zones containing only high-priority memory. */ 310 if (is_high_priority(zones.info[i].base, zones.info[i].count)) 311 continue; 312 313 /* Check whether the zone meets the search criteria. */ 314 if (!ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) 315 continue; 316 317 /* Check if the zone can satisfy the allocation request. */ 318 if (zone_can_alloc(&zones.info[i], count, constraint)) 319 return i; 320 } 321 322 return (size_t) -1; 323 } 324 325 /** Find a zone that can allocate specified number of frames 247 326 * 248 327 * Assume interrupts are disabled and zones lock is 249 328 * locked. 250 329 * 251 * @param order Size (2^order) of free space we are trying to find. 252 * @param flags Required flags of the target zone. 253 * @param hind Preferred zone. 254 * 255 */ 256 NO_TRACE static size_t find_free_zone(uint8_t order, zone_flags_t flags, 257 size_t hint) 330 * @param count Number of free frames we are trying to find. 331 * @param flags Required flags of the target zone. 332 * @param constraint Indication of bits that cannot be set in the 333 * physical frame number of the first allocated frame. 334 * @param hint Preferred zone. 335 * 336 * @return Zone that can allocate specified number of frames. 337 * @return -1 if no zone can satisfy the request. 338 * 339 */ 340 NO_TRACE static size_t find_free_zone(size_t count, zone_flags_t flags, 341 pfn_t constraint, size_t hint) 258 342 { 259 343 if (hint >= zones.count) 260 344 hint = 0; 261 345 262 size_t i = hint; 263 do { 264 /* 265 * Check whether the zone meets the search criteria. 266 */ 267 if (ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) { 268 /* 269 * Check if the zone has 2^order frames area available. 270 */ 271 if (zone_can_alloc(&zones.info[i], order)) 272 return i; 273 } 274 275 i++; 276 if (i >= zones.count) 277 i = 0; 278 279 } while (i != hint); 280 281 return (size_t) -1; 282 } 283 284 /**************************/ 285 /* Buddy system functions */ 286 /**************************/ 287 288 /** Buddy system find_block implementation. 289 * 290 * Find block that is parent of current list. 291 * That means go to lower addresses, until such block is found 292 * 293 * @param order Order of parent must be different then this 294 * parameter!! 295 * 296 */ 297 NO_TRACE static link_t *zone_buddy_find_block(buddy_system_t *buddy, 298 link_t *child, uint8_t order) 299 { 300 frame_t *frame = list_get_instance(child, frame_t, buddy_link); 301 zone_t *zone = (zone_t *) buddy->data; 302 303 size_t index = frame_index(zone, frame); 304 do { 305 if (zone->frames[index].buddy_order != order) 306 return &zone->frames[index].buddy_link; 307 } while (index-- > 0); 308 309 return NULL; 310 } 311 312 /** Buddy system find_buddy implementation. 313 * 314 * @param buddy Buddy system. 315 * @param block Block for which buddy should be found. 316 * 317 * @return Buddy for given block if found. 
318 * 319 */ 320 NO_TRACE static link_t *zone_buddy_find_buddy(buddy_system_t *buddy, 321 link_t *block) 322 { 323 frame_t *frame = list_get_instance(block, frame_t, buddy_link); 324 zone_t *zone = (zone_t *) buddy->data; 325 ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), 326 frame->buddy_order)); 327 328 bool is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame); 329 330 size_t index; 331 if (is_left) { 332 index = (frame_index(zone, frame)) + 333 (1 << frame->buddy_order); 334 } else { /* is_right */ 335 index = (frame_index(zone, frame)) - 336 (1 << frame->buddy_order); 337 } 338 339 if (frame_index_valid(zone, index)) { 340 if ((zone->frames[index].buddy_order == frame->buddy_order) && 341 (zone->frames[index].refcount == 0)) { 342 return &zone->frames[index].buddy_link; 343 } 344 } 345 346 return NULL; 347 } 348 349 /** Buddy system bisect implementation. 350 * 351 * @param buddy Buddy system. 352 * @param block Block to bisect. 353 * 354 * @return Right block. 355 * 356 */ 357 NO_TRACE static link_t *zone_buddy_bisect(buddy_system_t *buddy, link_t *block) 358 { 359 frame_t *frame_l = list_get_instance(block, frame_t, buddy_link); 360 frame_t *frame_r = (frame_l + (1 << (frame_l->buddy_order - 1))); 361 362 return &frame_r->buddy_link; 363 } 364 365 /** Buddy system coalesce implementation. 366 * 367 * @param buddy Buddy system. 368 * @param block_1 First block. 369 * @param block_2 First block's buddy. 370 * 371 * @return Coalesced block (actually block that represents lower 372 * address). 373 * 374 */ 375 NO_TRACE static link_t *zone_buddy_coalesce(buddy_system_t *buddy, 376 link_t *block_1, link_t *block_2) 377 { 378 frame_t *frame1 = list_get_instance(block_1, frame_t, buddy_link); 379 frame_t *frame2 = list_get_instance(block_2, frame_t, buddy_link); 380 381 return ((frame1 < frame2) ? block_1 : block_2); 382 } 383 384 /** Buddy system set_order implementation. 385 * 386 * @param buddy Buddy system. 387 * @param block Buddy system block. 388 * @param order Order to set. 389 * 390 */ 391 NO_TRACE static void zone_buddy_set_order(buddy_system_t *buddy, link_t *block, 392 uint8_t order) 393 { 394 list_get_instance(block, frame_t, buddy_link)->buddy_order = order; 395 } 396 397 /** Buddy system get_order implementation. 398 * 399 * @param buddy Buddy system. 400 * @param block Buddy system block. 401 * 402 * @return Order of block. 403 * 404 */ 405 NO_TRACE static uint8_t zone_buddy_get_order(buddy_system_t *buddy, 406 link_t *block) 407 { 408 return list_get_instance(block, frame_t, buddy_link)->buddy_order; 409 } 410 411 /** Buddy system mark_busy implementation. 412 * 413 * @param buddy Buddy system. 414 * @param block Buddy system block. 415 * 416 */ 417 NO_TRACE static void zone_buddy_mark_busy(buddy_system_t *buddy, link_t *block) 418 { 419 list_get_instance(block, frame_t, buddy_link)->refcount = 1; 420 } 421 422 /** Buddy system mark_available implementation. 423 * 424 * @param buddy Buddy system. 425 * @param block Buddy system block. 
426 * 427 */ 428 NO_TRACE static void zone_buddy_mark_available(buddy_system_t *buddy, 429 link_t *block) 430 { 431 list_get_instance(block, frame_t, buddy_link)->refcount = 0; 432 } 433 434 static buddy_system_operations_t zone_buddy_system_operations = { 435 .find_buddy = zone_buddy_find_buddy, 436 .bisect = zone_buddy_bisect, 437 .coalesce = zone_buddy_coalesce, 438 .set_order = zone_buddy_set_order, 439 .get_order = zone_buddy_get_order, 440 .mark_busy = zone_buddy_mark_busy, 441 .mark_available = zone_buddy_mark_available, 442 .find_block = zone_buddy_find_block 443 }; 346 /* 347 * Prefer zones with low-priority memory over 348 * zones with high-priority memory. 349 */ 350 351 size_t znum = find_free_zone_lowprio(count, flags, constraint, hint); 352 if (znum != (size_t) -1) 353 return znum; 354 355 /* Take all zones into account */ 356 return find_free_zone_all(count, flags, constraint, hint); 357 } 444 358 445 359 /******************/ … … 447 361 /******************/ 448 362 363 /** Return frame from zone. */ 364 NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t index) 365 { 366 ASSERT(index < zone->count); 367 368 return &zone->frames[index]; 369 } 370 449 371 /** Allocate frame in particular zone. 450 372 * … … 452 374 * Panics if allocation is impossible. 453 375 * 454 * @param zone Zone to allocate from. 455 * @param order Allocate exactly 2^order frames. 376 * @param zone Zone to allocate from. 377 * @param count Number of frames to allocate 378 * @param constraint Indication of bits that cannot be set in the 379 * physical frame number of the first allocated frame. 456 380 * 457 381 * @return Frame index in zone. 458 382 * 459 383 */ 460 NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order) 384 NO_TRACE static size_t zone_frame_alloc(zone_t *zone, size_t count, 385 pfn_t constraint) 461 386 { 462 387 ASSERT(zone->flags & ZONE_AVAILABLE); 463 388 464 /* Allocate frames from zone buddy system */ 465 link_t *link = buddy_system_alloc(zone->buddy_system, order); 466 467 ASSERT(link); 389 /* Allocate frames from zone */ 390 size_t index; 391 int avail = bitmap_allocate_range(&zone->bitmap, count, zone->base, 392 FRAME_LOWPRIO, constraint, &index); 393 394 ASSERT(avail); 395 396 /* Update frame reference count */ 397 for (size_t i = 0; i < count; i++) { 398 frame_t *frame = zone_get_frame(zone, index + i); 399 400 ASSERT(frame->refcount == 0); 401 frame->refcount = 1; 402 } 468 403 469 404 /* Update zone information. */ 470 zone->free_count -= (1 << order); 471 zone->busy_count += (1 << order); 472 473 /* Frame will be actually a first frame of the block. */ 474 frame_t *frame = list_get_instance(link, frame_t, buddy_link); 475 476 /* Get frame address */ 477 return make_frame_index(zone, frame); 405 zone->free_count -= count; 406 zone->busy_count += count; 407 408 return index; 478 409 } 479 410 … … 482 413 * Assume zone is locked and is available for deallocation. 483 414 * 484 * @param zone 485 * @param frame_idx Frame index relative to zone.486 * 487 * @return 488 * 489 */ 490 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx)415 * @param zone Pointer to zone from which the frame is to be freed. 416 * @param index Frame index relative to zone. 417 * 418 * @return Number of freed frames. 
419 * 420 */ 421 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t index) 491 422 { 492 423 ASSERT(zone->flags & ZONE_AVAILABLE); 493 424 494 frame_t *frame = &zone->frames[frame_idx]; 495 size_t size = 0; 496 497 ASSERT(frame->refcount); 425 frame_t *frame = zone_get_frame(zone, index); 426 427 ASSERT(frame->refcount > 0); 498 428 499 429 if (!--frame->refcount) { 500 size = 1 << frame->buddy_order;501 buddy_system_free(zone->buddy_system, &frame->buddy_link);430 bitmap_set(&zone->bitmap, index, 0); 431 502 432 /* Update zone information. */ 503 zone->free_count += size; 504 zone->busy_count -= size; 505 } 506 507 return size; 508 } 509 510 /** Return frame from zone. */ 511 NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t frame_idx) 512 { 513 ASSERT(frame_idx < zone->count); 514 return &zone->frames[frame_idx]; 433 zone->free_count++; 434 zone->busy_count--; 435 436 return 1; 437 } 438 439 return 0; 515 440 } 516 441 517 442 /** Mark frame in zone unavailable to allocation. */ 518 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx) 519 { 520 if (!(zone->flags & ZONE_AVAILABLE)) 443 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t index) 444 { 445 ASSERT(zone->flags & ZONE_AVAILABLE); 446 447 frame_t *frame = zone_get_frame(zone, index); 448 if (frame->refcount > 0) 521 449 return; 522 // ASSERT(zone->flags & ZONE_AVAILABLE); 523 524 frame_t *frame = zone_get_frame(zone, frame_idx); 525 if (frame->refcount) 526 return; 527 528 link_t *link __attribute__ ((unused)); 529 530 link = buddy_system_alloc_block(zone->buddy_system, 531 &frame->buddy_link); 532 533 ASSERT(link); 450 451 frame->refcount = 1; 452 bitmap_set_range(&zone->bitmap, index, 1); 453 534 454 zone->free_count--; 535 455 reserve_force_alloc(1); … … 538 458 /** Merge two zones. 539 459 * 540 * Expect buddy to point to space at least zone_conf_size large.541 460 * Assume z1 & z2 are locked and compatible and zones lock is 542 461 * locked. 543 462 * 544 * @param z1 First zone to merge.545 * @param z2 Second zone to merge.546 * @param old_z1 Original dateof the first zone.547 * @param buddy Merged zone buddy.463 * @param z1 First zone to merge. 464 * @param z2 Second zone to merge. 465 * @param old_z1 Original data of the first zone. 466 * @param confdata Merged zone configuration data. 548 467 * 549 468 */ 550 469 NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1, 551 buddy_system_t *buddy)470 void *confdata) 552 471 { 553 472 ASSERT(zones.info[z1].flags & ZONE_AVAILABLE); … … 564 483 zones.info[z1].free_count += zones.info[z2].free_count; 565 484 zones.info[z1].busy_count += zones.info[z2].busy_count; 566 zones.info[z1].buddy_system = buddy; 567 568 uint8_t order = fnzb(zones.info[z1].count); 569 buddy_system_create(zones.info[z1].buddy_system, order, 570 &zone_buddy_system_operations, (void *) &zones.info[z1]); 571 572 zones.info[z1].frames = 573 (frame_t *) ((uint8_t *) zones.info[z1].buddy_system 574 + buddy_conf_size(order)); 575 576 /* This marks all frames busy */ 577 size_t i; 578 for (i = 0; i < zones.info[z1].count; i++) 579 frame_initialize(&zones.info[z1].frames[i]); 580 581 /* Copy frames from both zones to preserve full frame orders, 582 * parents etc. Set all free frames with refcount = 0 to 1, because 583 * we add all free frames to buddy allocator later again, clearing 584 * order to 0. 
Don't set busy frames with refcount = 0, as they 585 * will not be reallocated during merge and it would make later 586 * problems with allocation/free. 485 486 bitmap_initialize(&zones.info[z1].bitmap, zones.info[z1].count, 487 confdata + (sizeof(frame_t) * zones.info[z1].count)); 488 bitmap_clear_range(&zones.info[z1].bitmap, 0, zones.info[z1].count); 489 490 zones.info[z1].frames = (frame_t *) confdata; 491 492 /* 493 * Copy frames and bits from both zones to preserve parents, etc. 587 494 */ 588 for (i = 0; i < old_z1->count; i++) 495 496 for (size_t i = 0; i < old_z1->count; i++) { 497 bitmap_set(&zones.info[z1].bitmap, i, 498 bitmap_get(&old_z1->bitmap, i)); 589 499 zones.info[z1].frames[i] = old_z1->frames[i]; 590 591 for (i = 0; i < zones.info[z2].count; i++) 592 zones.info[z1].frames[base_diff + i] 593 = zones.info[z2].frames[i]; 594 595 i = 0; 596 while (i < zones.info[z1].count) { 597 if (zones.info[z1].frames[i].refcount) { 598 /* Skip busy frames */ 599 i += 1 << zones.info[z1].frames[i].buddy_order; 600 } else { 601 /* Free frames, set refcount = 1 602 * (all free frames have refcount == 0, we need not 603 * to check the order) 604 */ 605 zones.info[z1].frames[i].refcount = 1; 606 zones.info[z1].frames[i].buddy_order = 0; 607 i++; 608 } 609 } 610 611 /* Add free blocks from the original zone z1 */ 612 while (zone_can_alloc(old_z1, 0)) { 613 /* Allocate from the original zone */ 614 pfn_t frame_idx = zone_frame_alloc(old_z1, 0); 615 616 /* Free the frame from the merged zone */ 617 frame_t *frame = &zones.info[z1].frames[frame_idx]; 618 frame->refcount = 0; 619 buddy_system_free(zones.info[z1].buddy_system, &frame->buddy_link); 620 } 621 622 /* Add free blocks from the original zone z2 */ 623 while (zone_can_alloc(&zones.info[z2], 0)) { 624 /* Allocate from the original zone */ 625 pfn_t frame_idx = zone_frame_alloc(&zones.info[z2], 0); 626 627 /* Free the frame from the merged zone */ 628 frame_t *frame = &zones.info[z1].frames[base_diff + frame_idx]; 629 frame->refcount = 0; 630 buddy_system_free(zones.info[z1].buddy_system, &frame->buddy_link); 500 } 501 502 for (size_t i = 0; i < zones.info[z2].count; i++) { 503 bitmap_set(&zones.info[z1].bitmap, base_diff + i, 504 bitmap_get(&zones.info[z2].bitmap, i)); 505 zones.info[z1].frames[base_diff + i] = 506 zones.info[z2].frames[i]; 631 507 } 632 508 } … … 651 527 size_t cframes = SIZE2FRAMES(zone_conf_size(count)); 652 528 653 if ((pfn < zones.info[znum].base) 654 ||(pfn >= zones.info[znum].base + zones.info[znum].count))529 if ((pfn < zones.info[znum].base) || 530 (pfn >= zones.info[znum].base + zones.info[znum].count)) 655 531 return; 656 532 657 frame_t *frame __attribute__ ((unused)); 658 659 frame = &zones.info[znum].frames[pfn - zones.info[znum].base]; 660 ASSERT(!frame->buddy_order); 661 662 size_t i; 663 for (i = 0; i < cframes; i++) { 664 zones.info[znum].busy_count++; 533 for (size_t i = 0; i < cframes; i++) 665 534 (void) zone_frame_free(&zones.info[znum], 666 535 pfn - zones.info[znum].base + i); 667 }668 }669 670 /** Reduce allocated block to count of order 0 frames.671 *672 * The allocated block needs 2^order frames. Reduce all frames673 * in the block to order 0 and free the unneeded frames. 
This means that674 * when freeing the previously allocated block starting with frame_idx,675 * you have to free every frame.676 *677 * @param znum Zone.678 * @param frame_idx Index the first frame of the block.679 * @param count Allocated frames in block.680 *681 */682 NO_TRACE static void zone_reduce_region(size_t znum, pfn_t frame_idx,683 size_t count)684 {685 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);686 ASSERT(frame_idx + count < zones.info[znum].count);687 688 uint8_t order = zones.info[znum].frames[frame_idx].buddy_order;689 ASSERT((size_t) (1 << order) >= count);690 691 /* Reduce all blocks to order 0 */692 size_t i;693 for (i = 0; i < (size_t) (1 << order); i++) {694 frame_t *frame = &zones.info[znum].frames[i + frame_idx];695 frame->buddy_order = 0;696 if (!frame->refcount)697 frame->refcount = 1;698 ASSERT(frame->refcount == 1);699 }700 701 /* Free unneeded frames */702 for (i = count; i < (size_t) (1 << order); i++)703 (void) zone_frame_free(&zones.info[znum], i + frame_idx);704 536 } 705 537 … … 721 553 bool ret = true; 722 554 723 /* We can join only 2 zones with none existing inbetween, 555 /* 556 * We can join only 2 zones with none existing inbetween, 724 557 * the zones have to be available and with the same 725 558 * set of flags … … 735 568 + zones.info[z2].count)); 736 569 737 uint8_t order;738 if (cframes == 1)739 order = 0;740 else741 order = fnzb(cframes - 1) + 1;742 743 570 /* Allocate merged zone data inside one of the zones */ 744 571 pfn_t pfn; 745 if (zone_can_alloc(&zones.info[z1], order)) { 746 pfn = zones.info[z1].base + zone_frame_alloc(&zones.info[z1], order); 747 } else if (zone_can_alloc(&zones.info[z2], order)) { 748 pfn = zones.info[z2].base + zone_frame_alloc(&zones.info[z2], order); 572 if (zone_can_alloc(&zones.info[z1], cframes, 0)) { 573 pfn = zones.info[z1].base + 574 zone_frame_alloc(&zones.info[z1], cframes, 0); 575 } else if (zone_can_alloc(&zones.info[z2], cframes, 0)) { 576 pfn = zones.info[z2].base + 577 zone_frame_alloc(&zones.info[z2], cframes, 0); 749 578 } else { 750 579 ret = false; … … 754 583 /* Preserve original data from z1 */ 755 584 zone_t old_z1 = zones.info[z1]; 756 old_z1.buddy_system->data = (void *) &old_z1;757 585 758 586 /* Do zone merging */ 759 buddy_system_t *buddy = (buddy_system_t *) PA2KA(PFN2ADDR(pfn)); 760 zone_merge_internal(z1, z2, &old_z1, buddy); 761 762 /* Free unneeded config frames */ 763 zone_reduce_region(z1, pfn - zones.info[z1].base, cframes); 587 zone_merge_internal(z1, z2, &old_z1, (void *) PA2KA(PFN2ADDR(pfn))); 764 588 765 589 /* Subtract zone information from busy frames */ … … 774 598 775 599 /* Move zones down */ 776 size_t i; 777 for (i = z2 + 1; i < zones.count; i++) { 600 for (size_t i = z2 + 1; i < zones.count; i++) 778 601 zones.info[i - 1] = zones.info[i]; 779 if (zones.info[i - 1].buddy_system != NULL)780 zones.info[i - 1].buddy_system->data =781 (void *) &zones.info[i - 1];782 }783 602 784 603 zones.count--; … … 799 618 void zone_merge_all(void) 800 619 { 801 size_t i = 0; 620 size_t i = 1; 621 802 622 while (i < zones.count) { 803 if (!zone_merge(i , i + 1))623 if (!zone_merge(i - 1, i)) 804 624 i++; 805 625 } … … 808 628 /** Create new frame zone. 809 629 * 810 * @param zone Zone to construct.811 * @param buddy Address of buddy system configuration information.812 * @param start Physical address of the first frame within thezone.813 * @param count Count of frames in zone.814 * @param flags Zone flags.630 * @param zone Zone to construct. 
631 * @param start Physical address of the first frame within the zone. 632 * @param count Count of frames in zone. 633 * @param flags Zone flags. 634 * @param confdata Configuration data of the zone. 815 635 * 816 636 * @return Initialized zone. 817 637 * 818 638 */ 819 NO_TRACE static void zone_construct(zone_t *zone, buddy_system_t *buddy,820 pfn_t start, size_t count, zone_flags_t flags)639 NO_TRACE static void zone_construct(zone_t *zone, pfn_t start, size_t count, 640 zone_flags_t flags, void *confdata) 821 641 { 822 642 zone->base = start; … … 825 645 zone->free_count = count; 826 646 zone->busy_count = 0; 827 zone->buddy_system = buddy;828 647 829 648 if (flags & ZONE_AVAILABLE) { 830 649 /* 831 * Compute order for buddy system and initialize 650 * Initialize frame bitmap (located after the array of 651 * frame_t structures in the configuration space). 832 652 */ 833 uint8_t order = fnzb(count);834 b uddy_system_create(zone->buddy_system, order,835 &zone_buddy_system_operations, (void *) zone);836 837 /* Allocate frames _after_ the confframe */838 839 /* Check sizes */840 zone->frames = (frame_t *) ((uint8_t *) zone->buddy_system +841 buddy_conf_size(order));842 843 size_t i;844 for ( i = 0; i < count; i++)653 654 bitmap_initialize(&zone->bitmap, count, confdata + 655 (sizeof(frame_t) * count)); 656 bitmap_clear_range(&zone->bitmap, 0, count); 657 658 /* 659 * Initialize the array of frame_t structures. 660 */ 661 662 zone->frames = (frame_t *) confdata; 663 664 for (size_t i = 0; i < count; i++) 845 665 frame_initialize(&zone->frames[i]); 846 847 /* Stuffing frames */ 848 for (i = 0; i < count; i++) { 849 zone->frames[i].refcount = 0; 850 buddy_system_free(zone->buddy_system, &zone->frames[i].buddy_link); 851 } 852 } else 666 } else { 667 bitmap_initialize(&zone->bitmap, 0, NULL); 853 668 zone->frames = NULL; 669 } 854 670 } 855 671 … … 863 679 size_t zone_conf_size(size_t count) 864 680 { 865 return (count * sizeof(frame_t) + b uddy_conf_size(fnzb(count)));681 return (count * sizeof(frame_t) + bitmap_size(count)); 866 682 } 867 683 … … 869 685 pfn_t zone_external_conf_alloc(size_t count) 870 686 { 871 size_t size = zone_conf_size(count); 872 size_t order = ispwr2(size) ? fnzb(size) : (fnzb(size) + 1); 873 874 return ADDR2PFN((uintptr_t) frame_alloc(order - FRAME_WIDTH, 875 FRAME_LOWMEM | FRAME_ATOMIC)); 687 size_t frames = SIZE2FRAMES(zone_conf_size(count)); 688 689 return ADDR2PFN((uintptr_t) 690 frame_alloc(frames, FRAME_LOWMEM | FRAME_ATOMIC, 0)); 876 691 } 877 692 … … 881 696 * @param count Size of zone in frames. 882 697 * @param confframe Where configuration frames are supposed to be. 883 * Automatically checks ,that we will not disturb the698 * Automatically checks that we will not disturb the 884 699 * kernel and possibly init. If confframe is given 885 700 * _outside_ this zone, it is expected, that the area is … … 898 713 899 714 if (flags & ZONE_AVAILABLE) { /* Create available zone */ 900 /* Theoretically we could have NULL here, practically make sure 715 /* 716 * Theoretically we could have NULL here, practically make sure 901 717 * nobody tries to do that. If some platform requires, remove 902 718 * the assert 903 719 */ 904 720 ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL)); 905 721 906 722 /* Update the known end of physical memory. 
*/ 907 723 config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count)); 908 724 909 /* If confframe is supposed to be inside our zone, then make sure 725 /* 726 * If confframe is supposed to be inside our zone, then make sure 910 727 * it does not span kernel & init 911 728 */ 912 729 size_t confcount = SIZE2FRAMES(zone_conf_size(count)); 730 913 731 if ((confframe >= start) && (confframe < start + count)) { 914 732 for (; confframe < start + count; confframe++) { … … 923 741 924 742 bool overlap = false; 925 size_t i; 926 for (i = 0; i < init.cnt; i++) 743 for (size_t i = 0; i < init.cnt; i++) { 927 744 if (overlaps(addr, PFN2ADDR(confcount), 928 745 init.tasks[i].paddr, … … 931 748 break; 932 749 } 750 } 751 933 752 if (overlap) 934 753 continue; … … 937 756 } 938 757 939 if (confframe >= start + count) { 940 flags &= ~ZONE_AVAILABLE; 941 goto nonavail; 942 // panic("Cannot find configuration data for zone."); 943 } 758 if (confframe >= start + count) 759 panic("Cannot find configuration data for zone."); 944 760 } 945 761 … … 950 766 } 951 767 952 buddy_system_t *buddy = (buddy_system_t*) PA2KA(PFN2ADDR(confframe));953 zone_construct(&zones.info[znum], buddy, start, count, flags);768 void *confdata = (void *) PA2KA(PFN2ADDR(confframe)); 769 zone_construct(&zones.info[znum], start, count, flags, confdata); 954 770 955 771 /* If confdata in zone, mark as unavailable */ 956 772 if ((confframe >= start) && (confframe < start + count)) { 957 size_t i; 958 for (i = confframe; i < confframe + confcount; i++) 773 for (size_t i = confframe; i < confframe + confcount; i++) 959 774 zone_mark_unavailable(&zones.info[znum], 960 775 i - zones.info[znum].base); … … 965 780 return znum; 966 781 } 967 nonavail: 968 (void)0; // label trick 782 969 783 /* Non-available zone */ 970 784 size_t znum = zones_insert_zone(start, count, flags); … … 973 787 return (size_t) -1; 974 788 } 975 zone_construct(&zones.info[znum], NULL, start, count, flags); 789 790 zone_construct(&zones.info[znum], start, count, flags, NULL); 976 791 977 792 irq_spinlock_unlock(&zones.lock, true); … … 1015 830 } 1016 831 1017 /** Allocate power-of-two frames of physical memory. 1018 * 1019 * @param order Allocate exactly 2^order frames. 1020 * @param flags Flags for host zone selection and address processing. 1021 * @param pzone Preferred zone. 832 /** Allocate frames of physical memory. 833 * 834 * @param count Number of continuous frames to allocate. 835 * @param flags Flags for host zone selection and address processing. 836 * @param constraint Indication of physical address bits that cannot be 837 * set in the address of the first allocated frame. 838 * @param pzone Preferred zone. 1022 839 * 1023 840 * @return Physical address of the allocated frame. 1024 841 * 1025 842 */ 1026 void *frame_alloc_generic(uint8_t order, frame_flags_t flags, size_t *pzone) 1027 { 1028 size_t size = ((size_t) 1) << order; 843 uintptr_t frame_alloc_generic(size_t count, frame_flags_t flags, 844 uintptr_t constraint, size_t *pzone) 845 { 846 ASSERT(count > 0); 847 1029 848 size_t hint = pzone ? (*pzone) : 0; 849 pfn_t frame_constraint = ADDR2PFN(constraint); 1030 850 1031 851 /* 1032 852 * If not told otherwise, we must first reserve the memory. 1033 853 */ 1034 if (!(flags & FRAME_NO_RESERVE)) 1035 reserve_force_alloc( size);1036 854 if (!(flags & FRAME_NO_RESERVE)) 855 reserve_force_alloc(count); 856 1037 857 loop: 1038 858 irq_spinlock_lock(&zones.lock, true); … … 1041 861 * First, find suitable frame zone. 
1042 862 */ 1043 size_t znum = find_free_zone(order, 1044 FRAME_TO_ZONE_FLAGS(flags), hint); 1045 1046 /* If no memory, reclaim some slab memory, 1047 if it does not help, reclaim all */ 863 size_t znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags), 864 frame_constraint, hint); 865 866 /* 867 * If no memory, reclaim some slab memory, 868 * if it does not help, reclaim all. 869 */ 1048 870 if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) { 1049 871 irq_spinlock_unlock(&zones.lock, true); … … 1052 874 1053 875 if (freed > 0) 1054 znum = find_free_zone( order,1055 FRAME_TO_ZONE_FLAGS(flags), hint);876 znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags), 877 frame_constraint, hint); 1056 878 1057 879 if (znum == (size_t) -1) { … … 1061 883 1062 884 if (freed > 0) 1063 znum = find_free_zone( order,1064 FRAME_TO_ZONE_FLAGS(flags), hint);885 znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags), 886 frame_constraint, hint); 1065 887 } 1066 888 } … … 1069 891 if (flags & FRAME_ATOMIC) { 1070 892 irq_spinlock_unlock(&zones.lock, true); 893 1071 894 if (!(flags & FRAME_NO_RESERVE)) 1072 reserve_free(size); 1073 return NULL; 895 reserve_free(count); 896 897 return 0; 1074 898 } 1075 899 … … 1081 905 1082 906 if (!THREAD) 1083 panic("Cannot wait for memory to become available."); 907 panic("Cannot wait for %zu frames to become available " 908 "(%zu available).", count, avail); 1084 909 1085 910 /* … … 1088 913 1089 914 #ifdef CONFIG_DEBUG 1090 printf("Thread %" PRIu64 " waiting for %zu frames ,"1091 " %zu available.\n", THREAD->tid, size, avail);915 printf("Thread %" PRIu64 " waiting for %zu frames " 916 "(%zu available).\n", THREAD->tid, count, avail); 1092 917 #endif 1093 918 1094 919 /* 1095 * Since the mem_avail_mtx is an active mutex, we need to disable interrupts1096 * to prevent deadlock with TLB shootdown.920 * Since the mem_avail_mtx is an active mutex, we need to 921 * disable interrupts to prevent deadlock with TLB shootdown. 1097 922 */ 1098 923 ipl_t ipl = interrupts_disable(); … … 1100 925 1101 926 if (mem_avail_req > 0) 1102 mem_avail_req = min(mem_avail_req, size);927 mem_avail_req = min(mem_avail_req, count); 1103 928 else 1104 mem_avail_req = size; 929 mem_avail_req = count; 930 1105 931 size_t gen = mem_avail_gen; 1106 932 … … 1118 944 } 1119 945 1120 pfn_t pfn = zone_frame_alloc(&zones.info[znum], order)1121 + zones.info[znum].base;946 pfn_t pfn = zone_frame_alloc(&zones.info[znum], count, 947 frame_constraint) + zones.info[znum].base; 1122 948 1123 949 irq_spinlock_unlock(&zones.lock, true); … … 1126 952 *pzone = znum; 1127 953 1128 if (flags & FRAME_KA) 1129 return (void *) PA2KA(PFN2ADDR(pfn)); 1130 1131 return (void *) PFN2ADDR(pfn); 1132 } 1133 1134 void *frame_alloc(uint8_t order, frame_flags_t flags) 1135 { 1136 return frame_alloc_generic(order, flags, NULL); 1137 } 1138 1139 void *frame_alloc_noreserve(uint8_t order, frame_flags_t flags) 1140 { 1141 return frame_alloc_generic(order, flags | FRAME_NO_RESERVE, NULL); 1142 } 1143 1144 /** Free a frame. 1145 * 1146 * Find respective frame structure for supplied physical frame address. 1147 * Decrement frame reference count. If it drops to zero, move the frame 1148 * structure to free list. 1149 * 1150 * @param frame Physical Address of of the frame to be freed. 954 return PFN2ADDR(pfn); 955 } 956 957 uintptr_t frame_alloc(size_t count, frame_flags_t flags, uintptr_t constraint) 958 { 959 return frame_alloc_generic(count, flags, constraint, NULL); 960 } 961 962 /** Free frames of physical memory. 
963 * 964 * Find respective frame structures for supplied physical frames. 965 * Decrement each frame reference count. If it drops to zero, mark 966 * the frames as available. 967 * 968 * @param start Physical Address of the first frame to be freed. 969 * @param count Number of frames to free. 1151 970 * @param flags Flags to control memory reservation. 1152 971 * 1153 972 */ 1154 void frame_free_generic(uintptr_t frame, frame_flags_t flags)1155 { 1156 size_t size;973 void frame_free_generic(uintptr_t start, size_t count, frame_flags_t flags) 974 { 975 size_t freed = 0; 1157 976 1158 977 irq_spinlock_lock(&zones.lock, true); 1159 978 1160 /* 1161 * First, find host frame zone for addr. 1162 */ 1163 pfn_t pfn = ADDR2PFN(frame); 1164 size_t znum = find_zone(pfn, 1, 0); 1165 1166 ASSERT(znum != (size_t) -1); 1167 1168 size = zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base); 979 for (size_t i = 0; i < count; i++) { 980 /* 981 * First, find host frame zone for addr. 982 */ 983 pfn_t pfn = ADDR2PFN(start) + i; 984 size_t znum = find_zone(pfn, 1, 0); 985 986 ASSERT(znum != (size_t) -1); 987 988 freed += zone_frame_free(&zones.info[znum], 989 pfn - zones.info[znum].base); 990 } 1169 991 1170 992 irq_spinlock_unlock(&zones.lock, true); … … 1172 994 /* 1173 995 * Signal that some memory has been freed. 996 * Since the mem_avail_mtx is an active mutex, 997 * we need to disable interrupts to prevent deadlock 998 * with TLB shootdown. 1174 999 */ 1175 1176 1177 /* 1178 * Since the mem_avail_mtx is an active mutex, we need to disable interrupts 1179 * to prevent deadlock with TLB shootdown. 1180 */ 1000 1181 1001 ipl_t ipl = interrupts_disable(); 1182 1002 mutex_lock(&mem_avail_mtx); 1003 1183 1004 if (mem_avail_req > 0) 1184 mem_avail_req -= min(mem_avail_req, size);1005 mem_avail_req -= min(mem_avail_req, freed); 1185 1006 1186 1007 if (mem_avail_req == 0) { … … 1188 1009 condvar_broadcast(&mem_avail_cv); 1189 1010 } 1011 1190 1012 mutex_unlock(&mem_avail_mtx); 1191 1013 interrupts_restore(ipl); 1192 1014 1193 1015 if (!(flags & FRAME_NO_RESERVE)) 1194 reserve_free( size);1195 } 1196 1197 void frame_free(uintptr_t frame )1198 { 1199 frame_free_generic(frame, 0);1200 } 1201 1202 void frame_free_noreserve(uintptr_t frame )1203 { 1204 frame_free_generic(frame, FRAME_NO_RESERVE);1016 reserve_free(freed); 1017 } 1018 1019 void frame_free(uintptr_t frame, size_t count) 1020 { 1021 frame_free_generic(frame, count, 0); 1022 } 1023 1024 void frame_free_noreserve(uintptr_t frame, size_t count) 1025 { 1026 frame_free_generic(frame, count, FRAME_NO_RESERVE); 1205 1027 } 1206 1028 … … 1236 1058 irq_spinlock_lock(&zones.lock, true); 1237 1059 1238 size_t i; 1239 for (i = 0; i < count; i++) { 1060 for (size_t i = 0; i < count; i++) { 1240 1061 size_t znum = find_zone(start + i, 1, 0); 1062 1241 1063 if (znum == (size_t) -1) /* PFN not found */ 1242 1064 continue; … … 1263 1085 /* Tell the architecture to create some memory */ 1264 1086 frame_low_arch_init(); 1087 1265 1088 if (config.cpu_active == 1) { 1266 1089 frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), … … 1269 1092 SIZE2FRAMES(config.stack_size)); 1270 1093 1271 size_t i; 1272 for (i = 0; i < init.cnt; i++) { 1273 pfn_t pfn = ADDR2PFN(init.tasks[i].paddr); 1274 frame_mark_unavailable(pfn, 1094 for (size_t i = 0; i < init.cnt; i++) 1095 frame_mark_unavailable(ADDR2PFN(init.tasks[i].paddr), 1275 1096 SIZE2FRAMES(init.tasks[i].size)); 1276 }1277 1097 1278 1098 if (ballocs.size) … … 1280 1100 SIZE2FRAMES(ballocs.size)); 1281 1101 1282 /* Black
list first frame, as allocating NULL would 1102 /* 1103 * Blacklist first frame, as allocating NULL would 1283 1104 * fail in some places 1284 1105 */ 1285 1106 frame_mark_unavailable(0, 1); 1286 1107 } 1108 1287 1109 frame_high_arch_init(); 1288 1110 } … … 1290 1112 /** Adjust bounds of physical memory region according to low/high memory split. 1291 1113 * 1292 * @param low[in] If true, the adjustment is performed to make the region 1293 * fit in the low memory. Otherwise the adjustment is 1294 * performed to make the region fit in the high memory. 1295 * @param basep[inout] Pointer to a variable which contains the region's base 1296 * address and which may receive the adjusted base address. 1297 * @param sizep[inout] Pointer to a variable which contains the region's size 1298 * and which may receive the adjusted size. 1299 * @retun True if the region still exists even after the 1300 * adjustment, false otherwise. 1114 * @param low[in] If true, the adjustment is performed to make the region 1115 * fit in the low memory. Otherwise the adjustment is 1116 * performed to make the region fit in the high memory. 1117 * @param basep[inout] Pointer to a variable which contains the region's base 1118 * address and which may receive the adjusted base address. 1119 * @param sizep[inout] Pointer to a variable which contains the region's size 1120 * and which may receive the adjusted size. 1121 * 1122 * @return True if the region still exists even after the adjustment. 1123 * @return False otherwise. 1124 * 1301 1125 */ 1302 1126 bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep) 1303 1127 { 1304 1128 uintptr_t limit = KA2PA(config.identity_base) + config.identity_size; 1305 1129 1306 1130 if (low) { 1307 1131 if (*basep > limit) 1308 1132 return false; 1133 1309 1134 if (*basep + *sizep > limit) 1310 1135 *sizep = limit - *basep; … … 1312 1137 if (*basep + *sizep <= limit) 1313 1138 return false; 1139 1314 1140 if (*basep <= limit) { 1315 1141 *sizep -= limit - *basep; … … 1317 1143 } 1318 1144 } 1145 1319 1146 return true; 1320 1147 } … … 1328 1155 1329 1156 uint64_t total = 0; 1330 size_t i;1331 for ( i = 0; i < zones.count; i++)1157 1158 for (size_t i = 0; i < zones.count; i++) 1332 1159 total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1333 1160 … … 1352 1179 *free = 0; 1353 1180 1354 size_t i; 1355 for (i = 0; i < zones.count; i++) { 1181 for (size_t i = 0; i < zones.count; i++) { 1356 1182 *total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1357 1183 … … 1381 1207 /* 1382 1208 * Because printing may require allocation of memory, we may not hold 1383 * the frame allocator locks when printing zone statistics. 1209 * the frame allocator locks when printing zone statistics. Therefore, 1384 1210 * we simply gather the statistics under the protection of the locks and 1385 1211 * print the statistics when the locks have been released. 
… … 1390 1216 */ 1391 1217 1392 size_t i; 1393 for (i = 0;; i++) { 1218 size_t free_lowmem = 0; 1219 size_t free_highmem = 0; 1220 size_t free_highprio = 0; 1221 1222 for (size_t i = 0;; i++) { 1394 1223 irq_spinlock_lock(&zones.lock, true); 1395 1224 … … 1399 1228 } 1400 1229 1401 uintptr_t base = PFN2ADDR(zones.info[i].base); 1230 pfn_t fbase = zones.info[i].base; 1231 uintptr_t base = PFN2ADDR(fbase); 1402 1232 size_t count = zones.info[i].count; 1403 1233 zone_flags_t flags = zones.info[i].flags; … … 1405 1235 size_t busy_count = zones.info[i].busy_count; 1406 1236 1237 bool available = ((flags & ZONE_AVAILABLE) != 0); 1238 bool lowmem = ((flags & ZONE_LOWMEM) != 0); 1239 bool highmem = ((flags & ZONE_HIGHMEM) != 0); 1240 bool highprio = is_high_priority(fbase, count); 1241 1242 if (available) { 1243 if (lowmem) 1244 free_lowmem += free_count; 1245 1246 if (highmem) 1247 free_highmem += free_count; 1248 1249 if (highprio) { 1250 free_highprio += free_count; 1251 } else { 1252 /* 1253 * Walk all frames of the zone and examine 1254 * all high priority memory to get accurate 1255 * statistics. 1256 */ 1257 1258 for (size_t index = 0; index < count; index++) { 1259 if (is_high_priority(fbase + index, 0)) { 1260 if (!bitmap_get(&zones.info[i].bitmap, index)) 1261 free_highprio++; 1262 } else 1263 break; 1264 } 1265 } 1266 } 1267 1407 1268 irq_spinlock_unlock(&zones.lock, true); 1408 1409 bool available = ((flags & ZONE_AVAILABLE) != 0);1410 1269 1411 1270 printf("%-4zu", i); … … 1432 1291 printf("\n"); 1433 1292 } 1293 1294 printf("\n"); 1295 1296 uint64_t size; 1297 const char *size_suffix; 1298 1299 bin_order_suffix(FRAMES2SIZE(free_lowmem), &size, &size_suffix, 1300 false); 1301 printf("Available low memory: %zu frames (%" PRIu64 " %s)\n", 1302 free_lowmem, size, size_suffix); 1303 1304 bin_order_suffix(FRAMES2SIZE(free_highmem), &size, &size_suffix, 1305 false); 1306 printf("Available high memory: %zu frames (%" PRIu64 " %s)\n", 1307 free_highmem, size, size_suffix); 1308 1309 bin_order_suffix(FRAMES2SIZE(free_highprio), &size, &size_suffix, 1310 false); 1311 printf("Available high priority: %zu frames (%" PRIu64 " %s)\n", 1312 free_highprio, size, size_suffix); 1434 1313 } 1435 1314 … … 1444 1323 size_t znum = (size_t) -1; 1445 1324 1446 size_t i; 1447 for (i = 0; i < zones.count; i++) { 1325 for (size_t i = 0; i < zones.count; i++) { 1448 1326 if ((i == num) || (PFN2ADDR(zones.info[i].base) == num)) { 1449 1327 znum = i; … … 1458 1336 } 1459 1337 1460 uintptr_t base = PFN2ADDR(zones.info[i].base); 1461 zone_flags_t flags = zones.info[i].flags; 1462 size_t count = zones.info[i].count; 1463 size_t free_count = zones.info[i].free_count; 1464 size_t busy_count = zones.info[i].busy_count; 1338 size_t free_lowmem = 0; 1339 size_t free_highmem = 0; 1340 size_t free_highprio = 0; 1341 1342 pfn_t fbase = zones.info[znum].base; 1343 uintptr_t base = PFN2ADDR(fbase); 1344 zone_flags_t flags = zones.info[znum].flags; 1345 size_t count = zones.info[znum].count; 1346 size_t free_count = zones.info[znum].free_count; 1347 size_t busy_count = zones.info[znum].busy_count; 1348 1349 bool available = ((flags & ZONE_AVAILABLE) != 0); 1350 bool lowmem = ((flags & ZONE_LOWMEM) != 0); 1351 bool highmem = ((flags & ZONE_HIGHMEM) != 0); 1352 bool highprio = is_high_priority(fbase, count); 1353 1354 if (available) { 1355 if (lowmem) 1356 free_lowmem = free_count; 1357 1358 if (highmem) 1359 free_highmem = free_count; 1360 1361 if (highprio) { 1362 free_highprio = free_count; 1363 } else { 1364 /* 1365 * Walk 
all frames of the zone and examine 1366 * all high priority memory to get accurate 1367 * statistics. 1368 */ 1369 1370 for (size_t index = 0; index < count; index++) { 1371 if (is_high_priority(fbase + index, 0)) { 1372 if (!bitmap_get(&zones.info[znum].bitmap, index)) 1373 free_highprio++; 1374 } else 1375 break; 1376 } 1377 } 1378 } 1465 1379 1466 1380 irq_spinlock_unlock(&zones.lock, true); 1467 1468 bool available = ((flags & ZONE_AVAILABLE) != 0);1469 1381 1470 1382 uint64_t size; 1471 1383 const char *size_suffix; 1384 1472 1385 bin_order_suffix(FRAMES2SIZE(count), &size, &size_suffix, false); 1473 1386 1474 printf("Zone number: %zu\n", znum);1475 printf("Zone base address: %p\n", (void *) base);1476 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count,1387 printf("Zone number: %zu\n", znum); 1388 printf("Zone base address: %p\n", (void *) base); 1389 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count, 1477 1390 size, size_suffix); 1478 printf("Zone flags: %c%c%c%c%c\n",1391 printf("Zone flags: %c%c%c%c%c\n", 1479 1392 available ? 'A' : '-', 1480 1393 (flags & ZONE_RESERVED) ? 'R' : '-', … … 1486 1399 bin_order_suffix(FRAMES2SIZE(busy_count), &size, &size_suffix, 1487 1400 false); 1488 printf("Allocated space: %zu frames (%" PRIu64 " %s)\n",1401 printf("Allocated space: %zu frames (%" PRIu64 " %s)\n", 1489 1402 busy_count, size, size_suffix); 1403 1490 1404 bin_order_suffix(FRAMES2SIZE(free_count), &size, &size_suffix, 1491 1405 false); 1492 printf("Available space: %zu frames (%" PRIu64 " %s)\n",1406 printf("Available space: %zu frames (%" PRIu64 " %s)\n", 1493 1407 free_count, size, size_suffix); 1408 1409 bin_order_suffix(FRAMES2SIZE(free_lowmem), &size, &size_suffix, 1410 false); 1411 printf("Available low memory: %zu frames (%" PRIu64 " %s)\n", 1412 free_lowmem, size, size_suffix); 1413 1414 bin_order_suffix(FRAMES2SIZE(free_highmem), &size, &size_suffix, 1415 false); 1416 printf("Available high memory: %zu frames (%" PRIu64 " %s)\n", 1417 free_highmem, size, size_suffix); 1418 1419 bin_order_suffix(FRAMES2SIZE(free_highprio), &size, &size_suffix, 1420 false); 1421 printf("Available high priority: %zu frames (%" PRIu64 " %s)\n", 1422 free_highprio, size, size_suffix); 1494 1423 } 1495 1424 } -
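The net effect of the frame.c changes above is a new allocator interface: callers ask for an explicit number of contiguous frames instead of a power-of-two order, pass a constraint naming the physical address bits that must stay clear in the first frame, and get back a plain physical address, with 0 (rather than NULL) signalling failure. A minimal caller under the new interface might look like the following sketch; the count, flag combination and constraint value are illustrative assumptions, not taken from this changeset:

    /* Sketch: allocate three contiguous low-memory frames whose physical
       address has none of the low 24 bits set, i.e. is 16 MiB-aligned;
       FRAME_ATOMIC makes the allocator fail instead of sleeping. */
    static int example_alloc(void)
    {
        uintptr_t phys = frame_alloc(3, FRAME_LOWMEM | FRAME_ATOMIC, 0xffffff);
        if (phys == 0)
            return ENOMEM;  /* failure is now a zero address, not NULL */

        /* ... use the frames ... */

        frame_free(phys, 3);  /* the frame count must be repeated on free */
        return EOK;
    }

Note that frame_free() now also takes the count, since the bitmap-based zones no longer track a per-block buddy order from which the size could be recovered.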
kernel/generic/src/mm/km.c
r7f84430 r99c2c69e 239 239 uintptr_t km_temporary_page_get(uintptr_t *framep, frame_flags_t flags) 240 240 { 241 uintptr_t frame;242 uintptr_t page;243 244 241 ASSERT(THREAD); 245 242 ASSERT(framep); 246 243 ASSERT(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC))); 247 244 248 245 /* 249 246 * Allocate a frame, preferably from high memory. 250 247 */ 251 frame = (uintptr_t) frame_alloc(ONE_FRAME, 252 FRAME_HIGHMEM | FRAME_ATOMIC | flags); 248 uintptr_t page; 249 uintptr_t frame = 250 frame_alloc(1, FRAME_HIGHMEM | FRAME_ATOMIC | flags, 0); 253 251 if (frame) { 254 252 page = km_map(frame, PAGE_SIZE, 255 253 PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE); 256 ASSERT(page); // FIXME 254 255 // FIXME 256 ASSERT(page); 257 257 } else { 258 frame = (uintptr_t) frame_alloc(ONE_FRAME, 259 FRAME_LOWMEM | flags); 258 frame = frame_alloc(1, FRAME_LOWMEM | flags, 0); 260 259 if (!frame) 261 260 return (uintptr_t) NULL; 261 262 262 page = PA2KA(frame); 263 263 } 264 264 265 265 *framep = frame; 266 return page; 266 return page; 267 267 } 268 268 -
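km_temporary_page_get() above now allocates with an explicit frame count and constraint, trying a single high-memory frame atomically first and falling back to low memory. A hedged usage sketch follows; km_temporary_page_put() as the releasing counterpart is an assumption about the surrounding kernel mapping API, not something shown in this hunk:

    uintptr_t frame;
    uintptr_t page = km_temporary_page_get(&frame, FRAME_NO_RESERVE);

    /* ... access the physical frame through the temporary mapping ... */

    km_temporary_page_put(page);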
kernel/generic/src/mm/page.c
r7f84430 r99c2c69e 169 169 } 170 170 171 int page_find_mapping(uintptr_t virt, void **phys)171 int page_find_mapping(uintptr_t virt, uintptr_t *phys) 172 172 { 173 173 page_table_lock(AS, true); … … 179 179 } 180 180 181 *phys = (void *)PTE_GET_FRAME(pte) +181 *phys = PTE_GET_FRAME(pte) + 182 182 (virt - ALIGN_DOWN(virt, PAGE_SIZE)); 183 183 … … 193 193 * 194 194 */ 195 sysarg_t sys_page_find_mapping(uintptr_t virt, void*phys_ptr)196 { 197 void *phys;195 sysarg_t sys_page_find_mapping(uintptr_t virt, uintptr_t *phys_ptr) 196 { 197 uintptr_t phys; 198 198 int rc = page_find_mapping(virt, &phys); 199 199 if (rc != EOK) -
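With page_find_mapping() now reporting the physical address through a plain uintptr_t instead of a void pointer, a caller probing a virtual address could look roughly like this (virt and the printf reporting are illustrative only):

    uintptr_t phys;
    int rc = page_find_mapping(virt, &phys);
    if (rc == EOK)
        printf("%p translates to %p\n", (void *) virt, (void *) phys);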
kernel/generic/src/mm/slab.c
r7f84430 r99c2c69e 182 182 size_t zone = 0; 183 183 184 void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone); 185 if (!data) { 184 uintptr_t data_phys = 185 frame_alloc_generic(cache->frames, flags, 0, &zone); 186 if (!data_phys) 186 187 return NULL; 187 } 188 189 void *data = (void *) PA2KA(data_phys); 188 190 189 191 slab_t *slab; … … 193 195 slab = slab_alloc(slab_extern_cache, flags); 194 196 if (!slab) { 195 frame_free(KA2PA(data) );197 frame_free(KA2PA(data), cache->frames); 196 198 return NULL; 197 199 } 198 200 } else { 199 fsize = (PAGE_SIZE << cache->order);201 fsize = FRAMES2SIZE(cache->frames); 200 202 slab = data + fsize - sizeof(*slab); 201 203 } … … 203 205 /* Fill in slab structures */ 204 206 size_t i; 205 for (i = 0; i < ((size_t) 1 << cache->order); i++)207 for (i = 0; i < cache->frames; i++) 206 208 frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone); 207 209 … … 225 227 NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab) 226 228 { 227 frame_free(KA2PA(slab->start) );229 frame_free(KA2PA(slab->start), slab->cache->frames); 228 230 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) 229 231 slab_free(slab_extern_cache, slab); … … 231 233 atomic_dec(&cache->allocated_slabs); 232 234 233 return (1 << cache->order);235 return cache->frames; 234 236 } 235 237 … … 558 560 { 559 561 if (cache->flags & SLAB_CACHE_SLINSIDE) 560 return ( (PAGE_SIZE << cache->order)561 - sizeof(slab_t)) /cache->size;562 return (FRAMES2SIZE(cache->frames) - sizeof(slab_t)) / 563 cache->size; 562 564 else 563 return (PAGE_SIZE << cache->order) / cache->size;565 return FRAMES2SIZE(cache->frames) / cache->size; 564 566 } 565 567 … … 570 572 { 571 573 size_t objects = comp_objects(cache); 572 size_t ssize = PAGE_SIZE << cache->order;574 size_t ssize = FRAMES2SIZE(cache->frames); 573 575 574 576 if (cache->flags & SLAB_CACHE_SLINSIDE) … … 634 636 cache->flags |= SLAB_CACHE_SLINSIDE; 635 637 636 /* Minimum slab order */ 637 size_t pages = SIZE2FRAMES(cache->size); 638 639 /* We need the 2^order >= pages */ 640 if (pages == 1) 641 cache->order = 0; 642 else 643 cache->order = fnzb(pages - 1) + 1; 638 /* Minimum slab frames */ 639 cache->frames = SIZE2FRAMES(cache->size); 644 640 645 641 while (badness(cache) > SLAB_MAX_BADNESS(cache)) 646 cache-> order += 1;642 cache->frames <<= 1; 647 643 648 644 cache->objects = comp_objects(cache); … … 810 806 811 807 size_t frames = 0; 812 list_foreach(slab_cache_list, cur) { 813 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link); 808 list_foreach(slab_cache_list, link, slab_cache_t, cache) { 814 809 frames += _slab_reclaim(cache, flags); 815 810 } … … 871 866 872 867 const char *name = cache->name; 873 uint8_t order = cache->order;868 size_t frames = cache->frames; 874 869 size_t size = cache->size; 875 870 size_t objects = cache->objects; … … 881 876 irq_spinlock_unlock(&slab_cache_lock, true); 882 877 883 printf("%-18s %8zu %8 u %8zu %8ld %8ld %8ld %-5s\n",884 name, size, (1 << order), objects, allocated_slabs,878 printf("%-18s %8zu %8zu %8zu %8ld %8ld %8ld %-5s\n", 879 name, size, frames, objects, allocated_slabs, 885 880 cached_objs, allocated_objs, 886 881 flags & SLAB_CACHE_SLINSIDE ? 
"in" : "out"); … … 936 931 irq_spinlock_lock(&slab_cache_lock, false); 937 932 938 list_foreach(slab_cache_list, cur) { 939 slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link); 933 list_foreach(slab_cache_list, link, slab_cache_t, slab) { 940 934 if ((slab->flags & SLAB_CACHE_MAGDEFERRED) != 941 935 SLAB_CACHE_MAGDEFERRED) -
kernel/generic/src/proc/scheduler.c
r7f84430 r99c2c69e 739 739 740 740 printf("\trq[%u]: ", i); 741 list_foreach(cpus[cpu].rq[i].rq, cur) { 742 thread_t *thread = list_get_instance(cur, 743 thread_t, rq_link); 741 list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t, 742 thread) { 744 743 printf("%" PRIu64 "(%s) ", thread->tid, 745 744 thread_states[thread->state]); -
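The scheduler hunk is one instance of a pattern repeated in the files below: list_foreach() now takes the link member name and the item type and declares a typed iterator itself, so call sites drop their explicit list_get_instance() conversions. Schematically, with the list and member names from the code above:

    /* Before: iterate over raw links, convert each one manually. */
    list_foreach(cpus[cpu].rq[i].rq, cur) {
        thread_t *thread = list_get_instance(cur, thread_t, rq_link);
        /* ... */
    }

    /* After: the macro performs the conversion itself. */
    list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t, thread) {
        /* ... use thread directly ... */
    }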
kernel/generic/src/proc/task.c
r7f84430 r99c2c69e 452 452 453 453 /* Current values of threads */ 454 list_foreach(task->threads, cur) { 455 thread_t *thread = list_get_instance(cur, thread_t, th_link); 456 454 list_foreach(task->threads, th_link, thread_t, thread) { 457 455 irq_spinlock_lock(&thread->lock, false); 458 456 … … 484 482 */ 485 483 486 list_foreach(task->threads, cur) { 487 thread_t *thread = list_get_instance(cur, thread_t, th_link); 484 list_foreach(task->threads, th_link, thread_t, thread) { 488 485 bool sleeping = false; 489 486 -
kernel/generic/src/proc/thread.c
r7f84430 r99c2c69e 192 192 kmflags &= ~FRAME_HIGHMEM; 193 193 194 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags); 195 if (!thread->kstack) { 194 uintptr_t stack_phys = 195 frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1); 196 if (!stack_phys) { 196 197 #ifdef CONFIG_FPU 197 198 if (thread->saved_fpu_context) … … 201 202 } 202 203 204 thread->kstack = (uint8_t *) PA2KA(stack_phys); 205 203 206 #ifdef CONFIG_UDEBUG 204 207 mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE); … … 216 219 thr_destructor_arch(thread); 217 220 218 frame_free(KA2PA(thread->kstack) );221 frame_free(KA2PA(thread->kstack), STACK_FRAMES); 219 222 220 223 #ifdef CONFIG_FPU -
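Kernel stacks are now allocated with a constraint of STACK_SIZE - 1, which forbids all low-order physical address bits and therefore yields a block naturally aligned to STACK_SIZE, replacing the alignment that a power-of-two buddy block previously provided implicitly. A condensed sketch of the invariant (the ASSERT is illustrative, not part of this changeset):

    uintptr_t stack_phys = frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);

    /* The constraint guarantees natural alignment of the stack block. */
    ASSERT((stack_phys & (STACK_SIZE - 1)) == 0);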
kernel/generic/src/synch/futex.c
r7f84430 r99c2c69e 274 274 mutex_lock(&TASK->futexes_lock); 275 275 276 list_foreach(TASK->futexes.leaf_list, cur) { 277 btree_node_t *node; 276 list_foreach(TASK->futexes.leaf_list, leaf_link, btree_node_t, node) { 278 277 unsigned int i; 279 278 280 node = list_get_instance(cur, btree_node_t, leaf_link);281 279 for (i = 0; i < node->keys; i++) { 282 280 futex_t *ftx; -
kernel/generic/src/sysinfo/stats.c
r7f84430 r99c2c69e 175 175 176 176 /* Walk the B+ tree and count pages */ 177 list_foreach(as->as_area_btree.leaf_list, cur) { 178 btree_node_t *node = 179 list_get_instance(cur, btree_node_t, leaf_link); 180 177 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, 178 node) { 181 179 unsigned int i; 182 180 for (i = 0; i < node->keys; i++) { … … 218 216 219 217 /* Walk the B+ tree and count pages */ 220 list_foreach(as->as_area_btree.leaf_list, cur) { 221 btree_node_t *node = 222 list_get_instance(cur, btree_node_t, leaf_link); 223 218 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) { 224 219 unsigned int i; 225 220 for (i = 0; i < node->keys; i++) { -
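The stats.c walk over each address space's area B+ tree uses the same typed list_foreach() over the tree's leaf list. Stripped of the locking done in the real code, the page-counting loop has roughly this shape (field names as used above; error handling elided):

    size_t pages = 0;
    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {
        for (unsigned int i = 0; i < node->keys; i++) {
            as_area_t *area = (as_area_t *) node->value[i];
            pages += area->pages;
        }
    }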
kernel/generic/src/time/clock.c
r7f84430 r99c2c69e 81 81 void clock_counter_init(void) 82 82 { 83 void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);84 if ( !faddr)83 uintptr_t faddr = frame_alloc(1, FRAME_ATOMIC, 0); 84 if (faddr == 0) 85 85 panic("Cannot allocate page for clock."); 86 86 … … 91 91 uptime->useconds = 0; 92 92 93 clock_parea.pbase = (uintptr_t)faddr;93 clock_parea.pbase = faddr; 94 94 clock_parea.frames = 1; 95 95 clock_parea.unpriv = true; -
kernel/generic/src/udebug/udebug.c
r7f84430 r99c2c69e 406 406 407 407 /* Finish debugging of all userspace threads */ 408 list_foreach(task->threads, cur) { 409 thread_t *thread = list_get_instance(cur, thread_t, th_link); 410 408 list_foreach(task->threads, th_link, thread_t, thread) { 411 409 mutex_lock(&thread->udebug.lock); 412 410 -
kernel/generic/src/udebug/udebug_ops.c
r7f84430 r99c2c69e 196 196 /* Set udebug.active on all of the task's userspace threads. */ 197 197 198 list_foreach(TASK->threads, cur) { 199 thread_t *thread = list_get_instance(cur, thread_t, th_link); 200 198 list_foreach(TASK->threads, th_link, thread_t, thread) { 201 199 mutex_lock(&thread->udebug.lock); 202 200 if (thread->uspace) { … … 389 387 390 388 /* FIXME: make sure the thread isn't past debug shutdown... */ 391 list_foreach(TASK->threads, cur) { 392 thread_t *thread = list_get_instance(cur, thread_t, th_link); 393 389 list_foreach(TASK->threads, th_link, thread_t, thread) { 394 390 irq_spinlock_lock(&thread->lock, false); 395 391 bool uspace = thread->uspace;