source: mainline/kernel/generic/src/mm/frame.c@ 207e8880

Last change on this file since 207e8880 was 1066041, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

preemption_disable: Turned functions into macros. Moved THREAD, AS, TASK, CPU into thread.h, as.h, task.h, cpu.h to fix the include hell that ensued.

1/*
2 * Copyright (c) 2001-2005 Jakub Jermar
3 * Copyright (c) 2005 Sergey Bondari
4 * Copyright (c) 2009 Martin Decky
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * - The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/** @addtogroup genericmm
32 * @{
33 */
34
35/**
36 * @file
37 * @brief Physical frame allocator.
38 *
39 * This file contains the physical frame allocator and memory zone management.
40 * The frame allocator is built on top of the buddy allocator.
41 *
42 * @see buddy.c
43 */
44
45#include <typedefs.h>
46#include <mm/frame.h>
47#include <mm/reserve.h>
48#include <mm/as.h>
49#include <panic.h>
50#include <debug.h>
51#include <adt/list.h>
52#include <synch/mutex.h>
53#include <synch/condvar.h>
54#include <arch/asm.h>
55#include <arch.h>
56#include <print.h>
57#include <align.h>
58#include <mm/slab.h>
59#include <bitops.h>
60#include <macros.h>
61#include <config.h>
62#include <str.h>
63#include <proc/thread.h> /* THREAD */
64
65zones_t zones;
66
67/*
68 * Synchronization primitives used to sleep when there is no memory
69 * available.
70 */
71static mutex_t mem_avail_mtx;
72static condvar_t mem_avail_cv;
73static size_t mem_avail_req = 0; /**< Number of frames requested. */
74static size_t mem_avail_gen = 0; /**< Generation counter. */
75
76/********************/
77/* Helper functions */
78/********************/
79
80NO_TRACE static inline size_t frame_index(zone_t *zone, frame_t *frame)
81{
82 return (size_t) (frame - zone->frames);
83}
84
85NO_TRACE static inline size_t frame_index_abs(zone_t *zone, frame_t *frame)
86{
87 return (size_t) (frame - zone->frames) + zone->base;
88}
89
90NO_TRACE static inline bool frame_index_valid(zone_t *zone, size_t index)
91{
92 return (index < zone->count);
93}
94
95NO_TRACE static inline size_t make_frame_index(zone_t *zone, frame_t *frame)
96{
97 return (frame - zone->frames);
98}
99
100/** Initialize frame structure.
101 *
102 * @param frame Frame structure to be initialized.
103 *
104 */
105NO_TRACE static void frame_initialize(frame_t *frame)
106{
107 frame->refcount = 1;
108 frame->buddy_order = 0;
109}
110
111/*******************/
112/* Zones functions */
113/*******************/
114
115/** Insert-sort zone into zones list.
116 *
117 * Assume interrupts are disabled and zones lock is
118 * locked.
119 *
120 * @param base Base frame of the newly inserted zone.
121 * @param count Number of frames of the newly inserted zone.
122 *
123 * @return Zone number on success, -1 on error.
124 *
125 */
126NO_TRACE static size_t zones_insert_zone(pfn_t base, size_t count,
127 zone_flags_t flags)
128{
129 if (zones.count + 1 == ZONES_MAX) {
130 printf("Maximum zone count %u exceeded!\n", ZONES_MAX);
131 return (size_t) -1;
132 }
133
134 size_t i;
135 for (i = 0; i < zones.count; i++) {
136 /* Check for overlap */
137 if (overlaps(zones.info[i].base, zones.info[i].count,
138 base, count)) {
139
140 /*
141 * If the overlapping zones are of the same type
142 * and the new zone is completely within the previous
143 * one, then quietly ignore the new zone.
144 *
145 */
146
147 if ((zones.info[i].flags != flags) ||
148 (!iswithin(zones.info[i].base, zones.info[i].count,
149 base, count))) {
150 printf("Zone (%p, %p) overlaps "
151 "with previous zone (%p %p)!\n",
152 (void *) PFN2ADDR(base), (void *) PFN2ADDR(count),
153 (void *) PFN2ADDR(zones.info[i].base),
154 (void *) PFN2ADDR(zones.info[i].count));
155 }
156
157 return (size_t) -1;
158 }
159 if (base < zones.info[i].base)
160 break;
161 }
162
163 /* Move other zones up */
164 size_t j;
165 for (j = zones.count; j > i; j--) {
166 zones.info[j] = zones.info[j - 1];
167 if (zones.info[j].buddy_system != NULL)
168 zones.info[j].buddy_system->data =
169 (void *) &zones.info[j];
170 }
171
172 zones.count++;
173
174 return i;
175}
176
177/** Get total available frames.
178 *
179 * Assume interrupts are disabled and zones lock is
180 * locked.
181 *
182 * @return Total number of available frames.
183 *
184 */
185NO_TRACE static size_t frame_total_free_get_internal(void)
186{
187 size_t total = 0;
188 size_t i;
189
190 for (i = 0; i < zones.count; i++)
191 total += zones.info[i].free_count;
192
193 return total;
194}
195
196NO_TRACE size_t frame_total_free_get(void)
197{
198 size_t total;
199
200 irq_spinlock_lock(&zones.lock, true);
201 total = frame_total_free_get_internal();
202 irq_spinlock_unlock(&zones.lock, true);
203
204 return total;
205}
206
207
208/** Find a zone containing a given range of frames.
209 *
210 * Assume interrupts are disabled and zones lock is
211 * locked.
212 *
213 * @param frame Frame number contained in zone.
214 * @param count Number of frames to look for.
215 * @param hint Used as zone hint.
216 *
217 * @return Zone index or -1 if not found.
218 *
219 */
220NO_TRACE size_t find_zone(pfn_t frame, size_t count, size_t hint)
221{
222 if (hint >= zones.count)
223 hint = 0;
224
225 size_t i = hint;
226 do {
227 if ((zones.info[i].base <= frame)
228 && (zones.info[i].base + zones.info[i].count >= frame + count))
229 return i;
230
231 i++;
232 if (i >= zones.count)
233 i = 0;
234
235 } while (i != hint);
236
237 return (size_t) -1;
238}
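/*
 * Illustrative note (not part of the original source): the scan is circular
 * and starts at the hint, so repeated lookups that hit the same zone succeed
 * on the first iteration. This is also why frame_alloc_generic() hands the
 * zone it allocated from back to the caller through its pzone argument.
 * A caller-side sketch:
 *
 *	size_t znum = find_zone(pfn, 1, hint);
 *	if (znum != (size_t) -1)
 *		hint = znum;  /* remember for the next lookup */
 */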
239
240/** @return True if zone can allocate specified order */
241NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order)
242{
243 return ((zone->flags & ZONE_AVAILABLE) &&
244 buddy_system_can_alloc(zone->buddy_system, order));
245}
246
247/** Find a zone that can allocate order frames.
248 *
249 * Assume interrupts are disabled and zones lock is
250 * locked.
251 *
252 * @param order Size (2^order) of free space we are trying to find.
253 * @param flags Required flags of the target zone.
255 * @param hint Preferred zone.
255 *
256 */
257NO_TRACE static size_t find_free_zone(uint8_t order, zone_flags_t flags,
258 size_t hint)
259{
260 if (hint >= zones.count)
261 hint = 0;
262
263 size_t i = hint;
264 do {
265 /*
266 * Check whether the zone meets the search criteria.
267 */
268 if (ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) {
269 /*
270 * Check if the zone has 2^order frames area available.
271 */
272 if (zone_can_alloc(&zones.info[i], order))
273 return i;
274 }
275
276 i++;
277 if (i >= zones.count)
278 i = 0;
279
280 } while (i != hint);
281
282 return (size_t) -1;
283}
284
285/**************************/
286/* Buddy system functions */
287/**************************/
288
289/** Buddy system find_block implementation.
290 *
291 * Find the block that is the parent of the given block.
292 * That means walking towards lower addresses until such a block is found.
293 *
294 * @param order The order of the parent must be different from this
295 * parameter!
296 *
297 */
298NO_TRACE static link_t *zone_buddy_find_block(buddy_system_t *buddy,
299 link_t *child, uint8_t order)
300{
301 frame_t *frame = list_get_instance(child, frame_t, buddy_link);
302 zone_t *zone = (zone_t *) buddy->data;
303
304 size_t index = frame_index(zone, frame);
305 do {
306 if (zone->frames[index].buddy_order != order)
307 return &zone->frames[index].buddy_link;
308 } while (index-- > 0);
309
310 return NULL;
311}
312
313/** Buddy system find_buddy implementation.
314 *
315 * @param buddy Buddy system.
316 * @param block Block for which buddy should be found.
317 *
318 * @return Buddy for given block if found.
319 *
320 */
321NO_TRACE static link_t *zone_buddy_find_buddy(buddy_system_t *buddy,
322 link_t *block)
323{
324 frame_t *frame = list_get_instance(block, frame_t, buddy_link);
325 zone_t *zone = (zone_t *) buddy->data;
326 ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame),
327 frame->buddy_order));
328
329 bool is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame);
330
331 size_t index;
332 if (is_left) {
333 index = (frame_index(zone, frame)) +
334 (1 << frame->buddy_order);
335 } else { /* is_right */
336 index = (frame_index(zone, frame)) -
337 (1 << frame->buddy_order);
338 }
339
340 if (frame_index_valid(zone, index)) {
341 if ((zone->frames[index].buddy_order == frame->buddy_order) &&
342 (zone->frames[index].refcount == 0)) {
343 return &zone->frames[index].buddy_link;
344 }
345 }
346
347 return NULL;
348}
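/*
 * Illustrative note (not part of the original source): the buddy is found by
 * pure index arithmetic. For a left block of order 3 starting at frame index
 * 16, the candidate buddy starts at 16 + (1 << 3) == 24; for the right block
 * at index 24 it is 24 - (1 << 3) == 16. The candidate only qualifies when it
 * has the same order and refcount == 0, i.e. it is free and has not been
 * split any further.
 */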
349
350/** Buddy system bisect implementation.
351 *
352 * @param buddy Buddy system.
353 * @param block Block to bisect.
354 *
355 * @return Right block.
356 *
357 */
358NO_TRACE static link_t *zone_buddy_bisect(buddy_system_t *buddy, link_t *block)
359{
360 frame_t *frame_l = list_get_instance(block, frame_t, buddy_link);
361 frame_t *frame_r = (frame_l + (1 << (frame_l->buddy_order - 1)));
362
363 return &frame_r->buddy_link;
364}
365
366/** Buddy system coalesce implementation.
367 *
368 * @param buddy Buddy system.
369 * @param block_1 First block.
370 * @param block_2 First block's buddy.
371 *
372 * @return Coalesced block (actually block that represents lower
373 * address).
374 *
375 */
376NO_TRACE static link_t *zone_buddy_coalesce(buddy_system_t *buddy,
377 link_t *block_1, link_t *block_2)
378{
379 frame_t *frame1 = list_get_instance(block_1, frame_t, buddy_link);
380 frame_t *frame2 = list_get_instance(block_2, frame_t, buddy_link);
381
382 return ((frame1 < frame2) ? block_1 : block_2);
383}
384
385/** Buddy system set_order implementation.
386 *
387 * @param buddy Buddy system.
388 * @param block Buddy system block.
389 * @param order Order to set.
390 *
391 */
392NO_TRACE static void zone_buddy_set_order(buddy_system_t *buddy, link_t *block,
393 uint8_t order)
394{
395 list_get_instance(block, frame_t, buddy_link)->buddy_order = order;
396}
397
398/** Buddy system get_order implementation.
399 *
400 * @param buddy Buddy system.
401 * @param block Buddy system block.
402 *
403 * @return Order of block.
404 *
405 */
406NO_TRACE static uint8_t zone_buddy_get_order(buddy_system_t *buddy,
407 link_t *block)
408{
409 return list_get_instance(block, frame_t, buddy_link)->buddy_order;
410}
411
412/** Buddy system mark_busy implementation.
413 *
414 * @param buddy Buddy system.
415 * @param block Buddy system block.
416 *
417 */
418NO_TRACE static void zone_buddy_mark_busy(buddy_system_t *buddy, link_t *block)
419{
420 list_get_instance(block, frame_t, buddy_link)->refcount = 1;
421}
422
423/** Buddy system mark_available implementation.
424 *
425 * @param buddy Buddy system.
426 * @param block Buddy system block.
427 *
428 */
429NO_TRACE static void zone_buddy_mark_available(buddy_system_t *buddy,
430 link_t *block)
431{
432 list_get_instance(block, frame_t, buddy_link)->refcount = 0;
433}
434
435static buddy_system_operations_t zone_buddy_system_operations = {
436 .find_buddy = zone_buddy_find_buddy,
437 .bisect = zone_buddy_bisect,
438 .coalesce = zone_buddy_coalesce,
439 .set_order = zone_buddy_set_order,
440 .get_order = zone_buddy_get_order,
441 .mark_busy = zone_buddy_mark_busy,
442 .mark_available = zone_buddy_mark_available,
443 .find_block = zone_buddy_find_block
444};
445
446/******************/
447/* Zone functions */
448/******************/
449
450/** Allocate frame in particular zone.
451 *
452 * Assume zone is locked and is available for allocation.
453 * Panics if allocation is impossible.
454 *
455 * @param zone Zone to allocate from.
456 * @param order Allocate exactly 2^order frames.
457 *
458 * @return Frame index in zone.
459 *
460 */
461NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order)
462{
463 ASSERT(zone->flags & ZONE_AVAILABLE);
464
465 /* Allocate frames from zone buddy system */
466 link_t *link = buddy_system_alloc(zone->buddy_system, order);
467
468 ASSERT(link);
469
470 /* Update zone information. */
471 zone->free_count -= (1 << order);
472 zone->busy_count += (1 << order);
473
474 /* The frame will actually be the first frame of the block. */
475 frame_t *frame = list_get_instance(link, frame_t, buddy_link);
476
477 /* Get frame address */
478 return make_frame_index(zone, frame);
479}
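/*
 * Illustrative note (not part of the original source): the returned index is
 * relative to the zone, so callers that need an absolute PFN or a physical
 * address must add the zone base themselves, as frame_alloc_generic() does:
 *
 *	pfn_t pfn = zone_frame_alloc(&zones.info[znum], order)
 *	    + zones.info[znum].base;
 *	uintptr_t pa = PFN2ADDR(pfn);
 */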
480
481/** Free frame from zone.
482 *
483 * Assume zone is locked and is available for deallocation.
484 *
485 * @param zone Pointer to zone from which the frame is to be freed.
486 * @param frame_idx Frame index relative to zone.
487 *
488 * @return Number of freed frames.
489 *
490 */
491NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx)
492{
493 ASSERT(zone->flags & ZONE_AVAILABLE);
494
495 frame_t *frame = &zone->frames[frame_idx];
496 size_t size = 0;
497
498 ASSERT(frame->refcount);
499
500 if (!--frame->refcount) {
501 size = 1 << frame->buddy_order;
502 buddy_system_free(zone->buddy_system, &frame->buddy_link);
503 /* Update zone information. */
504 zone->free_count += size;
505 zone->busy_count -= size;
506 }
507
508 return size;
509}
510
511/** Return frame from zone. */
512NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t frame_idx)
513{
514 ASSERT(frame_idx < zone->count);
515 return &zone->frames[frame_idx];
516}
517
518/** Mark frame in zone unavailable to allocation. */
519NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx)
520{
521 ASSERT(zone->flags & ZONE_AVAILABLE);
522
523 frame_t *frame = zone_get_frame(zone, frame_idx);
524 if (frame->refcount)
525 return;
526
527 link_t *link __attribute__ ((unused));
528
529 link = buddy_system_alloc_block(zone->buddy_system,
530 &frame->buddy_link);
531
532 ASSERT(link);
533 zone->free_count--;
534 reserve_force_alloc(1);
535}
536
537/** Merge two zones.
538 *
539 * Expect buddy to point to space at least zone_conf_size large.
540 * Assume z1 & z2 are locked and compatible and zones lock is
541 * locked.
542 *
543 * @param z1 First zone to merge.
544 * @param z2 Second zone to merge.
545 * @param old_z1 Original data of the first zone.
546 * @param buddy Merged zone buddy.
547 *
548 */
549NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1,
550 buddy_system_t *buddy)
551{
552 ASSERT(zones.info[z1].flags & ZONE_AVAILABLE);
553 ASSERT(zones.info[z2].flags & ZONE_AVAILABLE);
554 ASSERT(zones.info[z1].flags == zones.info[z2].flags);
555 ASSERT(zones.info[z1].base < zones.info[z2].base);
556 ASSERT(!overlaps(zones.info[z1].base, zones.info[z1].count,
557 zones.info[z2].base, zones.info[z2].count));
558
559 /* Difference between zone bases */
560 pfn_t base_diff = zones.info[z2].base - zones.info[z1].base;
561
562 zones.info[z1].count = base_diff + zones.info[z2].count;
563 zones.info[z1].free_count += zones.info[z2].free_count;
564 zones.info[z1].busy_count += zones.info[z2].busy_count;
565 zones.info[z1].buddy_system = buddy;
566
567 uint8_t order = fnzb(zones.info[z1].count);
568 buddy_system_create(zones.info[z1].buddy_system, order,
569 &zone_buddy_system_operations, (void *) &zones.info[z1]);
570
571 zones.info[z1].frames =
572 (frame_t *) ((uint8_t *) zones.info[z1].buddy_system
573 + buddy_conf_size(order));
574
575 /* This marks all frames busy */
576 size_t i;
577 for (i = 0; i < zones.info[z1].count; i++)
578 frame_initialize(&zones.info[z1].frames[i]);
579
580 /* Copy frames from both zones to preserve full frame orders,
581 * parents etc. Set the refcount of all free frames (refcount == 0)
582 * to 1 and clear their order, because we will add all free frames
583 * back to the buddy allocator later. Do not touch busy frames, as
584 * they will not be reallocated during the merge and modifying them
585 * would cause problems with later allocation/freeing.
586 */
587 for (i = 0; i < old_z1->count; i++)
588 zones.info[z1].frames[i] = old_z1->frames[i];
589
590 for (i = 0; i < zones.info[z2].count; i++)
591 zones.info[z1].frames[base_diff + i]
592 = zones.info[z2].frames[i];
593
594 i = 0;
595 while (i < zones.info[z1].count) {
596 if (zones.info[z1].frames[i].refcount) {
597 /* Skip busy frames */
598 i += 1 << zones.info[z1].frames[i].buddy_order;
599 } else {
600 /* Free frames, set refcount = 1
601 * (all free frames have refcount == 0, so we need not
602 * check the order)
603 */
604 zones.info[z1].frames[i].refcount = 1;
605 zones.info[z1].frames[i].buddy_order = 0;
606 i++;
607 }
608 }
609
610 /* Add free blocks from the original zone z1 */
611 while (zone_can_alloc(old_z1, 0)) {
612 /* Allocate from the original zone */
613 pfn_t frame_idx = zone_frame_alloc(old_z1, 0);
614
615 /* Free the frame from the merged zone */
616 frame_t *frame = &zones.info[z1].frames[frame_idx];
617 frame->refcount = 0;
618 buddy_system_free(zones.info[z1].buddy_system, &frame->buddy_link);
619 }
620
621 /* Add free blocks from the original zone z2 */
622 while (zone_can_alloc(&zones.info[z2], 0)) {
623 /* Allocate from the original zone */
624 pfn_t frame_idx = zone_frame_alloc(&zones.info[z2], 0);
625
626 /* Free the frame from the merged zone */
627 frame_t *frame = &zones.info[z1].frames[base_diff + frame_idx];
628 frame->refcount = 0;
629 buddy_system_free(zones.info[z1].buddy_system, &frame->buddy_link);
630 }
631}
632
633/** Return old configuration frames into the zone.
634 *
635 * We have two cases:
636 * - The configuration data is outside the zone
637 * -> do nothing (perhaps call frame_free?)
638 * - The configuration data was created by zone_create
639 * or updated by reduce_region -> free every frame
640 *
641 * @param znum The actual zone where freeing should occur.
642 * @param pfn Old zone configuration frame.
643 * @param count Old zone frame count.
644 *
645 */
646NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count)
647{
648 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);
649
650 size_t cframes = SIZE2FRAMES(zone_conf_size(count));
651
652 if ((pfn < zones.info[znum].base)
653 || (pfn >= zones.info[znum].base + zones.info[znum].count))
654 return;
655
656 frame_t *frame __attribute__ ((unused));
657
658 frame = &zones.info[znum].frames[pfn - zones.info[znum].base];
659 ASSERT(!frame->buddy_order);
660
661 size_t i;
662 for (i = 0; i < cframes; i++) {
663 zones.info[znum].busy_count++;
664 (void) zone_frame_free(&zones.info[znum],
665 pfn - zones.info[znum].base + i);
666 }
667}
668
669/** Reduce allocated block to count of order 0 frames.
670 *
671 * The allocated block needs 2^order frames. Reduce all frames
672 * in the block to order 0 and free the unneeded frames. This means that
673 * when freeing the previously allocated block starting with frame_idx,
674 * you have to free every frame.
675 *
676 * @param znum Zone.
677 * @param frame_idx Index of the first frame of the block.
678 * @param count Allocated frames in block.
679 *
680 */
681NO_TRACE static void zone_reduce_region(size_t znum, pfn_t frame_idx,
682 size_t count)
683{
684 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);
685 ASSERT(frame_idx + count < zones.info[znum].count);
686
687 uint8_t order = zones.info[znum].frames[frame_idx].buddy_order;
688 ASSERT((size_t) (1 << order) >= count);
689
690 /* Reduce all blocks to order 0 */
691 size_t i;
692 for (i = 0; i < (size_t) (1 << order); i++) {
693 frame_t *frame = &zones.info[znum].frames[i + frame_idx];
694 frame->buddy_order = 0;
695 if (!frame->refcount)
696 frame->refcount = 1;
697 ASSERT(frame->refcount == 1);
698 }
699
700 /* Free unneeded frames */
701 for (i = count; i < (size_t) (1 << order); i++)
702 (void) zone_frame_free(&zones.info[znum], i + frame_idx);
703}
704
705/** Merge zones z1 and z2.
706 *
707 * The merged zones must be 2 zones with no zone existing in between
708 * (which means that z2 = z1 + 1). Both zones must be available zones
709 * with the same flags.
710 *
711 * When you create a new zone, the frame allocator configuration does
712 * not need to be 2^order in size. Once the allocator is running this is
713 * no longer possible, because the merged configuration data occupies more space :-/
714 *
715 */
716bool zone_merge(size_t z1, size_t z2)
717{
718 irq_spinlock_lock(&zones.lock, true);
719
720 bool ret = true;
721
722 /* We can join only two zones with no zone existing in between;
723 * the zones have to be available and have the same
724 * set of flags.
725 */
726 if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) ||
727 (zones.info[z1].flags != zones.info[z2].flags)) {
728 ret = false;
729 goto errout;
730 }
731
732 pfn_t cframes = SIZE2FRAMES(zone_conf_size(
733 zones.info[z2].base - zones.info[z1].base
734 + zones.info[z2].count));
735
736 uint8_t order;
737 if (cframes == 1)
738 order = 0;
739 else
740 order = fnzb(cframes - 1) + 1;
741
742 /* Allocate merged zone data inside one of the zones */
743 pfn_t pfn;
744 if (zone_can_alloc(&zones.info[z1], order)) {
745 pfn = zones.info[z1].base + zone_frame_alloc(&zones.info[z1], order);
746 } else if (zone_can_alloc(&zones.info[z2], order)) {
747 pfn = zones.info[z2].base + zone_frame_alloc(&zones.info[z2], order);
748 } else {
749 ret = false;
750 goto errout;
751 }
752
753 /* Preserve original data from z1 */
754 zone_t old_z1 = zones.info[z1];
755 old_z1.buddy_system->data = (void *) &old_z1;
756
757 /* Do zone merging */
758 buddy_system_t *buddy = (buddy_system_t *) PA2KA(PFN2ADDR(pfn));
759 zone_merge_internal(z1, z2, &old_z1, buddy);
760
761 /* Free unneeded config frames */
762 zone_reduce_region(z1, pfn - zones.info[z1].base, cframes);
763
764 /* Subtract zone information from busy frames */
765 zones.info[z1].busy_count -= cframes;
766
767 /* Free old zone information */
768 return_config_frames(z1,
769 ADDR2PFN(KA2PA((uintptr_t) old_z1.frames)), old_z1.count);
770 return_config_frames(z1,
771 ADDR2PFN(KA2PA((uintptr_t) zones.info[z2].frames)),
772 zones.info[z2].count);
773
774 /* Move zones down */
775 size_t i;
776 for (i = z2 + 1; i < zones.count; i++) {
777 zones.info[i - 1] = zones.info[i];
778 if (zones.info[i - 1].buddy_system != NULL)
779 zones.info[i - 1].buddy_system->data =
780 (void *) &zones.info[i - 1];
781 }
782
783 zones.count--;
784
785errout:
786 irq_spinlock_unlock(&zones.lock, true);
787
788 return ret;
789}
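/*
 * Illustrative note (not part of the original source): the order computed
 * above is ceil(log2(cframes)), i.e. the smallest buddy order whose block can
 * hold all configuration frames of the merged zone. For cframes == 5,
 * fnzb(5 - 1) + 1 == 3 and a block of 2^3 == 8 frames is allocated; for
 * cframes == 4 the result is order 2, an exact fit.
 */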
790
791/** Merge all mergeable zones into one big zone.
792 *
793 * It is reasonable to do this on systems where
794 * the BIOS reports memory in chunks, so that we end up
795 * with a single zone (which is faster).
796 *
797 */
798void zone_merge_all(void)
799{
800 size_t i = 0;
801 while (i < zones.count) {
802 if (!zone_merge(i, i + 1))
803 i++;
804 }
805}
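/*
 * Illustrative note (not part of the original source): when a merge succeeds,
 * zone i + 1 disappears and the following zones shift down, so the index is
 * deliberately not advanced; only a failed merge moves on to the next pair.
 * The loop terminates because zone_merge(i, zones.count) fails cleanly for
 * the last zone.
 */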
806
807/** Create new frame zone.
808 *
809 * @param zone Zone to construct.
810 * @param buddy Address of buddy system configuration information.
811 * @param start Physical address of the first frame within the zone.
812 * @param count Count of frames in zone.
813 * @param flags Zone flags.
814 *
816 * The zone structure is initialized in place.
816 *
817 */
818NO_TRACE static void zone_construct(zone_t *zone, buddy_system_t *buddy,
819 pfn_t start, size_t count, zone_flags_t flags)
820{
821 zone->base = start;
822 zone->count = count;
823 zone->flags = flags;
824 zone->free_count = count;
825 zone->busy_count = 0;
826 zone->buddy_system = buddy;
827
828 if (flags & ZONE_AVAILABLE) {
829 /*
830 * Compute order for buddy system and initialize
831 */
832 uint8_t order = fnzb(count);
833 buddy_system_create(zone->buddy_system, order,
834 &zone_buddy_system_operations, (void *) zone);
835
836 /* Allocate frames _after_ the confframe */
837
838 /* Check sizes */
839 zone->frames = (frame_t *) ((uint8_t *) zone->buddy_system +
840 buddy_conf_size(order));
841
842 size_t i;
843 for (i = 0; i < count; i++)
844 frame_initialize(&zone->frames[i]);
845
846 /* Stuffing frames */
847 for (i = 0; i < count; i++) {
848 zone->frames[i].refcount = 0;
849 buddy_system_free(zone->buddy_system, &zone->frames[i].buddy_link);
850 }
851 } else
852 zone->frames = NULL;
853}
854
855/** Compute configuration data size for zone.
856 *
857 * @param count Size of zone in frames.
858 *
859 * @return Size of zone configuration info (in bytes).
860 *
861 */
862size_t zone_conf_size(size_t count)
863{
864 return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count)));
865}
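/*
 * Illustrative note (not part of the original source): the configuration data
 * consists of one frame_t per frame plus the buddy system bookkeeping for
 * fnzb(count) orders. zone_construct() lays it out as
 *
 *	buddy_system_t and its lists | frame_t[0] ... frame_t[count - 1]
 *
 * and SIZE2FRAMES(zone_conf_size(count)) is the number of configuration
 * frames zone_create() has to set aside.
 */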
866
867/** Allocate external configuration frames from low memory. */
868pfn_t zone_external_conf_alloc(size_t count)
869{
870 size_t size = zone_conf_size(count);
871 size_t order = ispwr2(size) ? fnzb(size) : (fnzb(size) + 1);
872
873 return ADDR2PFN((uintptr_t) frame_alloc(order - FRAME_WIDTH,
874 FRAME_LOWMEM | FRAME_ATOMIC));
875}
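/*
 * Illustrative note (not part of the original source): here "order" is log2
 * of the configuration size in bytes, rounded up, so subtracting FRAME_WIDTH
 * converts it to a frame-allocator order. Assuming 4 KiB frames
 * (FRAME_WIDTH == 12), a 20 KiB configuration gives fnzb(size) + 1 == 15 and
 * 15 - 12 == 3, i.e. a block of 8 low-memory frames is requested atomically.
 */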
876
877/** Create and add zone to system.
878 *
879 * @param start First frame number (absolute).
880 * @param count Size of zone in frames.
881 * @param confframe Where configuration frames are supposed to be.
882 * Automatically checks that we will not disturb the
883 * kernel and possibly init. If confframe is given
884 * _outside_ this zone, it is expected that the area is
885 * already marked BUSY and big enough to contain
886 * zone_conf_size() amount of data. If the confframe is
887 * inside the area, the zone free frame information is
888 * modified not to include it.
889 *
890 * @return Zone number or -1 on error.
891 *
892 */
893size_t zone_create(pfn_t start, size_t count, pfn_t confframe,
894 zone_flags_t flags)
895{
896 irq_spinlock_lock(&zones.lock, true);
897
898 if (flags & ZONE_AVAILABLE) { /* Create available zone */
899 /* Theoretically we could have NULL here; in practice, make sure
900 * nobody tries to do that. If some platform requires it, remove
901 * the assert.
902 */
903 ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL));
904
905 /* Update the known end of physical memory. */
906 config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count));
907
908 /* If confframe is supposed to be inside our zone, then make sure
909 * it does not span kernel & init
910 */
911 size_t confcount = SIZE2FRAMES(zone_conf_size(count));
912 if ((confframe >= start) && (confframe < start + count)) {
913 for (; confframe < start + count; confframe++) {
914 uintptr_t addr = PFN2ADDR(confframe);
915 if (overlaps(addr, PFN2ADDR(confcount),
916 KA2PA(config.base), config.kernel_size))
917 continue;
918
919 if (overlaps(addr, PFN2ADDR(confcount),
920 KA2PA(config.stack_base), config.stack_size))
921 continue;
922
923 bool overlap = false;
924 size_t i;
925 for (i = 0; i < init.cnt; i++)
926 if (overlaps(addr, PFN2ADDR(confcount),
927 init.tasks[i].paddr,
928 init.tasks[i].size)) {
929 overlap = true;
930 break;
931 }
932 if (overlap)
933 continue;
934
935 break;
936 }
937
938 if (confframe >= start + count)
939 panic("Cannot find configuration data for zone.");
940 }
941
942 size_t znum = zones_insert_zone(start, count, flags);
943 if (znum == (size_t) -1) {
944 irq_spinlock_unlock(&zones.lock, true);
945 return (size_t) -1;
946 }
947
948 buddy_system_t *buddy = (buddy_system_t *) PA2KA(PFN2ADDR(confframe));
949 zone_construct(&zones.info[znum], buddy, start, count, flags);
950
951 /* If confdata in zone, mark as unavailable */
952 if ((confframe >= start) && (confframe < start + count)) {
953 size_t i;
954 for (i = confframe; i < confframe + confcount; i++)
955 zone_mark_unavailable(&zones.info[znum],
956 i - zones.info[znum].base);
957 }
958
959 irq_spinlock_unlock(&zones.lock, true);
960
961 return znum;
962 }
963
964 /* Non-available zone */
965 size_t znum = zones_insert_zone(start, count, flags);
966 if (znum == (size_t) -1) {
967 irq_spinlock_unlock(&zones.lock, true);
968 return (size_t) -1;
969 }
970 zone_construct(&zones.info[znum], NULL, start, count, flags);
971
972 irq_spinlock_unlock(&zones.lock, true);
973
974 return znum;
975}
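#if 0
/*
 * Illustrative sketch, not part of the original source: creating an available
 * zone whose configuration data lives in the zone itself, roughly the way the
 * architecture code does it during frame initialization. The addresses are
 * made up for the example; zone_create() itself moves the confframe past the
 * kernel, the stack and the init tasks if they are in the way.
 */
static void zone_create_example(void)
{
	pfn_t start = ADDR2PFN(0x100000);
	size_t count = SIZE2FRAMES(16 * 1024 * 1024);

	(void) zone_create(start, count, start, ZONE_AVAILABLE | ZONE_LOWMEM);
}
#endif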
976
977/*******************/
978/* Frame functions */
979/*******************/
980
981/** Set parent of frame. */
982void frame_set_parent(pfn_t pfn, void *data, size_t hint)
983{
984 irq_spinlock_lock(&zones.lock, true);
985
986 size_t znum = find_zone(pfn, 1, hint);
987
988 ASSERT(znum != (size_t) -1);
989
990 zone_get_frame(&zones.info[znum],
991 pfn - zones.info[znum].base)->parent = data;
992
993 irq_spinlock_unlock(&zones.lock, true);
994}
995
996void *frame_get_parent(pfn_t pfn, size_t hint)
997{
998 irq_spinlock_lock(&zones.lock, true);
999
1000 size_t znum = find_zone(pfn, 1, hint);
1001
1002 ASSERT(znum != (size_t) -1);
1003
1004 void *res = zone_get_frame(&zones.info[znum],
1005 pfn - zones.info[znum].base)->parent;
1006
1007 irq_spinlock_unlock(&zones.lock, true);
1008
1009 return res;
1010}
1011
1012/** Allocate power-of-two frames of physical memory.
1013 *
1014 * @param order Allocate exactly 2^order frames.
1015 * @param flags Flags for host zone selection and address processing.
1016 * @param pzone Preferred zone.
1017 *
1018 * @return Address of the allocated frame block: physical, or kernel virtual if FRAME_KA was set; NULL if FRAME_ATOMIC was set and no memory was available.
1019 *
1020 */
1021void *frame_alloc_generic(uint8_t order, frame_flags_t flags, size_t *pzone)
1022{
1023 size_t size = ((size_t) 1) << order;
1024 size_t hint = pzone ? (*pzone) : 0;
1025
1026 /*
1027 * If not told otherwise, we must first reserve the memory.
1028 */
1029 if (!(flags & FRAME_NO_RESERVE))
1030 reserve_force_alloc(size);
1031
1032loop:
1033 irq_spinlock_lock(&zones.lock, true);
1034
1035 /*
1036 * First, find suitable frame zone.
1037 */
1038 size_t znum = find_free_zone(order,
1039 FRAME_TO_ZONE_FLAGS(flags), hint);
1040
1041 /* If there is no memory, reclaim some slab memory;
1042 if that does not help, reclaim all of it */
1043 if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) {
1044 irq_spinlock_unlock(&zones.lock, true);
1045 size_t freed = slab_reclaim(0);
1046 irq_spinlock_lock(&zones.lock, true);
1047
1048 if (freed > 0)
1049 znum = find_free_zone(order,
1050 FRAME_TO_ZONE_FLAGS(flags), hint);
1051
1052 if (znum == (size_t) -1) {
1053 irq_spinlock_unlock(&zones.lock, true);
1054 freed = slab_reclaim(SLAB_RECLAIM_ALL);
1055 irq_spinlock_lock(&zones.lock, true);
1056
1057 if (freed > 0)
1058 znum = find_free_zone(order,
1059 FRAME_TO_ZONE_FLAGS(flags), hint);
1060 }
1061 }
1062
1063 if (znum == (size_t) -1) {
1064 if (flags & FRAME_ATOMIC) {
1065 irq_spinlock_unlock(&zones.lock, true);
1066 if (!(flags & FRAME_NO_RESERVE))
1067 reserve_free(size);
1068 return NULL;
1069 }
1070
1071#ifdef CONFIG_DEBUG
1072 size_t avail = frame_total_free_get_internal();
1073#endif
1074
1075 irq_spinlock_unlock(&zones.lock, true);
1076
1077 if (!THREAD)
1078 panic("Cannot wait for memory to become available.");
1079
1080 /*
1081 * Sleep until some frames are available again.
1082 */
1083
1084#ifdef CONFIG_DEBUG
1085 printf("Thread %" PRIu64 " waiting for %zu frames, "
1086 "%zu available.\n", THREAD->tid, size, avail);
1087#endif
1088
1089 /*
1090 * Since the mem_avail_mtx is an active mutex, we need to disable interrupts
1091 * to prevent deadlock with TLB shootdown.
1092 */
1093 ipl_t ipl = interrupts_disable();
1094 mutex_lock(&mem_avail_mtx);
1095
1096 if (mem_avail_req > 0)
1097 mem_avail_req = min(mem_avail_req, size);
1098 else
1099 mem_avail_req = size;
1100 size_t gen = mem_avail_gen;
1101
1102 while (gen == mem_avail_gen)
1103 condvar_wait(&mem_avail_cv, &mem_avail_mtx);
1104
1105 mutex_unlock(&mem_avail_mtx);
1106 interrupts_restore(ipl);
1107
1108#ifdef CONFIG_DEBUG
1109 printf("Thread %" PRIu64 " woken up.\n", THREAD->tid);
1110#endif
1111
1112 goto loop;
1113 }
1114
1115 pfn_t pfn = zone_frame_alloc(&zones.info[znum], order)
1116 + zones.info[znum].base;
1117
1118 irq_spinlock_unlock(&zones.lock, true);
1119
1120 if (pzone)
1121 *pzone = znum;
1122
1123 if (flags & FRAME_KA)
1124 return (void *) PA2KA(PFN2ADDR(pfn));
1125
1126 return (void *) PFN2ADDR(pfn);
1127}
1128
1129void *frame_alloc(uint8_t order, frame_flags_t flags)
1130{
1131 return frame_alloc_generic(order, flags, NULL);
1132}
1133
1134void *frame_alloc_noreserve(uint8_t order, frame_flags_t flags)
1135{
1136 return frame_alloc_generic(order, flags | FRAME_NO_RESERVE, NULL);
1137}
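#if 0
/*
 * Illustrative sketch, not part of the original source: a typical blocking
 * allocation of a single frame mapped into the kernel address space, and the
 * matching free. With FRAME_ATOMIC the call may return NULL instead of
 * sleeping, and the caller has to handle that.
 */
static void frame_alloc_example(void)
{
	void *page = frame_alloc(0, FRAME_KA);	/* 2^0 == 1 frame */

	/* ... use the frame through its kernel address ... */

	frame_free(KA2PA((uintptr_t) page));
}
#endif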
1138
1139/** Free a frame.
1140 *
1141 * Find the respective frame structure for the supplied physical frame address.
1142 * Decrement the frame reference count. If it drops to zero, move the frame
1143 * structure to the free list.
1144 *
1145 * @param frame Physical address of the frame to be freed.
1146 * @param flags Flags to control memory reservation.
1147 *
1148 */
1149void frame_free_generic(uintptr_t frame, frame_flags_t flags)
1150{
1151 size_t size;
1152
1153 irq_spinlock_lock(&zones.lock, true);
1154
1155 /*
1156 * First, find host frame zone for addr.
1157 */
1158 pfn_t pfn = ADDR2PFN(frame);
1159 size_t znum = find_zone(pfn, 1, 0);
1160
1161 ASSERT(znum != (size_t) -1);
1162
1163 size = zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base);
1164
1165 irq_spinlock_unlock(&zones.lock, true);
1166
1167 /*
1168 * Signal that some memory has been freed.
1169 */
1170
1171
1172 /*
1173 * Since the mem_avail_mtx is an active mutex, we need to disable interrupts
1174 * to prevent deadlock with TLB shootdown.
1175 */
1176 ipl_t ipl = interrupts_disable();
1177 mutex_lock(&mem_avail_mtx);
1178 if (mem_avail_req > 0)
1179 mem_avail_req -= min(mem_avail_req, size);
1180
1181 if (mem_avail_req == 0) {
1182 mem_avail_gen++;
1183 condvar_broadcast(&mem_avail_cv);
1184 }
1185 mutex_unlock(&mem_avail_mtx);
1186 interrupts_restore(ipl);
1187
1188 if (!(flags & FRAME_NO_RESERVE))
1189 reserve_free(size);
1190}
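/*
 * Illustrative note (not part of the original source): together with the
 * sleeping path in frame_alloc_generic(), this implements a generation-based
 * wakeup. Sleepers record the smallest outstanding request in mem_avail_req;
 * every free subtracts what it released, and once the request is satisfied
 * the generation counter is bumped and all sleepers are broadcast so that
 * they retry their allocation.
 */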
1191
1192void frame_free(uintptr_t frame)
1193{
1194 frame_free_generic(frame, 0);
1195}
1196
1197void frame_free_noreserve(uintptr_t frame)
1198{
1199 frame_free_generic(frame, FRAME_NO_RESERVE);
1200}
1201
1202/** Add reference to frame.
1203 *
1204 * Find respective frame structure for supplied PFN and
1205 * increment frame reference count.
1206 *
1207 * @param pfn Frame number of the frame to be referenced.
1208 *
1209 */
1210NO_TRACE void frame_reference_add(pfn_t pfn)
1211{
1212 irq_spinlock_lock(&zones.lock, true);
1213
1214 /*
1215 * First, find host frame zone for addr.
1216 */
1217 size_t znum = find_zone(pfn, 1, 0);
1218
1219 ASSERT(znum != (size_t) -1);
1220
1221 zones.info[znum].frames[pfn - zones.info[znum].base].refcount++;
1222
1223 irq_spinlock_unlock(&zones.lock, true);
1224}
1225
1226/** Mark given range unavailable in frame zones.
1227 *
1228 */
1229NO_TRACE void frame_mark_unavailable(pfn_t start, size_t count)
1230{
1231 irq_spinlock_lock(&zones.lock, true);
1232
1233 size_t i;
1234 for (i = 0; i < count; i++) {
1235 size_t znum = find_zone(start + i, 1, 0);
1236 if (znum == (size_t) -1) /* PFN not found */
1237 continue;
1238
1239 zone_mark_unavailable(&zones.info[znum],
1240 start + i - zones.info[znum].base);
1241 }
1242
1243 irq_spinlock_unlock(&zones.lock, true);
1244}
1245
1246/** Initialize physical memory management.
1247 *
1248 */
1249void frame_init(void)
1250{
1251 if (config.cpu_active == 1) {
1252 zones.count = 0;
1253 irq_spinlock_initialize(&zones.lock, "frame.zones.lock");
1254 mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE);
1255 condvar_initialize(&mem_avail_cv);
1256 }
1257
1258 /* Tell the architecture to create some memory */
1259 frame_low_arch_init();
1260 if (config.cpu_active == 1) {
1261 frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
1262 SIZE2FRAMES(config.kernel_size));
1263 frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
1264 SIZE2FRAMES(config.stack_size));
1265
1266 size_t i;
1267 for (i = 0; i < init.cnt; i++) {
1268 pfn_t pfn = ADDR2PFN(init.tasks[i].paddr);
1269 frame_mark_unavailable(pfn,
1270 SIZE2FRAMES(init.tasks[i].size));
1271 }
1272
1273 if (ballocs.size)
1274 frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
1275 SIZE2FRAMES(ballocs.size));
1276
1277 /* Blacklist the first frame, as allocating NULL would
1278 * fail in some places.
1279 */
1280 frame_mark_unavailable(0, 1);
1281 }
1282 frame_high_arch_init();
1283}
1284
1285/** Adjust bounds of physical memory region according to low/high memory split.
1286 *
1287 * @param low[in] If true, the adjustment is performed to make the region
1288 * fit in the low memory. Otherwise the adjustment is
1289 * performed to make the region fit in the high memory.
1290 * @param basep[inout] Pointer to a variable which contains the region's base
1291 * address and which may receive the adjusted base address.
1292 * @param sizep[inout] Pointer to a variable which contains the region's size
1293 * and which may receive the adjusted size.
1294 * @return True if the region still exists even after the
1295 * adjustment, false otherwise.
1296 */
1297bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep)
1298{
1299 uintptr_t limit = KA2PA(config.identity_base) + config.identity_size;
1300
1301 if (low) {
1302 if (*basep > limit)
1303 return false;
1304 if (*basep + *sizep > limit)
1305 *sizep = limit - *basep;
1306 } else {
1307 if (*basep + *sizep <= limit)
1308 return false;
1309 if (*basep <= limit) {
1310 *sizep -= limit - *basep;
1311 *basep = limit;
1312 }
1313 }
1314 return true;
1315}
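/*
 * Illustrative note (not part of the original source): the low and high
 * initialization passes can feed the same physical region through this
 * function, once with low == true and once with low == false, to split it at
 * the identity-mapping limit. With limit == 0x40000000, the region
 * [0x3c000000, 0x44000000) is clipped to [0x3c000000, 0x40000000) by the low
 * pass and to [0x40000000, 0x44000000) by the high pass.
 */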
1316
1317/** Return total size of all zones.
1318 *
1319 */
1320uint64_t zones_total_size(void)
1321{
1322 irq_spinlock_lock(&zones.lock, true);
1323
1324 uint64_t total = 0;
1325 size_t i;
1326 for (i = 0; i < zones.count; i++)
1327 total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
1328
1329 irq_spinlock_unlock(&zones.lock, true);
1330
1331 return total;
1332}
1333
1334void zones_stats(uint64_t *total, uint64_t *unavail, uint64_t *busy,
1335 uint64_t *free)
1336{
1337 ASSERT(total != NULL);
1338 ASSERT(unavail != NULL);
1339 ASSERT(busy != NULL);
1340 ASSERT(free != NULL);
1341
1342 irq_spinlock_lock(&zones.lock, true);
1343
1344 *total = 0;
1345 *unavail = 0;
1346 *busy = 0;
1347 *free = 0;
1348
1349 size_t i;
1350 for (i = 0; i < zones.count; i++) {
1351 *total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
1352
1353 if (zones.info[i].flags & ZONE_AVAILABLE) {
1354 *busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count);
1355 *free += (uint64_t) FRAMES2SIZE(zones.info[i].free_count);
1356 } else
1357 *unavail += (uint64_t) FRAMES2SIZE(zones.info[i].count);
1358 }
1359
1360 irq_spinlock_unlock(&zones.lock, true);
1361}
1362
1363/** Prints list of zones.
1364 *
1365 */
1366void zones_print_list(void)
1367{
1368#ifdef __32_BITS__
1369 printf("[nr] [base addr] [frames ] [flags ] [free frames ] [busy frames ]\n");
1370#endif
1371
1372#ifdef __64_BITS__
1373 printf("[nr] [base address ] [frames ] [flags ] [free frames ] [busy frames ]\n");
1374#endif
1375
1376 /*
1377 * Because printing may require allocation of memory, we may not hold
1378 * the frame allocator locks when printing zone statistics. Therefore,
1379 * we simply gather the statistics under the protection of the locks and
1380 * print the statistics when the locks have been released.
1381 *
1382 * When someone adds/removes zones while we are printing the statistics,
1383 * we may end up with inaccurate output (e.g. a zone being skipped from
1384 * the listing).
1385 */
1386
1387 size_t i;
1388 for (i = 0;; i++) {
1389 irq_spinlock_lock(&zones.lock, true);
1390
1391 if (i >= zones.count) {
1392 irq_spinlock_unlock(&zones.lock, true);
1393 break;
1394 }
1395
1396 uintptr_t base = PFN2ADDR(zones.info[i].base);
1397 size_t count = zones.info[i].count;
1398 zone_flags_t flags = zones.info[i].flags;
1399 size_t free_count = zones.info[i].free_count;
1400 size_t busy_count = zones.info[i].busy_count;
1401
1402 irq_spinlock_unlock(&zones.lock, true);
1403
1404 bool available = ((flags & ZONE_AVAILABLE) != 0);
1405
1406 printf("%-4zu", i);
1407
1408#ifdef __32_BITS__
1409 printf(" %p", (void *) base);
1410#endif
1411
1412#ifdef __64_BITS__
1413 printf(" %p", (void *) base);
1414#endif
1415
1416 printf(" %12zu %c%c%c%c%c ", count,
1417 available ? 'A' : '-',
1418 (flags & ZONE_RESERVED) ? 'R' : '-',
1419 (flags & ZONE_FIRMWARE) ? 'F' : '-',
1420 (flags & ZONE_LOWMEM) ? 'L' : '-',
1421 (flags & ZONE_HIGHMEM) ? 'H' : '-');
1422
1423 if (available)
1424 printf("%14zu %14zu",
1425 free_count, busy_count);
1426
1427 printf("\n");
1428 }
1429}
1430
1431/** Prints zone details.
1432 *
1433 * @param num Zone base address or zone number.
1434 *
1435 */
1436void zone_print_one(size_t num)
1437{
1438 irq_spinlock_lock(&zones.lock, true);
1439 size_t znum = (size_t) -1;
1440
1441 size_t i;
1442 for (i = 0; i < zones.count; i++) {
1443 if ((i == num) || (PFN2ADDR(zones.info[i].base) == num)) {
1444 znum = i;
1445 break;
1446 }
1447 }
1448
1449 if (znum == (size_t) -1) {
1450 irq_spinlock_unlock(&zones.lock, true);
1451 printf("Zone not found.\n");
1452 return;
1453 }
1454
1455 uintptr_t base = PFN2ADDR(zones.info[i].base);
1456 zone_flags_t flags = zones.info[i].flags;
1457 size_t count = zones.info[i].count;
1458 size_t free_count = zones.info[i].free_count;
1459 size_t busy_count = zones.info[i].busy_count;
1460
1461 irq_spinlock_unlock(&zones.lock, true);
1462
1463 bool available = ((flags & ZONE_AVAILABLE) != 0);
1464
1465 uint64_t size;
1466 const char *size_suffix;
1467 bin_order_suffix(FRAMES2SIZE(count), &size, &size_suffix, false);
1468
1469 printf("Zone number: %zu\n", znum);
1470 printf("Zone base address: %p\n", (void *) base);
1471 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count,
1472 size, size_suffix);
1473 printf("Zone flags: %c%c%c%c%c\n",
1474 available ? 'A' : '-',
1475 (flags & ZONE_RESERVED) ? 'R' : '-',
1476 (flags & ZONE_FIRMWARE) ? 'F' : '-',
1477 (flags & ZONE_LOWMEM) ? 'L' : '-',
1478 (flags & ZONE_HIGHMEM) ? 'H' : '-');
1479
1480 if (available) {
1481 bin_order_suffix(FRAMES2SIZE(busy_count), &size, &size_suffix,
1482 false);
1483 printf("Allocated space: %zu frames (%" PRIu64 " %s)\n",
1484 busy_count, size, size_suffix);
1485 bin_order_suffix(FRAMES2SIZE(free_count), &size, &size_suffix,
1486 false);
1487 printf("Available space: %zu frames (%" PRIu64 " %s)\n",
1488 free_count, size, size_suffix);
1489 }
1490}
1491
1492/** @}
1493 */