source: mainline/kernel/generic/src/mm/frame.c@ 65f3117

Last change on this file since 65f3117 was 65f3117, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 2 years ago

Make bootstrap stack statically rather than dynamically allocated

With alignment requirements being part of the language now, it is
simple to allocate the extra stack area in kernel data, and we
don't need to go to so much trouble with manual allocation.
It also makes it slightly more straightforward to use the stack
from assembly, without having to dig through a saved context
structure.

1/*
2 * Copyright (c) 2001-2005 Jakub Jermar
3 * Copyright (c) 2005 Sergey Bondari
4 * Copyright (c) 2009 Martin Decky
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * - The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/** @addtogroup kernel_generic_mm
32 * @{
33 */
34
35/**
36 * @file
37 * @brief Physical frame allocator.
38 *
39 * This file contains the physical frame allocator and memory zone management.
40 * The frame allocator is built on top of the two-level bitmap structure.
41 *
42 */
43
44#include <typedefs.h>
45#include <mm/frame.h>
46#include <mm/reserve.h>
47#include <mm/as.h>
48#include <panic.h>
49#include <assert.h>
50#include <adt/list.h>
51#include <synch/mutex.h>
52#include <synch/condvar.h>
53#include <arch/asm.h>
54#include <arch.h>
55#include <stdio.h>
56#include <log.h>
57#include <align.h>
58#include <mm/slab.h>
59#include <bitops.h>
60#include <macros.h>
61#include <config.h>
62#include <str.h>
63#include <proc/thread.h> /* THREAD */
64
65zones_t zones;
66
67/*
68 * Synchronization primitives used to sleep when there is no memory
69 * available.
70 */
71static mutex_t mem_avail_mtx;
72static condvar_t mem_avail_cv;
73static size_t mem_avail_req = 0; /**< Number of frames requested. */
74static size_t mem_avail_gen = 0; /**< Generation counter. */
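/*
 * The protocol implemented by frame_alloc_generic() and frame_free_generic()
 * below: an allocation that finds no free frames records (the minimum of)
 * the number of frames it needs in mem_avail_req and sleeps on mem_avail_cv
 * until mem_avail_gen changes. frame_free_generic() subtracts the freed
 * frames from mem_avail_req and, once it drops to zero, increments
 * mem_avail_gen and broadcasts mem_avail_cv, waking all waiters.
 */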
75
76/** Initialize frame structure.
77 *
78 * @param frame Frame structure to be initialized.
79 *
80 */
81_NO_TRACE static void frame_initialize(frame_t *frame)
82{
83 frame->refcount = 0;
84 frame->parent = NULL;
85}
86
87/*
88 * Zones functions
89 */
90
91/** Insert-sort zone into zones list.
92 *
93 * Assume interrupts are disabled and zones lock is
94 * locked.
95 *
96 * @param base Base frame of the newly inserted zone.
97 * @param count Number of frames of the newly inserted zone.
98 *
99 * @return Zone number on success, -1 on error.
100 *
101 */
102_NO_TRACE static size_t zones_insert_zone(pfn_t base, size_t count,
103 zone_flags_t flags)
104{
105 if (zones.count + 1 == ZONES_MAX) {
106 log(LF_OTHER, LVL_ERROR, "Maximum zone count %u exceeded!",
107 ZONES_MAX);
108 return (size_t) -1;
109 }
110
111 size_t i;
112 for (i = 0; i < zones.count; i++) {
113 /* Check for overlap */
114 if (overlaps(zones.info[i].base, zones.info[i].count,
115 base, count)) {
116
117 /*
118 * If the overlapping zones are of the same type
119 * and the new zone is completely within the previous
120 * one, then quietly ignore the new zone.
121 *
122 */
123
124 if ((zones.info[i].flags != flags) ||
125 (!iswithin(zones.info[i].base, zones.info[i].count,
126 base, count))) {
127 log(LF_OTHER, LVL_WARN,
128 "Zone (%p, %p) overlaps "
129 "with previous zone (%p %p)!",
130 (void *) PFN2ADDR(base), (void *) PFN2ADDR(count),
131 (void *) PFN2ADDR(zones.info[i].base),
132 (void *) PFN2ADDR(zones.info[i].count));
133 }
134
135 return (size_t) -1;
136 }
137 if (base < zones.info[i].base)
138 break;
139 }
140
141 /* Move other zones up */
142 for (size_t j = zones.count; j > i; j--)
143 zones.info[j] = zones.info[j - 1];
144
145 zones.count++;
146
147 return i;
148}
149
150/** Get total available frames.
151 *
152 * Assume interrupts are disabled and zones lock is
153 * locked.
154 *
155 * @return Total number of available frames.
156 *
157 */
158_NO_TRACE static size_t frame_total_free_get_internal(void)
159{
160 size_t total = 0;
161 size_t i;
162
163 for (i = 0; i < zones.count; i++)
164 total += zones.info[i].free_count;
165
166 return total;
167}
168
169_NO_TRACE size_t frame_total_free_get(void)
170{
171 size_t total;
172
173 irq_spinlock_lock(&zones.lock, true);
174 total = frame_total_free_get_internal();
175 irq_spinlock_unlock(&zones.lock, true);
176
177 return total;
178}
179
180/** Find a zone containing a given range of frames.
181 *
182 * Assume interrupts are disabled and zones lock is
183 * locked.
184 *
185 * @param frame Frame number contained in zone.
186 * @param count Number of frames to look for.
187 * @param hint Used as zone hint.
188 *
189 * @return Zone index or -1 if not found.
190 *
191 */
192_NO_TRACE size_t find_zone(pfn_t frame, size_t count, size_t hint)
193{
194 if (hint >= zones.count)
195 hint = 0;
196
197 size_t i = hint;
198 do {
199 if ((zones.info[i].base <= frame) &&
200 (zones.info[i].base + zones.info[i].count >= frame + count))
201 return i;
202
203 i++;
204 if (i >= zones.count)
205 i = 0;
206
207 } while (i != hint);
208
209 return (size_t) -1;
210}
211
212/** @return True if zone can allocate specified number of frames */
213_NO_TRACE static bool zone_can_alloc(zone_t *zone, size_t count,
214 pfn_t constraint)
215{
216 /*
217 * The function bitmap_allocate_range() does not modify
218 * the bitmap if the last argument is NULL.
219 */
220
221 return ((zone->flags & ZONE_AVAILABLE) &&
222 bitmap_allocate_range(&zone->bitmap, count, zone->base,
223 FRAME_LOWPRIO, constraint, NULL));
224}
225
226/** Find a zone that can allocate specified number of frames
227 *
228 * This function searches among all zones. Assume interrupts are
229 * disabled and zones lock is locked.
230 *
231 * @param count Number of free frames we are trying to find.
232 * @param flags Required flags of the zone.
233 * @param constraint Indication of bits that cannot be set in the
234 * physical frame number of the first allocated frame.
235 * @param hint Preferred zone.
236 *
237 * @return Zone that can allocate specified number of frames.
238 * @return -1 if no zone can satisfy the request.
239 *
240 */
241_NO_TRACE static size_t find_free_zone_all(size_t count, zone_flags_t flags,
242 pfn_t constraint, size_t hint)
243{
244 for (size_t pos = 0; pos < zones.count; pos++) {
245 size_t i = (pos + hint) % zones.count;
246
247 /* Check whether the zone meets the search criteria. */
248 if (!ZONE_FLAGS_MATCH(zones.info[i].flags, flags))
249 continue;
250
251 /* Check if the zone can satisfy the allocation request. */
252 if (zone_can_alloc(&zones.info[i], count, constraint))
253 return i;
254 }
255
256 return (size_t) -1;
257}
258
259/** Check if a frame range contains only high-priority memory.
260 *
261 * @param base Starting frame.
262 * @param count Number of frames.
263 *
264 * @return True if the range contains only high-priority memory.
265 *
266 */
267_NO_TRACE static bool is_high_priority(pfn_t base, size_t count)
268{
269 return (base + count <= FRAME_LOWPRIO);
270}
271
272/** Find a zone that can allocate specified number of frames
273 *
274 * This function ignores zones that contain only high-priority
275 * memory. Assume interrupts are disabled and zones lock is locked.
276 *
277 * @param count Number of free frames we are trying to find.
278 * @param flags Required flags of the zone.
279 * @param constraint Indication of bits that cannot be set in the
280 * physical frame number of the first allocated frame.
281 * @param hint Preferred zone.
282 *
283 * @return Zone that can allocate specified number of frames.
284 * @return -1 if no low-priority zone can satisfy the request.
285 *
286 */
287_NO_TRACE static size_t find_free_zone_lowprio(size_t count, zone_flags_t flags,
288 pfn_t constraint, size_t hint)
289{
290 for (size_t pos = 0; pos < zones.count; pos++) {
291 size_t i = (pos + hint) % zones.count;
292
293 /* Skip zones containing only high-priority memory. */
294 if (is_high_priority(zones.info[i].base, zones.info[i].count))
295 continue;
296
297 /* Check whether the zone meets the search criteria. */
298 if (!ZONE_FLAGS_MATCH(zones.info[i].flags, flags))
299 continue;
300
301 /* Check if the zone can satisfy the allocation request. */
302 if (zone_can_alloc(&zones.info[i], count, constraint))
303 return i;
304 }
305
306 return (size_t) -1;
307}
308
309/** Find a zone that can allocate specified number of frames
310 *
311 * Assume interrupts are disabled and zones lock is
312 * locked.
313 *
314 * @param count Number of free frames we are trying to find.
315 * @param flags Required flags of the target zone.
316 * @param constraint Indication of bits that cannot be set in the
317 * physical frame number of the first allocated frame.
318 * @param hint Preferred zone.
319 *
320 * @return Zone that can allocate specified number of frames.
321 * @return -1 if no zone can satisfy the request.
322 *
323 */
324_NO_TRACE static size_t find_free_zone(size_t count, zone_flags_t flags,
325 pfn_t constraint, size_t hint)
326{
327 if (hint >= zones.count)
328 hint = 0;
329
330 /*
331 * Prefer zones with low-priority memory over
332 * zones with high-priority memory.
333 */
334
335 size_t znum = find_free_zone_lowprio(count, flags, constraint, hint);
336 if (znum != (size_t) -1)
337 return znum;
338
339 /* Take all zones into account */
340 return find_free_zone_all(count, flags, constraint, hint);
341}
342
343/*
344 * Zone functions
345 */
346
347/** Return frame from zone. */
348_NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t index)
349{
350 assert(index < zone->count);
351
352 return &zone->frames[index];
353}
354
355/** Allocate frame in particular zone.
356 *
357 * Assume zone is locked and is available for allocation.
358 * Panics if allocation is impossible.
359 *
360 * @param zone Zone to allocate from.
361 * @param count Number of frames to allocate
362 * @param constraint Indication of bits that cannot be set in the
363 * physical frame number of the first allocated frame.
364 *
365 * @return Frame index in zone.
366 *
367 */
368_NO_TRACE static size_t zone_frame_alloc(zone_t *zone, size_t count,
369 pfn_t constraint)
370{
371 assert(zone->flags & ZONE_AVAILABLE);
372 assert(zone->free_count >= count);
373
374 /* Allocate frames from zone */
375 size_t index = (size_t) -1;
376 int avail = bitmap_allocate_range(&zone->bitmap, count, zone->base,
377 FRAME_LOWPRIO, constraint, &index);
378
379 (void) avail;
380 assert(avail);
381 assert(index != (size_t) -1);
382
383 /* Update frame reference count */
384 for (size_t i = 0; i < count; i++) {
385 frame_t *frame = zone_get_frame(zone, index + i);
386
387 assert(frame->refcount == 0);
388 frame->refcount = 1;
389 }
390
391 /* Update zone information. */
392 zone->free_count -= count;
393 zone->busy_count += count;
394
395 return index;
396}
397
398/** Free frame from zone.
399 *
400 * Assume zone is locked and is available for deallocation.
401 *
402 * @param zone Pointer to zone from which the frame is to be freed.
403 * @param index Frame index relative to zone.
404 *
405 * @return Number of freed frames.
406 *
407 */
408_NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t index)
409{
410 assert(zone->flags & ZONE_AVAILABLE);
411
412 frame_t *frame = zone_get_frame(zone, index);
413 assert(frame->refcount > 0);
414
415 if (!--frame->refcount) {
416 assert(zone->busy_count > 0);
417
418 bitmap_set(&zone->bitmap, index, 0);
419
420 /* Update zone information. */
421 zone->free_count++;
422 zone->busy_count--;
423
424 return 1;
425 }
426
427 return 0;
428}
429
430/** Mark frame in zone unavailable for allocation. */
431_NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t index)
432{
433 assert(zone->flags & ZONE_AVAILABLE);
434
435 frame_t *frame = zone_get_frame(zone, index);
436 assert(frame->refcount <= 1);
437
438 if (frame->refcount > 0)
439 return;
440
441 assert(zone->free_count > 0);
442
443 frame->refcount = 1;
444 bitmap_set_range(&zone->bitmap, index, 1);
445
446 zone->free_count--;
447 reserve_force_alloc(1);
448}
449
450/** Mark frame in zone available to allocation. */
451_NO_TRACE static void zone_mark_available(zone_t *zone, size_t index)
452{
453 assert(zone->flags & ZONE_AVAILABLE);
454
455 frame_t *frame = zone_get_frame(zone, index);
456 assert(frame->refcount == 1);
457
458 frame->refcount = 0;
459 bitmap_set_range(&zone->bitmap, index, 0);
460
461 zone->free_count++;
462}
463
464/** Merge two zones.
465 *
466 * Assume z1 & z2 are locked and compatible and zones lock is
467 * locked.
468 *
469 * @param z1 First zone to merge.
470 * @param z2 Second zone to merge.
471 * @param old_z1 Original data of the first zone.
472 * @param confdata Merged zone configuration data.
473 *
474 */
475_NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1,
476 void *confdata)
477{
478 assert(zones.info[z1].flags & ZONE_AVAILABLE);
479 assert(zones.info[z2].flags & ZONE_AVAILABLE);
480 assert(zones.info[z1].flags == zones.info[z2].flags);
481 assert(zones.info[z1].base < zones.info[z2].base);
482 assert(!overlaps(zones.info[z1].base, zones.info[z1].count,
483 zones.info[z2].base, zones.info[z2].count));
484
485 /* Difference between zone bases */
486 pfn_t base_diff = zones.info[z2].base - zones.info[z1].base;
487 pfn_t gap = base_diff - zones.info[z1].count;
488
489 zones.info[z1].count = base_diff + zones.info[z2].count;
490 zones.info[z1].free_count += zones.info[z2].free_count;
491 zones.info[z1].busy_count += zones.info[z2].busy_count;
492
493 bitmap_initialize(&zones.info[z1].bitmap, zones.info[z1].count,
494 confdata + (sizeof(frame_t) * zones.info[z1].count));
495 bitmap_clear_range(&zones.info[z1].bitmap, 0, zones.info[z1].count);
496
497 zones.info[z1].frames = (frame_t *) confdata;
498
499 /*
500 * Copy frames and bits from both zones to preserve parents, etc.
501 */
502
503 for (size_t i = 0; i < old_z1->count; i++) {
504 bitmap_set(&zones.info[z1].bitmap, i,
505 bitmap_get(&old_z1->bitmap, i));
506 zones.info[z1].frames[i] = old_z1->frames[i];
507 }
508
509 for (size_t i = 0; i < zones.info[z2].count; i++) {
510 bitmap_set(&zones.info[z1].bitmap, base_diff + i,
511 bitmap_get(&zones.info[z2].bitmap, i));
512 zones.info[z1].frames[base_diff + i] =
513 zones.info[z2].frames[i];
514 }
515
516 /*
517 * Mark the gap between the original zones as unavailable.
518 */
519
520 for (size_t i = 0; i < gap; i++) {
521 frame_initialize(&zones.info[z1].frames[old_z1->count + i]);
522 zone_mark_unavailable(&zones.info[z1], old_z1->count + i);
523 }
524}
525
526/** Return old configuration frames into the zone.
527 *
528 * We have two cases:
529 * - The configuration data is outside the zone
530 * -> do nothing (perhaps call frame_free?)
531 * - The configuration data was created by zone_create
532 * or updated by reduce_region -> free every frame
533 *
534 * @param znum The actual zone where freeing should occur.
535 * @param pfn Old zone configuration frame.
536 * @param count Old zone frame count.
537 *
538 */
539_NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count)
540{
541 assert(zones.info[znum].flags & ZONE_AVAILABLE);
542
543 size_t cframes = SIZE2FRAMES(zone_conf_size(count));
544
545 if ((pfn < zones.info[znum].base) ||
546 (pfn >= zones.info[znum].base + zones.info[znum].count))
547 return;
548
549 for (size_t i = 0; i < cframes; i++)
550 zone_mark_available(&zones.info[znum],
551 pfn - zones.info[znum].base + i);
552}
553
554/** Merge zones z1 and z2.
555 *
556 * The merged zones must be 2 zones with no zone existing in between
557 * (which means that z2 = z1 + 1). Both zones must be available zones
558 * with the same flags.
559 *
560 * When you create a new zone, the frame allocator configuration does
561 * not need to be 2^order in size. Once the allocator is running this is
562 * no longer possible, because merged configuration data occupies more space :-/
563 *
564 */
565bool zone_merge(size_t z1, size_t z2)
566{
567 irq_spinlock_lock(&zones.lock, true);
568
569 bool ret = true;
570
571 /*
572 * We can only join 2 zones with no zone existing in between;
573 * the zones have to be available and have the same
574 * set of flags.
575 */
576 if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) ||
577 (zones.info[z1].flags != zones.info[z2].flags)) {
578 ret = false;
579 goto errout;
580 }
581
582 pfn_t cframes = SIZE2FRAMES(zone_conf_size(
583 zones.info[z2].base - zones.info[z1].base +
584 zones.info[z2].count));
585
586 /* Allocate merged zone data inside one of the zones */
587 pfn_t pfn;
588 if (zone_can_alloc(&zones.info[z1], cframes, 0)) {
589 pfn = zones.info[z1].base +
590 zone_frame_alloc(&zones.info[z1], cframes, 0);
591 } else if (zone_can_alloc(&zones.info[z2], cframes, 0)) {
592 pfn = zones.info[z2].base +
593 zone_frame_alloc(&zones.info[z2], cframes, 0);
594 } else {
595 ret = false;
596 goto errout;
597 }
598
599 /* Preserve original data from z1 */
600 zone_t old_z1 = zones.info[z1];
601
602 /* Do zone merging */
603 zone_merge_internal(z1, z2, &old_z1, (void *) PA2KA(PFN2ADDR(pfn)));
604
605 /* Subtract zone information from busy frames */
606 zones.info[z1].busy_count -= cframes;
607
608 /* Free old zone information */
609 return_config_frames(z1,
610 ADDR2PFN(KA2PA((uintptr_t) old_z1.frames)), old_z1.count);
611 return_config_frames(z1,
612 ADDR2PFN(KA2PA((uintptr_t) zones.info[z2].frames)),
613 zones.info[z2].count);
614
615 /* Move zones down */
616 for (size_t i = z2 + 1; i < zones.count; i++)
617 zones.info[i - 1] = zones.info[i];
618
619 zones.count--;
620
621errout:
622 irq_spinlock_unlock(&zones.lock, true);
623
624 return ret;
625}
626
627/** Merge all mergeable zones into one big zone.
628 *
629 * It is reasonable to do this on systems where the
630 * BIOS reports memory in chunks, so that we end up
631 * with a single zone (which is faster).
632 *
633 */
634void zone_merge_all(void)
635{
636 size_t i = 1;
637
638 while (i < zones.count) {
639 if (!zone_merge(i - 1, i))
640 i++;
641 }
642}
643
644/** Create new frame zone.
645 *
646 * @param zone Zone to construct.
647 * @param start Physical address of the first frame within the zone.
648 * @param count Count of frames in zone.
649 * @param flags Zone flags.
650 * @param confdata Configuration data of the zone.
651 *
652 * The zone structure is initialized in place.
653 *
654 */
655_NO_TRACE static void zone_construct(zone_t *zone, pfn_t start, size_t count,
656 zone_flags_t flags, void *confdata)
657{
658 zone->base = start;
659 zone->count = count;
660 zone->flags = flags;
661 zone->free_count = count;
662 zone->busy_count = 0;
663
664 if (flags & ZONE_AVAILABLE) {
665 /*
666 * Initialize frame bitmap (located after the array of
667 * frame_t structures in the configuration space).
668 */
669
670 bitmap_initialize(&zone->bitmap, count, confdata +
671 (sizeof(frame_t) * count));
672 bitmap_clear_range(&zone->bitmap, 0, count);
673
674 /*
675 * Initialize the array of frame_t structures.
676 */
677
678 zone->frames = (frame_t *) confdata;
679
680 for (size_t i = 0; i < count; i++)
681 frame_initialize(&zone->frames[i]);
682 } else {
683 bitmap_initialize(&zone->bitmap, 0, NULL);
684 zone->frames = NULL;
685 }
686}
687
688/** Compute configuration data size for zone.
689 *
690 * @param count Size of zone in frames.
691 *
692 * @return Size of zone configuration info (in bytes).
693 *
694 */
695size_t zone_conf_size(size_t count)
696{
697 return (count * sizeof(frame_t) + bitmap_size(count));
698}
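/*
 * Layout of the configuration data block whose size is computed above, as
 * relied upon by zone_construct() and zone_merge_internal():
 *
 *   confdata + 0                       : count * sizeof(frame_t) frame_t entries
 *   confdata + count * sizeof(frame_t) : bitmap_size(count) bytes of bitmap storage
 */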
699
700/** Allocate external configuration frames from low memory. */
701pfn_t zone_external_conf_alloc(size_t count)
702{
703 size_t frames = SIZE2FRAMES(zone_conf_size(count));
704
705 return ADDR2PFN((uintptr_t)
706 frame_alloc(frames, FRAME_LOWMEM | FRAME_ATOMIC, 0));
707}
708
709/** Create and add zone to system.
710 *
711 * @param start First frame number (absolute).
712 * @param count Size of zone in frames.
713 * @param confframe Where configuration frames are supposed to be.
714 * Automatically checks that we will not disturb the
715 * kernel and possibly init. If confframe is given
716 * _outside_ this zone, it is expected that the area is
717 * already marked BUSY and big enough to contain
718 * zone_conf_size() amount of data. If the confframe is
719 * inside the area, the zone free frame information is
720 * modified not to include it.
721 *
722 * @return Zone number or -1 on error.
723 *
724 */
725size_t zone_create(pfn_t start, size_t count, pfn_t confframe,
726 zone_flags_t flags)
727{
728 irq_spinlock_lock(&zones.lock, true);
729
730 if (flags & ZONE_AVAILABLE) { /* Create available zone */
731 /*
732 * Theoretically we could have NULL here; in practice, make sure
733 * nobody tries to do that. If some platform requires it, remove
734 * the assert.
735 */
736 assert(confframe != ADDR2PFN((uintptr_t) NULL));
737
738 /* Update the known end of physical memory. */
739 config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count));
740
741 /*
742 * If confframe is supposed to be inside our zone, then make sure
743 * it does not span kernel & init
744 */
745 size_t confcount = SIZE2FRAMES(zone_conf_size(count));
746
747 if ((confframe >= start) && (confframe < start + count)) {
748 for (; confframe < start + count; confframe++) {
749 uintptr_t addr = PFN2ADDR(confframe);
750 if (overlaps(addr, PFN2ADDR(confcount),
751 KA2PA(config.base), config.kernel_size))
752 continue;
753
754 bool overlap = false;
755 for (size_t i = 0; i < init.cnt; i++) {
756 if (overlaps(addr, PFN2ADDR(confcount),
757 init.tasks[i].paddr,
758 init.tasks[i].size)) {
759 overlap = true;
760 break;
761 }
762 }
763
764 if (overlap)
765 continue;
766
767 break;
768 }
769
770 if (confframe >= start + count)
771 panic("Cannot find configuration data for zone.");
772 }
773
774 size_t znum = zones_insert_zone(start, count, flags);
775 if (znum == (size_t) -1) {
776 irq_spinlock_unlock(&zones.lock, true);
777 return (size_t) -1;
778 }
779
780 void *confdata = (void *) PA2KA(PFN2ADDR(confframe));
781 zone_construct(&zones.info[znum], start, count, flags, confdata);
782
783 /* If confdata in zone, mark as unavailable */
784 if ((confframe >= start) && (confframe < start + count)) {
785 for (size_t i = confframe; i < confframe + confcount; i++)
786 zone_mark_unavailable(&zones.info[znum],
787 i - zones.info[znum].base);
788 }
789
790 irq_spinlock_unlock(&zones.lock, true);
791
792 return znum;
793 }
794
795 /* Non-available zone */
796 size_t znum = zones_insert_zone(start, count, flags);
797 if (znum == (size_t) -1) {
798 irq_spinlock_unlock(&zones.lock, true);
799 return (size_t) -1;
800 }
801
802 zone_construct(&zones.info[znum], start, count, flags, NULL);
803
804 irq_spinlock_unlock(&zones.lock, true);
805
806 return znum;
807}
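/*
 * A minimal usage sketch (the address, size and flags below are purely
 * illustrative, not taken from any particular platform): register a 16 MiB
 * available low-memory zone whose configuration frames live at its start.
 *
 *   pfn_t base = ADDR2PFN(0x1000000);
 *   size_t frames = SIZE2FRAMES(16 * 1024 * 1024);
 *   size_t znum = zone_create(base, frames, base,
 *       ZONE_AVAILABLE | ZONE_LOWMEM);
 *   if (znum == (size_t) -1)
 *       panic("Cannot create memory zone.");
 */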
808
809/*
810 * Frame functions
811 */
812
813/** Set parent of frame. */
814void frame_set_parent(pfn_t pfn, void *data, size_t hint)
815{
816 irq_spinlock_lock(&zones.lock, true);
817
818 size_t znum = find_zone(pfn, 1, hint);
819
820 assert(znum != (size_t) -1);
821
822 zone_get_frame(&zones.info[znum],
823 pfn - zones.info[znum].base)->parent = data;
824
825 irq_spinlock_unlock(&zones.lock, true);
826}
827
828void *frame_get_parent(pfn_t pfn, size_t hint)
829{
830 irq_spinlock_lock(&zones.lock, true);
831
832 size_t znum = find_zone(pfn, 1, hint);
833
834 assert(znum != (size_t) -1);
835
836 void *res = zone_get_frame(&zones.info[znum],
837 pfn - zones.info[znum].base)->parent;
838
839 irq_spinlock_unlock(&zones.lock, true);
840
841 return res;
842}
843
844static size_t try_find_zone(size_t count, bool lowmem,
845 pfn_t frame_constraint, size_t hint)
846{
847 if (!lowmem) {
848 size_t znum = find_free_zone(count,
849 ZONE_HIGHMEM | ZONE_AVAILABLE, frame_constraint, hint);
850 if (znum != (size_t) -1)
851 return znum;
852 }
853
854 return find_free_zone(count, ZONE_LOWMEM | ZONE_AVAILABLE,
855 frame_constraint, hint);
856}
857
858/** Allocate frames of physical memory.
859 *
860 * @param count Number of contiguous frames to allocate.
861 * @param flags Flags for host zone selection and address processing.
862 * @param constraint Indication of physical address bits that cannot be
863 * set in the address of the first allocated frame.
864 * @param pzone Preferred zone.
865 *
866 * @return Physical address of the first allocated frame, or zero on failure (FRAME_ATOMIC only).
867 *
868 */
869uintptr_t frame_alloc_generic(size_t count, frame_flags_t flags,
870 uintptr_t constraint, size_t *pzone)
871{
872 assert(count > 0);
873
874 size_t hint = pzone ? (*pzone) : 0;
875 pfn_t frame_constraint = ADDR2PFN(constraint);
876
877 /*
878 * If not told otherwise, we must first reserve the memory.
879 */
880 if (!(flags & FRAME_NO_RESERVE))
881 reserve_force_alloc(count);
882
883loop:
884 irq_spinlock_lock(&zones.lock, true);
885
886 // TODO: Print diagnostic if neither is explicitly specified.
887 bool lowmem = (flags & FRAME_LOWMEM) || !(flags & FRAME_HIGHMEM);
888
889 /*
890 * First, find suitable frame zone.
891 */
892 size_t znum = try_find_zone(count, lowmem, frame_constraint, hint);
893
894 /*
895 * If there is no memory, reclaim some slab memory;
896 * if that does not help, reclaim all of it.
897 */
898 if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) {
899 irq_spinlock_unlock(&zones.lock, true);
900 size_t freed = slab_reclaim(0);
901 irq_spinlock_lock(&zones.lock, true);
902
903 if (freed > 0)
904 znum = try_find_zone(count, lowmem,
905 frame_constraint, hint);
906
907 if (znum == (size_t) -1) {
908 irq_spinlock_unlock(&zones.lock, true);
909 freed = slab_reclaim(SLAB_RECLAIM_ALL);
910 irq_spinlock_lock(&zones.lock, true);
911
912 if (freed > 0)
913 znum = try_find_zone(count, lowmem,
914 frame_constraint, hint);
915 }
916 }
917
918 if (znum == (size_t) -1) {
919 if (flags & FRAME_ATOMIC) {
920 irq_spinlock_unlock(&zones.lock, true);
921
922 if (!(flags & FRAME_NO_RESERVE))
923 reserve_free(count);
924
925 return 0;
926 }
927
928 size_t avail = frame_total_free_get_internal();
929
930 irq_spinlock_unlock(&zones.lock, true);
931
932 if (!THREAD)
933 panic("Cannot wait for %zu frames to become available "
934 "(%zu available).", count, avail);
935
936 /*
937 * Sleep until some frames are available again.
938 */
939
940#ifdef CONFIG_DEBUG
941 log(LF_OTHER, LVL_DEBUG,
942 "Thread %" PRIu64 " waiting for %zu frames "
943 "%zu available.", THREAD->tid, count, avail);
944#endif
945
946 /*
947 * Since the mem_avail_mtx is an active mutex, we need to
948 * disable interrupts to prevent deadlock with TLB shootdown.
949 */
950 ipl_t ipl = interrupts_disable();
951 mutex_lock(&mem_avail_mtx);
952
953 if (mem_avail_req > 0)
954 mem_avail_req = min(mem_avail_req, count);
955 else
956 mem_avail_req = count;
957
958 size_t gen = mem_avail_gen;
959
960 while (gen == mem_avail_gen)
961 condvar_wait(&mem_avail_cv, &mem_avail_mtx);
962
963 mutex_unlock(&mem_avail_mtx);
964 interrupts_restore(ipl);
965
966#ifdef CONFIG_DEBUG
967 log(LF_OTHER, LVL_DEBUG, "Thread %" PRIu64 " woken up.",
968 THREAD->tid);
969#endif
970
971 goto loop;
972 }
973
974 pfn_t pfn = zone_frame_alloc(&zones.info[znum], count,
975 frame_constraint) + zones.info[znum].base;
976
977 irq_spinlock_unlock(&zones.lock, true);
978
979 if (pzone)
980 *pzone = znum;
981
982 return PFN2ADDR(pfn);
983}
984
985uintptr_t frame_alloc(size_t count, frame_flags_t flags, uintptr_t constraint)
986{
987 return frame_alloc_generic(count, flags, constraint, NULL);
988}
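/*
 * A small usage sketch (illustrative only): allocate four physically
 * contiguous low-memory frames without sleeping and free them again.
 * FRAME_ATOMIC makes the call return zero instead of blocking when no
 * memory is available.
 *
 *   uintptr_t phys = frame_alloc(4, FRAME_LOWMEM | FRAME_ATOMIC, 0);
 *   if (phys != 0) {
 *       // ... use the frames, e.g. via PA2KA(phys) ...
 *       frame_free(phys, 4);
 *   }
 */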
989
990/** Free frames of physical memory.
991 *
992 * Find respective frame structures for supplied physical frames.
993 * Decrement each frame reference count. If it drops to zero, mark
994 * the frames as available.
995 *
996 * @param start Physical address of the first frame to be freed.
997 * @param count Number of frames to free.
998 * @param flags Flags to control memory reservation.
999 *
1000 */
1001void frame_free_generic(uintptr_t start, size_t count, frame_flags_t flags)
1002{
1003 size_t freed = 0;
1004
1005 irq_spinlock_lock(&zones.lock, true);
1006
1007 for (size_t i = 0; i < count; i++) {
1008 /*
1009 * First, find host frame zone for addr.
1010 */
1011 pfn_t pfn = ADDR2PFN(start) + i;
1012 size_t znum = find_zone(pfn, 1, 0);
1013
1014 assert(znum != (size_t) -1);
1015
1016 freed += zone_frame_free(&zones.info[znum],
1017 pfn - zones.info[znum].base);
1018 }
1019
1020 irq_spinlock_unlock(&zones.lock, true);
1021
1022 /*
1023 * Signal that some memory has been freed.
1024 * Since the mem_avail_mtx is an active mutex,
1025 * we need to disable interrupts to prevent deadlock
1026 * with TLB shootdown.
1027 */
1028
1029 ipl_t ipl = interrupts_disable();
1030 mutex_lock(&mem_avail_mtx);
1031
1032 if (mem_avail_req > 0)
1033 mem_avail_req -= min(mem_avail_req, freed);
1034
1035 if (mem_avail_req == 0) {
1036 mem_avail_gen++;
1037 condvar_broadcast(&mem_avail_cv);
1038 }
1039
1040 mutex_unlock(&mem_avail_mtx);
1041 interrupts_restore(ipl);
1042
1043 if (!(flags & FRAME_NO_RESERVE))
1044 reserve_free(freed);
1045}
1046
1047void frame_free(uintptr_t frame, size_t count)
1048{
1049 frame_free_generic(frame, count, 0);
1050}
1051
1052void frame_free_noreserve(uintptr_t frame, size_t count)
1053{
1054 frame_free_generic(frame, count, FRAME_NO_RESERVE);
1055}
1056
1057/** Add reference to frame.
1058 *
1059 * Find respective frame structure for supplied PFN and
1060 * increment frame reference count.
1061 *
1062 * @param pfn Frame number of the frame to be referenced.
1063 *
1064 */
1065_NO_TRACE void frame_reference_add(pfn_t pfn)
1066{
1067 irq_spinlock_lock(&zones.lock, true);
1068
1069 /*
1070 * First, find host frame zone for addr.
1071 */
1072 size_t znum = find_zone(pfn, 1, 0);
1073
1074 assert(znum != (size_t) -1);
1075
1076 zones.info[znum].frames[pfn - zones.info[znum].base].refcount++;
1077
1078 irq_spinlock_unlock(&zones.lock, true);
1079}
1080
1081/** Mark given range unavailable in frame zones.
1082 *
1083 */
1084_NO_TRACE void frame_mark_unavailable(pfn_t start, size_t count)
1085{
1086 irq_spinlock_lock(&zones.lock, true);
1087
1088 for (size_t i = 0; i < count; i++) {
1089 size_t znum = find_zone(start + i, 1, 0);
1090
1091 if (znum == (size_t) -1) /* PFN not found */
1092 continue;
1093
1094 zone_mark_unavailable(&zones.info[znum],
1095 start + i - zones.info[znum].base);
1096 }
1097
1098 irq_spinlock_unlock(&zones.lock, true);
1099}
1100
1101/** Initialize physical memory management.
1102 *
1103 */
1104void frame_init(void)
1105{
1106 if (config.cpu_active == 1) {
1107 zones.count = 0;
1108 irq_spinlock_initialize(&zones.lock, "frame.zones.lock");
1109 mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE);
1110 condvar_initialize(&mem_avail_cv);
1111 }
1112
1113 /* Tell the architecture to create some memory */
1114 frame_low_arch_init();
1115
1116 if (config.cpu_active == 1) {
1117 frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
1118 SIZE2FRAMES(config.kernel_size));
1119
1120 for (size_t i = 0; i < init.cnt; i++)
1121 frame_mark_unavailable(ADDR2PFN(init.tasks[i].paddr),
1122 SIZE2FRAMES(init.tasks[i].size));
1123
1124 if (ballocs.size)
1125 frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
1126 SIZE2FRAMES(ballocs.size));
1127
1128 /*
1129 * Blacklist first frame, as allocating NULL would
1130 * fail in some places
1131 */
1132 frame_mark_unavailable(0, 1);
1133 }
1134
1135 frame_high_arch_init();
1136}
1137
1138/** Adjust bounds of physical memory region according to low/high memory split.
1139 *
1140 * @param low[in] If true, the adjustment is performed to make the region
1141 * fit in the low memory. Otherwise the adjustment is
1142 * performed to make the region fit in the high memory.
1143 * @param basep[inout] Pointer to a variable which contains the region's base
1144 * address and which may receive the adjusted base address.
1145 * @param sizep[inout] Pointer to a variable which contains the region's size
1146 * and which may receive the adjusted size.
1147 *
1148 * @return True if the region still exists even after the adjustment.
1149 * @return False otherwise.
1150 *
1151 */
1152bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep)
1153{
1154 uintptr_t limit = KA2PA(config.identity_base) + config.identity_size;
1155
1156 if (low) {
1157 if (*basep > limit)
1158 return false;
1159
1160 if (*basep + *sizep > limit)
1161 *sizep = limit - *basep;
1162 } else {
1163 if (*basep + *sizep <= limit)
1164 return false;
1165
1166 if (*basep <= limit) {
1167 *sizep -= limit - *basep;
1168 *basep = limit;
1169 }
1170 }
1171
1172 return true;
1173}
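/*
 * Worked example (the addresses are illustrative): with the low/high split
 * at limit = 0x40000000, a region [0x3ff00000, 0x40100000) is clipped to
 * [0x3ff00000, 0x40000000) when low is true, and to [0x40000000, 0x40100000)
 * when low is false. A region lying entirely below the limit is rejected
 * (returns false) in the high-memory case, and a region starting above the
 * limit is rejected in the low-memory case.
 */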
1174
1175/** Return total size of all zones.
1176 *
1177 */
1178uint64_t zones_total_size(void)
1179{
1180 irq_spinlock_lock(&zones.lock, true);
1181
1182 uint64_t total = 0;
1183
1184 for (size_t i = 0; i < zones.count; i++)
1185 total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
1186
1187 irq_spinlock_unlock(&zones.lock, true);
1188
1189 return total;
1190}
1191
1192void zones_stats(uint64_t *total, uint64_t *unavail, uint64_t *busy,
1193 uint64_t *free)
1194{
1195 assert(total != NULL);
1196 assert(unavail != NULL);
1197 assert(busy != NULL);
1198 assert(free != NULL);
1199
1200 irq_spinlock_lock(&zones.lock, true);
1201
1202 *total = 0;
1203 *unavail = 0;
1204 *busy = 0;
1205 *free = 0;
1206
1207 for (size_t i = 0; i < zones.count; i++) {
1208 *total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
1209
1210 if (zones.info[i].flags & ZONE_AVAILABLE) {
1211 *busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count);
1212 *free += (uint64_t) FRAMES2SIZE(zones.info[i].free_count);
1213 } else
1214 *unavail += (uint64_t) FRAMES2SIZE(zones.info[i].count);
1215 }
1216
1217 irq_spinlock_unlock(&zones.lock, true);
1218}
1219
1220/** Prints list of zones.
1221 *
1222 */
1223void zones_print_list(void)
1224{
1225#ifdef __32_BITS__
1226 printf("[nr] [base addr] [frames ] [flags ] [free frames ] [busy frames ]\n");
1227#endif
1228
1229#ifdef __64_BITS__
1230 printf("[nr] [base address ] [frames ] [flags ] [free frames ] [busy frames ]\n");
1231#endif
1232
1233 /*
1234 * Because printing may require allocation of memory, we may not hold
1235 * the frame allocator locks when printing zone statistics. Therefore,
1236 * we simply gather the statistics under the protection of the locks and
1237 * print the statistics when the locks have been released.
1238 *
1239 * When someone adds/removes zones while we are printing the statistics,
1240 * we may end up with inaccurate output (e.g. a zone being skipped from
1241 * the listing).
1242 */
1243
1244 size_t free_lowmem = 0;
1245 size_t free_highmem = 0;
1246 size_t free_highprio = 0;
1247
1248 for (size_t i = 0; ; i++) {
1249 irq_spinlock_lock(&zones.lock, true);
1250
1251 if (i >= zones.count) {
1252 irq_spinlock_unlock(&zones.lock, true);
1253 break;
1254 }
1255
1256 pfn_t fbase = zones.info[i].base;
1257 uintptr_t base = PFN2ADDR(fbase);
1258 size_t count = zones.info[i].count;
1259 zone_flags_t flags = zones.info[i].flags;
1260 size_t free_count = zones.info[i].free_count;
1261 size_t busy_count = zones.info[i].busy_count;
1262
1263 bool available = ((flags & ZONE_AVAILABLE) != 0);
1264 bool lowmem = ((flags & ZONE_LOWMEM) != 0);
1265 bool highmem = ((flags & ZONE_HIGHMEM) != 0);
1266 bool highprio = is_high_priority(fbase, count);
1267
1268 if (available) {
1269 if (lowmem)
1270 free_lowmem += free_count;
1271
1272 if (highmem)
1273 free_highmem += free_count;
1274
1275 if (highprio) {
1276 free_highprio += free_count;
1277 } else {
1278 /*
1279 * Walk all frames of the zone and examine
1280 * all high priority memory to get accurate
1281 * statistics.
1282 */
1283
1284 for (size_t index = 0; index < count; index++) {
1285 if (is_high_priority(fbase + index, 0)) {
1286 if (!bitmap_get(&zones.info[i].bitmap, index))
1287 free_highprio++;
1288 } else
1289 break;
1290 }
1291 }
1292 }
1293
1294 irq_spinlock_unlock(&zones.lock, true);
1295
1296 printf("%-4zu", i);
1297
1298#ifdef __32_BITS__
1299 printf(" %p", (void *) base);
1300#endif
1301
1302#ifdef __64_BITS__
1303 printf(" %p", (void *) base);
1304#endif
1305
1306 printf(" %12zu %c%c%c%c%c ", count,
1307 available ? 'A' : '-',
1308 (flags & ZONE_RESERVED) ? 'R' : '-',
1309 (flags & ZONE_FIRMWARE) ? 'F' : '-',
1310 (flags & ZONE_LOWMEM) ? 'L' : '-',
1311 (flags & ZONE_HIGHMEM) ? 'H' : '-');
1312
1313 if (available)
1314 printf("%14zu %14zu",
1315 free_count, busy_count);
1316
1317 printf("\n");
1318 }
1319
1320 printf("\n");
1321
1322 uint64_t size;
1323 const char *size_suffix;
1324
1325 bin_order_suffix(FRAMES2SIZE(free_lowmem), &size, &size_suffix,
1326 false);
1327 printf("Available low memory: %zu frames (%" PRIu64 " %s)\n",
1328 free_lowmem, size, size_suffix);
1329
1330 bin_order_suffix(FRAMES2SIZE(free_highmem), &size, &size_suffix,
1331 false);
1332 printf("Available high memory: %zu frames (%" PRIu64 " %s)\n",
1333 free_highmem, size, size_suffix);
1334
1335 bin_order_suffix(FRAMES2SIZE(free_highprio), &size, &size_suffix,
1336 false);
1337 printf("Available high priority: %zu frames (%" PRIu64 " %s)\n",
1338 free_highprio, size, size_suffix);
1339}
1340
1341/** Prints zone details.
1342 *
1343 * @param num Zone base address or zone number.
1344 *
1345 */
1346void zone_print_one(size_t num)
1347{
1348 irq_spinlock_lock(&zones.lock, true);
1349 size_t znum = (size_t) -1;
1350
1351 for (size_t i = 0; i < zones.count; i++) {
1352 if ((i == num) || (PFN2ADDR(zones.info[i].base) == num)) {
1353 znum = i;
1354 break;
1355 }
1356 }
1357
1358 if (znum == (size_t) -1) {
1359 irq_spinlock_unlock(&zones.lock, true);
1360 printf("Zone not found.\n");
1361 return;
1362 }
1363
1364 size_t free_lowmem = 0;
1365 size_t free_highmem = 0;
1366 size_t free_highprio = 0;
1367
1368 pfn_t fbase = zones.info[znum].base;
1369 uintptr_t base = PFN2ADDR(fbase);
1370 zone_flags_t flags = zones.info[znum].flags;
1371 size_t count = zones.info[znum].count;
1372 size_t free_count = zones.info[znum].free_count;
1373 size_t busy_count = zones.info[znum].busy_count;
1374
1375 bool available = ((flags & ZONE_AVAILABLE) != 0);
1376 bool lowmem = ((flags & ZONE_LOWMEM) != 0);
1377 bool highmem = ((flags & ZONE_HIGHMEM) != 0);
1378 bool highprio = is_high_priority(fbase, count);
1379
1380 if (available) {
1381 if (lowmem)
1382 free_lowmem = free_count;
1383
1384 if (highmem)
1385 free_highmem = free_count;
1386
1387 if (highprio) {
1388 free_highprio = free_count;
1389 } else {
1390 /*
1391 * Walk all frames of the zone and examine
1392 * all high priority memory to get accurate
1393 * statistics.
1394 */
1395
1396 for (size_t index = 0; index < count; index++) {
1397 if (is_high_priority(fbase + index, 0)) {
1398 if (!bitmap_get(&zones.info[znum].bitmap, index))
1399 free_highprio++;
1400 } else
1401 break;
1402 }
1403 }
1404 }
1405
1406 irq_spinlock_unlock(&zones.lock, true);
1407
1408 uint64_t size;
1409 const char *size_suffix;
1410
1411 bin_order_suffix(FRAMES2SIZE(count), &size, &size_suffix, false);
1412
1413 printf("Zone number: %zu\n", znum);
1414 printf("Zone base address: %p\n", (void *) base);
1415 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count,
1416 size, size_suffix);
1417 printf("Zone flags: %c%c%c%c%c\n",
1418 available ? 'A' : '-',
1419 (flags & ZONE_RESERVED) ? 'R' : '-',
1420 (flags & ZONE_FIRMWARE) ? 'F' : '-',
1421 (flags & ZONE_LOWMEM) ? 'L' : '-',
1422 (flags & ZONE_HIGHMEM) ? 'H' : '-');
1423
1424 if (available) {
1425 bin_order_suffix(FRAMES2SIZE(busy_count), &size, &size_suffix,
1426 false);
1427 printf("Allocated space: %zu frames (%" PRIu64 " %s)\n",
1428 busy_count, size, size_suffix);
1429
1430 bin_order_suffix(FRAMES2SIZE(free_count), &size, &size_suffix,
1431 false);
1432 printf("Available space: %zu frames (%" PRIu64 " %s)\n",
1433 free_count, size, size_suffix);
1434
1435 bin_order_suffix(FRAMES2SIZE(free_lowmem), &size, &size_suffix,
1436 false);
1437 printf("Available low memory: %zu frames (%" PRIu64 " %s)\n",
1438 free_lowmem, size, size_suffix);
1439
1440 bin_order_suffix(FRAMES2SIZE(free_highmem), &size, &size_suffix,
1441 false);
1442 printf("Available high memory: %zu frames (%" PRIu64 " %s)\n",
1443 free_highmem, size, size_suffix);
1444
1445 bin_order_suffix(FRAMES2SIZE(free_highprio), &size, &size_suffix,
1446 false);
1447 printf("Available high priority: %zu frames (%" PRIu64 " %s)\n",
1448 free_highprio, size, size_suffix);
1449 }
1450}
1451
1452/** @}
1453 */