Changeset 3a0a4d8 in mainline for kernel/generic

- Timestamp: 2013-09-12T07:54:05Z (12 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 95027b5
- Parents: 47f5a77 (diff), 64f3d3b (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: kernel/generic
- Files: 2 deleted, 35 edited
- include/adt/bitmap.h (modified) (1 diff)
- include/adt/list.h (modified) (4 diffs)
- include/config.h (modified) (3 diffs)
- include/macros.h (modified) (1 diff)
- include/mm/buddy.h (deleted)
- include/mm/frame.h (modified) (4 diffs)
- include/mm/page.h (modified) (1 diff)
- include/mm/slab.h (modified) (3 diffs)
- src/adt/bitmap.c (modified) (4 diffs)
- src/adt/btree.c (modified) (1 diff)
- src/adt/hash_table.c (modified) (2 diffs)
- src/adt/list.c (modified) (1 diff)
- src/console/cmd.c (modified) (3 diffs)
- src/console/console.c (modified) (2 diffs)
- src/console/kconsole.c (modified) (3 diffs)
- src/cpu/cpu.c (modified) (1 diff)
- src/ddi/ddi.c (modified) (4 diffs)
- src/debug/stacktrace.c (modified) (2 diffs)
- src/ipc/ipc.c (modified) (1 diff)
- src/ipc/ipcrsc.c (modified) (1 diff)
- src/lib/ra.c (modified) (2 diffs)
- src/mm/as.c (modified) (9 diffs)
- src/mm/backend_anon.c (modified) (3 diffs)
- src/mm/backend_elf.c (modified) (2 diffs)
- src/mm/buddy.c (deleted)
- src/mm/frame.c (modified) (52 diffs)
- src/mm/km.c (modified) (1 diff)
- src/mm/page.c (modified) (3 diffs)
- src/mm/slab.c (modified) (12 diffs)
- src/proc/scheduler.c (modified) (1 diff)
- src/proc/task.c (modified) (2 diffs)
- src/proc/thread.c (modified) (3 diffs)
- src/synch/futex.c (modified) (1 diff)
- src/sysinfo/stats.c (modified) (2 diffs)
- src/time/clock.c (modified) (2 diffs)
- src/udebug/udebug.c (modified) (1 diff)
- src/udebug/udebug_ops.c (modified) (2 diffs)
Legend: in the diffs below, lines prefixed with '-' were removed, lines prefixed with '+' were added, unprefixed lines are unchanged context, and '…' marks a gap between hunks.
kernel/generic/include/adt/bitmap.h
 #include <typedefs.h>

-#define BITS2BYTES(bits) (bits ? ((((bits) - 1) >> 3) + 1) : 0)
+#define BITMAP_ELEMENT   8
+#define BITMAP_REMAINER  7

 typedef struct {
-    uint8_t *map;
-    size_t bits;
+    size_t elements;
+    uint8_t *bits;
+
+    size_t block_size;
+    uint8_t *blocks;
 } bitmap_t;

-extern void bitmap_initialize(bitmap_t *bitmap, uint8_t *map, size_t bits);
-extern void bitmap_set_range(bitmap_t *bitmap, size_t start, size_t bits);
-extern void bitmap_clear_range(bitmap_t *bitmap, size_t start, size_t bits);
-extern void bitmap_copy(bitmap_t *dst, bitmap_t *src, size_t bits);
+static inline void bitmap_set(bitmap_t *bitmap, size_t element,
+    unsigned int value)
+{
+    if (element < bitmap->elements) {
+        /*
+         * The 2nd level bitmap is conservative.
+         * Make sure we update it properly.
+         */
+
+        if (value) {
+            bitmap->bits[element / BITMAP_ELEMENT] |=
+                (1 << (element & BITMAP_REMAINER));
+        } else {
+            bitmap->bits[element / BITMAP_ELEMENT] &=
+                ~(1 << (element & BITMAP_REMAINER));
+
+            if (bitmap->block_size > 0) {
+                size_t block = element / bitmap->block_size;
+
+                bitmap->blocks[block / BITMAP_ELEMENT] &=
+                    ~(1 << (block & BITMAP_REMAINER));
+            }
+        }
+    }
+}

-static inline int bitmap_get(bitmap_t *bitmap, size_t bit)
+static inline unsigned int bitmap_get(bitmap_t *bitmap, size_t element)
 {
-    if (bit >= bitmap->bits)
+    if (element >= bitmap->elements)
         return 0;

-    return !!((bitmap->map)[bit / 8] & (1 << (bit & 7)));
+    return !!((bitmap->bits)[element / BITMAP_ELEMENT] &
+        (1 << (element & BITMAP_REMAINER)));
 }

+extern size_t bitmap_size(size_t, size_t);
+extern void bitmap_initialize(bitmap_t *, size_t, size_t, void *);
+
+extern void bitmap_set_range(bitmap_t *, size_t, size_t);
+extern void bitmap_clear_range(bitmap_t *, size_t, size_t);
+
+extern int bitmap_allocate_range(bitmap_t *, size_t, size_t, size_t, size_t *);
+extern void bitmap_copy(bitmap_t *, bitmap_t *, size_t);

 #endif
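For orientation, here is a minimal usage sketch of the reworked two-level bitmap API, based only on the declarations above; the sizes and the malloc() call are illustrative (kernel code would carve the backing store out of its own memory):

    /* Sketch only: ELEMENTS, BLOCK_SIZE and malloc() are assumptions. */
    #define ELEMENTS    1024
    #define BLOCK_SIZE  128            /* 0 would disable the 2nd level */

    size_t nbytes = bitmap_size(ELEMENTS, BLOCK_SIZE);
    void *data = malloc(nbytes);       /* caller provides the backing store */

    bitmap_t bitmap;
    bitmap_initialize(&bitmap, ELEMENTS, BLOCK_SIZE, data);
    bitmap_clear_range(&bitmap, 0, ELEMENTS);

    bitmap_set_range(&bitmap, 10, 20);           /* mark elements 10..29 */
    unsigned int bit = bitmap_get(&bitmap, 15);  /* 1 */
    bitmap_set(&bitmap, 15, 0);                  /* clear a single element */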
kernel/generic/include/adt/list.h
 /*
  * Copyright (c) 2001-2004 Jakub Jermar
- * Copyright (c) 2011 Jiri Svoboda
+ * Copyright (c) 2013 Jiri Svoboda
  * All rights reserved.
  *
…
 #define list_get_instance(link, type, member) \
-    ((type *) (((void *)(link)) - ((void *) &(((type *) NULL)->member))))
-
-#define list_foreach(list, iterator) \
-    for (link_t *iterator = (list).head.next; \
-        iterator != &(list).head; iterator = iterator->next)
+    ((type *) (((void *)(link)) - list_link_to_void(&(((type *) NULL)->member))))
+
+#define list_foreach(list, member, itype, iterator) \
+    for (itype *iterator = NULL; iterator == NULL; iterator = (itype *) 1) \
+        for (link_t *_link = (list).head.next; \
+            iterator = list_get_instance(_link, itype, member), \
+            _link != &(list).head; _link = _link->next)

 #define assert_link_not_used(link) \
…
 }

+/** Get next item in list.
+ *
+ * @param link Current item link
+ * @param list List containing @a link
+ *
+ * @return Next item or NULL if @a link is the last item.
+ */
+static inline link_t *list_next(link_t *link, const list_t *list)
+{
+    return (link->next == &list->head) ? NULL : link->next;
+}
+
+/** Get previous item in list.
+ *
+ * @param link Current item link
+ * @param list List containing @a link
+ *
+ * @return Previous item or NULL if @a link is the first item.
+ */
+static inline link_t *list_prev(link_t *link, const list_t *list)
+{
+    return (link->prev == &list->head) ? NULL : link->prev;
+}
+
 /** Split or concatenate headless doubly-linked circular list
…
 {
     unsigned int cnt = 0;
-
-    list_foreach(*list, link) {
+    link_t *link;
+
+    link = list_first(list);
+    while (link != NULL) {
         if (cnt == n)
             return link;

         cnt++;
+        link = list_next(link, list);
     }

     return NULL;
+}
+
+/** Verify that argument type is a pointer to link_t (at compile time).
+ *
+ * This can be used to check argument type in a macro.
+ */
+static inline const void *list_link_to_void(const link_t *link)
+{
+    return link;
 }
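The new four-argument list_foreach takes the link member name and the instance type, so the loop body gets a typed iterator directly instead of a raw link. A sketch, using the cmd_info_t/cmd_list names from the kconsole changes below:

    /* Old form:
     *     list_foreach(cmd_list, cur) {
     *         cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link);
     *         ...
     *     }
     * New form (no explicit list_get_instance in the body): */
    list_foreach(cmd_list, link, cmd_info_t, hlp) {
        printf("%s\n", hlp->name);
    }

Where raw link_t iteration is still needed (or items may be inspected one at a time), the commit switches call sites to the list_first()/list_next() pair instead, as in hash_table.c and list.c below.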
kernel/generic/include/config.h
 #include <arch/mm/page.h>
+#include <macros.h>

-#define ONE_FRAME    0
-#define TWO_FRAMES   1
-#define FOUR_FRAMES  2
+#define STACK_FRAMES  2
+#define STACK_SIZE    FRAMES2SIZE(STACK_FRAMES)

-#define STACK_FRAMES  TWO_FRAMES
-#define STACK_SIZE    ((1 << STACK_FRAMES) << PAGE_WIDTH)
-
-#define STACK_SIZE_USER  (1 * 1024 * 1024)
+#define STACK_SIZE_USER  (1 * 1024 * 1024)

 #define CONFIG_INIT_TASKS  32
…
     /** Size of initial stack. */
     size_t stack_size;
-    
+
     bool identity_configured;
     /** Base address of the kernel identity mapped memory. */
     uintptr_t identity_base;
     /** Size of the kernel identity mapped memory. */
     size_t identity_size;
-    
-    bool non_identity_configured;
-    
+
+    bool non_identity_configured;
+
     /** End of physical memory. */
     uint64_t physmem_end;
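The stack size does not change here, only its encoding: the old constants were buddy orders, while the new STACK_FRAMES is a plain frame count. A worked check, assuming FRAME_WIDTH == PAGE_WIDTH == 12 (4 KiB frames, the typical configuration):

    /*
     *   old: STACK_FRAMES = TWO_FRAMES = 1 (a buddy order)
     *        STACK_SIZE   = (1 << 1) << 12          = 8192 bytes
     *   new: STACK_FRAMES = 2 (a frame count)
     *        STACK_SIZE   = FRAMES2SIZE(2) = 2 << 12 = 8192 bytes
     */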
kernel/generic/include/macros.h
     overlaps(KA2PA((x)), (szx), KA2PA((y)), (szy))

+#define PFN2ADDR(frame)  ((frame) << FRAME_WIDTH)
+#define ADDR2PFN(addr)   ((addr) >> FRAME_WIDTH)
+
+#define FRAMES2SIZE(frames)  ((frames) << FRAME_WIDTH)
+#define SIZE2FRAMES(size) \
+    (((size) == 0) ? 0 : ((((size) - 1) >> FRAME_WIDTH) + 1))
+
 #define KiB2SIZE(kb)  ((kb) << 10)
 #define MiB2SIZE(mb)  ((mb) << 20)
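These conversion macros replace the inline functions removed from frame.h below. A few worked conversions, assuming FRAME_WIDTH == 12 (4 KiB frames):

    /* Hedged sketch: the helper function is hypothetical, the values follow
     * directly from the macro definitions above. */
    static void conversion_examples(void)
    {
        uintptr_t addr = PFN2ADDR(3);   /* 3 << 12 == 0x3000 */
        pfn_t pfn = ADDR2PFN(0x3fff);   /* 0x3fff >> 12 == 3 (rounds down) */
        size_t f1 = SIZE2FRAMES(4096);  /* 1 */
        size_t f2 = SIZE2FRAMES(4097);  /* 2: a partial frame rounds up */
        size_t sz = FRAMES2SIZE(2);     /* 8192 */
        (void) addr; (void) pfn; (void) f1; (void) f2; (void) sz;
    }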
kernel/generic/include/mm/frame.h
 #include <typedefs.h>
 #include <trace.h>
+#include <adt/bitmap.h>
 #include <adt/list.h>
-#include <mm/buddy.h>
 #include <synch/spinlock.h>
 #include <arch/mm/page.h>
…
 typedef uint8_t frame_flags_t;

-#define FRAME_NONE        0x0
-/** Convert the frame address to kernel VA. */
-#define FRAME_KA          0x1
+#define FRAME_NONE        0x00
 /** Do not panic and do not sleep on failure. */
-#define FRAME_ATOMIC      0x2
+#define FRAME_ATOMIC      0x01
 /** Do not start reclaiming when no free memory. */
-#define FRAME_NO_RECLAIM  0x4
+#define FRAME_NO_RECLAIM  0x02
 /** Do not reserve / unreserve memory. */
-#define FRAME_NO_RESERVE  0x8
+#define FRAME_NO_RESERVE  0x04
 /** Allocate a frame which can be identity-mapped. */
-#define FRAME_LOWMEM      0x10
+#define FRAME_LOWMEM      0x08
 /** Allocate a frame which cannot be identity-mapped. */
-#define FRAME_HIGHMEM     0x20
+#define FRAME_HIGHMEM     0x10

 typedef uint8_t zone_flags_t;

-#define ZONE_NONE       0x0
+#define ZONE_NONE       0x00
 /** Available zone (free for allocation) */
-#define ZONE_AVAILABLE  0x1
+#define ZONE_AVAILABLE  0x01
 /** Zone is reserved (not available for allocation) */
-#define ZONE_RESERVED   0x2
+#define ZONE_RESERVED   0x02
 /** Zone is used by firmware (not available for allocation) */
-#define ZONE_FIRMWARE   0x4
+#define ZONE_FIRMWARE   0x04
 /** Zone contains memory that can be identity-mapped */
-#define ZONE_LOWMEM     0x8
+#define ZONE_LOWMEM     0x08
 /** Zone contains memory that cannot be identity-mapped */
-#define ZONE_HIGHMEM    0x10
+#define ZONE_HIGHMEM    0x10

 /** Mask of zone bits that must be matched exactly. */
-#define ZONE_EF_MASK  0x7
+#define ZONE_EF_MASK  0x07

 #define FRAME_TO_ZONE_FLAGS(ff) \
     ((((ff) & FRAME_LOWMEM) ? ZONE_LOWMEM : \
         (((ff) & FRAME_HIGHMEM) ? ZONE_HIGHMEM : \
         ZONE_LOWMEM /* | ZONE_HIGHMEM */)) | \
         ZONE_AVAILABLE)

 #define ZONE_FLAGS_MATCH(zf, f) \
…
 typedef struct {
-    size_t refcount;      /**< Tracking of shared frames */
-    link_t buddy_link;    /**< Link to the next free block inside
-                               one order */
-    void *parent;         /**< If allocated by slab, this points there */
-    uint8_t buddy_order;  /**< Buddy system block order */
+    size_t refcount;  /**< Tracking of shared frames */
+    void *parent;     /**< If allocated by slab, this points there */
 } frame_t;

 typedef struct {
-    pfn_t base;          /**< Frame_no of the first frame
-                              in the frames array */
-    size_t count;        /**< Size of zone */
-    size_t free_count;   /**< Number of free frame_t
-                              structures */
-    size_t busy_count;   /**< Number of busy frame_t
-                              structures */
-    zone_flags_t flags;  /**< Type of the zone */
-
-    frame_t *frames;     /**< Array of frame_t structures
-                              in this zone */
-    buddy_system_t *buddy_system;  /**< Buddy system for the zone */
+    /** Frame_no of the first frame in the frames array */
+    pfn_t base;
+
+    /** Size of zone */
+    size_t count;
+
+    /** Number of free frame_t structures */
+    size_t free_count;
+
+    /** Number of busy frame_t structures */
+    size_t busy_count;
+
+    /** Type of the zone */
+    zone_flags_t flags;
+
+    /** Frame bitmap */
+    bitmap_t bitmap;
+
+    /** Array of frame_t structures in this zone */
+    frame_t *frames;
 } zone_t;
…
 extern zones_t zones;

-NO_TRACE static inline uintptr_t PFN2ADDR(pfn_t frame)
-{
-    return (uintptr_t) (frame << FRAME_WIDTH);
-}
-
-NO_TRACE static inline pfn_t ADDR2PFN(uintptr_t addr)
-{
-    return (pfn_t) (addr >> FRAME_WIDTH);
-}
-
-NO_TRACE static inline size_t SIZE2FRAMES(size_t size)
-{
-    if (!size)
-        return 0;
-    return (size_t) ((size - 1) >> FRAME_WIDTH) + 1;
-}
-
-NO_TRACE static inline size_t FRAMES2SIZE(size_t frames)
-{
-    return (size_t) (frames << FRAME_WIDTH);
-}
-
-#define IS_BUDDY_ORDER_OK(index, order) \
-    ((~(((sysarg_t) -1) << (order)) & (index)) == 0)
-#define IS_BUDDY_LEFT_BLOCK(zone, frame) \
-    (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0)
-#define IS_BUDDY_RIGHT_BLOCK(zone, frame) \
-    (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1)
-#define IS_BUDDY_LEFT_BLOCK_ABS(zone, frame) \
-    (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0)
-#define IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame) \
-    (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1)
-
 extern void frame_init(void);
 extern bool frame_adjust_zone_bounds(bool, uintptr_t *, size_t *);
-extern void *frame_alloc_generic(uint8_t, frame_flags_t, size_t *);
-extern void *frame_alloc(uint8_t, frame_flags_t);
-extern void *frame_alloc_noreserve(uint8_t, frame_flags_t);
-extern void frame_free_generic(uintptr_t, frame_flags_t);
-extern void frame_free(uintptr_t);
-extern void frame_free_noreserve(uintptr_t);
+extern uintptr_t frame_alloc_generic(size_t, frame_flags_t, uintptr_t, size_t *);
+extern uintptr_t frame_alloc(size_t, frame_flags_t, uintptr_t);
+extern uintptr_t frame_alloc_noreserve(size_t, frame_flags_t, uintptr_t);
+extern void frame_free_generic(uintptr_t, size_t, frame_flags_t);
+extern void frame_free(uintptr_t, size_t);
+extern void frame_free_noreserve(uintptr_t, size_t);
 extern void frame_reference_add(pfn_t);
 extern size_t frame_total_free_get(void);
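The allocator interface now takes a frame count and an address constraint instead of a buddy order, returns a physical address, and no longer maps to kernel VA itself (FRAME_KA is gone). A sketch of the migration, following the cpu.c change later in this changeset:

    /* Old: cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES,
     *          FRAME_LOWMEM | FRAME_KA | FRAME_ATOMIC);
     * New: allocate physically, check for failure, convert explicitly. */
    uintptr_t phys = frame_alloc(STACK_FRAMES, FRAME_LOWMEM | FRAME_ATOMIC,
        STACK_SIZE - 1);  /* constraint: first frame aligned to STACK_SIZE */
    if (!phys)
        panic("Cannot allocate CPU stack.");

    uint8_t *stack = (uint8_t *) PA2KA(phys);
    /* ... */
    frame_free(phys, STACK_FRAMES);  /* freeing now also takes the count */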
kernel/generic/include/mm/page.h
 extern void page_table_destroy(pte_t *);

-extern int page_find_mapping(uintptr_t, void **);
-extern sysarg_t sys_page_find_mapping(uintptr_t, void *);
+extern int page_find_mapping(uintptr_t, uintptr_t *);
+extern sysarg_t sys_page_find_mapping(uintptr_t, uintptr_t *);

 #endif
kernel/generic/include/mm/slab.h
 /** Maximum wasted space we allow for cache */
 #define SLAB_MAX_BADNESS(cache) \
-    (((unsigned int) PAGE_SIZE << (cache)->order) >> 2)
+    (FRAMES2SIZE((cache)->frames) >> 2)

 /* slab_reclaim constants */
…
     /* Configuration */
+
     /** Size of slab position - align_up(sizeof(obj)) */
     size_t size;
…
     /* Computed values */
-    uint8_t order;   /**< Order of frames to be allocated */
+    size_t frames;   /**< Number of frames to be allocated */
     size_t objects;  /**< Number of objects that fit in */
kernel/generic/src/adt/bitmap.c
  *
  * This file implements bitmap ADT and provides functions for
- * setting and clearing ranges of bits.
+ * setting and clearing ranges of bits and for finding ranges
+ * of unset bits.
+ *
+ * The bitmap ADT can optionally implement a two-level hierarchy
+ * for faster range searches. The second level bitmap (of blocks)
+ * is not precise, but conservative. This means that if the block
+ * bit is set, it guarantees that all bits in the block are set.
+ * But if the block bit is unset, nothing can be said about the
+ * bits in the block.
+ *
  */
…
 #include <macros.h>

-#define ALL_ONES 0xff
-#define ALL_ZEROES 0x00
+#define ALL_ONES    0xff
+#define ALL_ZEROES  0x00
+
+/** Compute the size of a bitmap
+ *
+ * Compute the size of a bitmap that can store given number
+ * of elements.
+ *
+ * @param elements Number of elements to store.
+ *
+ * @return Size of the bitmap (in units of BITMAP_ELEMENT bits).
+ *
+ */
+static size_t bitmap_bytes(size_t elements)
+{
+    size_t bytes = elements / BITMAP_ELEMENT;
+
+    if ((elements % BITMAP_ELEMENT) != 0)
+        bytes++;
+
+    return bytes;
+}
+
+/** Compute the number of 2nd level blocks
+ *
+ * Compute the number of 2nd level blocks for a given number
+ * of elements.
+ *
+ * @param elements   Number of elements.
+ * @param block_size Number of elements in one block.
+ *
+ * @return Number of 2nd level blocks.
+ * @return Zero if block_size is zero.
+ *
+ */
+static size_t bitmap_blocks(size_t elements, size_t block_size)
+{
+    if (block_size == 0)
+        return 0;
+
+    size_t blocks = elements / block_size;
+
+    if ((elements % block_size) != 0)
+        blocks++;
+
+    return blocks;
+}
+
+/** Unchecked version of bitmap_get()
+ *
+ * This version of bitmap_get() does not do any boundary checks.
+ *
+ * @param bitmap  Bitmap to access.
+ * @param element Element to access.
+ *
+ * @return Bit value of the element in the bitmap.
+ *
+ */
+static unsigned int bitmap_get_fast(bitmap_t *bitmap, size_t element)
+{
+    return !!((bitmap->bits)[element / BITMAP_ELEMENT] &
+        (1 << (element & BITMAP_REMAINER)));
+}
+
+/** Get bitmap size
+ *
+ * Return the size (in bytes) required for the bitmap.
+ *
+ * @param elements   Number bits stored in bitmap.
+ * @param block_size Block size of the 2nd level bitmap.
+ *                   If set to zero, no 2nd level is used.
+ *
+ * @return Size (in bytes) required for the bitmap.
+ *
+ */
+size_t bitmap_size(size_t elements, size_t block_size)
+{
+    size_t blocks = bitmap_blocks(elements, block_size);
+
+    return (bitmap_bytes(elements) + bitmap_bytes(blocks));
+}

 /** Initialize bitmap.
  *
  * No portion of the bitmap is set or cleared by this function.
  *
- * @param bitmap Bitmap structure.
- * @param map    Address of the memory used to hold the map.
- * @param bits   Number of bits stored in bitmap.
- */
-void bitmap_initialize(bitmap_t *bitmap, uint8_t *map, size_t bits)
-{
-    bitmap->map = map;
-    bitmap->bits = bits;
-}
-
-/** Set range of bits.
- *
- * @param bitmap Bitmap structure.
- * @param start  Starting bit.
- * @param bits   Number of bits to set.
- */
-void bitmap_set_range(bitmap_t *bitmap, size_t start, size_t bits)
-{
-    size_t i = 0;
-    size_t aligned_start;
-    size_t lub;  /* leading unaligned bits */
-    size_t amb;  /* aligned middle bits */
-    size_t tab;  /* trailing aligned bits */
-
-    ASSERT(start + bits <= bitmap->bits);
-
-    aligned_start = ALIGN_UP(start, 8);
-    lub = min(aligned_start - start, bits);
-    amb = bits > lub ? bits - lub : 0;
-    tab = amb % 8;
-
-    if (!bits)
-        return;
-
-    if (start + bits < aligned_start) {
-        /* Set bits in the middle of byte. */
-        bitmap->map[start / 8] |= ((1 << lub) - 1) << (start & 7);
-        return;
-    }
-
-    if (lub) {
-        /* Make sure to set any leading unaligned bits. */
-        bitmap->map[start / 8] |= ~((1 << (8 - lub)) - 1);
-    }
-    for (i = 0; i < amb / 8; i++) {
-        /* The middle bits can be set byte by byte. */
-        bitmap->map[aligned_start / 8 + i] = ALL_ONES;
-    }
-    if (tab) {
-        /* Make sure to set any trailing aligned bits. */
-        bitmap->map[aligned_start / 8 + i] |= (1 << tab) - 1;
-    }
-}
+ * @param bitmap     Bitmap structure.
+ * @param elements   Number of bits stored in bitmap.
+ * @param block_size Block size of the 2nd level bitmap.
+ *                   If set to zero, no 2nd level is used.
+ * @param data       Address of the memory used to hold the map.
+ *                   The optional 2nd level bitmap follows the 1st
+ *                   level bitmap.
+ *
+ */
+void bitmap_initialize(bitmap_t *bitmap, size_t elements, size_t block_size,
+    void *data)
+{
+    bitmap->elements = elements;
+    bitmap->bits = (uint8_t *) data;
+
+    if (block_size > 0) {
+        bitmap->block_size = block_size;
+        bitmap->blocks = bitmap->bits +
+            bitmap_size(elements, 0);
+    } else {
+        bitmap->block_size = 0;
+        bitmap->blocks = NULL;
+    }
+}
+
+static void bitmap_set_range_internal(uint8_t *bits, size_t start, size_t count)
+{
+    if (count == 0)
+        return;
+
+    size_t aligned_start = ALIGN_UP(start, BITMAP_ELEMENT);
+
+    /* Leading unaligned bits */
+    size_t lub = min(aligned_start - start, count);
+
+    /* Aligned middle bits */
+    size_t amb = (count > lub) ? (count - lub) : 0;
+
+    /* Trailing aligned bits */
+    size_t tab = amb % BITMAP_ELEMENT;
+
+    if (start + count < aligned_start) {
+        /* Set bits in the middle of byte. */
+        bits[start / BITMAP_ELEMENT] |=
+            ((1 << lub) - 1) << (start & BITMAP_REMAINER);
+        return;
+    }
+
+    if (lub) {
+        /* Make sure to set any leading unaligned bits. */
+        bits[start / BITMAP_ELEMENT] |=
+            ~((1 << (BITMAP_ELEMENT - lub)) - 1);
+    }
+
+    size_t i;
+
+    for (i = 0; i < amb / BITMAP_ELEMENT; i++) {
+        /* The middle bits can be set byte by byte. */
+        bits[aligned_start / BITMAP_ELEMENT + i] = ALL_ONES;
+    }
+
+    if (tab) {
+        /* Make sure to set any trailing aligned bits. */
+        bits[aligned_start / BITMAP_ELEMENT + i] |= (1 << tab) - 1;
+    }
+}
+
+/** Set range of bits.
+ *
+ * @param bitmap Bitmap structure.
+ * @param start  Starting bit.
+ * @param count  Number of bits to set.
+ *
+ */
+void bitmap_set_range(bitmap_t *bitmap, size_t start, size_t count)
+{
+    ASSERT(start + count <= bitmap->elements);
+
+    bitmap_set_range_internal(bitmap->bits, start, count);
+
+    if (bitmap->block_size > 0) {
+        size_t aligned_start = ALIGN_UP(start, bitmap->block_size);
+
+        /* Leading unaligned bits */
+        size_t lub = min(aligned_start - start, count);
+
+        /* Aligned middle bits */
+        size_t amb = (count > lub) ? (count - lub) : 0;
+
+        size_t aligned_size = amb / bitmap->block_size;
+
+        bitmap_set_range_internal(bitmap->blocks, aligned_start,
+            aligned_size);
+    }
+}
+
+static void bitmap_clear_range_internal(uint8_t *bits, size_t start,
+    size_t count)
+{
+    if (count == 0)
+        return;
+
+    size_t aligned_start = ALIGN_UP(start, BITMAP_ELEMENT);
+
+    /* Leading unaligned bits */
+    size_t lub = min(aligned_start - start, count);
+
+    /* Aligned middle bits */
+    size_t amb = (count > lub) ? (count - lub) : 0;
+
+    /* Trailing aligned bits */
+    size_t tab = amb % BITMAP_ELEMENT;
+
+    if (start + count < aligned_start) {
+        /* Set bits in the middle of byte */
+        bits[start / BITMAP_ELEMENT] &=
+            ~(((1 << lub) - 1) << (start & BITMAP_REMAINER));
+        return;
+    }
+
+    if (lub) {
+        /* Make sure to clear any leading unaligned bits. */
+        bits[start / BITMAP_ELEMENT] &=
+            (1 << (BITMAP_ELEMENT - lub)) - 1;
+    }
+
+    size_t i;
+
+    for (i = 0; i < amb / BITMAP_ELEMENT; i++) {
+        /* The middle bits can be cleared byte by byte. */
+        bits[aligned_start / BITMAP_ELEMENT + i] = ALL_ZEROES;
+    }
+
+    if (tab) {
+        /* Make sure to clear any trailing aligned bits. */
+        bits[aligned_start / BITMAP_ELEMENT + i] &= ~((1 << tab) - 1);
+    }
+}

 /** Clear range of bits.
  *
- * @param bitmap Bitmap structure.
- * @param start  Starting bit.
- * @param bits   Number of bits to clear.
- */
-void bitmap_clear_range(bitmap_t *bitmap, size_t start, size_t bits)
-{
-    size_t i = 0;
-    size_t aligned_start;
-    size_t lub;  /* leading unaligned bits */
-    size_t amb;  /* aligned middle bits */
-    size_t tab;  /* trailing aligned bits */
-
-    ASSERT(start + bits <= bitmap->bits);
-
-    aligned_start = ALIGN_UP(start, 8);
-    lub = min(aligned_start - start, bits);
-    amb = bits > lub ? bits - lub : 0;
-    tab = amb % 8;
-
-    if (!bits)
-        return;
-
-    if (start + bits < aligned_start) {
-        /* Set bits in the middle of byte */
-        bitmap->map[start / 8] &= ~(((1 << lub) - 1) << (start & 7));
-        return;
-    }
-
-    if (lub) {
-        /* Make sure to clear any leading unaligned bits. */
-        bitmap->map[start / 8] &= (1 << (8 - lub)) - 1;
-    }
-    for (i = 0; i < amb / 8; i++) {
-        /* The middle bits can be cleared byte by byte. */
-        bitmap->map[aligned_start / 8 + i] = ALL_ZEROES;
-    }
-    if (tab) {
-        /* Make sure to clear any trailing aligned bits. */
-        bitmap->map[aligned_start / 8 + i] &= ~((1 << tab) - 1);
-    }
-
+ * @param bitmap Bitmap structure.
+ * @param start  Starting bit.
+ * @param count  Number of bits to clear.
+ *
+ */
+void bitmap_clear_range(bitmap_t *bitmap, size_t start, size_t count)
+{
+    ASSERT(start + count <= bitmap->elements);
+
+    bitmap_clear_range_internal(bitmap->bits, start, count);
+
+    if (bitmap->block_size > 0) {
+        size_t aligned_start = start / bitmap->block_size;
+
+        size_t aligned_end = (start + count) / bitmap->block_size;
+
+        if (((start + count) % bitmap->block_size) != 0)
+            aligned_end++;
+
+        size_t aligned_size = aligned_end - aligned_start;
+
+        bitmap_clear_range_internal(bitmap->blocks, aligned_start,
+            aligned_size);
+    }
 }

 /** Copy portion of one bitmap into another bitmap.
  *
- * @param dst  Destination bitmap.
- * @param src  Source bitmap.
- * @param bits Number of bits to copy.
- */
-void bitmap_copy(bitmap_t *dst, bitmap_t *src, size_t bits)
-{
+ * @param dst   Destination bitmap.
+ * @param src   Source bitmap.
+ * @param count Number of bits to copy.
+ *
+ */
+void bitmap_copy(bitmap_t *dst, bitmap_t *src, size_t count)
+{
+    ASSERT(count <= dst->elements);
+    ASSERT(count <= src->elements);
+
     size_t i;

-    ASSERT(bits <= dst->bits);
-    ASSERT(bits <= src->bits);
-
-    for (i = 0; i < bits / 8; i++)
-        dst->map[i] = src->map[i];
-
-    if (bits % 8) {
-        bitmap_clear_range(dst, i * 8, bits % 8);
-        dst->map[i] |= src->map[i] & ((1 << (bits % 8)) - 1);
-    }
+    for (i = 0; i < count / BITMAP_ELEMENT; i++)
+        dst->bits[i] = src->bits[i];
+
+    if (count % BITMAP_ELEMENT) {
+        bitmap_clear_range(dst, i * BITMAP_ELEMENT,
+            count % BITMAP_ELEMENT);
+        dst->bits[i] |= src->bits[i] &
+            ((1 << (count % BITMAP_ELEMENT)) - 1);
+    }
+}
+
+static int constraint_satisfy(size_t index, size_t base, size_t constraint)
+{
+    return (((base + index) & constraint) == 0);
+}
+
+/** Find a continuous zero bit range
+ *
+ * Find a continuous zero bit range in the bitmap. The address
+ * computed as the sum of the index of the first zero bit and
+ * the base argument needs to be compliant with the constraint
+ * (those bits that are set in the constraint cannot be set in
+ * the address).
+ *
+ * If the index argument is non-NULL, the continuous zero range
+ * is set and the index of the first bit is stored to index.
+ * Otherwise the bitmap stays untouched.
+ *
+ * @param bitmap     Bitmap structure.
+ * @param count      Number of continuous zero bits to find.
+ * @param base       Address of the first bit in the bitmap.
+ * @param constraint Constraint for the address of the first zero bit.
+ * @param index      Place to store the index of the first zero
+ *                   bit. Can be NULL (in which case the bitmap
+ *                   is not modified).
+ *
+ * @return Non-zero if a continuous range of zero bits satisfying
+ *         the constraint has been found.
+ * @return Zero otherwise.
+ *
+ */
+int bitmap_allocate_range(bitmap_t *bitmap, size_t count, size_t base,
+    size_t constraint, size_t *index)
+{
+    if (count == 0)
+        return false;
+
+    size_t bytes = bitmap_bytes(bitmap->elements);
+
+    for (size_t byte = 0; byte < bytes; byte++) {
+        /* Skip if the current byte has all bits set */
+        if (bitmap->bits[byte] == ALL_ONES)
+            continue;
+
+        size_t byte_bit = byte * BITMAP_ELEMENT;
+
+        for (size_t bit = 0; bit < BITMAP_ELEMENT; bit++) {
+            size_t i = byte_bit + bit;
+
+            if (i >= bitmap->elements)
+                break;
+
+            if (!constraint_satisfy(i, base, constraint))
+                continue;
+
+            if (!bitmap_get_fast(bitmap, i)) {
+                bool continuous = true;
+
+                for (size_t j = 1; j < count; j++) {
+                    if ((i + j >= bitmap->elements) ||
+                        (bitmap_get_fast(bitmap, i + j))) {
+                        continuous = false;
+                        break;
+                    }
+                }
+
+                if (continuous) {
+                    if (index != NULL) {
+                        bitmap_set_range(bitmap, i, count);
+                        *index = i;
+                    }
+
+                    return true;
+                }
+            }
+        }
+    }
+
+    return false;
 }
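A short sketch of the constraint semantics of bitmap_allocate_range(): every bit set in 'constraint' must be clear in 'base + index' of the first allocated bit, so a constraint of the form 2^n - 1 forces 2^n alignment. With base == 0:

    /* Assumes 'bitmap' was initialized as in the example above. */
    size_t index;
    if (bitmap_allocate_range(&bitmap, 4 /* count */, 0 /* base */,
        0x7 /* constraint */, &index)) {
        /* index % 8 == 0, and bits index .. index + 3 are now set */
    }

Passing NULL for the index argument turns this into a pure availability check, which is exactly how the new zone_can_alloc() in frame.c uses it.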
kernel/generic/src/adt/btree.c
     printf("Printing list of leaves:\n");
-    list_foreach(t->leaf_list, cur) {
-        btree_node_t *node;
-
-        node = list_get_instance(cur, btree_node_t, leaf_link);
-
+    list_foreach(t->leaf_list, leaf_link, btree_node_t, node) {
         ASSERT(node);
kernel/generic/src/adt/hash_table.c
     ASSERT(chain < h->entries);

-    list_foreach(h->entry[chain], cur) {
+    link_t *cur = list_first(&h->entry[chain]);
+    while (cur != NULL) {
         if (h->op->compare(key, h->max_keys, cur)) {
             /*
…
             return cur;
         }
+        cur = list_next(cur, &h->entry[chain]);
     }
kernel/generic/src/adt/list.c
     unsigned int count = 0;

-    list_foreach(*list, link) {
+    link_t *link = list_first(list);
+    while (link != NULL) {
         count++;
+        link = list_next(link, list);
     }
kernel/generic/src/console/cmd.c
     size_t len = 0;
-    list_foreach(cmd_list, cur) {
-        cmd_info_t *hlp;
-        hlp = list_get_instance(cur, cmd_info_t, link);
-
+    list_foreach(cmd_list, link, cmd_info_t, hlp) {
         spinlock_lock(&hlp->lock);
         if (str_length(hlp->name) > len)
…
     }

-    list_foreach(cmd_list, cur) {
-        cmd_info_t *hlp;
-        hlp = list_get_instance(cur, cmd_info_t, link);
-
+    list_foreach(cmd_list, link, cmd_info_t, hlp) {
         spinlock_lock(&hlp->lock);
         printf("%-*s %s\n", _len, hlp->name, hlp->description);
…
     spinlock_lock(&cmd_lock);

-    list_foreach(cmd_list, cur) {
-        cmd_info_t *hlp;
-
-        hlp = list_get_instance(cur, cmd_info_t, link);
+    list_foreach(cmd_list, link, cmd_info_t, hlp) {
         spinlock_lock(&hlp->lock);
kernel/generic/src/console/console.c
 static void stdout_write(outdev_t *dev, wchar_t ch)
 {
-    list_foreach(dev->list, cur) {
-        outdev_t *sink = list_get_instance(cur, outdev_t, link);
+    list_foreach(dev->list, link, outdev_t, sink) {
         if ((sink) && (sink->op->write))
             sink->op->write(sink, ch);
…
 static void stdout_redraw(outdev_t *dev)
 {
-    list_foreach(dev->list, cur) {
-        outdev_t *sink = list_get_instance(cur, outdev_t, link);
+    list_foreach(dev->list, link, outdev_t, sink) {
         if ((sink) && (sink->op->redraw))
             sink->op->redraw(sink);
kernel/generic/src/console/kconsole.c
 #include <func.h>
 #include <str.h>
-#include <macros.h>
 #include <sysinfo/sysinfo.h>
 #include <ddi/device.h>
…
      * Make sure the command is not already listed.
      */
-    list_foreach(cmd_list, cur) {
-        cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link);
-
+    list_foreach(cmd_list, link, cmd_info_t, hlp) {
         if (hlp == cmd) {
             /* The command is already there. */
…
     cmd_info_t *cmd = NULL;

-    list_foreach(cmd_list, cur) {
-        cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link);
+    list_foreach(cmd_list, link, cmd_info_t, hlp) {
         spinlock_lock(&hlp->lock);
kernel/generic/src/cpu/cpu.c
     size_t i;
     for (i = 0; i < config.cpu_count; i++) {
-        cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES,
-            FRAME_LOWMEM | FRAME_KA | FRAME_ATOMIC);
+        uintptr_t stack_phys = frame_alloc(STACK_FRAMES,
+            FRAME_LOWMEM | FRAME_ATOMIC, STACK_SIZE - 1);
+        if (!stack_phys)
+            panic("Cannot allocate CPU stack.");
+
+        cpus[i].stack = (uint8_t *) PA2KA(stack_phys);
         cpus[i].id = i;

         irq_spinlock_initialize(&cpus[i].lock, "cpus[].lock");

-        unsigned int j;
-        for (j = 0; j < RQ_COUNT; j++) {
+        for (unsigned int j = 0; j < RQ_COUNT; j++) {
             irq_spinlock_initialize(&cpus[i].rq[j].lock, "cpus[].rq[].lock");
             list_initialize(&cpus[i].rq[j].rq);
kernel/generic/src/ddi/ddi.c
 NO_TRACE static int dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
-    unsigned int flags, void **phys)
+    unsigned int flags, uintptr_t *phys)
 {
     ASSERT(TASK);
…
 }

-NO_TRACE static int dmamem_map_anonymous(size_t size, unsigned int map_flags,
-    unsigned int flags, void **phys, uintptr_t *virt, uintptr_t bound)
+NO_TRACE static int dmamem_map_anonymous(size_t size, uintptr_t constraint,
+    unsigned int map_flags, unsigned int flags, uintptr_t *phys,
+    uintptr_t *virt, uintptr_t bound)
 {
     ASSERT(TASK);

-    size_t pages = SIZE2FRAMES(size);
-    uint8_t order;
-
-    /* We need the 2^order >= pages */
-    if (pages == 1)
-        order = 0;
-    else
-        order = fnzb(pages - 1) + 1;
-
-    *phys = frame_alloc_noreserve(order, 0);
-    if (*phys == NULL)
+    size_t frames = SIZE2FRAMES(size);
+    *phys = frame_alloc_noreserve(frames, 0, constraint);
+    if (*phys == 0)
         return ENOMEM;

     mem_backend_data_t backend_data;
-    backend_data.base = (uintptr_t) *phys;
-    backend_data.frames = pages;
+    backend_data.base = *phys;
+    backend_data.frames = frames;

     if (!as_area_create(TASK->as, map_flags, size,
         AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
-        frame_free_noreserve((uintptr_t) *phys);
+        frame_free_noreserve(*phys, frames);
         return ENOMEM;
     }
…
     */

-    void *phys;
+    uintptr_t phys;
     int rc = dmamem_map((uintptr_t) virt_ptr, size, map_flags,
         flags, &phys);
…
     */

-    void *phys;
+    uintptr_t constraint;
+    int rc = copy_from_uspace(&constraint, phys_ptr,
+        sizeof(constraint));
+    if (rc != EOK)
+        return rc;
+
+    uintptr_t phys;
     uintptr_t virt = (uintptr_t) -1;
-    int rc = dmamem_map_anonymous(size, map_flags, flags,
+    rc = dmamem_map_anonymous(size, constraint, map_flags, flags,
         &phys, &virt, bound);
     if (rc != EOK)
kernel/generic/src/debug/stacktrace.c
 #include <print.h>

-#define STACK_FRAMES_MAX 20
+#define STACK_FRAMES_MAX  20

 void stack_trace_ctx(stack_trace_ops_t *ops, stack_trace_context_t *ctx)
…
     uintptr_t pc;

-    while (cnt++ < STACK_FRAMES_MAX &&
-        ops->stack_trace_context_validate(ctx)) {
+    while ((cnt++ < STACK_FRAMES_MAX) &&
+        (ops->stack_trace_context_validate(ctx))) {
         if (ops->symbol_resolve &&
             ops->symbol_resolve(ctx->pc, &symbol, &offset)) {
kernel/generic/src/ipc/ipc.c
 static void ipc_print_call_list(list_t *list)
 {
-    list_foreach(*list, cur) {
-        call_t *call = list_get_instance(cur, call_t, ab_link);
-
+    list_foreach(*list, ab_link, call_t, call) {
 #ifdef __32_BITS__
         printf("%10p ", call);
kernel/generic/src/ipc/ipcrsc.c
     irq_spinlock_lock(&TASK->answerbox.lock, true);

-    list_foreach(TASK->answerbox.dispatched_calls, lst) {
-        call_t *call = list_get_instance(lst, call_t, ab_link);
+    list_foreach(TASK->answerbox.dispatched_calls, ab_link, call_t, call) {
         if ((sysarg_t) call == callid) {
             result = call;
kernel/generic/src/lib/ra.c
     irq_spinlock_lock(&arena->lock, true);
-    list_foreach(arena->spans, cur) {
-        ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
-
+    list_foreach(arena->spans, span_link, ra_span_t, span) {
         base = ra_span_alloc(span, size, alignment);
         if (base)
…
 {
     irq_spinlock_lock(&arena->lock, true);
-    list_foreach(arena->spans, cur) {
-        ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
-
+    list_foreach(arena->spans, span_link, ra_span_t, span) {
         if (iswithin(span->base, span->size, base, size)) {
             ra_span_free(span, base, size);
kernel/generic/src/mm/as.c
     /* Eventually check the addresses behind each area */
-    list_foreach(as->as_area_btree.leaf_list, cur) {
-        btree_node_t *node =
-            list_get_instance(cur, btree_node_t, leaf_link);
+    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {

         for (btree_key_t i = 0; i < node->keys; i++) {
…
      * reference from all frames found there.
      */
-    list_foreach(sh_info->pagemap.leaf_list, cur) {
-        btree_node_t *node
-            = list_get_instance(cur, btree_node_t, leaf_link);
+    list_foreach(sh_info->pagemap.leaf_list, leaf_link,
+        btree_node_t, node) {
         btree_key_t i;

         for (i = 0; i < node->keys; i++)
-            frame_free((uintptr_t) node->value[i]);
+            frame_free((uintptr_t) node->value[i], 1);
     }
…
      * Visit only the pages mapped by used_space B+tree.
      */
-    list_foreach(area->used_space.leaf_list, cur) {
-        btree_node_t *node;
+    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+        node) {
         btree_key_t i;

-        node = list_get_instance(cur, btree_node_t, leaf_link);
         for (i = 0; i < node->keys; i++) {
             uintptr_t ptr = node->key[i];
…
     size_t used_pages = 0;

-    list_foreach(area->used_space.leaf_list, cur) {
-        btree_node_t *node
-            = list_get_instance(cur, btree_node_t, leaf_link);
+    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+        node) {
         btree_key_t i;
…
     size_t frame_idx = 0;

-    list_foreach(area->used_space.leaf_list, cur) {
-        btree_node_t *node = list_get_instance(cur, btree_node_t,
-            leaf_link);
+    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+        node) {
         btree_key_t i;
…
     frame_idx = 0;

-    list_foreach(area->used_space.leaf_list, cur) {
-        btree_node_t *node
-            = list_get_instance(cur, btree_node_t, leaf_link);
+    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+        node) {
         btree_key_t i;
…
     size_t area_cnt = 0;

-    list_foreach(as->as_area_btree.leaf_list, cur) {
-        btree_node_t *node =
-            list_get_instance(cur, btree_node_t, leaf_link);
+    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
+        node) {
         area_cnt += node->keys;
     }
…
     size_t area_idx = 0;

-    list_foreach(as->as_area_btree.leaf_list, cur) {
-        btree_node_t *node =
-            list_get_instance(cur, btree_node_t, leaf_link);
+    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
+        node) {
         btree_key_t i;
…
     /* Print out info about address space areas */
-    list_foreach(as->as_area_btree.leaf_list, cur) {
-        btree_node_t *node
-            = list_get_instance(cur, btree_node_t, leaf_link);
+    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
+        node) {
         btree_key_t i;
kernel/generic/src/mm/backend_anon.c
      */
     mutex_lock(&area->sh_info->lock);
-    list_foreach(area->used_space.leaf_list, cur) {
-        btree_node_t *node;
+    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
+        node) {
         unsigned int i;

-        node = list_get_instance(cur, btree_node_t, leaf_link);
         for (i = 0; i < node->keys; i++) {
             uintptr_t base = node->key[i];
…
          * the normal unreserving frame_free().
          */
-        frame_free(frame);
+        frame_free(frame, 1);
     } else {
         /*
…
          * manipulate the reserve or it would be given back twice.
          */
-        frame_free_noreserve(frame);
+        frame_free_noreserve(frame, 1);
     }
 }
kernel/generic/src/mm/backend_elf.c
              * data.
              */
-            frame_free_noreserve(frame);
+            frame_free_noreserve(frame, 1);
         }
     } else {
…
          * anonymous). In any case, a frame needs to be freed.
          */
-        frame_free_noreserve(frame);
+        frame_free_noreserve(frame, 1);
     }
 }
kernel/generic/src/mm/frame.c
  *
  * This file contains the physical frame allocator and memory zone management.
- * The frame allocator is built on top of the buddy allocator.
- *
- * @see buddy.c
+ * The frame allocator is built on top of the two-level bitmap structure.
+ *
  */
…
 #include <str.h>

+#define BITMAP_BLOCK_SIZE  128
+
 zones_t zones;
…
 }

-NO_TRACE static inline size_t make_frame_index(zone_t *zone, frame_t *frame)
-{
-    return (frame - zone->frames);
-}
-
 /** Initialize frame structure.
  *
…
 NO_TRACE static void frame_initialize(frame_t *frame)
 {
-    frame->refcount = 1;
-    frame->buddy_order = 0;
+    frame->refcount = 0;
+    frame->parent = NULL;
 }
…
     /* Move other zones up */
-    size_t j;
-    for (j = zones.count; j > i; j--) {
+    for (size_t j = zones.count; j > i; j--)
         zones.info[j] = zones.info[j - 1];
-        if (zones.info[j].buddy_system != NULL)
-            zones.info[j].buddy_system->data =
-                (void *) &zones.info[j];
-    }

     zones.count++;
…
-/** @return True if zone can allocate specified order */
-NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order)
-{
+/** @return True if zone can allocate specified number of frames */
+NO_TRACE static bool zone_can_alloc(zone_t *zone, size_t count,
+    pfn_t constraint)
+{
+    /*
+     * The function bitmap_allocate_range() does not modify
+     * the bitmap if the last argument is NULL.
+     */
     return ((zone->flags & ZONE_AVAILABLE) &&
-        buddy_system_can_alloc(zone->buddy_system, order));
-}
-
-/** Find a zone that can allocate order frames.
+        bitmap_allocate_range(&zone->bitmap, count, zone->base,
+        constraint, NULL));
+}
+
+/** Find a zone that can allocate specified number of frames
  *
  * Assume interrupts are disabled and zones lock is
  * locked.
  *
- * @param order Size (2^order) of free space we are trying to find.
- * @param flags Required flags of the target zone.
- * @param hind  Preferred zone.
- *
- */
-NO_TRACE static size_t find_free_zone(uint8_t order, zone_flags_t flags,
-    size_t hint)
+ * @param count      Number of free frames we are trying to find.
+ * @param flags      Required flags of the target zone.
+ * @param constraint Indication of bits that cannot be set in the
+ *                   physical frame number of the first allocated frame.
+ * @param hind       Preferred zone.
+ *
+ */
+NO_TRACE static size_t find_free_zone(size_t count, zone_flags_t flags,
+    pfn_t constraint, size_t hint)
 {
     if (hint >= zones.count)
…
         if (ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) {
             /*
-             * Check if the zone has 2^order frames area available.
+             * Check if the zone can satisfy the allocation request.
              */
-            if (zone_can_alloc(&zones.info[i], order))
+            if (zone_can_alloc(&zones.info[i], count, constraint))
                 return i;
         }
…
-/**************************/
-/* Buddy system functions */
-/**************************/
-
-/** Buddy system find_block implementation.
- *
- * Find block that is parent of current list.
- * That means go to lower addresses, until such block is found
- *
- * @param order Order of parent must be different then this
- *              parameter!!
- *
- */
-NO_TRACE static link_t *zone_buddy_find_block(buddy_system_t *buddy,
-    link_t *child, uint8_t order)
-{
-    frame_t *frame = list_get_instance(child, frame_t, buddy_link);
-    zone_t *zone = (zone_t *) buddy->data;
-
-    size_t index = frame_index(zone, frame);
-    do {
-        if (zone->frames[index].buddy_order != order)
-            return &zone->frames[index].buddy_link;
-    } while (index-- > 0);
-
-    return NULL;
-}
-
-/** Buddy system find_buddy implementation.
- *
- * @param buddy Buddy system.
- * @param block Block for which buddy should be found.
- *
- * @return Buddy for given block if found.
- *
- */
-NO_TRACE static link_t *zone_buddy_find_buddy(buddy_system_t *buddy,
-    link_t *block)
-{
-    frame_t *frame = list_get_instance(block, frame_t, buddy_link);
-    zone_t *zone = (zone_t *) buddy->data;
-    ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame),
-        frame->buddy_order));
-
-    bool is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame);
-
-    size_t index;
-    if (is_left) {
-        index = (frame_index(zone, frame)) +
-            (1 << frame->buddy_order);
-    } else {  /* is_right */
-        index = (frame_index(zone, frame)) -
-            (1 << frame->buddy_order);
-    }
-
-    if (frame_index_valid(zone, index)) {
-        if ((zone->frames[index].buddy_order == frame->buddy_order) &&
-            (zone->frames[index].refcount == 0)) {
-            return &zone->frames[index].buddy_link;
-        }
-    }
-
-    return NULL;
-}
-
-/** Buddy system bisect implementation.
- *
- * @param buddy Buddy system.
- * @param block Block to bisect.
- *
- * @return Right block.
- *
- */
-NO_TRACE static link_t *zone_buddy_bisect(buddy_system_t *buddy, link_t *block)
-{
-    frame_t *frame_l = list_get_instance(block, frame_t, buddy_link);
-    frame_t *frame_r = (frame_l + (1 << (frame_l->buddy_order - 1)));
-
-    return &frame_r->buddy_link;
-}
-
-/** Buddy system coalesce implementation.
- *
- * @param buddy   Buddy system.
- * @param block_1 First block.
- * @param block_2 First block's buddy.
- *
- * @return Coalesced block (actually block that represents lower
- *         address).
- *
- */
-NO_TRACE static link_t *zone_buddy_coalesce(buddy_system_t *buddy,
-    link_t *block_1, link_t *block_2)
-{
-    frame_t *frame1 = list_get_instance(block_1, frame_t, buddy_link);
-    frame_t *frame2 = list_get_instance(block_2, frame_t, buddy_link);
-
-    return ((frame1 < frame2) ? block_1 : block_2);
-}
-
-/** Buddy system set_order implementation.
- *
- * @param buddy Buddy system.
- * @param block Buddy system block.
- * @param order Order to set.
- *
- */
-NO_TRACE static void zone_buddy_set_order(buddy_system_t *buddy, link_t *block,
-    uint8_t order)
-{
-    list_get_instance(block, frame_t, buddy_link)->buddy_order = order;
-}
-
-/** Buddy system get_order implementation.
- *
- * @param buddy Buddy system.
- * @param block Buddy system block.
- *
- * @return Order of block.
- *
- */
-NO_TRACE static uint8_t zone_buddy_get_order(buddy_system_t *buddy,
-    link_t *block)
-{
-    return list_get_instance(block, frame_t, buddy_link)->buddy_order;
-}
-
-/** Buddy system mark_busy implementation.
- *
- * @param buddy Buddy system.
- * @param block Buddy system block.
- *
- */
-NO_TRACE static void zone_buddy_mark_busy(buddy_system_t *buddy, link_t *block)
-{
-    list_get_instance(block, frame_t, buddy_link)->refcount = 1;
-}
-
-/** Buddy system mark_available implementation.
- *
- * @param buddy Buddy system.
- * @param block Buddy system block.
- *
- */
-NO_TRACE static void zone_buddy_mark_available(buddy_system_t *buddy,
-    link_t *block)
-{
-    list_get_instance(block, frame_t, buddy_link)->refcount = 0;
-}
-
-static buddy_system_operations_t zone_buddy_system_operations = {
-    .find_buddy = zone_buddy_find_buddy,
-    .bisect = zone_buddy_bisect,
-    .coalesce = zone_buddy_coalesce,
-    .set_order = zone_buddy_set_order,
-    .get_order = zone_buddy_get_order,
-    .mark_busy = zone_buddy_mark_busy,
-    .mark_available = zone_buddy_mark_available,
-    .find_block = zone_buddy_find_block
-};
-
 /******************/
 /* Zone functions */
 /******************/

+/** Return frame from zone. */
+NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t index)
+{
+    ASSERT(index < zone->count);
+
+    return &zone->frames[index];
+}
+
 /** Allocate frame in particular zone.
  *
…
  * Panics if allocation is impossible.
  *
- * @param zone  Zone to allocate from.
- * @param order Allocate exactly 2^order frames.
+ * @param zone       Zone to allocate from.
+ * @param count      Number of frames to allocate
+ * @param constraint Indication of bits that cannot be set in the
+ *                   physical frame number of the first allocated frame.
  *
  * @return Frame index in zone.
  *
  */
-NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order)
+NO_TRACE static size_t zone_frame_alloc(zone_t *zone, size_t count,
+    pfn_t constraint)
 {
     ASSERT(zone->flags & ZONE_AVAILABLE);

-    /* Allocate frames from zone buddy system */
-    link_t *link = buddy_system_alloc(zone->buddy_system, order);
-
-    ASSERT(link);
+    /* Allocate frames from zone */
+    size_t index;
+    int avail = bitmap_allocate_range(&zone->bitmap, count, zone->base,
+        constraint, &index);
+
+    ASSERT(avail);
+
+    /* Update frame reference count */
+    for (size_t i = 0; i < count; i++) {
+        frame_t *frame = zone_get_frame(zone, index + i);
+
+        ASSERT(frame->refcount == 0);
+        frame->refcount = 1;
+    }

     /* Update zone information. */
-    zone->free_count -= (1 << order);
-    zone->busy_count += (1 << order);
-
-    /* Frame will be actually a first frame of the block. */
-    frame_t *frame = list_get_instance(link, frame_t, buddy_link);
-
-    /* Get frame address */
-    return make_frame_index(zone, frame);
+    zone->free_count -= count;
+    zone->busy_count += count;
+
+    return index;
 }
…
  * Assume zone is locked and is available for deallocation.
  *
- * @param zone      Pointer to zone from which the frame is to be freed.
- * @param frame_idx Frame index relative to zone.
+ * @param zone  Pointer to zone from which the frame is to be freed.
+ * @param index Frame index relative to zone.
  *
  * @return Number of freed frames.
  *
  */
-NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx)
+NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t index)
 {
     ASSERT(zone->flags & ZONE_AVAILABLE);

-    frame_t *frame = &zone->frames[frame_idx];
-    size_t size = 0;
-
-    ASSERT(frame->refcount);
+    frame_t *frame = zone_get_frame(zone, index);
+
+    ASSERT(frame->refcount > 0);

     if (!--frame->refcount) {
-        size = 1 << frame->buddy_order;
-        buddy_system_free(zone->buddy_system, &frame->buddy_link);
+        bitmap_set(&zone->bitmap, index, 0);
+
         /* Update zone information. */
-        zone->free_count += size;
-        zone->busy_count -= size;
-    }
-
-    return size;
-}
-
-/** Return frame from zone. */
-NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t frame_idx)
-{
-    ASSERT(frame_idx < zone->count);
-    return &zone->frames[frame_idx];
+        zone->free_count++;
+        zone->busy_count--;
+
+        return 1;
+    }
+
+    return 0;
 }

 /** Mark frame in zone unavailable to allocation. */
-NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx)
-{
-    ASSERT(zone->flags & ZONE_AVAILABLE);
-
-    frame_t *frame = zone_get_frame(zone, frame_idx);
-    if (frame->refcount)
+NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t index)
+{
+    if (!(zone->flags & ZONE_AVAILABLE))
         return;

-    link_t *link __attribute__ ((unused));
-
-    link = buddy_system_alloc_block(zone->buddy_system,
-        &frame->buddy_link);
-
-    ASSERT(link);
+    frame_t *frame = zone_get_frame(zone, index);
+    if (frame->refcount > 0)
+        return;
+
+    frame->refcount = 1;
+    bitmap_set_range(&zone->bitmap, index, 1);
+
     zone->free_count--;
     reserve_force_alloc(1);
…
 /** Merge two zones.
  *
- * Expect buddy to point to space at least zone_conf_size large.
  * Assume z1 & z2 are locked and compatible and zones lock is
  * locked.
  *
- * @param z1     First zone to merge.
- * @param z2     Second zone to merge.
- * @param old_z1 Original date of the first zone.
- * @param buddy  Merged zone buddy.
+ * @param z1       First zone to merge.
+ * @param z2       Second zone to merge.
+ * @param old_z1   Original data of the first zone.
+ * @param confdata Merged zone configuration data.
  *
  */
 NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1,
-    buddy_system_t *buddy)
+    void *confdata)
 {
…
     zones.info[z1].free_count += zones.info[z2].free_count;
     zones.info[z1].busy_count += zones.info[z2].busy_count;
-    zones.info[z1].buddy_system = buddy;
-
-    uint8_t order = fnzb(zones.info[z1].count);
-    buddy_system_create(zones.info[z1].buddy_system, order,
-        &zone_buddy_system_operations, (void *) &zones.info[z1]);
-
-    zones.info[z1].frames =
-        (frame_t *) ((uint8_t *) zones.info[z1].buddy_system
-        + buddy_conf_size(order));
-
-    /* This marks all frames busy */
-    size_t i;
-    for (i = 0; i < zones.info[z1].count; i++)
-        frame_initialize(&zones.info[z1].frames[i]);
-
-    /* Copy frames from both zones to preserve full frame orders,
-     * parents etc. Set all free frames with refcount = 0 to 1, because
-     * we add all free frames to buddy allocator later again, clearing
-     * order to 0. Don't set busy frames with refcount = 0, as they
-     * will not be reallocated during merge and it would make later
-     * problems with allocation/free.
-     */
-    for (i = 0; i < old_z1->count; i++)
-        zones.info[z1].frames[i] = old_z1->frames[i];
-
-    for (i = 0; i < zones.info[z2].count; i++)
-        zones.info[z1].frames[base_diff + i]
-            = zones.info[z2].frames[i];
-
-    i = 0;
-    while (i < zones.info[z1].count) {
-        if (zones.info[z1].frames[i].refcount) {
-            /* Skip busy frames */
-            i += 1 << zones.info[z1].frames[i].buddy_order;
-        } else {
-            /* Free frames, set refcount = 1
-             * (all free frames have refcount == 0, we need not
-             * to check the order)
-             */
-            zones.info[z1].frames[i].refcount = 1;
-            zones.info[z1].frames[i].buddy_order = 0;
-            i++;
-        }
-    }
-
-    /* Add free blocks from the original zone z1 */
-    while (zone_can_alloc(old_z1, 0)) {
-        /* Allocate from the original zone */
-        pfn_t frame_idx = zone_frame_alloc(old_z1, 0);
-
-        /* Free the frame from the merged zone */
-        frame_t *frame = &zones.info[z1].frames[frame_idx];
-        frame->refcount = 0;
-        buddy_system_free(zones.info[z1].buddy_system, &frame->buddy_link);
-    }
-
-    /* Add free blocks from the original zone z2 */
-    while (zone_can_alloc(&zones.info[z2], 0)) {
-        /* Allocate from the original zone */
-        pfn_t frame_idx = zone_frame_alloc(&zones.info[z2], 0);
-
-        /* Free the frame from the merged zone */
-        frame_t *frame = &zones.info[z1].frames[base_diff + frame_idx];
-        frame->refcount = 0;
-        buddy_system_free(zones.info[z1].buddy_system, &frame->buddy_link);
-    }
+
+    bitmap_initialize(&zones.info[z1].bitmap, zones.info[z1].count,
+        BITMAP_BLOCK_SIZE, confdata +
+        (sizeof(frame_t) * zones.info[z1].count));
+    bitmap_clear_range(&zones.info[z1].bitmap, 0, zones.info[z1].count);
+
+    zones.info[z1].frames = (frame_t *) confdata;
+
+    /*
+     * Copy frames and bits from both zones to preserve parents, etc.
+     */
+
+    for (size_t i = 0; i < old_z1->count; i++) {
+        bitmap_set(&zones.info[z1].bitmap, i,
+            bitmap_get(&old_z1->bitmap, i));
+        zones.info[z1].frames[i] = old_z1->frames[i];
+    }
+
+    for (size_t i = 0; i < zones.info[z2].count; i++) {
+        bitmap_set(&zones.info[z1].bitmap, base_diff + i,
+            bitmap_get(&zones.info[z2].bitmap, i));
+        zones.info[z1].frames[base_diff + i] =
+            zones.info[z2].frames[i];
+    }
 }
…
     size_t cframes = SIZE2FRAMES(zone_conf_size(count));

-    if ((pfn < zones.info[znum].base)
-        || (pfn >= zones.info[znum].base + zones.info[znum].count))
+    if ((pfn < zones.info[znum].base) ||
+        (pfn >= zones.info[znum].base + zones.info[znum].count))
         return;

-    frame_t *frame __attribute__ ((unused));
-
-    frame = &zones.info[znum].frames[pfn - zones.info[znum].base];
-    ASSERT(!frame->buddy_order);
-
-    size_t i;
-    for (i = 0; i < cframes; i++) {
-        zones.info[znum].busy_count++;
+    for (size_t i = 0; i < cframes; i++)
         (void) zone_frame_free(&zones.info[znum],
             pfn - zones.info[znum].base + i);
-    }
-}
-
-/** Reduce allocated block to count of order 0 frames.
- *
- * The allocated block needs 2^order frames. Reduce all frames
- * in the block to order 0 and free the unneeded frames. This means that
- * when freeing the previously allocated block starting with frame_idx,
- * you have to free every frame.
- *
- * @param znum      Zone.
- * @param frame_idx Index the first frame of the block.
- * @param count     Allocated frames in block.
- *
- */
-NO_TRACE static void zone_reduce_region(size_t znum, pfn_t frame_idx,
-    size_t count)
-{
-    ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);
-    ASSERT(frame_idx + count < zones.info[znum].count);
-
-    uint8_t order = zones.info[znum].frames[frame_idx].buddy_order;
-    ASSERT((size_t) (1 << order) >= count);
-
-    /* Reduce all blocks to order 0 */
-    size_t i;
-    for (i = 0; i < (size_t) (1 << order); i++) {
-        frame_t *frame = &zones.info[znum].frames[i + frame_idx];
-        frame->buddy_order = 0;
-        if (!frame->refcount)
-            frame->refcount = 1;
-        ASSERT(frame->refcount == 1);
-    }
-
-    /* Free unneeded frames */
-    for (i = count; i < (size_t) (1 << order); i++)
-        (void) zone_frame_free(&zones.info[znum], i + frame_idx);
 }
…
     bool ret = true;

-    /* We can join only 2 zones with none existing inbetween,
+    /*
+     * We can join only 2 zones with none existing inbetween,
      * the zones have to be available and with the same
      * set of flags
…
         + zones.info[z2].count));

-    uint8_t order;
-    if (cframes == 1)
-        order = 0;
-    else
-        order = fnzb(cframes - 1) + 1;
-
     /* Allocate merged zone data inside one of the zones */
     pfn_t pfn;
-    if (zone_can_alloc(&zones.info[z1], order)) {
-        pfn = zones.info[z1].base + zone_frame_alloc(&zones.info[z1], order);
-    } else if (zone_can_alloc(&zones.info[z2], order)) {
-        pfn = zones.info[z2].base + zone_frame_alloc(&zones.info[z2], order);
+    if (zone_can_alloc(&zones.info[z1], cframes, 0)) {
+        pfn = zones.info[z1].base +
+            zone_frame_alloc(&zones.info[z1], cframes, 0);
+    } else if (zone_can_alloc(&zones.info[z2], cframes, 0)) {
+        pfn = zones.info[z2].base +
+            zone_frame_alloc(&zones.info[z2], cframes, 0);
     } else {
         ret = false;
…
     /* Preserve original data from z1 */
     zone_t old_z1 = zones.info[z1];
-    old_z1.buddy_system->data = (void *) &old_z1;

     /* Do zone merging */
-    buddy_system_t *buddy = (buddy_system_t *) PA2KA(PFN2ADDR(pfn));
-    zone_merge_internal(z1, z2, &old_z1, buddy);
-
-    /* Free unneeded config frames */
-    zone_reduce_region(z1, pfn - zones.info[z1].base, cframes);
+    zone_merge_internal(z1, z2, &old_z1, (void *) PA2KA(PFN2ADDR(pfn)));

     /* Subtract zone information from busy frames */
…
     /* Move zones down */
-    size_t i;
-    for (i = z2 + 1; i < zones.count; i++) {
+    for (size_t i = z2 + 1; i < zones.count; i++)
         zones.info[i - 1] = zones.info[i];
-        if (zones.info[i - 1].buddy_system != NULL)
-            zones.info[i - 1].buddy_system->data =
-                (void *) &zones.info[i - 1];
-    }

     zones.count--;
…
 void zone_merge_all(void)
 {
-    size_t i = 0;
+    size_t i = 1;
+
     while (i < zones.count) {
-        if (!zone_merge(i, i + 1))
+        if (!zone_merge(i - 1, i))
             i++;
     }
 }
…
 /** Create new frame zone.
  *
- * @param zone  Zone to construct.
- * @param buddy Address of buddy system configuration information.
- * @param start Physical address of the first frame within the zone.
- * @param count Count of frames in zone.
- * @param flags Zone flags.
+ * @param zone     Zone to construct.
+ * @param start    Physical address of the first frame within the zone.
+ * @param count    Count of frames in zone.
+ * @param flags    Zone flags.
+ * @param confdata Configuration data of the zone.
  *
  * @return Initialized zone.
  *
  */
-NO_TRACE static void zone_construct(zone_t *zone, buddy_system_t *buddy,
-    pfn_t start, size_t count, zone_flags_t flags)
+NO_TRACE static void zone_construct(zone_t *zone, pfn_t start, size_t count,
+    zone_flags_t flags, void *confdata)
 {
     zone->base = start;
…
     zone->free_count = count;
     zone->busy_count = 0;
-    zone->buddy_system = buddy;

     if (flags & ZONE_AVAILABLE) {
         /*
-         * Compute order for buddy system and initialize
+         * Initialize frame bitmap (located after the array of
+         * frame_t structures in the configuration space).
          */
-        uint8_t order = fnzb(count);
-        buddy_system_create(zone->buddy_system, order,
-            &zone_buddy_system_operations, (void *) zone);
-
-        /* Allocate frames _after_ the confframe */
-
-        /* Check sizes */
-        zone->frames = (frame_t *) ((uint8_t *) zone->buddy_system +
-            buddy_conf_size(order));
-
-        size_t i;
-        for (i = 0; i < count; i++)
+
+        bitmap_initialize(&zone->bitmap, count, BITMAP_BLOCK_SIZE,
+            confdata + (sizeof(frame_t) * count));
+        bitmap_clear_range(&zone->bitmap, 0, count);
+
+        /*
+         * Initialize the array of frame_t structures.
+         */
+
+        zone->frames = (frame_t *) confdata;
+
+        for (size_t i = 0; i < count; i++)
             frame_initialize(&zone->frames[i]);
-
-        /* Stuffing frames */
-        for (i = 0; i < count; i++) {
-            zone->frames[i].refcount = 0;
-            buddy_system_free(zone->buddy_system, &zone->frames[i].buddy_link);
-        }
-    } else
+    } else {
+        bitmap_initialize(&zone->bitmap, 0, 0, NULL);
         zone->frames = NULL;
+    }
 }
…
 size_t zone_conf_size(size_t count)
 {
-    return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count)));
+    return (count * sizeof(frame_t) +
+        bitmap_size(count, BITMAP_BLOCK_SIZE));
 }
…
 pfn_t zone_external_conf_alloc(size_t count)
 {
-    size_t size = zone_conf_size(count);
-    size_t order = ispwr2(size) ? fnzb(size) : (fnzb(size) + 1);
-
-    return ADDR2PFN((uintptr_t) frame_alloc(order - FRAME_WIDTH,
-        FRAME_LOWMEM | FRAME_ATOMIC));
+    size_t frames = SIZE2FRAMES(zone_conf_size(count));
+
+    return ADDR2PFN((uintptr_t)
+        frame_alloc(frames, FRAME_LOWMEM | FRAME_ATOMIC, 0));
 }
…
  * @param count     Size of zone in frames.
  * @param confframe Where configuration frames are supposed to be.
- *                  Automatically checks ,that we will not disturb the
+ *                  Automatically checks that we will not disturb the
  *                  kernel and possibly init. If confframe is given
  *                  _outside_ this zone, it is expected, that the area is
…
     if (flags & ZONE_AVAILABLE) {  /* Create available zone */
-        /* Theoretically we could have NULL here, practically make sure
+        /*
+         * Theoretically we could have NULL here, practically make sure
          * nobody tries to do that. If some platform requires, remove
          * the assert
          */
         ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL));

         /* Update the known end of physical memory. */
         config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count));

-        /* If confframe is supposed to be inside our zone, then make sure
+        /*
+         * If confframe is supposed to be inside our zone, then make sure
          * it does not span kernel & init
          */
         size_t confcount = SIZE2FRAMES(zone_conf_size(count));
+
         if ((confframe >= start) && (confframe < start + count)) {
             for (; confframe < start + count; confframe++) {
…
                 bool overlap = false;
-                size_t i;
-                for (i = 0; i < init.cnt; i++)
+                for (size_t i = 0; i < init.cnt; i++) {
                     if (overlaps(addr, PFN2ADDR(confcount),
                         init.tasks[i].paddr,
…
                         break;
                     }
+                }
+
                 if (overlap)
                     continue;
…
         }

-        buddy_system_t *buddy = (buddy_system_t *) PA2KA(PFN2ADDR(confframe));
-        zone_construct(&zones.info[znum], buddy, start, count, flags);
+        void *confdata = (void *) PA2KA(PFN2ADDR(confframe));
+        zone_construct(&zones.info[znum], start, count, flags, confdata);

         /* If confdata in zone, mark as unavailable */
         if ((confframe >= start) && (confframe < start + count)) {
-            size_t i;
-            for (i = confframe; i < confframe + confcount; i++)
+            for (size_t i = confframe; i < confframe + confcount; i++)
                 zone_mark_unavailable(&zones.info[znum],
                     i - zones.info[znum].base);
…
         return (size_t) -1;
     }
-    zone_construct(&zones.info[znum], NULL, start, count, flags);
+
+    zone_construct(&zones.info[znum], start, count, flags, NULL);

     irq_spinlock_unlock(&zones.lock, true);
…
 }

-/** Allocate power-of-two frames of physical memory.
- *
- * @param order Allocate exactly 2^order frames.
- * @param flags Flags for host zone selection and address processing.
- * @param pzone Preferred zone.
+/** Allocate frames of physical memory.
+ *
+ * @param count      Number of continuous frames to allocate.
+ * @param flags      Flags for host zone selection and address processing.
+ * @param constraint Indication of physical address bits that cannot be
+ *                   set in the address of the first allocated frame.
+ * @param pzone      Preferred zone.
  *
  * @return Physical address of the allocated frame.
  *
  */
-void *frame_alloc_generic(uint8_t order, frame_flags_t flags, size_t *pzone)
-{
-    size_t size = ((size_t) 1) << order;
+uintptr_t frame_alloc_generic(size_t count, frame_flags_t flags,
+    uintptr_t constraint, size_t *pzone)
+{
+    ASSERT(count > 0);
+
     size_t hint = pzone ? (*pzone) : 0;
+    pfn_t frame_constraint = ADDR2PFN(constraint);

     /*
      * If not told otherwise, we must first reserve the memory.
1027 780 */ 1028 if (!(flags & FRAME_NO_RESERVE)) 1029 reserve_force_alloc( size);1030 781 if (!(flags & FRAME_NO_RESERVE)) 782 reserve_force_alloc(count); 783 1031 784 loop: 1032 785 irq_spinlock_lock(&zones.lock, true); … … 1035 788 * First, find suitable frame zone. 1036 789 */ 1037 size_t znum = find_free_zone(order, 1038 FRAME_TO_ZONE_FLAGS(flags), hint); 1039 1040 /* If no memory, reclaim some slab memory, 1041 if it does not help, reclaim all */ 790 size_t znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags), 791 frame_constraint, hint); 792 793 /* 794 * If no memory, reclaim some slab memory, 795 * if it does not help, reclaim all. 796 */ 1042 797 if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) { 1043 798 irq_spinlock_unlock(&zones.lock, true); … … 1046 801 1047 802 if (freed > 0) 1048 znum = find_free_zone( order,1049 FRAME_TO_ZONE_FLAGS(flags), hint);803 znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags), 804 frame_constraint, hint); 1050 805 1051 806 if (znum == (size_t) -1) { … … 1055 810 1056 811 if (freed > 0) 1057 znum = find_free_zone( order,1058 FRAME_TO_ZONE_FLAGS(flags), hint);812 znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags), 813 frame_constraint, hint); 1059 814 } 1060 815 } … … 1063 818 if (flags & FRAME_ATOMIC) { 1064 819 irq_spinlock_unlock(&zones.lock, true); 820 1065 821 if (!(flags & FRAME_NO_RESERVE)) 1066 reserve_free(size); 1067 return NULL; 822 reserve_free(count); 823 824 return 0; 1068 825 } 1069 826 … … 1075 832 1076 833 if (!THREAD) 1077 panic("Cannot wait for memory to become available."); 834 panic("Cannot wait for %zu frames to become available " 835 "(%zu available).", count, avail); 1078 836 1079 837 /* … … 1082 840 1083 841 #ifdef CONFIG_DEBUG 1084 printf("Thread %" PRIu64 " waiting for %zu frames ,"1085 " %zu available.\n", THREAD->tid, size, avail);842 printf("Thread %" PRIu64 " waiting for %zu frames " 843 "(%zu available).\n", THREAD->tid, count, avail); 1086 844 #endif 1087 845 1088 846 /* 1089 * Since the mem_avail_mtx is an active mutex, we need to disable interrupts1090 * to prevent deadlock with TLB shootdown.847 * Since the mem_avail_mtx is an active mutex, we need to 848 * disable interrupts to prevent deadlock with TLB shootdown. 1091 849 */ 1092 850 ipl_t ipl = interrupts_disable(); … … 1094 852 1095 853 if (mem_avail_req > 0) 1096 mem_avail_req = min(mem_avail_req, size);854 mem_avail_req = min(mem_avail_req, count); 1097 855 else 1098 mem_avail_req = size; 856 mem_avail_req = count; 857 1099 858 size_t gen = mem_avail_gen; 1100 859 … … 1112 871 } 1113 872 1114 pfn_t pfn = zone_frame_alloc(&zones.info[znum], order)1115 + zones.info[znum].base;873 pfn_t pfn = zone_frame_alloc(&zones.info[znum], count, 874 frame_constraint) + zones.info[znum].base; 1116 875 1117 876 irq_spinlock_unlock(&zones.lock, true); … … 1120 879 *pzone = znum; 1121 880 1122 if (flags & FRAME_KA)1123 return (void *) PA2KA(PFN2ADDR(pfn)); 1124 1125 return (void *) PFN2ADDR(pfn); 1126 } 1127 1128 void *frame_alloc(uint8_t order, frame_flags_t flags) 1129 { 1130 return frame_alloc_generic(order, flags, NULL); 1131 } 1132 1133 void *frame_alloc_noreserve(uint8_t order, frame_flags_t flags) 1134 { 1135 return frame_alloc_generic(order, flags | FRAME_NO_RESERVE, NULL); 1136 } 1137 1138 /** Free a frame. 1139 * 1140 * Find respective frame structure for supplied physical frame address.1141 * Decrement frame reference count. 
If it drops to zero, move the frame1142 * structure to free list.1143 * 1144 * @param frame Physical Address of of the frame to be freed.881 return PFN2ADDR(pfn); 882 } 883 884 uintptr_t frame_alloc(size_t count, frame_flags_t flags, uintptr_t constraint) 885 { 886 return frame_alloc_generic(count, flags, constraint, NULL); 887 } 888 889 uintptr_t frame_alloc_noreserve(size_t count, frame_flags_t flags, 890 uintptr_t constraint) 891 { 892 return frame_alloc_generic(count, flags | FRAME_NO_RESERVE, constraint, 893 NULL); 894 } 895 896 /** Free frames of physical memory. 897 * 898 * Find respective frame structures for supplied physical frames. 899 * Decrement each frame reference count. If it drops to zero, mark 900 * the frames as available. 901 * 902 * @param start Physical Address of the first frame to be freed. 903 * @param count Number of frames to free. 1145 904 * @param flags Flags to control memory reservation. 1146 905 * 1147 906 */ 1148 void frame_free_generic(uintptr_t frame, frame_flags_t flags)1149 { 1150 size_t size;907 void frame_free_generic(uintptr_t start, size_t count, frame_flags_t flags) 908 { 909 size_t freed = 0; 1151 910 1152 911 irq_spinlock_lock(&zones.lock, true); 1153 912 1154 /* 1155 * First, find host frame zone for addr. 1156 */ 1157 pfn_t pfn = ADDR2PFN(frame); 1158 size_t znum = find_zone(pfn, 1, 0); 1159 1160 ASSERT(znum != (size_t) -1); 1161 1162 size = zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base); 913 for (size_t i = 0; i < count; i++) { 914 /* 915 * First, find host frame zone for addr. 916 */ 917 pfn_t pfn = ADDR2PFN(start) + i; 918 size_t znum = find_zone(pfn, 1, 0); 919 920 ASSERT(znum != (size_t) -1); 921 922 freed += zone_frame_free(&zones.info[znum], 923 pfn - zones.info[znum].base); 924 } 1163 925 1164 926 irq_spinlock_unlock(&zones.lock, true); … … 1166 928 /* 1167 929 * Signal that some memory has been freed. 930 * Since the mem_avail_mtx is an active mutex, 931 * we need to disable interruptsto prevent deadlock 932 * with TLB shootdown. 1168 933 */ 1169 1170 1171 /* 1172 * Since the mem_avail_mtx is an active mutex, we need to disable interrupts 1173 * to prevent deadlock with TLB shootdown. 
1174 */ 934 1175 935 ipl_t ipl = interrupts_disable(); 1176 936 mutex_lock(&mem_avail_mtx); 937 1177 938 if (mem_avail_req > 0) 1178 mem_avail_req -= min(mem_avail_req, size);939 mem_avail_req -= min(mem_avail_req, freed); 1179 940 1180 941 if (mem_avail_req == 0) { … … 1182 943 condvar_broadcast(&mem_avail_cv); 1183 944 } 945 1184 946 mutex_unlock(&mem_avail_mtx); 1185 947 interrupts_restore(ipl); 1186 948 1187 949 if (!(flags & FRAME_NO_RESERVE)) 1188 reserve_free( size);1189 } 1190 1191 void frame_free(uintptr_t frame )1192 { 1193 frame_free_generic(frame, 0);1194 } 1195 1196 void frame_free_noreserve(uintptr_t frame )1197 { 1198 frame_free_generic(frame, FRAME_NO_RESERVE);950 reserve_free(freed); 951 } 952 953 void frame_free(uintptr_t frame, size_t count) 954 { 955 frame_free_generic(frame, count, 0); 956 } 957 958 void frame_free_noreserve(uintptr_t frame, size_t count) 959 { 960 frame_free_generic(frame, count, FRAME_NO_RESERVE); 1199 961 } 1200 962 … … 1230 992 irq_spinlock_lock(&zones.lock, true); 1231 993 1232 size_t i; 1233 for (i = 0; i < count; i++) { 994 for (size_t i = 0; i < count; i++) { 1234 995 size_t znum = find_zone(start + i, 1, 0); 996 1235 997 if (znum == (size_t) -1) /* PFN not found */ 1236 998 continue; … … 1257 1019 /* Tell the architecture to create some memory */ 1258 1020 frame_low_arch_init(); 1021 1259 1022 if (config.cpu_active == 1) { 1260 1023 frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), … … 1263 1026 SIZE2FRAMES(config.stack_size)); 1264 1027 1265 size_t i; 1266 for (i = 0; i < init.cnt; i++) { 1267 pfn_t pfn = ADDR2PFN(init.tasks[i].paddr); 1268 frame_mark_unavailable(pfn, 1028 for (size_t i = 0; i < init.cnt; i++) 1029 frame_mark_unavailable(ADDR2PFN(init.tasks[i].paddr), 1269 1030 SIZE2FRAMES(init.tasks[i].size)); 1270 }1271 1031 1272 1032 if (ballocs.size) … … 1274 1034 SIZE2FRAMES(ballocs.size)); 1275 1035 1276 /* Black list first frame, as allocating NULL would 1036 /* 1037 * Blacklist first frame, as allocating NULL would 1277 1038 * fail in some places 1278 1039 */ 1279 1040 frame_mark_unavailable(0, 1); 1280 1041 } 1042 1281 1043 frame_high_arch_init(); 1282 1044 } … … 1284 1046 /** Adjust bounds of physical memory region according to low/high memory split. 1285 1047 * 1286 * @param low[in] If true, the adjustment is performed to make the region 1287 * fit in the low memory. Otherwise the adjustment is 1288 * performed to make the region fit in the high memory. 1289 * @param basep[inout] Pointer to a variable which contains the region's base 1290 * address and which may receive the adjusted base address. 1291 * @param sizep[inout] Pointer to a variable which contains the region's size 1292 * and which may receive the adjusted size. 1293 * @retun True if the region still exists even after the 1294 * adjustment, false otherwise. 1048 * @param low[in] If true, the adjustment is performed to make the region 1049 * fit in the low memory. Otherwise the adjustment is 1050 * performed to make the region fit in the high memory. 1051 * @param basep[inout] Pointer to a variable which contains the region's base 1052 * address and which may receive the adjusted base address. 1053 * @param sizep[inout] Pointer to a variable which contains the region's size 1054 * and which may receive the adjusted size. 1055 * 1056 * @return True if the region still exists even after the adjustment. 1057 * @return False otherwise. 
1058 * 1295 1059 */ 1296 1060 bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep) 1297 1061 { 1298 1062 uintptr_t limit = KA2PA(config.identity_base) + config.identity_size; 1299 1063 1300 1064 if (low) { 1301 1065 if (*basep > limit) 1302 1066 return false; 1067 1303 1068 if (*basep + *sizep > limit) 1304 1069 *sizep = limit - *basep; … … 1306 1071 if (*basep + *sizep <= limit) 1307 1072 return false; 1073 1308 1074 if (*basep <= limit) { 1309 1075 *sizep -= limit - *basep; … … 1311 1077 } 1312 1078 } 1079 1313 1080 return true; 1314 1081 } … … 1322 1089 1323 1090 uint64_t total = 0; 1324 size_t i;1325 for ( i = 0; i < zones.count; i++)1091 1092 for (size_t i = 0; i < zones.count; i++) 1326 1093 total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1327 1094 … … 1346 1113 *free = 0; 1347 1114 1348 size_t i; 1349 for (i = 0; i < zones.count; i++) { 1115 for (size_t i = 0; i < zones.count; i++) { 1350 1116 *total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1351 1117 … … 1384 1150 */ 1385 1151 1386 size_t i; 1387 for (i = 0;; i++) { 1152 for (size_t i = 0;; i++) { 1388 1153 irq_spinlock_lock(&zones.lock, true); 1389 1154 … … 1438 1203 size_t znum = (size_t) -1; 1439 1204 1440 size_t i; 1441 for (i = 0; i < zones.count; i++) { 1205 for (size_t i = 0; i < zones.count; i++) { 1442 1206 if ((i == num) || (PFN2ADDR(zones.info[i].base) == num)) { 1443 1207 znum = i; … … 1452 1216 } 1453 1217 1454 uintptr_t base = PFN2ADDR(zones.info[ i].base);1455 zone_flags_t flags = zones.info[ i].flags;1456 size_t count = zones.info[ i].count;1457 size_t free_count = zones.info[ i].free_count;1458 size_t busy_count = zones.info[ i].busy_count;1218 uintptr_t base = PFN2ADDR(zones.info[znum].base); 1219 zone_flags_t flags = zones.info[znum].flags; 1220 size_t count = zones.info[znum].count; 1221 size_t free_count = zones.info[znum].free_count; 1222 size_t busy_count = zones.info[znum].busy_count; 1459 1223 1460 1224 irq_spinlock_unlock(&zones.lock, true); -
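The frame.c hunks above change the allocator's public interface in two ways: callers now pass an exact count of contiguous frames plus a physical-address constraint instead of a power-of-two order, and the result is always a physical address (the FRAME_KA conversion is gone, as the km.c and thread.c hunks below confirm). A minimal migration sketch, using only names that appear in this changeset; the three-frame buffer is a hypothetical example, not code from the merge:

    /* Old interface: frame_alloc(order, flags) returned 2^order frames,
     * optionally already converted to a kernel address via FRAME_KA. */

    /* New interface: exact count, flags, and a constraint mask of address
     * bits that must stay clear in the base (0 = no placement constraint).
     * The result is physical, so kernel access needs an explicit PA2KA(). */
    uintptr_t phys = frame_alloc(3, FRAME_ATOMIC, 0);
    if (phys != 0) {
            void *buf = (void *) PA2KA(phys);
            /* ... use the three contiguous frames ... */
            frame_free(phys, 3);
    }

Dropping the power-of-two rounding is also what lets zone_conf_size() above shrink to a plain frame_t array followed by a bitmap, with no buddy_conf_size() term.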
kernel/generic/src/mm/km.c
r47f5a77 r3a0a4d8
 uintptr_t km_temporary_page_get(uintptr_t *framep, frame_flags_t flags)
 {
-	uintptr_t frame;
-	uintptr_t page;
-
 	ASSERT(THREAD);
 	ASSERT(framep);
 	ASSERT(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC)));
 
 	/*
 	 * Allocate a frame, preferably from high memory.
 	 */
-	frame = (uintptr_t) frame_alloc(ONE_FRAME,
-	    FRAME_HIGHMEM | FRAME_ATOMIC | flags);
+	uintptr_t page;
+	uintptr_t frame =
+	    frame_alloc(1, FRAME_HIGHMEM | FRAME_ATOMIC | flags, 0);
 	if (frame) {
 		page = km_map(frame, PAGE_SIZE,
 		    PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
-		ASSERT(page);  // FIXME
+
+		// FIXME
+		ASSERT(page);
 	} else {
-		frame = (uintptr_t) frame_alloc(ONE_FRAME,
-		    FRAME_LOWMEM | flags);
+		frame = frame_alloc(1, FRAME_LOWMEM | flags, 0);
 		if (!frame)
 			return (uintptr_t) NULL;
+
 		page = PA2KA(frame);
 	}
 
 	*framep = frame;
-	return page; 
+	return page;
 }
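km_temporary_page_get() keeps a two-step policy: first try high memory with FRAME_ATOMIC so the attempt cannot block, then fall back to low memory, where a blocking allocation is acceptable and PA2KA() yields a directly usable address without km_map(). A usage sketch; it assumes a km_temporary_page_put() counterpart, which is not shown in this diff:

    uintptr_t frame;
    uintptr_t page = km_temporary_page_get(&frame, FRAME_ATOMIC);
    if (page != 0) {
            memsetb((void *) page, PAGE_SIZE, 0);  /* scratch use */
            km_temporary_page_put(page);  /* assumed counterpart, not in the diff */
    }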
kernel/generic/src/mm/page.c
r47f5a77 r3a0a4d8
 }
 
-int page_find_mapping(uintptr_t virt, void **phys)
+int page_find_mapping(uintptr_t virt, uintptr_t *phys)
 {
 	page_table_lock(AS, true);
…
 	}
 
-	*phys = (void *) PTE_GET_FRAME(pte) +
+	*phys = PTE_GET_FRAME(pte) +
 	    (virt - ALIGN_DOWN(virt, PAGE_SIZE));
 
…
  *
  */
-sysarg_t sys_page_find_mapping(uintptr_t virt, void *phys_ptr)
-{
-	void *phys;
+sysarg_t sys_page_find_mapping(uintptr_t virt, uintptr_t *phys_ptr)
+{
+	uintptr_t phys;
 	int rc = page_find_mapping(virt, &phys);
 	if (rc != EOK)
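Switching the out-parameter from void * to uintptr_t also means the offset computation above no longer performs arithmetic on a void * (a GCC extension). A small caller sketch; the helper name is hypothetical:

    /* Hypothetical helper: report where a mapped virtual address lands. */
    static void dump_mapping(uintptr_t virt)
    {
            uintptr_t phys;
            if (page_find_mapping(virt, &phys) == EOK)
                    printf("%p maps to %p\n", (void *) virt, (void *) phys);
    }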
kernel/generic/src/mm/slab.c
r47f5a77 r3a0a4d8
 	size_t zone = 0;
 
-	void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
-	if (!data) {
+	uintptr_t data_phys =
+	    frame_alloc_generic(cache->frames, flags, 0, &zone);
+	if (!data_phys)
 		return NULL;
-	}
+
+	void *data = (void *) PA2KA(data_phys);
 
 	slab_t *slab;
…
 		slab = slab_alloc(slab_extern_cache, flags);
 		if (!slab) {
-			frame_free(KA2PA(data));
+			frame_free(KA2PA(data), cache->frames);
 			return NULL;
 		}
 	} else {
-		fsize = (PAGE_SIZE << cache->order);
+		fsize = FRAMES2SIZE(cache->frames);
 		slab = data + fsize - sizeof(*slab);
 	}
 
 	/* Fill in slab structures */
 	size_t i;
-	for (i = 0; i < ((size_t) 1 << cache->order); i++)
+	for (i = 0; i < cache->frames; i++)
 		frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);
…
 NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
 {
-	frame_free(KA2PA(slab->start));
+	frame_free(KA2PA(slab->start), slab->cache->frames);
 	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
 		slab_free(slab_extern_cache, slab);
…
 	atomic_dec(&cache->allocated_slabs);
 
-	return (1 << cache->order);
+	return cache->frames;
 }
…
 {
 	if (cache->flags & SLAB_CACHE_SLINSIDE)
-		return ((PAGE_SIZE << cache->order)
-		    - sizeof(slab_t)) / cache->size;
+		return (FRAMES2SIZE(cache->frames) - sizeof(slab_t)) /
+		    cache->size;
 	else
-		return (PAGE_SIZE << cache->order) / cache->size;
+		return FRAMES2SIZE(cache->frames) / cache->size;
 }
…
 {
 	size_t objects = comp_objects(cache);
-	size_t ssize = PAGE_SIZE << cache->order;
+	size_t ssize = FRAMES2SIZE(cache->frames);
 
 	if (cache->flags & SLAB_CACHE_SLINSIDE)
…
 		cache->flags |= SLAB_CACHE_SLINSIDE;
 
-	/* Minimum slab order */
-	size_t pages = SIZE2FRAMES(cache->size);
-
-	/* We need the 2^order >= pages */
-	if (pages == 1)
-		cache->order = 0;
-	else
-		cache->order = fnzb(pages - 1) + 1;
+	/* Minimum slab frames */
+	cache->frames = SIZE2FRAMES(cache->size);
 
 	while (badness(cache) > SLAB_MAX_BADNESS(cache))
-		cache->order += 1;
+		cache->frames <<= 1;
 
 	cache->objects = comp_objects(cache);
…
 
 	size_t frames = 0;
-	list_foreach(slab_cache_list, cur) {
-		slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
+	list_foreach(slab_cache_list, link, slab_cache_t, cache) {
 		frames += _slab_reclaim(cache, flags);
 	}
…
 
 	const char *name = cache->name;
-	uint8_t order = cache->order;
+	size_t frames = cache->frames;
 	size_t size = cache->size;
 	size_t objects = cache->objects;
…
 	irq_spinlock_unlock(&slab_cache_lock, true);
 
-	printf("%-18s %8zu %8u %8zu %8ld %8ld %8ld %-5s\n",
-	    name, size, (1 << order), objects, allocated_slabs,
+	printf("%-18s %8zu %8zu %8zu %8ld %8ld %8ld %-5s\n",
+	    name, size, frames, objects, allocated_slabs,
 	    cached_objs, allocated_objs,
 	    flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
…
 	irq_spinlock_lock(&slab_cache_lock, false);
 
-	list_foreach(slab_cache_list, cur) {
-		slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
+	list_foreach(slab_cache_list, link, slab_cache_t, slab) {
 		if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
 		    SLAB_CACHE_MAGDEFERRED)
kernel/generic/src/proc/scheduler.c
r47f5a77 r3a0a4d8
 
 		printf("\trq[%u]: ", i);
-		list_foreach(cpus[cpu].rq[i].rq, cur) {
-			thread_t *thread = list_get_instance(cur,
-			    thread_t, rq_link);
+		list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t,
+		    thread) {
 			printf("%" PRIu64 "(%s) ", thread->tid,
 			    thread_states[thread->state]);
kernel/generic/src/proc/task.c
r47f5a77 r3a0a4d8
 
 	/* Current values of threads */
-	list_foreach(task->threads, cur) {
-		thread_t *thread = list_get_instance(cur, thread_t, th_link);
-
+	list_foreach(task->threads, th_link, thread_t, thread) {
 		irq_spinlock_lock(&thread->lock, false);
 
…
 	 */
 
-	list_foreach(task->threads, cur) {
-		thread_t *thread = list_get_instance(cur, thread_t, th_link);
+	list_foreach(task->threads, th_link, thread_t, thread) {
 		bool sleeping = false;
kernel/generic/src/proc/thread.c
r47f5a77 r3a0a4d8
 	kmflags &= ~FRAME_HIGHMEM;
 
-	thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
-	if (!thread->kstack) {
+	uintptr_t stack_phys =
+	    frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
+	if (!stack_phys) {
 #ifdef CONFIG_FPU
 		if (thread->saved_fpu_context)
…
 	}
 
+	thread->kstack = (uint8_t *) PA2KA(stack_phys);
+
 #ifdef CONFIG_UDEBUG
 	mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
…
 	thr_destructor_arch(thread);
 
-	frame_free(KA2PA(thread->kstack));
+	frame_free(KA2PA(thread->kstack), STACK_FRAMES);
 
 #ifdef CONFIG_FPU
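Passing STACK_SIZE - 1 as the constraint forbids every low-order bit in the base address, so with STACK_SIZE a power of two the kernel stack comes out naturally aligned, an alignment the buddy allocator used to provide implicitly, since power-of-two blocks are always naturally aligned. A sketch of the invariant (assuming a power-of-two STACK_SIZE, which the mask idiom requires):

    uintptr_t stack_phys = frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
    if (stack_phys != 0) {
            /* No bit of the constraint mask may be set in the base address. */
            ASSERT((stack_phys & (STACK_SIZE - 1)) == 0);
    }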
kernel/generic/src/synch/futex.c
r47f5a77 r3a0a4d8
 	mutex_lock(&TASK->futexes_lock);
 
-	list_foreach(TASK->futexes.leaf_list, cur) {
-		btree_node_t *node;
+	list_foreach(TASK->futexes.leaf_list, leaf_link, btree_node_t, node) {
 		unsigned int i;
 
-		node = list_get_instance(cur, btree_node_t, leaf_link);
 		for (i = 0; i < node->keys; i++) {
 			futex_t *ftx;
kernel/generic/src/sysinfo/stats.c
r47f5a77 r3a0a4d8
 
 	/* Walk the B+ tree and count pages */
-	list_foreach(as->as_area_btree.leaf_list, cur) {
-		btree_node_t *node =
-		    list_get_instance(cur, btree_node_t, leaf_link);
-
+	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
+	    node) {
 		unsigned int i;
 		for (i = 0; i < node->keys; i++) {
…
 
 	/* Walk the B+ tree and count pages */
-	list_foreach(as->as_area_btree.leaf_list, cur) {
-		btree_node_t *node =
-		    list_get_instance(cur, btree_node_t, leaf_link);
-
+	list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {
 		unsigned int i;
 		for (i = 0; i < node->keys; i++) {
kernel/generic/src/time/clock.c
r47f5a77 r3a0a4d8
 void clock_counter_init(void)
 {
-	void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
-	if (!faddr)
+	uintptr_t faddr = frame_alloc(1, FRAME_ATOMIC, 0);
+	if (faddr == 0)
 		panic("Cannot allocate page for clock.");
 
…
 	uptime->useconds = 0;
 
-	clock_parea.pbase = (uintptr_t) faddr;
+	clock_parea.pbase = faddr;
 	clock_parea.frames = 1;
 	clock_parea.unpriv = true;
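A side effect of the uintptr_t return type is visible here: allocation failure is now signalled by 0 rather than a null pointer. The check is unambiguous only because the frame_init() hunk above blacklists frame 0 ("allocating NULL would fail in some places"), so no successful allocation can ever return physical address 0.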
kernel/generic/src/udebug/udebug.c
r47f5a77 r3a0a4d8
 
 	/* Finish debugging of all userspace threads */
-	list_foreach(task->threads, cur) {
-		thread_t *thread = list_get_instance(cur, thread_t, th_link);
-
+	list_foreach(task->threads, th_link, thread_t, thread) {
 		mutex_lock(&thread->udebug.lock);
 
kernel/generic/src/udebug/udebug_ops.c
r47f5a77 r3a0a4d8
 	/* Set udebug.active on all of the task's userspace threads. */
 
-	list_foreach(TASK->threads, cur) {
-		thread_t *thread = list_get_instance(cur, thread_t, th_link);
-
+	list_foreach(TASK->threads, th_link, thread_t, thread) {
 		mutex_lock(&thread->udebug.lock);
 		if (thread->uspace) {
…
 
 	/* FIXME: make sure the thread isn't past debug shutdown... */
-	list_foreach(TASK->threads, cur) {
-		thread_t *thread = list_get_instance(cur, thread_t, th_link);
-
+	list_foreach(TASK->threads, th_link, thread_t, thread) {
 		irq_spinlock_lock(&thread->lock, false);
 		bool uspace = thread->uspace;
