Changeset 8b863a62 in mainline for kernel/generic/src
- Timestamp:
  2014-04-16T17:14:06Z (12 years ago)
- Branches:
  lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
  f857e8b
- Parents:
  dba3e2c (diff), 70b570c (diff)
  Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location:
  kernel/generic/src
- Files:
  - 1 added
  - 1 deleted
  - 39 edited
- adt/bitmap.c (modified) (4 diffs)
- adt/btree.c (modified) (1 diff)
- adt/hash_table.c (modified) (2 diffs)
- adt/list.c (modified) (1 diff)
- console/chardev.c (modified) (2 diffs)
- console/cmd.c (modified) (5 diffs)
- console/console.c (modified) (12 diffs)
- console/kconsole.c (modified) (3 diffs)
- cpu/cpu.c (modified) (1 diff)
- ddi/ddi.c (modified) (6 diffs)
- debug/panic.c (modified) (1 diff)
- debug/stacktrace.c (modified) (2 diffs)
- ipc/ipc.c (modified) (1 diff)
- ipc/ipcrsc.c (modified) (1 diff)
- lib/func.c (modified) (2 diffs)
- lib/memfnc.c (modified) (1 diff)
- lib/ra.c (modified) (2 diffs)
- lib/rd.c (modified) (2 diffs)
- log/log.c (added)
- main/kinit.c (modified) (6 diffs)
- main/main.c (modified) (2 diffs)
- main/shutdown.c (modified) (2 diffs)
- mm/as.c (modified) (18 diffs)
- mm/backend_anon.c (modified) (9 diffs)
- mm/backend_elf.c (modified) (7 diffs)
- mm/backend_phys.c (modified) (4 diffs)
- mm/buddy.c (deleted)
- mm/frame.c (modified) (58 diffs)
- mm/km.c (modified) (1 diff)
- mm/page.c (modified) (3 diffs)
- mm/slab.c (modified) (12 diffs)
- proc/program.c (modified) (4 diffs)
- proc/scheduler.c (modified) (4 diffs)
- proc/task.c (modified) (2 diffs)
- proc/thread.c (modified) (3 diffs)
- synch/futex.c (modified) (1 diff)
- syscall/syscall.c (modified) (4 diffs)
- sysinfo/stats.c (modified) (2 diffs)
- time/clock.c (modified) (2 diffs)
- udebug/udebug.c (modified) (1 diff)
- udebug/udebug_ops.c (modified) (2 diffs)
Legend:
- Unmodified
- Added
- Removed
kernel/generic/src/adt/bitmap.c (dba3e2c → 8b863a62)

The bitmap ADT can now also find (and claim) ranges of unset bits, which the reworked frame allocator builds on. Byte arithmetic goes through BITMAP_ELEMENT/BITMAP_REMAINER instead of hard-coded 8 and 7, the size parameters are renamed from bits to count, and the structure keeps a next-fit hint.

  * This file implements bitmap ADT and provides functions for
- * setting and clearing ranges of bits.
+ * setting and clearing ranges of bits and for finding ranges
+ * of unset bits.
  */
…
+/** Unchecked version of bitmap_get()
+ *
+ * This version of bitmap_get() does not do any boundary checks.
+ *
+ * @param bitmap  Bitmap to access.
+ * @param element Element to access.
+ *
+ * @return Bit value of the element in the bitmap.
+ *
+ */
+static unsigned int bitmap_get_fast(bitmap_t *bitmap, size_t element)
+{
+	size_t byte = element / BITMAP_ELEMENT;
+	uint8_t mask = 1 << (element & BITMAP_REMAINER);
+
+	return !!((bitmap->bits)[byte] & mask);
+}
+
+/** Get bitmap size
+ *
+ * Return the size (in bytes) required for the bitmap.
+ *
+ * @param elements Number of bits stored in bitmap.
+ *
+ * @return Size (in bytes) required for the bitmap.
+ *
+ */
+size_t bitmap_size(size_t elements)
+{
+	size_t size = elements / BITMAP_ELEMENT;
+
+	if ((elements % BITMAP_ELEMENT) != 0)
+		size++;
+
+	return size;
+}
…
-void bitmap_initialize(bitmap_t *bitmap, uint8_t *map, size_t bits)
-{
-	bitmap->map = map;
-	bitmap->bits = bits;
-}
+void bitmap_initialize(bitmap_t *bitmap, size_t elements, void *data)
+{
+	bitmap->elements = elements;
+	bitmap->bits = (uint8_t *) data;
+	bitmap->next_fit = 0;
+}
…
bitmap_set_range(), bitmap_clear_range() and bitmap_copy() are rewritten in the same spirit (count instead of bits, BITMAP_ELEMENT/BITMAP_REMAINER instead of 8 and 7, asserts against bitmap->elements); in addition, bitmap_clear_range() now records the first cleared byte in bitmap->next_fit. The constrained range allocator is new:
…
+static int constraint_satisfy(size_t index, size_t base, size_t constraint)
+{
+	return (((base + index) & constraint) == 0);
+}
+
+/** Find a continuous zero bit range
+ *
+ * Find a continuous zero bit range in the bitmap. The address
+ * computed as the sum of the index of the first zero bit and
+ * the base argument needs to be compliant with the constraint
+ * (those bits that are set in the constraint cannot be set in
+ * the address).
+ *
+ * If the index argument is non-NULL, the continuous zero range
+ * is set and the index of the first bit is stored to index.
+ * Otherwise the bitmap stays untouched.
+ *
+ * @param bitmap     Bitmap structure.
+ * @param count      Number of continuous zero bits to find.
+ * @param base       Address of the first bit in the bitmap.
+ * @param prefered   Prefered address to start searching from.
+ * @param constraint Constraint for the address of the first zero bit.
+ * @param index      Place to store the index of the first zero
+ *                   bit. Can be NULL (in which case the bitmap
+ *                   is not modified).
+ *
+ * @return Non-zero if a continuous range of zero bits satisfying
+ *         the constraint has been found.
+ * @return Zero otherwise.
+ *
+ */
+int bitmap_allocate_range(bitmap_t *bitmap, size_t count, size_t base,
+    size_t prefered, size_t constraint, size_t *index)
+{
+	if (count == 0)
+		return false;
+
+	size_t size = bitmap_size(bitmap->elements);
+	size_t next_fit = bitmap->next_fit;
+
+	/*
+	 * Adjust the next-fit value according to the address
+	 * the caller prefers to start the search at.
+	 */
+	if ((prefered > base) && (prefered < base + bitmap->elements)) {
+		size_t prefered_fit = (prefered - base) / BITMAP_ELEMENT;
+
+		if (prefered_fit > next_fit)
+			next_fit = prefered_fit;
+	}
+
+	for (size_t pos = 0; pos < size; pos++) {
+		size_t byte = (next_fit + pos) % size;
+
+		/* Skip if the current byte has all bits set */
+		if (bitmap->bits[byte] == ALL_ONES)
+			continue;
+
+		size_t byte_bit = byte * BITMAP_ELEMENT;
+
+		for (size_t bit = 0; bit < BITMAP_ELEMENT; bit++) {
+			size_t i = byte_bit + bit;
+
+			if (i >= bitmap->elements)
+				break;
+
+			if (!constraint_satisfy(i, base, constraint))
+				continue;
+
+			if (!bitmap_get_fast(bitmap, i)) {
+				size_t continuous = 1;
+
+				for (size_t j = 1; j < count; j++) {
+					if ((i + j < bitmap->elements) &&
+					    (!bitmap_get_fast(bitmap, i + j)))
+						continuous++;
+					else
+						break;
+				}
+
+				if (continuous == count) {
+					if (index != NULL) {
+						bitmap_set_range(bitmap, i, count);
+						bitmap->next_fit = i / BITMAP_ELEMENT;
+						*index = i;
+					}
+
+					return true;
+				} else
+					i += continuous;
+			}
+		}
+	}
+
+	return false;
+}
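To make the constraint semantics concrete, here is a minimal usage sketch of the new interface (the buffer size and the alignment constraint are illustrative, not taken from the changeset):

static uint8_t storage[128];    /* at least bitmap_size(1024) bytes */

static void example(void)
{
	bitmap_t bm;
	size_t index;

	bitmap_initialize(&bm, 1024, storage);
	bitmap_clear_range(&bm, 0, 1024);

	/*
	 * Find and set 8 contiguous zero bits; constraint 0x7 demands
	 * that (base + index) has its three low bits clear, i.e. the
	 * run starts at a multiple of 8.
	 */
	if (bitmap_allocate_range(&bm, 8, 0, 0, 0x7, &index)) {
		/* bits [index, index + 8) are now set */
	}
}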
kernel/generic/src/adt/btree.c (dba3e2c → 8b863a62)

 	printf("Printing list of leaves:\n");
-	list_foreach(t->leaf_list, cur) {
-		btree_node_t *node;
-
-		node = list_get_instance(cur, btree_node_t, leaf_link);
-
+	list_foreach(t->leaf_list, leaf_link, btree_node_t, node) {
 		ASSERT(node);
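This pattern recurs throughout the changeset: list_foreach() now takes the link member name, the item type, and an iterator name, and performs list_get_instance() internally. A minimal sketch of the new idiom (my_item_t and process() are hypothetical, not from the changeset):

typedef struct {
	int value;
	link_t link;    /* list membership link */
} my_item_t;

static void process_all(list_t *items)
{
	/* item is already a my_item_t *; no list_get_instance() needed */
	list_foreach(*items, link, my_item_t, item) {
		process(item);
	}
}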
kernel/generic/src/adt/hash_table.c (dba3e2c → 8b863a62)

 	ASSERT(chain < h->entries);
 
-	list_foreach(h->entry[chain], cur) {
+	link_t *cur = list_first(&h->entry[chain]);
+	while (cur != NULL) {
 		if (h->op->compare(key, h->max_keys, cur)) {
 			/*
…
 			return cur;
 		}
+		cur = list_next(cur, &h->entry[chain]);
 	}
kernel/generic/src/adt/list.c (dba3e2c → 8b863a62)

 	unsigned int count = 0;
 
-	list_foreach(*list, link) {
+	link_t *link = list_first(list);
+	while (link != NULL) {
 		count++;
+		link = list_next(link, list);
 	}
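These two call sites operate on bare link_t pointers (there is no enclosing item type to name), so with the new four-argument macro they switch to explicit cursor iteration. The same idiom in isolation (the items variable is hypothetical):

/* Raw-link iteration over a list_t with no typed payload. */
size_t count = 0;
for (link_t *cur = list_first(&items); cur != NULL;
    cur = list_next(cur, &items))
	count++;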
kernel/generic/src/console/chardev.c (dba3e2c → 8b863a62)

 	if (atomic_get(&haltstate)) {
-		/* If we are here, we are hopefully on the processor that
+		/*
+		 * If we are here, we are hopefully on the processor that
 		 * issued the 'halt' command, so proceed to read the character
 		 * directly from input
…
 	waitq_sleep(&indev->wq);
 	irq_spinlock_lock(&indev->lock, true);
-	wchar_t ch = indev->buffer[(indev->index - indev->counter) % INDEV_BUFLEN];
+	wchar_t ch = indev->buffer[(indev->index - indev->counter) %
+	    INDEV_BUFLEN];
 	indev->counter--;
 	irq_spinlock_unlock(&indev->lock, true);
 
 	return ch;
 }
+
+/** Signal out-of-band condition
+ *
+ * @param indev  Input character device.
+ * @param signal Out-of-band condition to signal.
+ *
+ */
+void indev_signal(indev_t *indev, indev_signal_t signal)
+{
+	if ((indev != NULL) && (indev->op != NULL) &&
+	    (indev->op->signal != NULL))
+		indev->op->signal(indev, signal);
+}
kernel/generic/src/console/cmd.c (dba3e2c → 8b863a62)

Error reporting moves from printf() to the new kernel log, and three iterations over cmd_list switch to the typed list_foreach().

 #include <print.h>
+#include <log.h>
 #include <panic.h>
…
 	for (i = 0; basic_commands[i]; i++) {
 		if (!cmd_register(basic_commands[i])) {
-			printf("Cannot register command %s\n",
+			log(LF_OTHER, LVL_ERROR,
+			    "Cannot register command %s",
 			    basic_commands[i]->name);
…
 	size_t len = 0;
-	list_foreach(cmd_list, cur) {
-		cmd_info_t *hlp;
-		hlp = list_get_instance(cur, cmd_info_t, link);
-
+	list_foreach(cmd_list, link, cmd_info_t, hlp) {
 		spinlock_lock(&hlp->lock);
 		if (str_length(hlp->name) > len)
…
 	if ((_len != len) || (((int) _len) < 0)) {
-		printf("Command length overflow\n");
+		log(LF_OTHER, LVL_ERROR, "Command length overflow");
 		return 1;
 	}
…
(the two remaining list_foreach(cmd_list, cur) loops are converted the same way)
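The conversions above all follow the same shape: the new log() interface (its implementation, log/log.c, is added by this changeset) takes a facility, a level, and a printf-style format without a trailing newline. A representative before/after pair, mirroring the hunks (the name variable is illustrative):

/* before */
printf("Cannot register command %s\n", name);

/* after */
log(LF_OTHER, LVL_ERROR, "Cannot register command %s", name);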
kernel/generic/src/console/console.c (dba3e2c → 8b863a62)

The kernel log buffer is renamed from klog to kio (kernel I/O): abi/klog.h, KLOG_PAGES/KLOG_LENGTH, the klog_* variables, klog_lock, klog_parea, klog_init()/klog_update()/sys_klog(), EVENT_KLOG and the KLOG_UPDATE/KLOG_WRITE/KLOG_COMMAND commands all become their kio counterparts, and the sysinfo items klog.faddr/klog.pages become kio.faddr/kio.pages. The buffering logic in putchar() is factored out into two new helpers, and the console gains scroll support: stdin gets a signal callback and stdout gets scroll_up/scroll_down operations.

+static void stdin_signal(indev_t *, indev_signal_t);
+
 static indev_operations_t stdin_ops = {
-	.poll = NULL
+	.poll = NULL,
+	.signal = stdin_signal
 };
 
 static void stdout_write(outdev_t *, wchar_t);
 static void stdout_redraw(outdev_t *);
+static void stdout_scroll_up(outdev_t *);
+static void stdout_scroll_down(outdev_t *);
 
 static outdev_operations_t stdout_ops = {
 	.write = stdout_write,
-	.redraw = stdout_redraw
+	.redraw = stdout_redraw,
+	.scroll_up = stdout_scroll_up,
+	.scroll_down = stdout_scroll_down
 };
…
+static void stdin_signal(indev_t *indev, indev_signal_t signal)
+{
+	switch (signal) {
+	case INDEV_SIGNAL_SCROLL_UP:
+		if (stdout != NULL)
+			stdout_scroll_up(stdout);
+		break;
+	case INDEV_SIGNAL_SCROLL_DOWN:
+		if (stdout != NULL)
+			stdout_scroll_down(stdout);
+		break;
+	}
+}
…
stdout_write(), stdout_redraw() and the new stdout_scroll_up()/stdout_scroll_down() all fan out to the sink list with the typed list_foreach(dev->list, link, outdev_t, sink).
…
+/** Flush characters that are stored in the output buffer
+ *
+ */
+void kio_flush(void)
+{
+	bool ordy = ((stdout) && (stdout->op->write));
+
+	if (!ordy)
+		return;
+
+	spinlock_lock(&kio_lock);
+
+	/* Print characters that weren't printed earlier */
+	while (kio_stored > 0) {
+		wchar_t tmp = kio[(kio_start + kio_len - kio_stored) % KIO_LENGTH];
+		kio_stored--;
+
+		/*
+		 * We need to give up the spinlock for
+		 * the physical operation of writing out
+		 * the character.
+		 */
+		spinlock_unlock(&kio_lock);
+		stdout->op->write(stdout, tmp);
+		spinlock_lock(&kio_lock);
+	}
+
+	spinlock_unlock(&kio_lock);
+}
+
+/** Put a character into the output buffer.
+ *
+ * The caller is required to hold kio_lock
+ */
+void kio_push_char(const wchar_t ch)
+{
+	kio[(kio_start + kio_len) % KIO_LENGTH] = ch;
+	if (kio_len < KIO_LENGTH)
+		kio_len++;
+	else
+		kio_start = (kio_start + 1) % KIO_LENGTH;
+
+	if (kio_stored < kio_len)
+		kio_stored++;
+
+	/* The character is stored for uspace */
+	if (kio_uspace < kio_len)
+		kio_uspace++;
+}
…
In putchar(), the inline buffering block (which also carried the comment typos "charaters" and "writting") is replaced by:

 	bool ordy = ((stdout) && (stdout->op->write));
 
+	spinlock_lock(&kio_lock);
+	kio_push_char(ch);
+	spinlock_unlock(&kio_lock);
+
+	/* Output stored characters */
+	kio_flush();
…
 	/* Force notification on newline */
 	if (ch == '\n')
-		klog_update(NULL);
+		kio_update(NULL);
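The cyclic-buffer indexing shared by kio_push_char() and kio_flush() is easiest to see with small numbers (a worked example with KIO_LENGTH shrunk to 4; all values illustrative):

/*
 * KIO_LENGTH = 4, kio_start = 1, kio_len = 4 (buffer full),
 * kio_stored = 2 (two characters not yet written to stdout).
 *
 * push:  kio[(1 + 4) % 4] = kio[1] overwrites the oldest character
 *        and kio_start advances to (1 + 1) % 4 = 2.
 * flush: the first unprinted character sits at
 *        kio[(kio_start + kio_len - kio_stored) % 4] = kio[3].
 */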
kernel/generic/src/console/kconsole.c (dba3e2c → 8b863a62)

 #include <str.h>
-#include <macros.h>
 #include <sysinfo/sysinfo.h>
…
 	 * Make sure the command is not already listed.
 	 */
-	list_foreach(cmd_list, cur) {
-		cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link);
-
+	list_foreach(cmd_list, link, cmd_info_t, hlp) {
 		if (hlp == cmd) {
 			/* The command is already there. */
…
 	cmd_info_t *cmd = NULL;
 
-	list_foreach(cmd_list, cur) {
-		cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link);
+	list_foreach(cmd_list, link, cmd_info_t, hlp) {
 		spinlock_lock(&hlp->lock);
kernel/generic/src/cpu/cpu.c (dba3e2c → 8b863a62)

 	size_t i;
 	for (i = 0; i < config.cpu_count; i++) {
-		cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES,
-		    FRAME_LOWMEM | FRAME_KA | FRAME_ATOMIC);
+		uintptr_t stack_phys = frame_alloc(STACK_FRAMES,
+		    FRAME_LOWMEM | FRAME_ATOMIC, STACK_SIZE - 1);
+		if (!stack_phys)
+			panic("Cannot allocate CPU stack.");
+
+		cpus[i].stack = (uint8_t *) PA2KA(stack_phys);
 		cpus[i].id = i;
 
 		irq_spinlock_initialize(&cpus[i].lock, "cpus[].lock");
 
-		unsigned int j;
-		for (j = 0; j < RQ_COUNT; j++) {
+		for (unsigned int j = 0; j < RQ_COUNT; j++) {
 			irq_spinlock_initialize(&cpus[i].rq[j].lock, "cpus[].rq[].lock");
 			list_initialize(&cpus[i].rq[j].rq);
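This call site illustrates the reworked allocator entry point used throughout the merge: frame_alloc() takes a frame count and an address constraint instead of a power-of-two order, returns a physical address, and no longer honors FRAME_KA, so the kernel-address translation is explicit. A hedged sketch of the full pairing with the new counted frame_free() (frame count and constraint are illustrative):

size_t frames = 4;              /* illustrative request */
uintptr_t constraint = 0xffff;  /* set bits must be zero in the base,
                                   i.e. the base is 64 KiB aligned */

uintptr_t phys = frame_alloc(frames, FRAME_LOWMEM | FRAME_ATOMIC,
    constraint);
if (phys) {
	void *kaddr = (void *) PA2KA(phys);   /* map explicitly */
	/* ... use the block via kaddr ... */
	frame_free(phys, frames);             /* free with matching count */
}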
kernel/generic/src/ddi/ddi.c (dba3e2c → 8b863a62)

Anonymous DMA memory now honors a physical address constraint supplied by the caller and is properly released again: dmamem_map_anonymous() allocates with the new frame_alloc(), marks the backend data as anonymous, and dmamem_unmap_anonymous() actually destroys the area instead of being a TODO stub.

 	backend_data.base = phys;
 	backend_data.frames = pages;
+	backend_data.anonymous = false;
…
 NO_TRACE static int dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
-    unsigned int flags, void **phys)
+    unsigned int flags, uintptr_t *phys)
…
-NO_TRACE static int dmamem_map_anonymous(size_t size, unsigned int map_flags,
-    unsigned int flags, void **phys, uintptr_t *virt, uintptr_t bound)
+NO_TRACE static int dmamem_map_anonymous(size_t size, uintptr_t constraint,
+    unsigned int map_flags, unsigned int flags, uintptr_t *phys,
+    uintptr_t *virt, uintptr_t bound)
 {
 	ASSERT(TASK);
 
-	size_t pages = SIZE2FRAMES(size);
-	uint8_t order;
-
-	/* We need the 2^order >= pages */
-	if (pages == 1)
-		order = 0;
-	else
-		order = fnzb(pages - 1) + 1;
-
-	*phys = frame_alloc_noreserve(order, 0);
-	if (*phys == NULL)
+	size_t frames = SIZE2FRAMES(size);
+	*phys = frame_alloc(frames, FRAME_ATOMIC, constraint);
+	if (*phys == 0)
 		return ENOMEM;
 
 	mem_backend_data_t backend_data;
-	backend_data.base = (uintptr_t) *phys;
-	backend_data.frames = pages;
+	backend_data.base = *phys;
+	backend_data.frames = frames;
+	backend_data.anonymous = true;
 
 	if (!as_area_create(TASK->as, map_flags, size,
 	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
-		frame_free_noreserve((uintptr_t) *phys);
+		frame_free(*phys, frames);
 		return ENOMEM;
 	}
…
 NO_TRACE static int dmamem_unmap_anonymous(uintptr_t virt)
 {
-	// TODO: implement unlocking & unmap
-	return EOK;
+	return as_area_destroy(TASK->as, virt);
 }
…
In sys_dmamem_map(), the local phys likewise becomes a uintptr_t, and the anonymous path first copies the caller's constraint from user space:

-	void *phys;
+	uintptr_t constraint;
+	int rc = copy_from_uspace(&constraint, phys_ptr,
+	    sizeof(constraint));
+	if (rc != EOK)
+		return rc;
+
+	uintptr_t phys;
 	uintptr_t virt = (uintptr_t) -1;
-	int rc = dmamem_map_anonymous(size, map_flags, flags,
+	rc = dmamem_map_anonymous(size, constraint, map_flags, flags,
 	    &phys, &virt, bound);
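The net effect is that a driver can now ask for DMA memory with a specific physical placement; the constraint travels from the caller through dmamem_map_anonymous() into frame_alloc(). A sketch of the kernel-side call (the constraint value is illustrative):

uintptr_t phys;
uintptr_t virt = (uintptr_t) -1;

/*
 * Anonymous DMA buffer whose physical base has no bits set in
 * 0xffffff, i.e. the low 24 bits are zero (16 MiB aligned).
 */
int rc = dmamem_map_anonymous(size, 0xffffff, map_flags, flags,
    &phys, &virt, bound);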
kernel/generic/src/debug/panic.c (dba3e2c → 8b863a62)

 	printf("THE=%p: ", THE);
 	if (THE != NULL) {
-		printf("pe=%" PRIun " thr=%p task=%p cpu=%p as=%p"
+		printf("pd=%" PRIun " thread=%p task=%p cpu=%p as=%p"
 		    " magic=%#" PRIx32 "\n", THE->preemption_disabled,
 		    THE->thread, THE->task, THE->cpu, THE->as, THE->magic);
+
+		if (THE->thread != NULL)
+			printf("thread=\"%s\"\n", THE->thread->name);
+
+		if (THE->task != NULL)
+			printf("task=\"%s\"\n", THE->task->name);
 	} else
 		printf("invalid\n");
kernel/generic/src/debug/stacktrace.c (dba3e2c → 8b863a62)

Cosmetic only: the STACK_FRAMES_MAX define is re-aligned and the loop condition gains explicit parentheses.

-	while (cnt++ < STACK_FRAMES_MAX &&
-	    ops->stack_trace_context_validate(ctx)) {
+	while ((cnt++ < STACK_FRAMES_MAX) &&
+	    (ops->stack_trace_context_validate(ctx))) {
 		if (ops->symbol_resolve &&
 		    ops->symbol_resolve(ctx->pc, &symbol, &offset)) {
kernel/generic/src/ipc/ipc.c (dba3e2c → 8b863a62)

 static void ipc_print_call_list(list_t *list)
 {
-	list_foreach(*list, cur) {
-		call_t *call = list_get_instance(cur, call_t, ab_link);
-
+	list_foreach(*list, ab_link, call_t, call) {
 #ifdef __32_BITS__
 		printf("%10p ", call);
kernel/generic/src/ipc/ipcrsc.c (dba3e2c → 8b863a62)

 	irq_spinlock_lock(&TASK->answerbox.lock, true);
 
-	list_foreach(TASK->answerbox.dispatched_calls, lst) {
-		call_t *call = list_get_instance(lst, call_t, ab_link);
+	list_foreach(TASK->answerbox.dispatched_calls, ab_link, call_t, call) {
 		if ((sysarg_t) call == callid) {
 			result = call;
kernel/generic/src/lib/func.c (dba3e2c → 8b863a62)

 #include <func.h>
-#include <print.h>
+#include <log.h>
 #include <cpu.h>
 #include <arch/asm.h>
…
 	if (CPU)
-		printf("cpu%u: halted\n", CPU->id);
+		log(LF_OTHER, LVL_NOTE, "cpu%u: halted", CPU->id);
 	else
-		printf("cpu: halted\n");
+		log(LF_OTHER, LVL_NOTE, "cpu: halted");
 
 	cpu_halt();
kernel/generic/src/lib/memfnc.c (dba3e2c → 8b863a62)

Whitespace fix only: a stray leading space before the opening /* of the copyright header is removed.
kernel/generic/src/lib/ra.c (dba3e2c → 8b863a62)

 	irq_spinlock_lock(&arena->lock, true);
-	list_foreach(arena->spans, cur) {
-		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
-
+	list_foreach(arena->spans, span_link, ra_span_t, span) {
 		base = ra_span_alloc(span, size, alignment);
 		if (base)
…
 	irq_spinlock_lock(&arena->lock, true);
-	list_foreach(arena->spans, cur) {
-		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
-
+	list_foreach(arena->spans, span_link, ra_span_t, span) {
 		if (iswithin(span->base, span->size, base, size)) {
 			ra_span_free(span, base, size);
kernel/generic/src/lib/rd.c (dba3e2c → 8b863a62)

-#include <print.h>
+#include <log.h>
 #include <lib/rd.h>
 #include <mm/frame.h>
…
 	sysinfo_set_item_val("rd.address.physical", NULL, (sysarg_t) base);
 
-	printf("RAM disk at %p (size %zu bytes)\n", (void *) base, size);
+	log(LF_OTHER, LVL_NOTE, "RAM disk at %p (size %zu bytes)", (void *) base,
+	    size);
kernel/generic/src/main/kinit.c (dba3e2c → 8b863a62)

All boot-time error reports move from printf() to the kernel log.

 #include <print.h>
+#include <log.h>
 #include <memstr.h>
…
 		thread_ready(thread);
 	} else
-		printf("Unable to create kcpulb thread for cpu%u\n", i);
+		log(LF_OTHER, LVL_ERROR,
+		    "Unable to create kcpulb thread for cpu%u", i);
…
-		printf("Unable to create kload thread\n");
+		log(LF_OTHER, LVL_ERROR, "Unable to create kload thread");
…
-		printf("Unable to create kconsole thread\n");
+		log(LF_OTHER, LVL_ERROR,
+		    "Unable to create kconsole thread");
…
 		if (init.tasks[i].paddr % FRAME_SIZE) {
-			printf("init[%zu]: Address is not frame aligned\n", i);
+			log(LF_OTHER, LVL_ERROR,
+			    "init[%zu]: Address is not frame aligned", i);
 			programs[i].task = NULL;
 			continue;
…
 	} else
-		printf("init[%zu]: Init binary load failed "
-		    "(error %d, loader status %u)\n", i, rc,
+		log(LF_OTHER, LVL_ERROR,
+		    "init[%zu]: Init binary load failed "
+		    "(error %d, loader status %u)", i, rc,
 		    programs[i].loader_status);
kernel/generic/src/main/main.c (dba3e2c → 8b863a62)

 #include <console/kconsole.h>
 #include <console/console.h>
+#include <log.h>
 #include <cpu.h>
 #include <align.h>
…
 	ipc_init();
 	event_init();
-	klog_init();
+	kio_init();
+	log_init();
 	stats_init();
kernel/generic/src/main/shutdown.c (dba3e2c → 8b863a62)

 #include <func.h>
 #include <print.h>
+#include <log.h>
 
 void reboot(void)
…
 #ifdef CONFIG_DEBUG
-	printf("Rebooting the system\n");
+	log(LF_OTHER, LVL_DEBUG, "Rebooting the system");
 #endif
kernel/generic/src/mm/as.c (dba3e2c → 8b863a62)

Every non-partial address space area now carries a share_info_t from creation (refcount 1, shared = false) instead of allocating one lazily on first sharing, and an explicit shared flag replaces the old sh_info != NULL tests. Backends gain optional create_shared_data()/destroy_shared_data() hooks, invoked at area creation and when the last sh_info reference is dropped. sh_info_remove_reference() moves ahead of its new callers, additionally calls backend->destroy_shared_data(), and releases pagemap frames through the counted frame_free(addr, 1). A missing mutex_unlock(&as->lock) on the overflows_into_positive() error path is fixed, and all walks over the B+tree leaf lists and used_space trees switch to the typed list_foreach(). Key hunks:

-	if (overflows_into_positive(*base, size))
+	if (overflows_into_positive(*base, size)) {
+		mutex_unlock(&as->lock);
 		return NULL;
+	}
…
 	area->resident = 0;
 	area->base = *base;
+	area->backend = backend;
 	area->sh_info = NULL;
-	area->backend = backend;
 
 	if (backend_data)
 		area->backend_data = *backend_data;
 	else
 		memsetb(&area->backend_data, sizeof(area->backend_data), 0);
 
+	share_info_t *si = NULL;
+
+	/*
+	 * Create the sharing info structure.
+	 * We do this in advance for every new area, even if it is not going
+	 * to be shared.
+	 */
+	if (!(attrs & AS_AREA_ATTR_PARTIAL)) {
+		si = (share_info_t *) malloc(sizeof(share_info_t), 0);
+		mutex_initialize(&si->lock, MUTEX_PASSIVE);
+		si->refcount = 1;
+		si->shared = false;
+		si->backend_shared_data = NULL;
+		si->backend = backend;
+		btree_create(&si->pagemap);
+
+		area->sh_info = si;
+
+		if (area->backend && area->backend->create_shared_data) {
+			if (!area->backend->create_shared_data(area)) {
+				free(area);
+				mutex_unlock(&as->lock);
+				sh_info_remove_reference(si);
+				return NULL;
+			}
+		}
+	}
+
 	if (area->backend && area->backend->create) {
 		if (!area->backend->create(area)) {
 			free(area);
 			mutex_unlock(&as->lock);
+			if (!(attrs & AS_AREA_ATTR_PARTIAL))
+				sh_info_remove_reference(si);
 			return NULL;
 		}
 	}
…
In as_area_resize() and as_area_change_flags(), the old sh_info test becomes a check of the shared flag under the sh_info lock, with all exit paths unlocking it; as_area_change_flags() additionally splits the combined test:

-	if ((area->sh_info) || (area->backend != &anon_backend)) {
-		/* Copying shared areas not supported yet */
+	if (area->backend != &anon_backend) {
 		/* Copying non-anonymous memory not supported yet */
 		mutex_unlock(&area->lock);
 		mutex_unlock(&as->lock);
 		return ENOTSUP;
 	}
+
+	mutex_lock(&area->sh_info->lock);
+	if (area->sh_info->shared) {
+		/* Copying shared areas not supported yet */
+		mutex_unlock(&area->sh_info->lock);
+		mutex_unlock(&area->lock);
+		mutex_unlock(&as->lock);
+		return ENOTSUP;
+	}
+	mutex_unlock(&area->sh_info->lock);
…
In as_area_destroy(), the reference is now dropped unconditionally:

-	if (area->sh_info)
-		sh_info_remove_reference(area->sh_info);
+	sh_info_remove_reference(area->sh_info);
…
as_area_share() no longer creates the sh_info; it only bumps the count and marks it shared, setting up the backend exactly once:

 	share_info_t *sh_info = src_area->sh_info;
-	if (!sh_info) {
-		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
-		mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
-		sh_info->refcount = 2;
-		btree_create(&sh_info->pagemap);
-		src_area->sh_info = sh_info;
-
+
+	mutex_lock(&sh_info->lock);
+	sh_info->refcount++;
+	bool shared = sh_info->shared;
+	sh_info->shared = true;
+	mutex_unlock(&sh_info->lock);
+
+	if (!shared) {
 		/*
 		 * Call the backend to setup sharing.
+		 * This only happens once for each sh_info.
 		 */
 		src_area->backend->share(src_area);
-	} else {
-		mutex_lock(&sh_info->lock);
-		sh_info->refcount++;
-		mutex_unlock(&sh_info->lock);
 	}
kernel/generic/src/mm/backend_anon.c (dba3e2c → 8b863a62)

 	.page_fault = anon_page_fault,
 	.frame_free = anon_frame_free,
+
+	.create_shared_data = NULL,
+	.destroy_shared_data = NULL
 };
…
anon_share() iterates used_space with the typed list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, node). anon_page_fault() now takes sh_info->lock up front and branches on sh_info->shared instead of sh_info != NULL, unlocking on every exit path (including the reserve_try_alloc() failure path). Frames are released through the counted interface:

 		 * the normal unreserving frame_free().
 		 */
-		frame_free(frame);
+		frame_free(frame, 1);
…
 		 * manipulate the reserve or it would be given back twice.
 		 */
-		frame_free_noreserve(frame);
+		frame_free_noreserve(frame, 1);
kernel/generic/src/mm/backend_elf.c (dba3e2c → 8b863a62)

 	.page_fault = elf_page_fault,
 	.frame_free = elf_frame_free,
+
+	.create_shared_data = NULL,
+	.destroy_shared_data = NULL
 };
…
 	start_anon = entry->p_vaddr + entry->p_filesz;
 
-	if (area->sh_info) {
+	mutex_lock(&area->sh_info->lock);
+	if (area->sh_info->shared) {
 		bool found = false;
…
-	if (dirty && area->sh_info) {
+	if (dirty && area->sh_info->shared) {
 		frame_reference_add(ADDR2PFN(frame));
 		btree_insert(&area->sh_info->pagemap, upage - area->base,
…
-	if (area->sh_info)
-		mutex_unlock(&area->sh_info->lock);
+	mutex_unlock(&area->sh_info->lock);
 
 	page_mapping_insert(AS, upage, frame, as_area_get_flags(area));
…
Both frame releases in elf_frame_free() become frame_free_noreserve(frame, 1).
kernel/generic/src/mm/backend_phys.c (dba3e2c → 8b863a62)

Anonymous physical areas (created by dmamem_map_anonymous()) now own their frames: the backend records the allocation in shared data and frees it when the last reference goes away.

 static bool phys_is_shareable(as_area_t *);
 
 static int phys_page_fault(as_area_t *, uintptr_t, pf_access_t);
+static bool phys_create_shared_data(as_area_t *);
+static void phys_destroy_shared_data(void *);
+
+typedef struct {
+	uintptr_t base;
+	size_t frames;
+} phys_shared_data_t;
 
 mem_backend_t phys_backend = {
…
 	.page_fault = phys_page_fault,
 	.frame_free = NULL,
+
+	.create_shared_data = phys_create_shared_data,
+	.destroy_shared_data = phys_destroy_shared_data
 };
…
 void phys_destroy(as_area_t *area)
 {
-	/* Nothing to do. */
+	/*
+	 * Nothing to do.
+	 * The anonymous frames, if any, are released in
+	 * phys_destroy_shared_data().
+	 */
 }
…
+bool phys_create_shared_data(as_area_t *area)
+{
+	/*
+	 * For anonymous phys areas, create the shared data.
+	 */
+	if (area->backend_data.anonymous) {
+		phys_shared_data_t *data;
+
+		data = (phys_shared_data_t *) malloc(sizeof(*data), 0);
+
+		data->base = area->backend_data.base;
+		data->frames = area->backend_data.frames;
+		area->sh_info->backend_shared_data = data;
+	}
+
+	return true;
+}
+
+void phys_destroy_shared_data(void *opaque_data)
+{
+	phys_shared_data_t *data = (phys_shared_data_t *) opaque_data;
+
+	if (data) {
+		frame_free(data->base, data->frames);
+		free(data);
+	}
+}
kernel/generic/src/mm/frame.c (dba3e2c → 8b863a62)

The buddy allocator is removed (mm/buddy.c is deleted by this changeset) and the frame allocator is rebuilt on the two-level bitmap: zones allocate arbitrary frame counts under an address constraint, and zones containing only high-priority (low physical) memory are searched last.

  * This file contains the physical frame allocator and memory zone management.
- * The frame allocator is built on top of the buddy allocator.
- *
- * @see buddy.c
+ * The frame allocator is built on top of the two-level bitmap structure.
  *
  */
…
 #include <print.h>
+#include <log.h>
 #include <align.h>
…
frame_initialize() now starts frames with refcount 0 and a NULL parent instead of refcount 1 and a buddy order; make_frame_index() and the entire buddy-system callback suite (zone_buddy_find_block(), zone_buddy_find_buddy(), zone_buddy_bisect(), zone_buddy_coalesce(), zone_buddy_set_order(), zone_buddy_get_order(), zone_buddy_mark_busy(), zone_buddy_mark_available() and zone_buddy_system_operations) are deleted; zones_insert_zone() reports errors via log() and no longer has to fix up buddy_system->data when shifting zones.

-/** @return True if zone can allocate specified order */
-NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order)
-{
+/** @return True if zone can allocate specified number of frames */
+NO_TRACE static bool zone_can_alloc(zone_t *zone, size_t count,
+    pfn_t constraint)
+{
+	/*
+	 * The function bitmap_allocate_range() does not modify
+	 * the bitmap if the last argument is NULL.
+	 */
+
 	return ((zone->flags & ZONE_AVAILABLE) &&
-	    buddy_system_can_alloc(zone->buddy_system, order));
-}
+	    bitmap_allocate_range(&zone->bitmap, count, zone->base,
+	    FRAME_LOWPRIO, constraint, NULL));
+}
…
Zone lookup is split into an unrestricted and a low-priority pass:

+NO_TRACE static size_t find_free_zone_all(size_t count, zone_flags_t flags,
+    pfn_t constraint, size_t hint)
+{
+	for (size_t pos = 0; pos < zones.count; pos++) {
+		size_t i = (pos + hint) % zones.count;
+
+		/* Check whether the zone meets the search criteria. */
+		if (!ZONE_FLAGS_MATCH(zones.info[i].flags, flags))
+			continue;
+
+		/* Check if the zone can satisfy the allocation request. */
+		if (zone_can_alloc(&zones.info[i], count, constraint))
+			return i;
+	}
+
+	return (size_t) -1;
+}
+
+/** Check if frame range is priority memory */
+NO_TRACE static bool is_high_priority(pfn_t base, size_t count)
+{
+	return (base + count <= FRAME_LOWPRIO);
+}
+
+NO_TRACE static size_t find_free_zone_lowprio(size_t count, zone_flags_t flags,
+    pfn_t constraint, size_t hint)
+{
+	for (size_t pos = 0; pos < zones.count; pos++) {
+		size_t i = (pos + hint) % zones.count;
+
+		/* Skip zones containing only high-priority memory. */
+		if (is_high_priority(zones.info[i].base, zones.info[i].count))
+			continue;
+
+		/* Check whether the zone meets the search criteria. */
+		if (!ZONE_FLAGS_MATCH(zones.info[i].flags, flags))
+			continue;
+
+		/* Check if the zone can satisfy the allocation request. */
+		if (zone_can_alloc(&zones.info[i], count, constraint))
+			return i;
+	}
+
+	return (size_t) -1;
+}
…
 NO_TRACE static size_t find_free_zone(size_t count, zone_flags_t flags,
+    pfn_t constraint, size_t hint)
 {
 	if (hint >= zones.count)
 		hint = 0;
 
-	size_t i = hint;
-	do {
-		/* ... single circular scan over all zones ... */
-	} while (i != hint);
-
-	return (size_t) -1;
+	/*
+	 * Prefer zones with low-priority memory over
+	 * zones with high-priority memory.
+	 */
+
+	size_t znum = find_free_zone_lowprio(count, flags, constraint, hint);
+	if (znum != (size_t) -1)
+		return znum;
+
+	/* Take all zones into account */
+	return find_free_zone_all(count, flags, constraint, hint);
 }
…
zone_get_frame() moves ahead of its callers and takes a plain index. Allocation and deallocation within a zone become:

+NO_TRACE static size_t zone_frame_alloc(zone_t *zone, size_t count,
+    pfn_t constraint)
+{
+	ASSERT(zone->flags & ZONE_AVAILABLE);
+
+	/* Allocate frames from zone */
+	size_t index;
+	int avail = bitmap_allocate_range(&zone->bitmap, count, zone->base,
+	    FRAME_LOWPRIO, constraint, &index);
+
+	ASSERT(avail);
+
+	/* Update frame reference count */
+	for (size_t i = 0; i < count; i++) {
+		frame_t *frame = zone_get_frame(zone, index + i);
+
+		ASSERT(frame->refcount == 0);
+		frame->refcount = 1;
+	}
+
+	/* Update zone information. */
+	zone->free_count -= count;
+	zone->busy_count += count;
+
+	return index;
+}
+
+NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t index)
+{
+	ASSERT(zone->flags & ZONE_AVAILABLE);
+
+	frame_t *frame = zone_get_frame(zone, index);
+
+	ASSERT(frame->refcount > 0);
+
+	if (!--frame->refcount) {
+		bitmap_set(&zone->bitmap, index, 0);
+
+		/* Update zone information. */
+		zone->free_count++;
+		zone->busy_count--;
+
+		return 1;
+	}
+
+	return 0;
+}
…
 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t index)
 {
 	ASSERT(zone->flags & ZONE_AVAILABLE);
 
-	frame_t *frame = zone_get_frame(zone, frame_idx);
-	if (frame->refcount)
+	frame_t *frame = zone_get_frame(zone, index);
+	if (frame->refcount > 0)
 		return;
 
-	link_t *link __attribute__ ((unused));
-
-	link = buddy_system_alloc_block(zone->buddy_system,
-	    &frame->buddy_link);
-
-	ASSERT(link);
+	frame->refcount = 1;
+	bitmap_set_range(&zone->bitmap, index, 1);
+
 	zone->free_count--;
 	reserve_force_alloc(1);
 }
…
 /** Merge two zones.
  *
- * Expect buddy to point to space at least zone_conf_size large.
  * Assume z1 & z2 are locked and compatible and zones lock is
  * locked.
  *
- * @param z1     First zone to merge.
- * @param z2     Second zone to merge.
- * @param old_z1 Original date of the first zone.
- * @param buddy  Merged zone buddy.
+ * @param z1       First zone to merge.
+ * @param z2       Second zone to merge.
+ * @param old_z1   Original data of the first zone.
+ * @param confdata Merged zone configuration data.
546 470 * 547 471 */ 548 472 NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1, 549 buddy_system_t *buddy)473 void *confdata) 550 474 { 551 475 ASSERT(zones.info[z1].flags & ZONE_AVAILABLE); … … 562 486 zones.info[z1].free_count += zones.info[z2].free_count; 563 487 zones.info[z1].busy_count += zones.info[z2].busy_count; 564 zones.info[z1].buddy_system = buddy; 565 566 uint8_t order = fnzb(zones.info[z1].count); 567 buddy_system_create(zones.info[z1].buddy_system, order, 568 &zone_buddy_system_operations, (void *) &zones.info[z1]); 569 570 zones.info[z1].frames = 571 (frame_t *) ((uint8_t *) zones.info[z1].buddy_system 572 + buddy_conf_size(order)); 573 574 /* This marks all frames busy */ 575 size_t i; 576 for (i = 0; i < zones.info[z1].count; i++) 577 frame_initialize(&zones.info[z1].frames[i]); 578 579 /* Copy frames from both zones to preserve full frame orders, 580 * parents etc. Set all free frames with refcount = 0 to 1, because 581 * we add all free frames to buddy allocator later again, clearing 582 * order to 0. Don't set busy frames with refcount = 0, as they 583 * will not be reallocated during merge and it would make later 584 * problems with allocation/free. 488 489 bitmap_initialize(&zones.info[z1].bitmap, zones.info[z1].count, 490 confdata + (sizeof(frame_t) * zones.info[z1].count)); 491 bitmap_clear_range(&zones.info[z1].bitmap, 0, zones.info[z1].count); 492 493 zones.info[z1].frames = (frame_t *) confdata; 494 495 /* 496 * Copy frames and bits from both zones to preserve parents, etc. 585 497 */ 586 for (i = 0; i < old_z1->count; i++) 498 499 for (size_t i = 0; i < old_z1->count; i++) { 500 bitmap_set(&zones.info[z1].bitmap, i, 501 bitmap_get(&old_z1->bitmap, i)); 587 502 zones.info[z1].frames[i] = old_z1->frames[i]; 588 589 for (i = 0; i < zones.info[z2].count; i++) 590 zones.info[z1].frames[base_diff + i] 591 = zones.info[z2].frames[i]; 592 593 i = 0; 594 while (i < zones.info[z1].count) { 595 if (zones.info[z1].frames[i].refcount) { 596 /* Skip busy frames */ 597 i += 1 << zones.info[z1].frames[i].buddy_order; 598 } else { 599 /* Free frames, set refcount = 1 600 * (all free frames have refcount == 0, we need not 601 * to check the order) 602 */ 603 zones.info[z1].frames[i].refcount = 1; 604 zones.info[z1].frames[i].buddy_order = 0; 605 i++; 606 } 607 } 608 609 /* Add free blocks from the original zone z1 */ 610 while (zone_can_alloc(old_z1, 0)) { 611 /* Allocate from the original zone */ 612 pfn_t frame_idx = zone_frame_alloc(old_z1, 0); 613 614 /* Free the frame from the merged zone */ 615 frame_t *frame = &zones.info[z1].frames[frame_idx]; 616 frame->refcount = 0; 617 buddy_system_free(zones.info[z1].buddy_system, &frame->buddy_link); 618 } 619 620 /* Add free blocks from the original zone z2 */ 621 while (zone_can_alloc(&zones.info[z2], 0)) { 622 /* Allocate from the original zone */ 623 pfn_t frame_idx = zone_frame_alloc(&zones.info[z2], 0); 624 625 /* Free the frame from the merged zone */ 626 frame_t *frame = &zones.info[z1].frames[base_diff + frame_idx]; 627 frame->refcount = 0; 628 buddy_system_free(zones.info[z1].buddy_system, &frame->buddy_link); 503 } 504 505 for (size_t i = 0; i < zones.info[z2].count; i++) { 506 bitmap_set(&zones.info[z1].bitmap, base_diff + i, 507 bitmap_get(&zones.info[z2].bitmap, i)); 508 zones.info[z1].frames[base_diff + i] = 509 zones.info[z2].frames[i]; 629 510 } 630 511 } … … 649 530 size_t cframes = SIZE2FRAMES(zone_conf_size(count)); 650 531 651 if ((pfn < zones.info[znum].base) 652 ||(pfn >= 
zones.info[znum].base + zones.info[znum].count))532 if ((pfn < zones.info[znum].base) || 533 (pfn >= zones.info[znum].base + zones.info[znum].count)) 653 534 return; 654 535 655 frame_t *frame __attribute__ ((unused)); 656 657 frame = &zones.info[znum].frames[pfn - zones.info[znum].base]; 658 ASSERT(!frame->buddy_order); 659 660 size_t i; 661 for (i = 0; i < cframes; i++) { 662 zones.info[znum].busy_count++; 536 for (size_t i = 0; i < cframes; i++) 663 537 (void) zone_frame_free(&zones.info[znum], 664 538 pfn - zones.info[znum].base + i); 665 }666 }667 668 /** Reduce allocated block to count of order 0 frames.669 *670 * The allocated block needs 2^order frames. Reduce all frames671 * in the block to order 0 and free the unneeded frames. This means that672 * when freeing the previously allocated block starting with frame_idx,673 * you have to free every frame.674 *675 * @param znum Zone.676 * @param frame_idx Index the first frame of the block.677 * @param count Allocated frames in block.678 *679 */680 NO_TRACE static void zone_reduce_region(size_t znum, pfn_t frame_idx,681 size_t count)682 {683 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);684 ASSERT(frame_idx + count < zones.info[znum].count);685 686 uint8_t order = zones.info[znum].frames[frame_idx].buddy_order;687 ASSERT((size_t) (1 << order) >= count);688 689 /* Reduce all blocks to order 0 */690 size_t i;691 for (i = 0; i < (size_t) (1 << order); i++) {692 frame_t *frame = &zones.info[znum].frames[i + frame_idx];693 frame->buddy_order = 0;694 if (!frame->refcount)695 frame->refcount = 1;696 ASSERT(frame->refcount == 1);697 }698 699 /* Free unneeded frames */700 for (i = count; i < (size_t) (1 << order); i++)701 (void) zone_frame_free(&zones.info[znum], i + frame_idx);702 539 } 703 540 … … 719 556 bool ret = true; 720 557 721 /* We can join only 2 zones with none existing inbetween, 558 /* 559 * We can join only 2 zones with none existing inbetween, 722 560 * the zones have to be available and with the same 723 561 * set of flags … … 733 571 + zones.info[z2].count)); 734 572 735 uint8_t order;736 if (cframes == 1)737 order = 0;738 else739 order = fnzb(cframes - 1) + 1;740 741 573 /* Allocate merged zone data inside one of the zones */ 742 574 pfn_t pfn; 743 if (zone_can_alloc(&zones.info[z1], order)) { 744 pfn = zones.info[z1].base + zone_frame_alloc(&zones.info[z1], order); 745 } else if (zone_can_alloc(&zones.info[z2], order)) { 746 pfn = zones.info[z2].base + zone_frame_alloc(&zones.info[z2], order); 575 if (zone_can_alloc(&zones.info[z1], cframes, 0)) { 576 pfn = zones.info[z1].base + 577 zone_frame_alloc(&zones.info[z1], cframes, 0); 578 } else if (zone_can_alloc(&zones.info[z2], cframes, 0)) { 579 pfn = zones.info[z2].base + 580 zone_frame_alloc(&zones.info[z2], cframes, 0); 747 581 } else { 748 582 ret = false; … … 752 586 /* Preserve original data from z1 */ 753 587 zone_t old_z1 = zones.info[z1]; 754 old_z1.buddy_system->data = (void *) &old_z1;755 588 756 589 /* Do zone merging */ 757 buddy_system_t *buddy = (buddy_system_t *) PA2KA(PFN2ADDR(pfn)); 758 zone_merge_internal(z1, z2, &old_z1, buddy); 759 760 /* Free unneeded config frames */ 761 zone_reduce_region(z1, pfn - zones.info[z1].base, cframes); 590 zone_merge_internal(z1, z2, &old_z1, (void *) PA2KA(PFN2ADDR(pfn))); 762 591 763 592 /* Subtract zone information from busy frames */ … … 772 601 773 602 /* Move zones down */ 774 size_t i; 775 for (i = z2 + 1; i < zones.count; i++) { 603 for (size_t i = z2 + 1; i < zones.count; i++) 776 604 zones.info[i - 1] = 
zones.info[i]; 777 if (zones.info[i - 1].buddy_system != NULL)778 zones.info[i - 1].buddy_system->data =779 (void *) &zones.info[i - 1];780 }781 605 782 606 zones.count--; … … 797 621 void zone_merge_all(void) 798 622 { 799 size_t i = 0; 623 size_t i = 1; 624 800 625 while (i < zones.count) { 801 if (!zone_merge(i , i + 1))626 if (!zone_merge(i - 1, i)) 802 627 i++; 803 628 } … … 806 631 /** Create new frame zone. 807 632 * 808 * @param zone Zone to construct.809 * @param buddy Address of buddy system configuration information.810 * @param start Physical address of the first frame within thezone.811 * @param count Count of frames in zone.812 * @param flags Zone flags.633 * @param zone Zone to construct. 634 * @param start Physical address of the first frame within the zone. 635 * @param count Count of frames in zone. 636 * @param flags Zone flags. 637 * @param confdata Configuration data of the zone. 813 638 * 814 639 * @return Initialized zone. 815 640 * 816 641 */ 817 NO_TRACE static void zone_construct(zone_t *zone, buddy_system_t *buddy,818 pfn_t start, size_t count, zone_flags_t flags)642 NO_TRACE static void zone_construct(zone_t *zone, pfn_t start, size_t count, 643 zone_flags_t flags, void *confdata) 819 644 { 820 645 zone->base = start; … … 823 648 zone->free_count = count; 824 649 zone->busy_count = 0; 825 zone->buddy_system = buddy;826 650 827 651 if (flags & ZONE_AVAILABLE) { 828 652 /* 829 * Compute order for buddy system and initialize 653 * Initialize frame bitmap (located after the array of 654 * frame_t structures in the configuration space). 830 655 */ 831 uint8_t order = fnzb(count);832 b uddy_system_create(zone->buddy_system, order,833 &zone_buddy_system_operations, (void *) zone);834 835 /* Allocate frames _after_ the confframe */836 837 /* Check sizes */838 zone->frames = (frame_t *) ((uint8_t *) zone->buddy_system +839 buddy_conf_size(order));840 841 size_t i;842 for ( i = 0; i < count; i++)656 657 bitmap_initialize(&zone->bitmap, count, confdata + 658 (sizeof(frame_t) * count)); 659 bitmap_clear_range(&zone->bitmap, 0, count); 660 661 /* 662 * Initialize the array of frame_t structures. 663 */ 664 665 zone->frames = (frame_t *) confdata; 666 667 for (size_t i = 0; i < count; i++) 843 668 frame_initialize(&zone->frames[i]); 844 845 /* Stuffing frames */ 846 for (i = 0; i < count; i++) { 847 zone->frames[i].refcount = 0; 848 buddy_system_free(zone->buddy_system, &zone->frames[i].buddy_link); 849 } 850 } else 669 } else { 670 bitmap_initialize(&zone->bitmap, 0, NULL); 851 671 zone->frames = NULL; 672 } 852 673 } 853 674 … … 861 682 size_t zone_conf_size(size_t count) 862 683 { 863 return (count * sizeof(frame_t) + b uddy_conf_size(fnzb(count)));684 return (count * sizeof(frame_t) + bitmap_size(count)); 864 685 } 865 686 … … 867 688 pfn_t zone_external_conf_alloc(size_t count) 868 689 { 869 size_t size = zone_conf_size(count); 870 size_t order = ispwr2(size) ? fnzb(size) : (fnzb(size) + 1); 871 872 return ADDR2PFN((uintptr_t) frame_alloc(order - FRAME_WIDTH, 873 FRAME_LOWMEM | FRAME_ATOMIC)); 690 size_t frames = SIZE2FRAMES(zone_conf_size(count)); 691 692 return ADDR2PFN((uintptr_t) 693 frame_alloc(frames, FRAME_LOWMEM | FRAME_ATOMIC, 0)); 874 694 } 875 695 … … 879 699 * @param count Size of zone in frames. 880 700 * @param confframe Where configuration frames are supposed to be. 881 * Automatically checks ,that we will not disturb the701 * Automatically checks that we will not disturb the 882 702 * kernel and possibly init. 
If confframe is given 883 703 * _outside_ this zone, it is expected, that the area is … … 896 716 897 717 if (flags & ZONE_AVAILABLE) { /* Create available zone */ 898 /* Theoretically we could have NULL here, practically make sure 718 /* 719 * Theoretically we could have NULL here, practically make sure 899 720 * nobody tries to do that. If some platform requires, remove 900 721 * the assert 901 722 */ 902 723 ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL)); 903 724 904 725 /* Update the known end of physical memory. */ 905 726 config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count)); 906 727 907 /* If confframe is supposed to be inside our zone, then make sure 728 /* 729 * If confframe is supposed to be inside our zone, then make sure 908 730 * it does not span kernel & init 909 731 */ 910 732 size_t confcount = SIZE2FRAMES(zone_conf_size(count)); 733 911 734 if ((confframe >= start) && (confframe < start + count)) { 912 735 for (; confframe < start + count; confframe++) { … … 921 744 922 745 bool overlap = false; 923 size_t i; 924 for (i = 0; i < init.cnt; i++) 746 for (size_t i = 0; i < init.cnt; i++) { 925 747 if (overlaps(addr, PFN2ADDR(confcount), 926 748 init.tasks[i].paddr, … … 929 751 break; 930 752 } 753 } 754 931 755 if (overlap) 932 756 continue; … … 945 769 } 946 770 947 buddy_system_t *buddy = (buddy_system_t*) PA2KA(PFN2ADDR(confframe));948 zone_construct(&zones.info[znum], buddy, start, count, flags);771 void *confdata = (void *) PA2KA(PFN2ADDR(confframe)); 772 zone_construct(&zones.info[znum], start, count, flags, confdata); 949 773 950 774 /* If confdata in zone, mark as unavailable */ 951 775 if ((confframe >= start) && (confframe < start + count)) { 952 size_t i; 953 for (i = confframe; i < confframe + confcount; i++) 776 for (size_t i = confframe; i < confframe + confcount; i++) 954 777 zone_mark_unavailable(&zones.info[znum], 955 778 i - zones.info[znum].base); … … 967 790 return (size_t) -1; 968 791 } 969 zone_construct(&zones.info[znum], NULL, start, count, flags); 792 793 zone_construct(&zones.info[znum], start, count, flags, NULL); 970 794 971 795 irq_spinlock_unlock(&zones.lock, true); … … 1009 833 } 1010 834 1011 /** Allocate power-of-two frames of physical memory. 1012 * 1013 * @param order Allocate exactly 2^order frames. 1014 * @param flags Flags for host zone selection and address processing. 1015 * @param pzone Preferred zone. 835 /** Allocate frames of physical memory. 836 * 837 * @param count Number of continuous frames to allocate. 838 * @param flags Flags for host zone selection and address processing. 839 * @param constraint Indication of physical address bits that cannot be 840 * set in the address of the first allocated frame. 841 * @param pzone Preferred zone. 1016 842 * 1017 843 * @return Physical address of the allocated frame. 1018 844 * 1019 845 */ 1020 void *frame_alloc_generic(uint8_t order, frame_flags_t flags, size_t *pzone) 1021 { 1022 size_t size = ((size_t) 1) << order; 846 uintptr_t frame_alloc_generic(size_t count, frame_flags_t flags, 847 uintptr_t constraint, size_t *pzone) 848 { 849 ASSERT(count > 0); 850 1023 851 size_t hint = pzone ? (*pzone) : 0; 852 pfn_t frame_constraint = ADDR2PFN(constraint); 1024 853 1025 854 /* 1026 855 * If not told otherwise, we must first reserve the memory. 
1027 856 */ 1028 if (!(flags & FRAME_NO_RESERVE)) 1029 reserve_force_alloc( size);1030 857 if (!(flags & FRAME_NO_RESERVE)) 858 reserve_force_alloc(count); 859 1031 860 loop: 1032 861 irq_spinlock_lock(&zones.lock, true); … … 1035 864 * First, find suitable frame zone. 1036 865 */ 1037 size_t znum = find_free_zone(order, 1038 FRAME_TO_ZONE_FLAGS(flags), hint); 1039 1040 /* If no memory, reclaim some slab memory, 1041 if it does not help, reclaim all */ 866 size_t znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags), 867 frame_constraint, hint); 868 869 /* 870 * If no memory, reclaim some slab memory, 871 * if it does not help, reclaim all. 872 */ 1042 873 if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) { 1043 874 irq_spinlock_unlock(&zones.lock, true); … … 1046 877 1047 878 if (freed > 0) 1048 znum = find_free_zone( order,1049 FRAME_TO_ZONE_FLAGS(flags), hint);879 znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags), 880 frame_constraint, hint); 1050 881 1051 882 if (znum == (size_t) -1) { … … 1055 886 1056 887 if (freed > 0) 1057 znum = find_free_zone( order,1058 FRAME_TO_ZONE_FLAGS(flags), hint);888 znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags), 889 frame_constraint, hint); 1059 890 } 1060 891 } … … 1063 894 if (flags & FRAME_ATOMIC) { 1064 895 irq_spinlock_unlock(&zones.lock, true); 896 1065 897 if (!(flags & FRAME_NO_RESERVE)) 1066 reserve_free(size); 1067 return NULL; 898 reserve_free(count); 899 900 return 0; 1068 901 } 1069 902 1070 #ifdef CONFIG_DEBUG1071 903 size_t avail = frame_total_free_get_internal(); 1072 #endif1073 904 1074 905 irq_spinlock_unlock(&zones.lock, true); 1075 906 1076 907 if (!THREAD) 1077 panic("Cannot wait for memory to become available."); 908 panic("Cannot wait for %zu frames to become available " 909 "(%zu available).", count, avail); 1078 910 1079 911 /* … … 1082 914 1083 915 #ifdef CONFIG_DEBUG 1084 printf("Thread %" PRIu64 " waiting for %zu frames, " 1085 "%zu available.\n", THREAD->tid, size, avail); 916 log(LF_OTHER, LVL_DEBUG, 917 "Thread %" PRIu64 " waiting for %zu frames " 918 "%zu available.", THREAD->tid, count, avail); 1086 919 #endif 1087 920 1088 921 /* 1089 * Since the mem_avail_mtx is an active mutex, we need to disable interrupts1090 * to prevent deadlock with TLB shootdown.922 * Since the mem_avail_mtx is an active mutex, we need to 923 * disable interrupts to prevent deadlock with TLB shootdown. 
1091 924 */ 1092 925 ipl_t ipl = interrupts_disable(); … … 1094 927 1095 928 if (mem_avail_req > 0) 1096 mem_avail_req = min(mem_avail_req, size);929 mem_avail_req = min(mem_avail_req, count); 1097 930 else 1098 mem_avail_req = size; 931 mem_avail_req = count; 932 1099 933 size_t gen = mem_avail_gen; 1100 934 … … 1106 940 1107 941 #ifdef CONFIG_DEBUG 1108 printf("Thread %" PRIu64 " woken up.\n", THREAD->tid); 942 log(LF_OTHER, LVL_DEBUG, "Thread %" PRIu64 " woken up.", 943 THREAD->tid); 1109 944 #endif 1110 945 … … 1112 947 } 1113 948 1114 pfn_t pfn = zone_frame_alloc(&zones.info[znum], order)1115 + zones.info[znum].base;949 pfn_t pfn = zone_frame_alloc(&zones.info[znum], count, 950 frame_constraint) + zones.info[znum].base; 1116 951 1117 952 irq_spinlock_unlock(&zones.lock, true); … … 1120 955 *pzone = znum; 1121 956 1122 if (flags & FRAME_KA) 1123 return (void *) PA2KA(PFN2ADDR(pfn)); 1124 1125 return (void *) PFN2ADDR(pfn); 1126 } 1127 1128 void *frame_alloc(uint8_t order, frame_flags_t flags) 1129 { 1130 return frame_alloc_generic(order, flags, NULL); 1131 } 1132 1133 void *frame_alloc_noreserve(uint8_t order, frame_flags_t flags) 1134 { 1135 return frame_alloc_generic(order, flags | FRAME_NO_RESERVE, NULL); 1136 } 1137 1138 /** Free a frame. 1139 * 1140 * Find respective frame structure for supplied physical frame address. 1141 * Decrement frame reference count. If it drops to zero, move the frame 1142 * structure to free list. 1143 * 1144 * @param frame Physical Address of of the frame to be freed. 957 return PFN2ADDR(pfn); 958 } 959 960 uintptr_t frame_alloc(size_t count, frame_flags_t flags, uintptr_t constraint) 961 { 962 return frame_alloc_generic(count, flags, constraint, NULL); 963 } 964 965 /** Free frames of physical memory. 966 * 967 * Find respective frame structures for supplied physical frames. 968 * Decrement each frame reference count. If it drops to zero, mark 969 * the frames as available. 970 * 971 * @param start Physical Address of the first frame to be freed. 972 * @param count Number of frames to free. 1145 973 * @param flags Flags to control memory reservation. 1146 974 * 1147 975 */ 1148 void frame_free_generic(uintptr_t frame, frame_flags_t flags)1149 { 1150 size_t size;976 void frame_free_generic(uintptr_t start, size_t count, frame_flags_t flags) 977 { 978 size_t freed = 0; 1151 979 1152 980 irq_spinlock_lock(&zones.lock, true); 1153 981 1154 /* 1155 * First, find host frame zone for addr. 1156 */ 1157 pfn_t pfn = ADDR2PFN(frame); 1158 size_t znum = find_zone(pfn, 1, 0); 1159 1160 ASSERT(znum != (size_t) -1); 1161 1162 size = zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base); 982 for (size_t i = 0; i < count; i++) { 983 /* 984 * First, find host frame zone for addr. 985 */ 986 pfn_t pfn = ADDR2PFN(start) + i; 987 size_t znum = find_zone(pfn, 1, 0); 988 989 ASSERT(znum != (size_t) -1); 990 991 freed += zone_frame_free(&zones.info[znum], 992 pfn - zones.info[znum].base); 993 } 1163 994 1164 995 irq_spinlock_unlock(&zones.lock, true); … … 1166 997 /* 1167 998 * Signal that some memory has been freed. 999 * Since the mem_avail_mtx is an active mutex, 1000 * we need to disable interruptsto prevent deadlock 1001 * with TLB shootdown. 1168 1002 */ 1169 1170 1171 /* 1172 * Since the mem_avail_mtx is an active mutex, we need to disable interrupts 1173 * to prevent deadlock with TLB shootdown. 
1174 */ 1003 1175 1004 ipl_t ipl = interrupts_disable(); 1176 1005 mutex_lock(&mem_avail_mtx); 1006 1177 1007 if (mem_avail_req > 0) 1178 mem_avail_req -= min(mem_avail_req, size);1008 mem_avail_req -= min(mem_avail_req, freed); 1179 1009 1180 1010 if (mem_avail_req == 0) { … … 1182 1012 condvar_broadcast(&mem_avail_cv); 1183 1013 } 1014 1184 1015 mutex_unlock(&mem_avail_mtx); 1185 1016 interrupts_restore(ipl); 1186 1017 1187 1018 if (!(flags & FRAME_NO_RESERVE)) 1188 reserve_free( size);1189 } 1190 1191 void frame_free(uintptr_t frame )1192 { 1193 frame_free_generic(frame, 0);1194 } 1195 1196 void frame_free_noreserve(uintptr_t frame )1197 { 1198 frame_free_generic(frame, FRAME_NO_RESERVE);1019 reserve_free(freed); 1020 } 1021 1022 void frame_free(uintptr_t frame, size_t count) 1023 { 1024 frame_free_generic(frame, count, 0); 1025 } 1026 1027 void frame_free_noreserve(uintptr_t frame, size_t count) 1028 { 1029 frame_free_generic(frame, count, FRAME_NO_RESERVE); 1199 1030 } 1200 1031 … … 1230 1061 irq_spinlock_lock(&zones.lock, true); 1231 1062 1232 size_t i; 1233 for (i = 0; i < count; i++) { 1063 for (size_t i = 0; i < count; i++) { 1234 1064 size_t znum = find_zone(start + i, 1, 0); 1065 1235 1066 if (znum == (size_t) -1) /* PFN not found */ 1236 1067 continue; … … 1257 1088 /* Tell the architecture to create some memory */ 1258 1089 frame_low_arch_init(); 1090 1259 1091 if (config.cpu_active == 1) { 1260 1092 frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), … … 1263 1095 SIZE2FRAMES(config.stack_size)); 1264 1096 1265 size_t i; 1266 for (i = 0; i < init.cnt; i++) { 1267 pfn_t pfn = ADDR2PFN(init.tasks[i].paddr); 1268 frame_mark_unavailable(pfn, 1097 for (size_t i = 0; i < init.cnt; i++) 1098 frame_mark_unavailable(ADDR2PFN(init.tasks[i].paddr), 1269 1099 SIZE2FRAMES(init.tasks[i].size)); 1270 }1271 1100 1272 1101 if (ballocs.size) … … 1274 1103 SIZE2FRAMES(ballocs.size)); 1275 1104 1276 /* Black list first frame, as allocating NULL would 1105 /* 1106 * Blacklist first frame, as allocating NULL would 1277 1107 * fail in some places 1278 1108 */ 1279 1109 frame_mark_unavailable(0, 1); 1280 1110 } 1111 1281 1112 frame_high_arch_init(); 1282 1113 } … … 1284 1115 /** Adjust bounds of physical memory region according to low/high memory split. 1285 1116 * 1286 * @param low[in] If true, the adjustment is performed to make the region 1287 * fit in the low memory. Otherwise the adjustment is 1288 * performed to make the region fit in the high memory. 1289 * @param basep[inout] Pointer to a variable which contains the region's base 1290 * address and which may receive the adjusted base address. 1291 * @param sizep[inout] Pointer to a variable which contains the region's size 1292 * and which may receive the adjusted size. 1293 * @retun True if the region still exists even after the 1294 * adjustment, false otherwise. 1117 * @param low[in] If true, the adjustment is performed to make the region 1118 * fit in the low memory. Otherwise the adjustment is 1119 * performed to make the region fit in the high memory. 1120 * @param basep[inout] Pointer to a variable which contains the region's base 1121 * address and which may receive the adjusted base address. 1122 * @param sizep[inout] Pointer to a variable which contains the region's size 1123 * and which may receive the adjusted size. 1124 * 1125 * @return True if the region still exists even after the adjustment. 1126 * @return False otherwise. 
1127 * 1295 1128 */ 1296 1129 bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep) 1297 1130 { 1298 1131 uintptr_t limit = KA2PA(config.identity_base) + config.identity_size; 1299 1132 1300 1133 if (low) { 1301 1134 if (*basep > limit) 1302 1135 return false; 1136 1303 1137 if (*basep + *sizep > limit) 1304 1138 *sizep = limit - *basep; … … 1306 1140 if (*basep + *sizep <= limit) 1307 1141 return false; 1142 1308 1143 if (*basep <= limit) { 1309 1144 *sizep -= limit - *basep; … … 1311 1146 } 1312 1147 } 1148 1313 1149 return true; 1314 1150 } … … 1322 1158 1323 1159 uint64_t total = 0; 1324 size_t i;1325 for ( i = 0; i < zones.count; i++)1160 1161 for (size_t i = 0; i < zones.count; i++) 1326 1162 total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1327 1163 … … 1346 1182 *free = 0; 1347 1183 1348 size_t i; 1349 for (i = 0; i < zones.count; i++) { 1184 for (size_t i = 0; i < zones.count; i++) { 1350 1185 *total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1351 1186 … … 1375 1210 /* 1376 1211 * Because printing may require allocation of memory, we may not hold 1377 * the frame allocator locks when printing zone statistics. Therefore,1212 * the frame allocator locks when printing zone statistics. Therefore, 1378 1213 * we simply gather the statistics under the protection of the locks and 1379 1214 * print the statistics when the locks have been released. … … 1384 1219 */ 1385 1220 1386 size_t i; 1387 for (i = 0;; i++) { 1221 size_t free_lowmem = 0; 1222 size_t free_highmem = 0; 1223 size_t free_highprio = 0; 1224 1225 for (size_t i = 0;; i++) { 1388 1226 irq_spinlock_lock(&zones.lock, true); 1389 1227 … … 1393 1231 } 1394 1232 1395 uintptr_t base = PFN2ADDR(zones.info[i].base); 1233 pfn_t fbase = zones.info[i].base; 1234 uintptr_t base = PFN2ADDR(fbase); 1396 1235 size_t count = zones.info[i].count; 1397 1236 zone_flags_t flags = zones.info[i].flags; … … 1399 1238 size_t busy_count = zones.info[i].busy_count; 1400 1239 1240 bool available = ((flags & ZONE_AVAILABLE) != 0); 1241 bool lowmem = ((flags & ZONE_LOWMEM) != 0); 1242 bool highmem = ((flags & ZONE_HIGHMEM) != 0); 1243 bool highprio = is_high_priority(fbase, count); 1244 1245 if (available) { 1246 if (lowmem) 1247 free_lowmem += free_count; 1248 1249 if (highmem) 1250 free_highmem += free_count; 1251 1252 if (highprio) { 1253 free_highprio += free_count; 1254 } else { 1255 /* 1256 * Walk all frames of the zone and examine 1257 * all high priority memory to get accurate 1258 * statistics. 
1259 */ 1260 1261 for (size_t index = 0; index < count; index++) { 1262 if (is_high_priority(fbase + index, 0)) { 1263 if (!bitmap_get(&zones.info[i].bitmap, index)) 1264 free_highprio++; 1265 } else 1266 break; 1267 } 1268 } 1269 } 1270 1401 1271 irq_spinlock_unlock(&zones.lock, true); 1402 1403 bool available = ((flags & ZONE_AVAILABLE) != 0);1404 1272 1405 1273 printf("%-4zu", i); … … 1426 1294 printf("\n"); 1427 1295 } 1296 1297 printf("\n"); 1298 1299 uint64_t size; 1300 const char *size_suffix; 1301 1302 bin_order_suffix(FRAMES2SIZE(free_lowmem), &size, &size_suffix, 1303 false); 1304 printf("Available low memory: %zu frames (%" PRIu64 " %s)\n", 1305 free_lowmem, size, size_suffix); 1306 1307 bin_order_suffix(FRAMES2SIZE(free_highmem), &size, &size_suffix, 1308 false); 1309 printf("Available high memory: %zu frames (%" PRIu64 " %s)\n", 1310 free_highmem, size, size_suffix); 1311 1312 bin_order_suffix(FRAMES2SIZE(free_highprio), &size, &size_suffix, 1313 false); 1314 printf("Available high priority: %zu frames (%" PRIu64 " %s)\n", 1315 free_highprio, size, size_suffix); 1428 1316 } 1429 1317 … … 1438 1326 size_t znum = (size_t) -1; 1439 1327 1440 size_t i; 1441 for (i = 0; i < zones.count; i++) { 1328 for (size_t i = 0; i < zones.count; i++) { 1442 1329 if ((i == num) || (PFN2ADDR(zones.info[i].base) == num)) { 1443 1330 znum = i; … … 1452 1339 } 1453 1340 1454 uintptr_t base = PFN2ADDR(zones.info[i].base); 1455 zone_flags_t flags = zones.info[i].flags; 1456 size_t count = zones.info[i].count; 1457 size_t free_count = zones.info[i].free_count; 1458 size_t busy_count = zones.info[i].busy_count; 1341 size_t free_lowmem = 0; 1342 size_t free_highmem = 0; 1343 size_t free_highprio = 0; 1344 1345 pfn_t fbase = zones.info[znum].base; 1346 uintptr_t base = PFN2ADDR(fbase); 1347 zone_flags_t flags = zones.info[znum].flags; 1348 size_t count = zones.info[znum].count; 1349 size_t free_count = zones.info[znum].free_count; 1350 size_t busy_count = zones.info[znum].busy_count; 1351 1352 bool available = ((flags & ZONE_AVAILABLE) != 0); 1353 bool lowmem = ((flags & ZONE_LOWMEM) != 0); 1354 bool highmem = ((flags & ZONE_HIGHMEM) != 0); 1355 bool highprio = is_high_priority(fbase, count); 1356 1357 if (available) { 1358 if (lowmem) 1359 free_lowmem = free_count; 1360 1361 if (highmem) 1362 free_highmem = free_count; 1363 1364 if (highprio) { 1365 free_highprio = free_count; 1366 } else { 1367 /* 1368 * Walk all frames of the zone and examine 1369 * all high priority memory to get accurate 1370 * statistics. 1371 */ 1372 1373 for (size_t index = 0; index < count; index++) { 1374 if (is_high_priority(fbase + index, 0)) { 1375 if (!bitmap_get(&zones.info[znum].bitmap, index)) 1376 free_highprio++; 1377 } else 1378 break; 1379 } 1380 } 1381 } 1459 1382 1460 1383 irq_spinlock_unlock(&zones.lock, true); 1461 1462 bool available = ((flags & ZONE_AVAILABLE) != 0);1463 1384 1464 1385 uint64_t size; 1465 1386 const char *size_suffix; 1387 1466 1388 bin_order_suffix(FRAMES2SIZE(count), &size, &size_suffix, false); 1467 1389 1468 printf("Zone number: %zu\n", znum);1469 printf("Zone base address: %p\n", (void *) base);1470 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count,1390 printf("Zone number: %zu\n", znum); 1391 printf("Zone base address: %p\n", (void *) base); 1392 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count, 1471 1393 size, size_suffix); 1472 printf("Zone flags: %c%c%c%c%c\n",1394 printf("Zone flags: %c%c%c%c%c\n", 1473 1395 available ? 'A' : '-', 1474 1396 (flags & ZONE_RESERVED) ? 
'R' : '-', … … 1480 1402 bin_order_suffix(FRAMES2SIZE(busy_count), &size, &size_suffix, 1481 1403 false); 1482 printf("Allocated space: %zu frames (%" PRIu64 " %s)\n",1404 printf("Allocated space: %zu frames (%" PRIu64 " %s)\n", 1483 1405 busy_count, size, size_suffix); 1406 1484 1407 bin_order_suffix(FRAMES2SIZE(free_count), &size, &size_suffix, 1485 1408 false); 1486 printf("Available space: %zu frames (%" PRIu64 " %s)\n",1409 printf("Available space: %zu frames (%" PRIu64 " %s)\n", 1487 1410 free_count, size, size_suffix); 1411 1412 bin_order_suffix(FRAMES2SIZE(free_lowmem), &size, &size_suffix, 1413 false); 1414 printf("Available low memory: %zu frames (%" PRIu64 " %s)\n", 1415 free_lowmem, size, size_suffix); 1416 1417 bin_order_suffix(FRAMES2SIZE(free_highmem), &size, &size_suffix, 1418 false); 1419 printf("Available high memory: %zu frames (%" PRIu64 " %s)\n", 1420 free_highmem, size, size_suffix); 1421 1422 bin_order_suffix(FRAMES2SIZE(free_highprio), &size, &size_suffix, 1423 false); 1424 printf("Available high priority: %zu frames (%" PRIu64 " %s)\n", 1425 free_highprio, size, size_suffix); 1488 1426 } 1489 1427 } -
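The frame.c rework above replaces the per-zone buddy allocator with a plain frame bitmap searched by bitmap_allocate_range(). Below is a minimal first-fit sketch of that idea, assuming nothing beyond standard C: the helper names try_alloc_range(), bm_get() and bm_set() are hypothetical stand-ins for the kernel's bitmap_allocate_range(), bitmap_get() and bitmap_set(), the constraint argument has the meaning documented in zone_frame_alloc() (bits that must not be set in the PFN of the first allocated frame), and the low/high priority boundary handling of the real call is omitted.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t pfn_t;

static bool bm_get(const uint8_t *bits, size_t i)
{
	return (bits[i / 8] >> (i % 8)) & 1;
}

static void bm_set(uint8_t *bits, size_t i, bool val)
{
	if (val)
		bits[i / 8] |= (uint8_t) (1 << (i % 8));
	else
		bits[i / 8] &= (uint8_t) ~(1 << (i % 8));
}

/* Find count consecutive zero bits whose first PFN satisfies constraint. */
static bool try_alloc_range(uint8_t *bits, size_t elements, size_t count,
    pfn_t base, pfn_t constraint, size_t *index)
{
	if (count == 0)
		return false;

	for (size_t start = 0; start + count <= elements; start++) {
		/* The first frame's PFN must not have any constraint bit set. */
		if ((base + start) & constraint)
			continue;

		size_t i;
		for (i = 0; i < count; i++) {
			if (bm_get(bits, start + i))
				break;
		}

		if (i != count)
			continue;

		/* Mark the whole range busy and report its first index. */
		for (i = 0; i < count; i++)
			bm_set(bits, start + i, true);

		*index = start;
		return true;
	}

	return false;
}

Compared with the buddy system, a bitmap gives exact-size allocations (no rounding up to a power of two) at the cost of a linear scan over the zone's frames.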
kernel/generic/src/mm/km.c
rdba3e2c r8b863a62 239 239 uintptr_t km_temporary_page_get(uintptr_t *framep, frame_flags_t flags) 240 240 { 241 uintptr_t frame;242 uintptr_t page;243 244 241 ASSERT(THREAD); 245 242 ASSERT(framep); 246 243 ASSERT(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC))); 247 244 248 245 /* 249 246 * Allocate a frame, preferably from high memory. 250 247 */ 251 frame = (uintptr_t) frame_alloc(ONE_FRAME, 252 FRAME_HIGHMEM | FRAME_ATOMIC | flags); 248 uintptr_t page; 249 uintptr_t frame = 250 frame_alloc(1, FRAME_HIGHMEM | FRAME_ATOMIC | flags, 0); 253 251 if (frame) { 254 252 page = km_map(frame, PAGE_SIZE, 255 253 PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE); 256 ASSERT(page); // FIXME 254 255 // FIXME 256 ASSERT(page); 257 257 } else { 258 frame = (uintptr_t) frame_alloc(ONE_FRAME, 259 FRAME_LOWMEM | flags); 258 frame = frame_alloc(1, FRAME_LOWMEM | flags, 0); 260 259 if (!frame) 261 260 return (uintptr_t) NULL; 261 262 262 page = PA2KA(frame); 263 263 } 264 264 265 265 *framep = frame; 266 return page; 266 return page; 267 267 } 268 268 -
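The km.c hunk shows the new frame_alloc() calling convention introduced by this changeset: a frame count instead of a power-of-two order, an explicit physical-address constraint (0 meaning unconstrained), and a plain physical address as the return value, with 0 signalling failure under FRAME_ATOMIC. A hedged usage sketch (the wrapper name is hypothetical; frame_alloc() and the FRAME_* flags are those used above):

/* Try a single high-memory frame first without sleeping; if that
 * fails, fall back to low memory (this second call may block). */
static uintptr_t one_frame_prefer_highmem(void)
{
	uintptr_t frame = frame_alloc(1, FRAME_HIGHMEM | FRAME_ATOMIC, 0);
	if (frame != 0)
		return frame;

	return frame_alloc(1, FRAME_LOWMEM, 0);
}

The matching release is frame_free(frame, 1), since frame_free() now also takes a frame count.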
kernel/generic/src/mm/page.c
rdba3e2c r8b863a62 169 169 } 170 170 171 int page_find_mapping(uintptr_t virt, void **phys)171 int page_find_mapping(uintptr_t virt, uintptr_t *phys) 172 172 { 173 173 page_table_lock(AS, true); … … 179 179 } 180 180 181 *phys = (void *)PTE_GET_FRAME(pte) +181 *phys = PTE_GET_FRAME(pte) + 182 182 (virt - ALIGN_DOWN(virt, PAGE_SIZE)); 183 183 … … 193 193 * 194 194 */ 195 sysarg_t sys_page_find_mapping(uintptr_t virt, void*phys_ptr)196 { 197 void *phys;195 sysarg_t sys_page_find_mapping(uintptr_t virt, uintptr_t *phys_ptr) 196 { 197 uintptr_t phys; 198 198 int rc = page_find_mapping(virt, &phys); 199 199 if (rc != EOK) -
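Since page_find_mapping() now reports the physical address through a uintptr_t rather than a void *, callers follow the pattern below (a trivial sketch, not taken from the changeset; the wrapper name is hypothetical):

static void show_mapping(uintptr_t virt)
{
	uintptr_t phys;
	int rc = page_find_mapping(virt, &phys);

	if (rc == EOK) {
		/* phys already carries the in-page offset of virt. */
		printf("%p -> %p\n", (void *) virt, (void *) phys);
	}
}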
kernel/generic/src/mm/slab.c
rdba3e2c r8b863a62 182 182 size_t zone = 0; 183 183 184 void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone); 185 if (!data) { 184 uintptr_t data_phys = 185 frame_alloc_generic(cache->frames, flags, 0, &zone); 186 if (!data_phys) 186 187 return NULL; 187 } 188 189 void *data = (void *) PA2KA(data_phys); 188 190 189 191 slab_t *slab; … … 193 195 slab = slab_alloc(slab_extern_cache, flags); 194 196 if (!slab) { 195 frame_free(KA2PA(data) );197 frame_free(KA2PA(data), cache->frames); 196 198 return NULL; 197 199 } 198 200 } else { 199 fsize = (PAGE_SIZE << cache->order);201 fsize = FRAMES2SIZE(cache->frames); 200 202 slab = data + fsize - sizeof(*slab); 201 203 } … … 203 205 /* Fill in slab structures */ 204 206 size_t i; 205 for (i = 0; i < ((size_t) 1 << cache->order); i++)207 for (i = 0; i < cache->frames; i++) 206 208 frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone); 207 209 … … 225 227 NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab) 226 228 { 227 frame_free(KA2PA(slab->start) );229 frame_free(KA2PA(slab->start), slab->cache->frames); 228 230 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) 229 231 slab_free(slab_extern_cache, slab); … … 231 233 atomic_dec(&cache->allocated_slabs); 232 234 233 return (1 << cache->order);235 return cache->frames; 234 236 } 235 237 … … 558 560 { 559 561 if (cache->flags & SLAB_CACHE_SLINSIDE) 560 return ( (PAGE_SIZE << cache->order)561 - sizeof(slab_t)) /cache->size;562 return (FRAMES2SIZE(cache->frames) - sizeof(slab_t)) / 563 cache->size; 562 564 else 563 return (PAGE_SIZE << cache->order) / cache->size;565 return FRAMES2SIZE(cache->frames) / cache->size; 564 566 } 565 567 … … 570 572 { 571 573 size_t objects = comp_objects(cache); 572 size_t ssize = PAGE_SIZE << cache->order;574 size_t ssize = FRAMES2SIZE(cache->frames); 573 575 574 576 if (cache->flags & SLAB_CACHE_SLINSIDE) … … 634 636 cache->flags |= SLAB_CACHE_SLINSIDE; 635 637 636 /* Minimum slab order */ 637 size_t pages = SIZE2FRAMES(cache->size); 638 639 /* We need the 2^order >= pages */ 640 if (pages == 1) 641 cache->order = 0; 642 else 643 cache->order = fnzb(pages - 1) + 1; 638 /* Minimum slab frames */ 639 cache->frames = SIZE2FRAMES(cache->size); 644 640 645 641 while (badness(cache) > SLAB_MAX_BADNESS(cache)) 646 cache-> order += 1;642 cache->frames <<= 1; 647 643 648 644 cache->objects = comp_objects(cache); … … 810 806 811 807 size_t frames = 0; 812 list_foreach(slab_cache_list, cur) { 813 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link); 808 list_foreach(slab_cache_list, link, slab_cache_t, cache) { 814 809 frames += _slab_reclaim(cache, flags); 815 810 } … … 871 866 872 867 const char *name = cache->name; 873 uint8_t order = cache->order;868 size_t frames = cache->frames; 874 869 size_t size = cache->size; 875 870 size_t objects = cache->objects; … … 881 876 irq_spinlock_unlock(&slab_cache_lock, true); 882 877 883 printf("%-18s %8zu %8 u %8zu %8ld %8ld %8ld %-5s\n",884 name, size, (1 << order), objects, allocated_slabs,878 printf("%-18s %8zu %8zu %8zu %8ld %8ld %8ld %-5s\n", 879 name, size, frames, objects, allocated_slabs, 885 880 cached_objs, allocated_objs, 886 881 flags & SLAB_CACHE_SLINSIDE ? 
"in" : "out"); … … 936 931 irq_spinlock_lock(&slab_cache_lock, false); 937 932 938 list_foreach(slab_cache_list, cur) { 939 slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link); 933 list_foreach(slab_cache_list, link, slab_cache_t, slab) { 940 934 if ((slab->flags & SLAB_CACHE_MAGDEFERRED) != 941 935 SLAB_CACHE_MAGDEFERRED) -
kernel/generic/src/proc/program.c
rdba3e2c r8b863a62 49 49 #include <lib/elf_load.h> 50 50 #include <errno.h> 51 #include < print.h>51 #include <log.h> 52 52 #include <syscall/copy.h> 53 53 #include <proc/program.h> … … 155 155 156 156 program_loader = image_addr; 157 printf("Program loader at %p\n", (void *) image_addr);157 log(LF_OTHER, LVL_NOTE, "Program loader at %p", (void *) image_addr); 158 158 159 159 return EOK; … … 181 181 if (!loader) { 182 182 as_destroy(as); 183 printf("Cannot spawn loader as none was registered\n"); 183 log(LF_OTHER, LVL_ERROR, 184 "Cannot spawn loader as none was registered"); 184 185 return ENOENT; 185 186 } … … 189 190 if (prg->loader_status != EE_OK) { 190 191 as_destroy(as); 191 printf("Cannot spawn loader (%s)\n",192 log(LF_OTHER, LVL_ERROR, "Cannot spawn loader (%s)", 192 193 elf_error(prg->loader_status)); 193 194 return ENOENT; -
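program.c illustrates the third recurring change: messages that used to go through printf() now go through the structured log() call, tagged with a facility and a severity. A small sketch assuming only the constants visible in this diff (LF_OTHER, LVL_NOTE, LVL_ERROR; the full set lives in the new <log.h>), with a hypothetical wrapper function:

#include <log.h>

static void report_loader(void *image_addr)
{
	if (image_addr != NULL)
		log(LF_OTHER, LVL_NOTE, "Program loader at %p", image_addr);
	else
		log(LF_OTHER, LVL_ERROR,
		    "Cannot spawn loader as none was registered");
}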
kernel/generic/src/proc/scheduler.c
rdba3e2c r8b863a62 61 61 #include <cpu.h> 62 62 #include <print.h> 63 #include <log.h> 63 64 #include <debug.h> 64 65 #include <stacktrace.h> … … 517 518 518 519 #ifdef SCHEDULER_VERBOSE 519 printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64 520 ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, 520 log(LF_OTHER, LVL_DEBUG, 521 "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64 522 ", nrdy=%" PRIua ")", CPU->id, THREAD->tid, THREAD->priority, 521 523 THREAD->ticks, atomic_get(&CPU->nrdy)); 522 524 #endif … … 663 665 664 666 #ifdef KCPULB_VERBOSE 665 printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, " 666 "nrdy=%ld, avg=%ld\n", CPU->id, t->tid, 667 log(LF_OTHER, LVL_DEBUG, 668 "kcpulb%u: TID %" PRIu64 " -> cpu%u, " 669 "nrdy=%ld, avg=%ld", CPU->id, t->tid, 667 670 CPU->id, atomic_get(&CPU->nrdy), 668 671 atomic_get(&nrdy) / config.cpu_active); … … 739 742 740 743 printf("\trq[%u]: ", i); 741 list_foreach(cpus[cpu].rq[i].rq, cur) { 742 thread_t *thread = list_get_instance(cur, 743 thread_t, rq_link); 744 list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t, 745 thread) { 744 746 printf("%" PRIu64 "(%s) ", thread->tid, 745 747 thread_states[thread->state]); -
kernel/generic/src/proc/task.c
rdba3e2c r8b863a62 452 452 453 453 /* Current values of threads */ 454 list_foreach(task->threads, cur) { 455 thread_t *thread = list_get_instance(cur, thread_t, th_link); 456 454 list_foreach(task->threads, th_link, thread_t, thread) { 457 455 irq_spinlock_lock(&thread->lock, false); 458 456 … … 484 482 */ 485 483 486 list_foreach(task->threads, cur) { 487 thread_t *thread = list_get_instance(cur, thread_t, th_link); 484 list_foreach(task->threads, th_link, thread_t, thread) { 488 485 bool sleeping = false; 489 486 -
kernel/generic/src/proc/thread.c
rdba3e2c r8b863a62 192 192 kmflags &= ~FRAME_HIGHMEM; 193 193 194 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags); 195 if (!thread->kstack) { 194 uintptr_t stack_phys = 195 frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1); 196 if (!stack_phys) { 196 197 #ifdef CONFIG_FPU 197 198 if (thread->saved_fpu_context) … … 201 202 } 202 203 204 thread->kstack = (uint8_t *) PA2KA(stack_phys); 205 203 206 #ifdef CONFIG_UDEBUG 204 207 mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE); … … 216 219 thr_destructor_arch(thread); 217 220 218 frame_free(KA2PA(thread->kstack) );221 frame_free(KA2PA(thread->kstack), STACK_FRAMES); 219 222 220 223 #ifdef CONFIG_FPU -
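The kernel stack allocation above is a good worked example of the new constraint argument: passing STACK_SIZE - 1 declares that none of the low log2(STACK_SIZE) bits may be set in the physical address of the first frame, which is exactly natural alignment of the stack to its own size. Assuming for illustration STACK_SIZE == 8192:

/*
 * constraint = STACK_SIZE - 1 = 0x1fff
 *
 * 0x0004a000 & 0x1fff == 0      -> acceptable first frame
 * 0x0004b000 & 0x1fff == 0x1000 -> rejected, not 8 KiB aligned
 */
static inline bool satisfies_constraint(uintptr_t addr, uintptr_t constraint)
{
	return (addr & constraint) == 0;
}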
kernel/generic/src/synch/futex.c
rdba3e2c r8b863a62 274 274 mutex_lock(&TASK->futexes_lock); 275 275 276 list_foreach(TASK->futexes.leaf_list, cur) { 277 btree_node_t *node; 276 list_foreach(TASK->futexes.leaf_list, leaf_link, btree_node_t, node) { 278 277 unsigned int i; 279 278 280 node = list_get_instance(cur, btree_node_t, leaf_link);281 279 for (i = 0; i < node->keys; i++) { 282 280 futex_t *ftx; -
kernel/generic/src/syscall/syscall.c
rdba3e2c r8b863a62 56 56 #include <console/console.h> 57 57 #include <udebug/udebug.h> 58 #include <log.h> 58 59 59 60 /** Dispatch system call */ … … 86 87 rc = syscall_table[id](a1, a2, a3, a4, a5, a6); 87 88 } else { 88 printf("Task %" PRIu64": Unknown syscall %#" PRIxn, TASK->taskid, id); 89 log(LF_OTHER, LVL_ERROR, 90 "Task %" PRIu64": Unknown syscall %#" PRIxn, TASK->taskid, id); 89 91 task_kill_self(true); 90 92 } … … 120 122 syshandler_t syscall_table[SYSCALL_END] = { 121 123 /* System management syscalls. */ 122 (syshandler_t) sys_k log,124 (syshandler_t) sys_kio, 123 125 (syshandler_t) sys_tls_set, 124 126 … … 190 192 191 193 /* Kernel console syscalls. */ 192 (syshandler_t) sys_debug_activate_console 194 (syshandler_t) sys_debug_activate_console, 195 196 (syshandler_t) sys_klog, 193 197 }; 194 198 -
kernel/generic/src/sysinfo/stats.c
rdba3e2c r8b863a62 175 175 176 176 /* Walk the B+ tree and count pages */ 177 list_foreach(as->as_area_btree.leaf_list, cur) { 178 btree_node_t *node = 179 list_get_instance(cur, btree_node_t, leaf_link); 180 177 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, 178 node) { 181 179 unsigned int i; 182 180 for (i = 0; i < node->keys; i++) { … … 218 216 219 217 /* Walk the B+ tree and count pages */ 220 list_foreach(as->as_area_btree.leaf_list, cur) { 221 btree_node_t *node = 222 list_get_instance(cur, btree_node_t, leaf_link); 223 218 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) { 224 219 unsigned int i; 225 220 for (i = 0; i < node->keys; i++) { -
kernel/generic/src/time/clock.c
rdba3e2c r8b863a62 81 81 void clock_counter_init(void) 82 82 { 83 void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);84 if ( !faddr)83 uintptr_t faddr = frame_alloc(1, FRAME_ATOMIC, 0); 84 if (faddr == 0) 85 85 panic("Cannot allocate page for clock."); 86 86 … … 91 91 uptime->useconds = 0; 92 92 93 clock_parea.pbase = (uintptr_t)faddr;93 clock_parea.pbase = faddr; 94 94 clock_parea.frames = 1; 95 95 clock_parea.unpriv = true; -
kernel/generic/src/udebug/udebug.c
rdba3e2c r8b863a62 406 406 407 407 /* Finish debugging of all userspace threads */ 408 list_foreach(task->threads, cur) { 409 thread_t *thread = list_get_instance(cur, thread_t, th_link); 410 408 list_foreach(task->threads, th_link, thread_t, thread) { 411 409 mutex_lock(&thread->udebug.lock); 412 410 -
kernel/generic/src/udebug/udebug_ops.c
rdba3e2c r8b863a62 196 196 /* Set udebug.active on all of the task's userspace threads. */ 197 197 198 list_foreach(TASK->threads, cur) { 199 thread_t *thread = list_get_instance(cur, thread_t, th_link); 200 198 list_foreach(TASK->threads, th_link, thread_t, thread) { 201 199 mutex_lock(&thread->udebug.lock); 202 200 if (thread->uspace) { … … 389 387 390 388 /* FIXME: make sure the thread isn't past debug shutdown... */ 391 list_foreach(TASK->threads, cur) { 392 thread_t *thread = list_get_instance(cur, thread_t, th_link); 393 389 list_foreach(TASK->threads, th_link, thread_t, thread) { 394 390 irq_spinlock_lock(&thread->lock, false); 395 391 bool uspace = thread->uspace;
