- Timestamp: 2011-05-17T07:44:17Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 04c418d, 2586860, 5e6e50b
- Parents: 72cd53d (diff), 0d8a304 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel
- Files: 2 added, 42 edited
kernel/Makefile
r72cd53d r3375bd4 94 94 -fexec-charset=UTF-8 -fwide-exec-charset=UTF-32$(ENDIANESS) \ 95 95 -finput-charset=UTF-8 -ffreestanding -fno-builtin -nostdlib -nostdinc \ 96 - Wall -Wextra -Wno-unused-parameter -Wmissing-prototypes \96 -std=gnu99 -Wall -Wextra -Wno-unused-parameter -Wmissing-prototypes \ 97 97 -Werror-implicit-function-declaration -Wwrite-strings \ 98 98 -pipe … … 134 134 # 135 135 AFLAGS = 136 LFLAGS = - N-T $(LINK) -M136 LFLAGS = -n -T $(LINK) -M 137 137 138 138 # … … 228 228 generic/src/syscall/syscall.c \ 229 229 generic/src/syscall/copy.c \ 230 generic/src/mm/reserve.c \ 230 231 generic/src/mm/buddy.c \ 231 232 generic/src/mm/frame.c \ -
kernel/arch/abs32le/include/types.h
r72cd53d r3375bd4 40 40 41 41 typedef uint32_t size_t; 42 typedef int32_t ssize_t; 42 43 43 44 typedef uint32_t uintptr_t; -
kernel/arch/amd64/include/types.h
r72cd53d r3375bd4 37 37 38 38 typedef uint64_t size_t; 39 typedef int64_t ssize_t; 39 40 40 41 typedef uint64_t uintptr_t; -
kernel/arch/amd64/src/interrupt.c
r72cd53d r3375bd4 66 66 void istate_decode(istate_t *istate) 67 67 { 68 printf("cs =%#0" PRIx64 "\trip=%p\t" 69 "rfl=%#0" PRIx64 "\terr=%#0" PRIx64 "\n", 70 istate->cs, (void *) istate->rip, 71 istate->rflags, istate->error_word); 68 printf("cs =%0#18" PRIx64 "\trip=%0#18" PRIx64 "\t" 69 "rfl=%0#18" PRIx64 "\terr=%0#18" PRIx64 "\n", 70 istate->cs, istate->rip, istate->rflags, istate->error_word); 72 71 73 72 if (istate_from_uspace(istate)) 74 printf("ss =% #0" PRIx64 "\n", istate->ss);75 76 printf("rax=% #0" PRIx64 "\trbx=%#0" PRIx64 "\t"77 "rcx=% #0" PRIx64 "\trdx=%#0" PRIx64 "\n",73 printf("ss =%0#18" PRIx64 "\n", istate->ss); 74 75 printf("rax=%0#18" PRIx64 "\trbx=%0#18" PRIx64 "\t" 76 "rcx=%0#18" PRIx64 "\trdx=%0#18" PRIx64 "\n", 78 77 istate->rax, istate->rbx, istate->rcx, istate->rdx); 79 78 80 printf("rsi=% p\trdi=%p\trbp=%p\trsp=%p\n",81 (void *) istate->rsi, (void *) istate->rdi,82 (void *)istate->rbp,83 istate_from_uspace(istate) ? ((void *) istate->rsp):84 &istate->rsp);85 86 printf("r8 =% #0" PRIx64 "\tr9 =%#0" PRIx64 "\t"87 "r10=% #0" PRIx64 "\tr11=%#0" PRIx64 "\n",79 printf("rsi=%0#18" PRIx64 "\trdi=%0#18" PRIx64 "\t" 80 "rbp=%0#18" PRIx64 "\trsp=%0#18" PRIx64 "\n", 81 istate->rsi, istate->rdi, istate->rbp, 82 istate_from_uspace(istate) ? istate->rsp : 83 (uintptr_t) &istate->rsp); 84 85 printf("r8 =%0#18" PRIx64 "\tr9 =%0#18" PRIx64 "\t" 86 "r10=%0#18" PRIx64 "\tr11=%0#18" PRIx64 "\n", 88 87 istate->r8, istate->r9, istate->r10, istate->r11); 89 88 90 printf("r12=% #0" PRIx64 "\tr13=%#0" PRIx64 "\t"91 "r14=% #0" PRIx64 "\tr15=%#0" PRIx64 "\n",89 printf("r12=%0#18" PRIx64 "\tr13=%0#18" PRIx64 "\t" 90 "r14=%0#18" PRIx64 "\tr15=%0#18" PRIx64 "\n", 92 91 istate->r12, istate->r13, istate->r14, istate->r15); 93 92 } -
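The format-string change above replaces the %p casts with an explicit %0#18 PRIx64 field: zero fill, alternate form ('#', which adds the 0x prefix), and a minimum width of 18 characters (2 for "0x" plus 16 hex digits), so 64-bit registers line up in columns. A minimal host-side sketch of the same format:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rip = UINT64_C(0xffffffff80123456);

        /* 0-fill + '#' (0x prefix) + width 18 = "0x" and 16 hex digits */
        printf("rip=%0#18" PRIx64 "\n", rip);  /* rip=0xffffffff80123456 */
        return 0;
    }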
kernel/arch/arm32/include/types.h
r72cd53d r3375bd4 44 44 45 45 typedef uint32_t size_t; 46 typedef int32_t ssize_t; 46 47 47 48 typedef uint32_t uintptr_t; -
kernel/arch/arm32/src/exception.c
r72cd53d r3375bd4 174 174 void istate_decode(istate_t *istate) 175 175 { 176 printf("r0 =% #0" PRIx32 "\tr1 =%#0" PRIx32 "\t"177 "r2 =% #0" PRIx32 "\tr3 =%#0" PRIx32 "\n",176 printf("r0 =%0#10" PRIx32 "\tr1 =%0#10" PRIx32 "\t" 177 "r2 =%0#10" PRIx32 "\tr3 =%0#10" PRIx32 "\n", 178 178 istate->r0, istate->r1, istate->r2, istate->r3); 179 printf("r4 =% #" PRIx32 "\tr5 =%#0" PRIx32 "\t"180 "r6 =% #0" PRIx32 "\tr7 =%#0" PRIx32 "\n",179 printf("r4 =%0#10" PRIx32 "\tr5 =%0#10" PRIx32 "\t" 180 "r6 =%0#10" PRIx32 "\tr7 =%0#10" PRIx32 "\n", 181 181 istate->r4, istate->r5, istate->r6, istate->r7); 182 printf("r8 =%#0" PRIx32 "\tr9 =%#0" PRIx32 "\t" 183 "r10=%#0" PRIx32 "\tfp =%p\n", 184 istate->r8, istate->r9, istate->r10, 185 (void *) istate->fp); 186 printf("r12=%#0" PRIx32 "\tsp =%p\tlr =%p\tspsr=%p\n", 187 istate->r12, (void *) istate->sp, 188 (void *) istate->lr, (void *) istate->spsr); 182 printf("r8 =%0#10" PRIx32 "\tr9 =%0#10" PRIx32 "\t" 183 "r10=%0#10" PRIx32 "\tfp =%0#10" PRIx32 "\n", 184 istate->r8, istate->r9, istate->r10, istate->fp); 185 printf("r12=%0#10" PRIx32 "\tsp =%0#10" PRIx32 "\t" 186 "lr =%0#10" PRIx32 "\tspsr=%0#10" PRIx32 "\n", 187 istate->r12, istate->sp, istate->lr, istate->spsr); 189 188 } 190 189 -
kernel/arch/ia32/Makefile.inc
r72cd53d r3375bd4 35 35 ENDIANESS = LE 36 36 37 CMN1 = -m32 37 CMN1 = -m32 -fno-omit-frame-pointer 38 38 GCC_CFLAGS += $(CMN1) 39 39 ICC_CFLAGS += $(CMN1) -
kernel/arch/ia32/include/barrier.h
r72cd53d r3375bd4 54 54 NO_TRACE static inline void cpuid_serialization(void) 55 55 { 56 #ifndef __IN_SHARED_LIBC__ 56 57 asm volatile ( 57 58 "xorl %%eax, %%eax\n" … … 59 60 ::: "eax", "ebx", "ecx", "edx", "memory" 60 61 ); 62 #else 63 /* Must not clobber PIC register ebx */ 64 asm volatile ( 65 "movl %%ebx, %%esi\n" 66 "xorl %%eax, %%eax\n" 67 "cpuid\n" 68 "movl %%esi, %%ebx\n" 69 ::: "eax", "ecx", "edx", "esi", "memory" 70 ); 71 #endif 61 72 } 62 73 -
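The new shared-libc branch must preserve %ebx because ia32 position-independent code keeps the GOT pointer there, while CPUID clobbers it. An equivalent variant (a sketch, not part of this changeset) saves one move by swapping instead:

    asm volatile (
        "xchgl %%ebx, %%esi\n"   /* stash the PIC register */
        "xorl %%eax, %%eax\n"
        "cpuid\n"
        "xchgl %%esi, %%ebx\n"   /* restore it */
        ::: "eax", "ecx", "edx", "esi", "memory"
    );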
kernel/arch/ia32/include/types.h
r72cd53d r3375bd4 37 37 38 38 typedef uint32_t size_t; 39 typedef int32_t ssize_t; 39 40 40 41 typedef uint32_t uintptr_t; -
kernel/arch/ia32/src/interrupt.c
r72cd53d r3375bd4 66 66 void istate_decode(istate_t *istate) 67 67 { 68 printf("cs =%#0" PRIx32 "\teip=%p\t" 69 "efl=%#0" PRIx32 "\terr=%#0" PRIx32 "\n", 70 istate->cs, (void *) istate->eip, 71 istate->eflags, istate->error_word); 72 73 printf("ds =%#0" PRIx32 "\tes =%#0" PRIx32 "\t" 74 "fs =%#0" PRIx32 "\tgs =%#0" PRIx32 "\n", 68 printf("cs =%0#10" PRIx32 "\teip=%0#10" PRIx32 "\t" 69 "efl=%0#10" PRIx32 "\terr=%0#10" PRIx32 "\n", 70 istate->cs, istate->eip, istate->eflags, istate->error_word); 71 72 printf("ds =%0#10" PRIx32 "\tes =%0#10" PRIx32 "\t" 73 "fs =%0#10" PRIx32 "\tgs =%0#10" PRIx32 "\n", 75 74 istate->ds, istate->es, istate->fs, istate->gs); 76 75 77 76 if (istate_from_uspace(istate)) 78 printf("ss =% #0" PRIx32 "\n", istate->ss);79 80 printf("eax=% #0" PRIx32 "\tebx=%#0" PRIx32 "\t"81 "ecx=% #0" PRIx32 "\tedx=%#0" PRIx32 "\n",77 printf("ss =%0#10" PRIx32 "\n", istate->ss); 78 79 printf("eax=%0#10" PRIx32 "\tebx=%0#10" PRIx32 "\t" 80 "ecx=%0#10" PRIx32 "\tedx=%0#10" PRIx32 "\n", 82 81 istate->eax, istate->ebx, istate->ecx, istate->edx); 83 82 84 printf("esi=% p\tedi=%p\tebp=%p\tesp=%p\n",85 (void *) istate->esi, (void *) istate->edi,86 (void *)istate->ebp,87 istate_from_uspace(istate) ? ((void *) istate->esp):88 &istate->esp);83 printf("esi=%0#10" PRIx32 "\tedi=%0#10" PRIx32 "\t" 84 "ebp=%0#10" PRIx32 "\tesp=%0#10" PRIx32 "\n", 85 istate->esi, istate->edi, istate->ebp, 86 istate_from_uspace(istate) ? istate->esp : 87 (uintptr_t) &istate->esp); 89 88 } 90 89 -
kernel/arch/ia64/_link.ld.in
r72cd53d r3375bd4 30 30 hardcoded_kdata_size = .; 31 31 QUAD(kdata_end - kdata_start); 32 __gp = .; 32 33 *(.got .got.*) 33 34 *(.sdata) -
kernel/arch/ia64/include/types.h
r72cd53d r3375bd4 37 37 38 38 typedef uint64_t size_t; 39 typedef int64_t ssize_t; 39 40 40 41 typedef uint64_t uintptr_t; -
kernel/arch/ia64/src/ivt.S
r72cd53d r3375bd4 391 391 392 392 /* 10. call handler */ 393 movl r1 = kernel_image_start393 movl r1 = __gp 394 394 395 395 mov b1 = loc2 -
kernel/arch/ia64/src/start.S
r72cd53d r3375bd4 174 174 175 175 # Initialize gp (Global Pointer) register 176 movl gp = kernel_image_start176 movl gp = __gp 177 177 178 # 178 # 179 179 # Initialize bootinfo on BSP. 180 180 # -
kernel/arch/mips32/include/types.h
r72cd53d r3375bd4 37 37 38 38 typedef uint32_t size_t; 39 typedef int32_t ssize_t; 39 40 40 41 typedef uint32_t uintptr_t; -
kernel/arch/mips32/src/exception.c
r72cd53d r3375bd4 74 74 void istate_decode(istate_t *istate) 75 75 { 76 printf("epc=% p\tsta=%#010" PRIx32 "\t"76 printf("epc=%#010" PRIx32 "\tsta=%#010" PRIx32 "\t" 77 77 "lo =%#010" PRIx32 "\thi =%#010" PRIx32 "\n", 78 (void *) istate->epc, istate->status, 79 istate->lo, istate->hi); 78 istate->epc, istate->status, istate->lo, istate->hi); 80 79 81 80 printf("a0 =%#010" PRIx32 "\ta1 =%#010" PRIx32 "\t" … … 107 106 istate->s8, istate->at, istate->kt0, istate->kt1); 108 107 109 printf("sp =% p\tra =%p\tgp =%p\n",110 (void *) istate->sp, (void *) istate->ra,111 (void *)istate->gp);108 printf("sp =%#010" PRIx32 "\tra =%#010" PRIx32 "\t" 109 "gp =%#010" PRIx32 "\n", 110 istate->sp, istate->ra, istate->gp); 112 111 } 113 112 -
kernel/arch/mips32/src/mm/frame.c
r72cd53d r3375bd4 88 88 /* gxemul devices */ 89 89 if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE, 90 0x10000000, M B2SIZE(256)))90 0x10000000, MiB2SIZE(256))) 91 91 return false; 92 92 #endif -
kernel/arch/ppc32/include/types.h
r72cd53d r3375bd4 37 37 38 38 typedef uint32_t size_t; 39 typedef int32_t ssize_t; 39 40 40 41 typedef uint32_t uintptr_t; -
kernel/arch/ppc32/src/interrupt.c
r72cd53d r3375bd4 54 54 void istate_decode(istate_t *istate) 55 55 { 56 printf("r0 =% #0" PRIx32 "\tr1 =%p\tr2 =%#0" PRIx32 "\n",57 istate->r0, (void *)istate->sp, istate->r2);56 printf("r0 =%0#10" PRIx32 "\tr1 =%0#10" PRIx32 "\t" 57 "r2 =%0#10" PRIx32 "\n", istate->r0, istate->sp, istate->r2); 58 58 59 printf("r3 =% #0" PRIx32 "\tr4 =%#0" PRIx32 "\tr5 =%#0" PRIx32 "\n",60 istate->r3, istate->r4, istate->r5);59 printf("r3 =%0#10" PRIx32 "\tr4 =%0#10" PRIx32 "\t" 60 "r5 =%0#10" PRIx32 "\n", istate->r3, istate->r4, istate->r5); 61 61 62 printf("r6 =% #0" PRIx32 "\tr7 =%#0" PRIx32 "\tr8 =%#0" PRIx32 "\n",63 istate->r6, istate->r7, istate->r8);62 printf("r6 =%0#10" PRIx32 "\tr7 =%0#10" PRIx32 "\t" 63 "r8 =%0#10" PRIx32 "\n", istate->r6, istate->r7, istate->r8); 64 64 65 printf("r9 =% #0" PRIx32 "\tr10=%#0" PRIx32 "\tr11=%#0" PRIx32 "\n",66 istate->r9, istate->r10, istate->r11);65 printf("r9 =%0#10" PRIx32 "\tr10=%0#10" PRIx32 "\t" 66 "r11=%0#10" PRIx32 "\n", istate->r9, istate->r10, istate->r11); 67 67 68 printf("r12=% #0" PRIx32 "\tr13=%#0" PRIx32 "\tr14=%#0" PRIx32 "\n",69 istate->r12, istate->r13, istate->r14);68 printf("r12=%0#10" PRIx32 "\tr13=%0#10" PRIx32 "\t" 69 "r14=%0#10" PRIx32 "\n", istate->r12, istate->r13, istate->r14); 70 70 71 printf("r15=% #0" PRIx32 "\tr16=%#0" PRIx32 "\tr17=%#0" PRIx32 "\n",72 istate->r15, istate->r16, istate->r17);71 printf("r15=%0#10" PRIx32 "\tr16=%0#10" PRIx32 "\t" 72 "r17=%0#10" PRIx32 "\n", istate->r15, istate->r16, istate->r17); 73 73 74 printf("r18=% #0" PRIx32 "\tr19=%#0" PRIx32 "\tr20=%#0" PRIx32 "\n",75 istate->r18, istate->r19, istate->r20);74 printf("r18=%0#10" PRIx32 "\tr19=%0#10" PRIx32 "\t" 75 "r20=%0#10" PRIx32 "\n", istate->r18, istate->r19, istate->r20); 76 76 77 printf("r21=% #0" PRIx32 "\tr22=%#0" PRIx32 "\tr23=%#0" PRIx32 "\n",78 istate->r21, istate->r22, istate->r23);77 printf("r21=%0#10" PRIx32 "\tr22=%0#10" PRIx32 "\t" 78 "r23=%0#10" PRIx32 "\n", istate->r21, istate->r22, istate->r23); 79 79 80 printf("r24=% #0" PRIx32 "\tr25=%#0" PRIx32 "\tr26=%#0" PRIx32 "\n",81 istate->r24, istate->r25, istate->r26);80 printf("r24=%0#10" PRIx32 "\tr25=%0#10" PRIx32 "\t" 81 "r26=%0#10" PRIx32 "\n", istate->r24, istate->r25, istate->r26); 82 82 83 printf("r27=% #0" PRIx32 "\tr28=%#0" PRIx32 "\tr29=%#0" PRIx32 "\n",84 istate->r27, istate->r28, istate->r29);83 printf("r27=%0#10" PRIx32 "\tr28=%0#10" PRIx32 "\t" 84 "r29=%0#10" PRIx32 "\n", istate->r27, istate->r28, istate->r29); 85 85 86 printf("r30=% #0" PRIx32 "\tr31=%#0" PRIx32 "\n",86 printf("r30=%0#10" PRIx32 "\tr31=%0#10" PRIx32 "\n", 87 87 istate->r30, istate->r31); 88 88 89 printf("cr =% #0" PRIx32 "\tpc =%p\tlr =%p\n",90 istate->cr, (void *) istate->pc, (void *)istate->lr);89 printf("cr =%0#10" PRIx32 "\tpc =%0#10" PRIx32 "\t" 90 "lr =%0#10" PRIx32 "\n", istate->cr, istate->pc, istate->lr); 91 91 92 printf("ctr=% #0" PRIx32 "\txer=%#0" PRIx32 "\tdar=%#0" PRIx32 "\n",93 istate->ctr, istate->xer, istate->dar);92 printf("ctr=%0#10" PRIx32 "\txer=%0#10" PRIx32 "\t" 93 "dar=%0#10" PRIx32 "\n", istate->ctr, istate->xer, istate->dar); 94 94 95 printf("srr1=% p\n", (void *)istate->srr1);95 printf("srr1=%0#10" PRIx32 "\n", istate->srr1); 96 96 } 97 97 -
kernel/arch/sparc64/include/cpu.h
r72cd53d r3375bd4 59 59 #include <arch/asm.h> 60 60 61 #ifdef CONFIG_SMP62 #include <arch/mm/cache.h>63 #endif64 65 66 61 #if defined (SUN4U) 67 62 #include <arch/sun4u/cpu.h> -
kernel/arch/sparc64/include/sun4u/cpu.h
r72cd53d r3375bd4 60 60 #include <trace.h> 61 61 62 #ifdef CONFIG_SMP63 #include <arch/mm/cache.h>64 #endif65 66 62 typedef struct { 67 63 uint32_t mid; /**< Processor ID as read from -
kernel/arch/sparc64/include/types.h
r72cd53d r3375bd4 37 37 38 38 typedef uint64_t size_t; 39 typedef int64_t ssize_t; 39 40 40 41 typedef uint64_t uintptr_t; -
kernel/generic/include/ipc/event.h
r72cd53d r3375bd4 51 51 /** Counter. */ 52 52 size_t counter; 53 /** Masked flag. */ 54 bool masked; 55 /** Unmask callback. */ 56 void (*unmask_cb)(void); 53 57 } event_t; 54 58 55 59 extern void event_init(void); 60 extern void event_cleanup_answerbox(answerbox_t *); 61 extern void event_set_unmask_callback(event_type_t, void (*)(void)); 62 63 #define event_notify_0(e, m) \ 64 event_notify((e), (m), 0, 0, 0, 0, 0) 65 #define event_notify_1(e, m, a1) \ 66 event_notify((e), (m), (a1), 0, 0, 0, 0) 67 #define event_notify_2(e, m, a1, a2) \ 68 event_notify((e), (m), (a1), (a2), 0, 0, 0) 69 #define event_notify_3(e, m, a1, a2, a3) \ 70 event_notify((e), (m), (a1), (a2), (a3), 0, 0) 71 #define event_notify_4(e, m, a1, a2, a3, a4) \ 72 event_notify((e), (m), (a1), (a2), (a3), (a4), 0) 73 #define event_notify_5(e, m, a1, a2, a3, a4, a5) \ 74 event_notify((e), (m), (a1), (a2), (a3), (a4), (a5)) 75 76 extern int event_notify(event_type_t, bool, sysarg_t, sysarg_t, sysarg_t, 77 sysarg_t, sysarg_t); 78 56 79 extern sysarg_t sys_event_subscribe(sysarg_t, sysarg_t); 57 extern bool event_is_subscribed(event_type_t); 58 extern void event_cleanup_answerbox(answerbox_t *); 59 60 #define event_notify_0(e) \ 61 event_notify((e), 0, 0, 0, 0, 0) 62 #define event_notify_1(e, a1) \ 63 event_notify((e), (a1), 0, 0, 0, 0) 64 #define event_notify_2(e, a1, a2) \ 65 event_notify((e), (a1), (a2), 0, 0, 0) 66 #define event_notify_3(e, a1, a2, a3) \ 67 event_notify((e), (a1), (a2), (a3), 0, 0) 68 #define event_notify_4(e, a1, a2, a3, a4) \ 69 event_notify((e), (a1), (a2), (a3), (a4), 0) 70 #define event_notify_5(e, a1, a2, a3, a4, a5) \ 71 event_notify((e), (a1), (a2), (a3), (a4), (a5)) 72 73 extern void event_notify(event_type_t, sysarg_t, sysarg_t, sysarg_t, 74 sysarg_t, sysarg_t); 80 extern sysarg_t sys_event_unmask(sysarg_t); 75 81 76 82 #endif -
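The reworked header introduces masked notifications: event_notify() now takes a mask flag and a per-event unmask callback re-arms the producer. A minimal kernel-side producer sketch, modeled on the klog wiring later in this changeset (myprod_pending is a hypothetical counter, and EVENT_KLOG is reused here purely for illustration):

    static size_t myprod_pending;  /* hypothetical pending-data counter */

    static void myprod_update(void)
    {
        /* Mask further notifications until uspace unmasks the event. */
        if ((myprod_pending > 0) &&
            (event_notify_1(EVENT_KLOG, true, myprod_pending) == EOK))
            myprod_pending = 0;
    }

    void myprod_init(void)
    {
        /* Re-run the producer whenever uspace unmasks the event. */
        event_set_unmask_callback(EVENT_KLOG, myprod_update);
    }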
kernel/generic/include/ipc/event_types.h
r72cd53d r3375bd4 39 39 /** New data available in kernel log */ 40 40 EVENT_KLOG = 0, 41 /** Returning from kernel console to us erspace */41 /** Returning from kernel console to uspace */ 42 42 EVENT_KCONSOLE, 43 43 /** A task/thread has faulted and will be terminated */ -
kernel/generic/include/macros.h
r72cd53d r3375bd4 95 95 overlaps(KA2PA((x)), (szx), KA2PA((y)), (szy)) 96 96 97 #define SIZE2KB(size) ((size) >> 10) 98 #define SIZE2MB(size) ((size) >> 20) 99 100 #define KB2SIZE(kb) ((kb) << 10) 101 #define MB2SIZE(mb) ((mb) << 20) 97 #define KiB2SIZE(kb) ((kb) << 10) 98 #define MiB2SIZE(mb) ((mb) << 20) 102 99 103 100 #define STRING(arg) STRING_ARG(arg) -
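The renamed macros make the binary-prefix intent explicit; both remain plain shifts:

    MiB2SIZE(256)  /* == 256 << 20 == 268435456 bytes, the gxemul window above */
    KiB2SIZE(4)    /* == 4 << 10  == 4096 bytes */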
kernel/generic/include/mm/as.h
r72cd53d r3375bd4 238 238 /** Address space area backend structure. */ 239 239 typedef struct mem_backend { 240 bool (* create)(as_area_t *); 241 bool (* resize)(as_area_t *, size_t); 242 void (* share)(as_area_t *); 243 void (* destroy)(as_area_t *); 244 240 245 int (* page_fault)(as_area_t *, uintptr_t, pf_access_t); 241 246 void (* frame_free)(as_area_t *, uintptr_t, uintptr_t); 242 void (* share)(as_area_t *);243 247 } mem_backend_t; 244 248 -
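Backends now own the whole area lifecycle. A hedged sketch of a minimal backend (all names hypothetical); as shown in the as.c hunks further below, a hook is only invoked when it is non-NULL, so optional hooks may stay NULL:

    static bool my_create(as_area_t *area)
    {
        return true;  /* nothing to reserve in this sketch */
    }

    static void my_share(as_area_t *area)
    {
        /* nothing extra to do; defined so that sharing can succeed */
    }

    static void my_destroy(as_area_t *area)
    {
        /* nothing to release */
    }

    static int my_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
    {
        return AS_PF_FAULT;  /* this sketch maps nothing */
    }

    mem_backend_t my_backend = {
        .create = my_create,
        .resize = NULL,       /* the area is not resizable */
        .share = my_share,
        .destroy = my_destroy,
        .page_fault = my_page_fault,
        .frame_free = NULL,
    };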
kernel/generic/include/mm/frame.h
r72cd53d r3375bd4 62 62 63 63 /** Convert the frame address to kernel VA. */ 64 #define FRAME_KA 0x 0164 #define FRAME_KA 0x1 65 65 /** Do not panic and do not sleep on failure. */ 66 #define FRAME_ATOMIC 0x 0266 #define FRAME_ATOMIC 0x2 67 67 /** Do not start reclaiming when no free memory. */ 68 #define FRAME_NO_RECLAIM 0x04 68 #define FRAME_NO_RECLAIM 0x4 69 /** Do not reserve / unreserve memory. */ 70 #define FRAME_NO_RESERVE 0x8 69 71 70 72 typedef uint8_t zone_flags_t; 71 73 72 74 /** Available zone (free for allocation) */ 73 #define ZONE_AVAILABLE 0x0 075 #define ZONE_AVAILABLE 0x0 74 76 /** Zone is reserved (not available for allocation) */ 75 #define ZONE_RESERVED 0x 0877 #define ZONE_RESERVED 0x8 76 78 /** Zone is used by firmware (not available for allocation) */ 77 79 #define ZONE_FIRMWARE 0x10 … … 85 87 uint8_t buddy_order; /**< Buddy system block order */ 86 88 link_t buddy_link; /**< Link to the next free block inside 87 one order */89 one order */ 88 90 void *parent; /**< If allocated by slab, this points there */ 89 91 } frame_t; … … 91 93 typedef struct { 92 94 pfn_t base; /**< Frame_no of the first frame 93 in the frames array */95 in the frames array */ 94 96 size_t count; /**< Size of zone */ 95 97 size_t free_count; /**< Number of free frame_t 96 structures */98 structures */ 97 99 size_t busy_count; /**< Number of busy frame_t 98 structures */100 structures */ 99 101 zone_flags_t flags; /**< Type of the zone */ 100 102 101 103 frame_t *frames; /**< Array of frame_t structures 102 in this zone */104 in this zone */ 103 105 buddy_system_t *buddy_system; /**< Buddy system for the zone */ 104 106 } zone_t; … … 146 148 ((~(((sysarg_t) -1) << (order)) & (index)) == 0) 147 149 #define IS_BUDDY_LEFT_BLOCK(zone, frame) \ 148 (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x 01) == 0)150 (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0) 149 151 #define IS_BUDDY_RIGHT_BLOCK(zone, frame) \ 150 (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x 01) == 1)152 (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1) 151 153 #define IS_BUDDY_LEFT_BLOCK_ABS(zone, frame) \ 152 (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x 01) == 0)154 (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0) 153 155 #define IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame) \ 154 (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 1) 155 156 #define frame_alloc(order, flags) \ 157 frame_alloc_generic(order, flags, NULL) 156 (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1) 158 157 159 158 extern void frame_init(void); 160 159 extern void *frame_alloc_generic(uint8_t, frame_flags_t, size_t *); 160 extern void *frame_alloc(uint8_t, frame_flags_t); 161 extern void *frame_alloc_noreserve(uint8_t, frame_flags_t); 162 extern void frame_free_generic(uintptr_t, frame_flags_t); 161 163 extern void frame_free(uintptr_t); 164 extern void frame_free_noreserve(uintptr_t); 162 165 extern void frame_reference_add(pfn_t); 166 extern size_t frame_total_free_get(void); 163 167 164 extern size_t find_zone(pfn_t frame, size_t count, size_t hint);168 extern size_t find_zone(pfn_t, size_t, size_t); 165 169 extern size_t zone_create(pfn_t, size_t, pfn_t, zone_flags_t); 166 170 extern void *frame_get_parent(pfn_t, size_t); -
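The allocator grows reserve-aware wrappers. A usage sketch: ordinary callers keep the reserve accounting, while backends that already charged the reserve in create()/resize() use the noreserve variants on the fault path:

    /* Default path: reserve-accounted; FRAME_ATOMIC avoids sleeping. */
    uintptr_t frame = (uintptr_t) frame_alloc(ONE_FRAME, FRAME_ATOMIC);
    if (frame)
        frame_free(frame);

    /* Fault path of a backend that reserved the memory up front. */
    uintptr_t anon = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
    if (anon)
        frame_free_noreserve(anon);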
kernel/generic/include/str.h
r72cd53d r3375bd4 99 99 extern int str_uint64(const char *, char **, unsigned int, bool, uint64_t *); 100 100 101 extern void order_suffix(const uint64_t val, uint64_t *rv, char *suffix); 101 extern void order_suffix(const uint64_t, uint64_t *, char *); 102 extern void bin_order_suffix(const uint64_t, uint64_t *, const char **, bool); 102 103 103 104 #endif -
kernel/generic/include/syscall/syscall.h
r72cd53d r3375bd4 78 78 79 79 SYS_EVENT_SUBSCRIBE, 80 SYS_EVENT_UNMASK, 80 81 81 82 SYS_CAP_GRANT, -
kernel/generic/src/console/cmd.c
r72cd53d r3375bd4 1107 1107 release_console(); 1108 1108 1109 event_notify_0(EVENT_KCONSOLE );1109 event_notify_0(EVENT_KCONSOLE, false); 1110 1110 indev_pop_character(stdin); 1111 1111 -
kernel/generic/src/console/console.c
r72cd53d r3375bd4 53 53 #include <str.h> 54 54 55 /* 56 * devman produces a lot of output and by giving so many pages 57 * we to allow /app/klog to catch-up. 58 */ 59 #ifdef CONFIG_DEVMAN_EARLY_LAUNCH 60 #define KLOG_PAGES 64 61 #else 62 #define KLOG_PAGES 4 63 #endif 64 55 #define KLOG_PAGES 8 65 56 #define KLOG_LENGTH (KLOG_PAGES * PAGE_SIZE / sizeof(wchar_t)) 66 57 #define KLOG_LATENCY 8 … … 174 165 sysinfo_set_item_val("klog.faddr", NULL, (sysarg_t) faddr); 175 166 sysinfo_set_item_val("klog.pages", NULL, KLOG_PAGES); 167 168 event_set_unmask_callback(EVENT_KLOG, klog_update); 176 169 177 170 spinlock_lock(&klog_lock); … … 274 267 spinlock_lock(&klog_lock); 275 268 276 if ((klog_inited) && (event_is_subscribed(EVENT_KLOG)) && (klog_uspace > 0)) { 277 event_notify_3(EVENT_KLOG, klog_start, klog_len, klog_uspace); 278 klog_uspace = 0; 269 if ((klog_inited) && (klog_uspace > 0)) { 270 if (event_notify_3(EVENT_KLOG, true, klog_start, klog_len, 271 klog_uspace) == EOK) 272 klog_uspace = 0; 279 273 } 280 274 -
kernel/generic/src/ipc/event.c
r72cd53d r3375bd4 48 48 static event_t events[EVENT_END]; 49 49 50 /** Initialize kernel events. */ 50 /** Initialize kernel events. 51 * 52 */ 51 53 void event_init(void) 52 54 { 53 unsigned int i; 54 55 for (i = 0; i < EVENT_END; i++) { 55 for (unsigned int i = 0; i < EVENT_END; i++) { 56 56 spinlock_initialize(&events[i].lock, "event.lock"); 57 57 events[i].answerbox = NULL; 58 58 events[i].counter = 0; 59 59 events[i].imethod = 0; 60 events[i].masked = false; 61 events[i].unmask_cb = NULL; 60 62 } 61 63 } 62 64 65 /** Unsubscribe kernel events associated with an answerbox 66 * 67 * @param answerbox Answerbox to be unsubscribed. 68 * 69 */ 70 void event_cleanup_answerbox(answerbox_t *answerbox) 71 { 72 for (unsigned int i = 0; i < EVENT_END; i++) { 73 spinlock_lock(&events[i].lock); 74 75 if (events[i].answerbox == answerbox) { 76 events[i].answerbox = NULL; 77 events[i].counter = 0; 78 events[i].imethod = 0; 79 events[i].masked = false; 80 } 81 82 spinlock_unlock(&events[i].lock); 83 } 84 } 85 86 /** Define a callback function for the event unmask event. 87 * 88 * @param evno Event type. 89 * @param cb Callback function to be called when the event is unmasked. 90 * 91 */ 92 void event_set_unmask_callback(event_type_t evno, void (*cb)(void)) 93 { 94 ASSERT(evno < EVENT_END); 95 96 spinlock_lock(&events[evno].lock); 97 events[evno].unmask_cb = cb; 98 spinlock_unlock(&events[evno].lock); 99 } 100 101 /** Send kernel notification event 102 * 103 * @param evno Event type. 104 * @param mask Mask further notifications after a successful 105 * sending. 106 * @param a1 First argument. 107 * @param a2 Second argument. 108 * @param a3 Third argument. 109 * @param a4 Fourth argument. 110 * @param a5 Fifth argument. 111 * 112 * @return EOK if notification was successfully sent. 113 * @return ENOMEM if the notification IPC message failed to allocate. 114 * @return EBUSY if the notifications of the given type are 115 * currently masked. 116 * @return ENOENT if the notifications of the given type are 117 * currently not subscribed. 118 * 119 */ 120 int event_notify(event_type_t evno, bool mask, sysarg_t a1, sysarg_t a2, 121 sysarg_t a3, sysarg_t a4, sysarg_t a5) 122 { 123 ASSERT(evno < EVENT_END); 124 125 spinlock_lock(&events[evno].lock); 126 127 int ret; 128 129 if (events[evno].answerbox != NULL) { 130 if (!events[evno].masked) { 131 call_t *call = ipc_call_alloc(FRAME_ATOMIC); 132 133 if (call) { 134 call->flags |= IPC_CALL_NOTIF; 135 call->priv = ++events[evno].counter; 136 137 IPC_SET_IMETHOD(call->data, events[evno].imethod); 138 IPC_SET_ARG1(call->data, a1); 139 IPC_SET_ARG2(call->data, a2); 140 IPC_SET_ARG3(call->data, a3); 141 IPC_SET_ARG4(call->data, a4); 142 IPC_SET_ARG5(call->data, a5); 143 144 irq_spinlock_lock(&events[evno].answerbox->irq_lock, true); 145 list_append(&call->link, &events[evno].answerbox->irq_notifs); 146 irq_spinlock_unlock(&events[evno].answerbox->irq_lock, true); 147 148 waitq_wakeup(&events[evno].answerbox->wq, WAKEUP_FIRST); 149 150 if (mask) 151 events[evno].masked = true; 152 153 ret = EOK; 154 } else 155 ret = ENOMEM; 156 } else 157 ret = EBUSY; 158 } else 159 ret = ENOENT; 160 161 spinlock_unlock(&events[evno].lock); 162 163 return ret; 164 } 165 166 /** Subscribe event notifications 167 * 168 * @param evno Event type. 169 * @param imethod IPC interface and method to be used for 170 * the notifications. 171 * @param answerbox Answerbox to send the notifications to. 172 * 173 * @return EOK if the subscription was successful. 
174 * @return EEXISTS if the notifications of the given type are 175 * already subscribed. 176 * 177 */ 63 178 static int event_subscribe(event_type_t evno, sysarg_t imethod, 64 179 answerbox_t *answerbox) 65 180 { 66 if (evno >= EVENT_END) 67 return ELIMIT; 181 ASSERT(evno < EVENT_END); 68 182 69 183 spinlock_lock(&events[evno].lock); … … 75 189 events[evno].imethod = imethod; 76 190 events[evno].counter = 0; 191 events[evno].masked = false; 77 192 res = EOK; 78 193 } else … … 84 199 } 85 200 201 /** Unmask event notifications 202 * 203 * @param evno Event type to unmask. 204 * 205 */ 206 static void event_unmask(event_type_t evno) 207 { 208 void (*cb)(void); 209 ASSERT(evno < EVENT_END); 210 211 spinlock_lock(&events[evno].lock); 212 events[evno].masked = false; 213 cb = events[evno].unmask_cb; 214 spinlock_unlock(&events[evno].lock); 215 216 /* 217 * Check if there is an unmask callback function defined for this event. 218 */ 219 if (cb) 220 cb(); 221 } 222 223 /** Event notification syscall wrapper 224 * 225 * @param evno Event type to subscribe. 226 * @param imethod IPC interface and method to be used for 227 * the notifications. 228 * 229 * @return EOK on success. 230 * @return ELIMIT on unknown event type. 231 * @return EEXISTS if the notifications of the given type are 232 * already subscribed. 233 * 234 */ 86 235 sysarg_t sys_event_subscribe(sysarg_t evno, sysarg_t imethod) 87 236 { 237 if (evno >= EVENT_END) 238 return ELIMIT; 239 88 240 return (sysarg_t) event_subscribe((event_type_t) evno, (sysarg_t) 89 241 imethod, &TASK->answerbox); 90 242 } 91 243 92 bool event_is_subscribed(event_type_t evno) 93 { 94 bool res; 95 96 ASSERT(evno < EVENT_END); 97 98 spinlock_lock(&events[evno].lock); 99 res = events[evno].answerbox != NULL; 100 spinlock_unlock(&events[evno].lock); 101 102 return res; 103 } 104 105 106 void event_cleanup_answerbox(answerbox_t *answerbox) 107 { 108 unsigned int i; 109 110 for (i = 0; i < EVENT_END; i++) { 111 spinlock_lock(&events[i].lock); 112 if (events[i].answerbox == answerbox) { 113 events[i].answerbox = NULL; 114 events[i].counter = 0; 115 events[i].imethod = 0; 116 } 117 spinlock_unlock(&events[i].lock); 118 } 119 } 120 121 void event_notify(event_type_t evno, sysarg_t a1, sysarg_t a2, sysarg_t a3, 122 sysarg_t a4, sysarg_t a5) 123 { 124 ASSERT(evno < EVENT_END); 125 126 spinlock_lock(&events[evno].lock); 127 if (events[evno].answerbox != NULL) { 128 call_t *call = ipc_call_alloc(FRAME_ATOMIC); 129 if (call) { 130 call->flags |= IPC_CALL_NOTIF; 131 call->priv = ++events[evno].counter; 132 IPC_SET_IMETHOD(call->data, events[evno].imethod); 133 IPC_SET_ARG1(call->data, a1); 134 IPC_SET_ARG2(call->data, a2); 135 IPC_SET_ARG3(call->data, a3); 136 IPC_SET_ARG4(call->data, a4); 137 IPC_SET_ARG5(call->data, a5); 138 139 irq_spinlock_lock(&events[evno].answerbox->irq_lock, true); 140 list_append(&call->link, &events[evno].answerbox->irq_notifs); 141 irq_spinlock_unlock(&events[evno].answerbox->irq_lock, true); 142 143 waitq_wakeup(&events[evno].answerbox->wq, WAKEUP_FIRST); 144 } 145 } 146 spinlock_unlock(&events[evno].lock); 244 /** Event notification unmask syscall wrapper 245 * 246 * Note that currently no tests are performed whether the calling 247 * task is entitled to unmask the notifications. However, thanks 248 * to the fact that notification masking is only a performance 249 * optimization, this has probably no security implications. 250 * 251 * @param evno Event type to unmask. 252 * 253 * @return EOK on success. 
254 * @return ELIMIT on unknown event type. 255 * 256 */ 257 sysarg_t sys_event_unmask(sysarg_t evno) 258 { 259 if (evno >= EVENT_END) 260 return ELIMIT; 261 262 event_unmask((event_type_t) evno); 263 return EOK; 147 264 } 148 265 -
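Putting the pieces together, the masking round trip introduced above looks like this (sketch):

    /*
     * 1. uspace: sys_event_subscribe(evno, imethod)  -- mask starts cleared
     * 2. kernel: event_notify(evno, true, ...)       -- IPC notification sent,
     *            event masked; further notify calls return EBUSY
     * 3. uspace: consumes the data, then sys_event_unmask(evno)
     * 4. kernel: event_unmask() clears the mask and runs unmask_cb(),
     *            which may immediately re-notify if data is still pending
     */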
kernel/generic/src/lib/str.c
r72cd53d r3375bd4 922 922 void order_suffix(const uint64_t val, uint64_t *rv, char *suffix) 923 923 { 924 if (val > 10000000000000000000ULL) {925 *rv = val / 1000000000000000000ULL;924 if (val > UINT64_C(10000000000000000000)) { 925 *rv = val / UINT64_C(1000000000000000000); 926 926 *suffix = 'Z'; 927 } else if (val > 1000000000000000000ULL) {928 *rv = val / 1000000000000000ULL;927 } else if (val > UINT64_C(1000000000000000000)) { 928 *rv = val / UINT64_C(1000000000000000); 929 929 *suffix = 'E'; 930 } else if (val > 1000000000000000ULL) {931 *rv = val / 1000000000000ULL;930 } else if (val > UINT64_C(1000000000000000)) { 931 *rv = val / UINT64_C(1000000000000); 932 932 *suffix = 'T'; 933 } else if (val > 1000000000000ULL) {934 *rv = val / 1000000000ULL;933 } else if (val > UINT64_C(1000000000000)) { 934 *rv = val / UINT64_C(1000000000); 935 935 *suffix = 'G'; 936 } else if (val > 1000000000ULL) {937 *rv = val / 1000000ULL;936 } else if (val > UINT64_C(1000000000)) { 937 *rv = val / UINT64_C(1000000); 938 938 *suffix = 'M'; 939 } else if (val > 1000000ULL) {940 *rv = val / 1000ULL;939 } else if (val > UINT64_C(1000000)) { 940 *rv = val / UINT64_C(1000); 941 941 *suffix = 'k'; 942 942 } else { … … 946 946 } 947 947 948 void bin_order_suffix(const uint64_t val, uint64_t *rv, const char **suffix, 949 bool fixed) 950 { 951 if (val > UINT64_C(1152921504606846976)) { 952 *rv = val / UINT64_C(1125899906842624); 953 *suffix = "EiB"; 954 } else if (val > UINT64_C(1125899906842624)) { 955 *rv = val / UINT64_C(1099511627776); 956 *suffix = "TiB"; 957 } else if (val > UINT64_C(1099511627776)) { 958 *rv = val / UINT64_C(1073741824); 959 *suffix = "GiB"; 960 } else if (val > UINT64_C(1073741824)) { 961 *rv = val / UINT64_C(1048576); 962 *suffix = "MiB"; 963 } else if (val > UINT64_C(1048576)) { 964 *rv = val / UINT64_C(1024); 965 *suffix = "KiB"; 966 } else { 967 *rv = val; 968 if (fixed) 969 *suffix = "B "; 970 else 971 *suffix = "B"; 972 } 973 } 974 948 975 /** @} 949 976 */ -
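A small worked example of the new helper. Note that, given the thresholds above, a value just past a binary order is rendered in the next-smaller unit:

    uint64_t rv;
    const char *suffix;

    bin_order_suffix(UINT64_C(2097152), &rv, &suffix, false);
    /* 2097152 > 1048576, so rv = 2097152 / 1024 = 2048 and suffix = "KiB" */
    printf("%" PRIu64 " %s\n", rv, suffix);  /* prints "2048 KiB" */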
kernel/generic/src/main/main.c
r72cd53d r3375bd4 71 71 #include <mm/as.h> 72 72 #include <mm/slab.h> 73 #include <mm/reserve.h> 73 74 #include <synch/waitq.h> 74 75 #include <synch/futex.h> … … 217 218 ddi_init(); 218 219 arch_post_mm_init(); 220 reserve_init(); 219 221 arch_pre_smp_init(); 220 222 smp_init(); … … 223 225 slab_enable_cpucache(); 224 226 225 printf("Detected %u CPU(s), %" PRIu64 " MiB free memory\n", 226 config.cpu_count, SIZE2MB(zones_total_size())); 227 uint64_t size; 228 const char *size_suffix; 229 bin_order_suffix(zones_total_size(), &size, &size_suffix, false); 230 printf("Detected %u CPU(s), %" PRIu64 " %s free memory\n", 231 config.cpu_count, size, size_suffix); 227 232 228 233 cpu_init(); -
kernel/generic/src/mm/as.c
r72cd53d r3375bd4 80 80 #include <arch/interrupt.h> 81 81 82 #ifdef CONFIG_VIRT_IDX_DCACHE83 #include <arch/mm/cache.h>84 #endif /* CONFIG_VIRT_IDX_DCACHE */85 86 82 /** 87 83 * Each architecture decides what functions will be used to carry out … … 447 443 else 448 444 memsetb(&area->backend_data, sizeof(area->backend_data), 0); 445 446 if (area->backend && area->backend->create) { 447 if (!area->backend->create(area)) { 448 free(area); 449 mutex_unlock(&as->lock); 450 return NULL; 451 } 452 } 449 453 450 454 btree_create(&area->used_space); … … 690 694 } 691 695 696 if (area->backend && area->backend->resize) { 697 if (!area->backend->resize(area, pages)) { 698 mutex_unlock(&area->lock); 699 mutex_unlock(&as->lock); 700 return ENOMEM; 701 } 702 } 703 692 704 area->pages = pages; 693 705 … … 756 768 return ENOENT; 757 769 } 770 771 if (area->backend && area->backend->destroy) 772 area->backend->destroy(area); 758 773 759 774 uintptr_t base = area->base; -
kernel/generic/src/mm/backend_anon.c
r72cd53d r3375bd4 39 39 #include <mm/as.h> 40 40 #include <mm/page.h> 41 #include <mm/reserve.h> 41 42 #include <genarch/mm/page_pt.h> 42 43 #include <genarch/mm/page_ht.h> … … 51 52 #include <arch.h> 52 53 53 #ifdef CONFIG_VIRT_IDX_DCACHE 54 #include <arch/mm/cache.h> 55 #endif 56 57 static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access); 58 static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);59 static void anon_ share(as_area_t *area);54 static bool anon_create(as_area_t *); 55 static bool anon_resize(as_area_t *, size_t); 56 static void anon_share(as_area_t *); 57 static void anon_destroy(as_area_t *); 58 59 static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t); 60 static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t); 60 61 61 62 mem_backend_t anon_backend = { 63 .create = anon_create, 64 .resize = anon_resize, 65 .share = anon_share, 66 .destroy = anon_destroy, 67 62 68 .page_fault = anon_page_fault, 63 69 .frame_free = anon_frame_free, 64 .share = anon_share65 70 }; 71 72 bool anon_create(as_area_t *area) 73 { 74 return reserve_try_alloc(area->pages); 75 } 76 77 bool anon_resize(as_area_t *area, size_t new_pages) 78 { 79 if (new_pages > area->pages) 80 return reserve_try_alloc(new_pages - area->pages); 81 else if (new_pages < area->pages) 82 reserve_free(area->pages - new_pages); 83 84 return true; 85 } 86 87 /** Share the anonymous address space area. 88 * 89 * Sharing of anonymous area is done by duplicating its entire mapping 90 * to the pagemap. Page faults will primarily search for frames there. 91 * 92 * The address space and address space area must be already locked. 93 * 94 * @param area Address space area to be shared. 95 */ 96 void anon_share(as_area_t *area) 97 { 98 link_t *cur; 99 100 ASSERT(mutex_locked(&area->as->lock)); 101 ASSERT(mutex_locked(&area->lock)); 102 103 /* 104 * Copy used portions of the area to sh_info's page map. 105 */ 106 mutex_lock(&area->sh_info->lock); 107 for (cur = area->used_space.leaf_head.next; 108 cur != &area->used_space.leaf_head; cur = cur->next) { 109 btree_node_t *node; 110 unsigned int i; 111 112 node = list_get_instance(cur, btree_node_t, leaf_link); 113 for (i = 0; i < node->keys; i++) { 114 uintptr_t base = node->key[i]; 115 size_t count = (size_t) node->value[i]; 116 unsigned int j; 117 118 for (j = 0; j < count; j++) { 119 pte_t *pte; 120 121 page_table_lock(area->as, false); 122 pte = page_mapping_find(area->as, 123 base + j * PAGE_SIZE); 124 ASSERT(pte && PTE_VALID(pte) && 125 PTE_PRESENT(pte)); 126 btree_insert(&area->sh_info->pagemap, 127 (base + j * PAGE_SIZE) - area->base, 128 (void *) PTE_GET_FRAME(pte), NULL); 129 page_table_unlock(area->as, false); 130 131 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte)); 132 frame_reference_add(pfn); 133 } 134 135 } 136 } 137 mutex_unlock(&area->sh_info->lock); 138 } 139 140 void anon_destroy(as_area_t *area) 141 { 142 reserve_free(area->pages); 143 } 144 66 145 67 146 /** Service a page fault in the anonymous memory address space area. 
… … 115 194 } 116 195 if (allocate) { 117 frame = (uintptr_t) frame_alloc(ONE_FRAME, 0); 196 frame = (uintptr_t) frame_alloc_noreserve( 197 ONE_FRAME, 0); 118 198 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 119 199 … … 145 225 * the different causes 146 226 */ 147 frame = (uintptr_t) frame_alloc (ONE_FRAME, 0);227 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 148 228 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 149 229 } … … 174 254 ASSERT(mutex_locked(&area->lock)); 175 255 176 frame_free(frame); 177 } 178 179 /** Share the anonymous address space area. 180 * 181 * Sharing of anonymous area is done by duplicating its entire mapping 182 * to the pagemap. Page faults will primarily search for frames there. 183 * 184 * The address space and address space area must be already locked. 185 * 186 * @param area Address space area to be shared. 187 */ 188 void anon_share(as_area_t *area) 189 { 190 link_t *cur; 191 192 ASSERT(mutex_locked(&area->as->lock)); 193 ASSERT(mutex_locked(&area->lock)); 194 195 /* 196 * Copy used portions of the area to sh_info's page map. 197 */ 198 mutex_lock(&area->sh_info->lock); 199 for (cur = area->used_space.leaf_head.next; 200 cur != &area->used_space.leaf_head; cur = cur->next) { 201 btree_node_t *node; 202 unsigned int i; 203 204 node = list_get_instance(cur, btree_node_t, leaf_link); 205 for (i = 0; i < node->keys; i++) { 206 uintptr_t base = node->key[i]; 207 size_t count = (size_t) node->value[i]; 208 unsigned int j; 209 210 for (j = 0; j < count; j++) { 211 pte_t *pte; 212 213 page_table_lock(area->as, false); 214 pte = page_mapping_find(area->as, 215 base + j * PAGE_SIZE); 216 ASSERT(pte && PTE_VALID(pte) && 217 PTE_PRESENT(pte)); 218 btree_insert(&area->sh_info->pagemap, 219 (base + j * PAGE_SIZE) - area->base, 220 (void *) PTE_GET_FRAME(pte), NULL); 221 page_table_unlock(area->as, false); 222 223 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte)); 224 frame_reference_add(pfn); 225 } 226 227 } 228 } 229 mutex_unlock(&area->sh_info->lock); 256 frame_free_noreserve(frame); 230 257 } 231 258 -
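The net effect for anonymous areas, as an invariant sketch: the reservation is charged when the area is created or grown, so the page-fault path can allocate without risking overcommit:

    /*
     * anon_create(area):     reserve_try_alloc(area->pages)
     * anon_resize(area, n):  reserve_try_alloc(n - pages) on growth,
     *                        reserve_free(pages - n) on shrink
     * anon_page_fault(...):  frame_alloc_noreserve()  -- already paid for
     * anon_frame_free(...):  frame_free_noreserve()
     * anon_destroy(area):    reserve_free(area->pages)
     */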
kernel/generic/src/mm/backend_elf.c
r72cd53d r3375bd4 43 43 #include <mm/slab.h> 44 44 #include <mm/page.h> 45 #include <mm/reserve.h> 45 46 #include <genarch/mm/page_pt.h> 46 47 #include <genarch/mm/page_ht.h> … … 51 52 #include <arch/barrier.h> 52 53 53 #ifdef CONFIG_VIRT_IDX_DCACHE 54 #include <arch/mm/cache.h> 55 #endif 56 57 static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access); 58 static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);59 static void elf_ share(as_area_t *area);54 static bool elf_create(as_area_t *); 55 static bool elf_resize(as_area_t *, size_t); 56 static void elf_share(as_area_t *); 57 static void elf_destroy(as_area_t *); 58 59 static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t); 60 static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t); 60 61 61 62 mem_backend_t elf_backend = { 63 .create = elf_create, 64 .resize = elf_resize, 65 .share = elf_share, 66 .destroy = elf_destroy, 67 62 68 .page_fault = elf_page_fault, 63 69 .frame_free = elf_frame_free, 64 .share = elf_share65 70 }; 66 71 67 /** Service a page fault in the ELF backend address space area. 68 * 69 * The address space area and page tables must be already locked. 70 * 71 * @param area Pointer to the address space area. 72 * @param addr Faulting virtual address. 73 * @param access Access mode that caused the fault (i.e. 74 * read/write/exec). 75 * 76 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK 77 * on success (i.e. serviced). 78 */ 79 int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) 80 { 81 elf_header_t *elf = area->backend_data.elf; 72 static size_t elf_nonanon_pages_get(as_area_t *area) 73 { 82 74 elf_segment_header_t *entry = area->backend_data.segment; 83 btree_node_t *leaf; 84 uintptr_t base, frame, page, start_anon; 85 size_t i; 86 bool dirty = false; 87 88 ASSERT(page_table_locked(AS)); 89 ASSERT(mutex_locked(&area->lock)); 90 91 if (!as_area_check_access(area, access)) 92 return AS_PF_FAULT; 75 uintptr_t first = ALIGN_UP(entry->p_vaddr, PAGE_SIZE); 76 uintptr_t last = ALIGN_DOWN(entry->p_vaddr + entry->p_filesz, 77 PAGE_SIZE); 78 79 if (entry->p_flags & PF_W) 80 return 0; 81 82 if (last < first) 83 return 0; 84 85 return last - first; 86 } 87 88 bool elf_create(as_area_t *area) 89 { 90 size_t nonanon_pages = elf_nonanon_pages_get(area); 91 92 if (area->pages <= nonanon_pages) 93 return true; 93 94 94 if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) 95 return AS_PF_FAULT; 95 return reserve_try_alloc(area->pages - nonanon_pages); 96 } 97 98 bool elf_resize(as_area_t *area, size_t new_pages) 99 { 100 size_t nonanon_pages = elf_nonanon_pages_get(area); 101 102 if (new_pages > area->pages) { 103 /* The area is growing. */ 104 if (area->pages >= nonanon_pages) 105 return reserve_try_alloc(new_pages - area->pages); 106 else if (new_pages > nonanon_pages) 107 return reserve_try_alloc(new_pages - nonanon_pages); 108 } else if (new_pages < area->pages) { 109 /* The area is shrinking. 
*/ 110 if (new_pages >= nonanon_pages) 111 reserve_free(area->pages - new_pages); 112 else if (area->pages > nonanon_pages) 113 reserve_free(nonanon_pages - new_pages); 114 } 96 115 97 if (addr >= entry->p_vaddr + entry->p_memsz) 98 return AS_PF_FAULT; 99 100 i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH; 101 base = (uintptr_t) 102 (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE)); 103 104 /* Virtual address of faulting page*/ 105 page = ALIGN_DOWN(addr, PAGE_SIZE); 106 107 /* Virtual address of the end of initialized part of segment */ 108 start_anon = entry->p_vaddr + entry->p_filesz; 109 110 if (area->sh_info) { 111 bool found = false; 112 113 /* 114 * The address space area is shared. 115 */ 116 117 mutex_lock(&area->sh_info->lock); 118 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 119 page - area->base, &leaf); 120 if (!frame) { 121 unsigned int i; 122 123 /* 124 * Workaround for valid NULL address. 125 */ 126 127 for (i = 0; i < leaf->keys; i++) { 128 if (leaf->key[i] == page - area->base) { 129 found = true; 130 break; 131 } 132 } 133 } 134 if (frame || found) { 135 frame_reference_add(ADDR2PFN(frame)); 136 page_mapping_insert(AS, addr, frame, 137 as_area_get_flags(area)); 138 if (!used_space_insert(area, page, 1)) 139 panic("Cannot insert used space."); 140 mutex_unlock(&area->sh_info->lock); 141 return AS_PF_OK; 142 } 143 } 144 145 /* 146 * The area is either not shared or the pagemap does not contain the 147 * mapping. 148 */ 149 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 150 /* 151 * Initialized portion of the segment. The memory is backed 152 * directly by the content of the ELF image. Pages are 153 * only copied if the segment is writable so that there 154 * can be more instantions of the same memory ELF image 155 * used at a time. Note that this could be later done 156 * as COW. 157 */ 158 if (entry->p_flags & PF_W) { 159 frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); 160 memcpy((void *) PA2KA(frame), 161 (void *) (base + i * FRAME_SIZE), FRAME_SIZE); 162 if (entry->p_flags & PF_X) { 163 smc_coherence_block((void *) PA2KA(frame), 164 FRAME_SIZE); 165 } 166 dirty = true; 167 } else { 168 frame = KA2PA(base + i * FRAME_SIZE); 169 } 170 } else if (page >= start_anon) { 171 /* 172 * This is the uninitialized portion of the segment. 173 * It is not physically present in the ELF image. 174 * To resolve the situation, a frame must be allocated 175 * and cleared. 176 */ 177 frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); 178 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 179 dirty = true; 180 } else { 181 size_t pad_lo, pad_hi; 182 /* 183 * The mixed case. 184 * 185 * The middle part is backed by the ELF image and 186 * the lower and upper parts are anonymous memory. 187 * (The segment can be and often is shorter than 1 page). 
188 */ 189 if (page < entry->p_vaddr) 190 pad_lo = entry->p_vaddr - page; 191 else 192 pad_lo = 0; 193 194 if (start_anon < page + PAGE_SIZE) 195 pad_hi = page + PAGE_SIZE - start_anon; 196 else 197 pad_hi = 0; 198 199 frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); 200 memcpy((void *) (PA2KA(frame) + pad_lo), 201 (void *) (base + i * FRAME_SIZE + pad_lo), 202 FRAME_SIZE - pad_lo - pad_hi); 203 if (entry->p_flags & PF_X) { 204 smc_coherence_block((void *) (PA2KA(frame) + pad_lo), 205 FRAME_SIZE - pad_lo - pad_hi); 206 } 207 memsetb((void *) PA2KA(frame), pad_lo, 0); 208 memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi, 209 0); 210 dirty = true; 211 } 212 213 if (dirty && area->sh_info) { 214 frame_reference_add(ADDR2PFN(frame)); 215 btree_insert(&area->sh_info->pagemap, page - area->base, 216 (void *) frame, leaf); 217 } 218 219 if (area->sh_info) 220 mutex_unlock(&area->sh_info->lock); 221 222 page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); 223 if (!used_space_insert(area, page, 1)) 224 panic("Cannot insert used space."); 225 226 return AS_PF_OK; 227 } 228 229 /** Free a frame that is backed by the ELF backend. 230 * 231 * The address space area and page tables must be already locked. 232 * 233 * @param area Pointer to the address space area. 234 * @param page Page that is mapped to frame. Must be aligned to 235 * PAGE_SIZE. 236 * @param frame Frame to be released. 237 * 238 */ 239 void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) 240 { 241 elf_segment_header_t *entry = area->backend_data.segment; 242 uintptr_t start_anon; 243 244 ASSERT(page_table_locked(area->as)); 245 ASSERT(mutex_locked(&area->lock)); 246 247 ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)); 248 ASSERT(page < entry->p_vaddr + entry->p_memsz); 249 250 start_anon = entry->p_vaddr + entry->p_filesz; 251 252 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 253 if (entry->p_flags & PF_W) { 254 /* 255 * Free the frame with the copy of writable segment 256 * data. 257 */ 258 frame_free(frame); 259 } 260 } else { 261 /* 262 * The frame is either anonymous memory or the mixed case (i.e. 263 * lower part is backed by the ELF image and the upper is 264 * anonymous). In any case, a frame needs to be freed. 265 */ 266 frame_free(frame); 267 } 116 return true; 268 117 } 269 118 … … 356 205 } 357 206 207 void elf_destroy(as_area_t *area) 208 { 209 size_t nonanon_pages = elf_nonanon_pages_get(area); 210 211 if (area->pages > nonanon_pages) 212 reserve_free(area->pages - nonanon_pages); 213 } 214 215 /** Service a page fault in the ELF backend address space area. 216 * 217 * The address space area and page tables must be already locked. 218 * 219 * @param area Pointer to the address space area. 220 * @param addr Faulting virtual address. 221 * @param access Access mode that caused the fault (i.e. 222 * read/write/exec). 223 * 224 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK 225 * on success (i.e. serviced). 
226 */ 227 int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) 228 { 229 elf_header_t *elf = area->backend_data.elf; 230 elf_segment_header_t *entry = area->backend_data.segment; 231 btree_node_t *leaf; 232 uintptr_t base, frame, page, start_anon; 233 size_t i; 234 bool dirty = false; 235 236 ASSERT(page_table_locked(AS)); 237 ASSERT(mutex_locked(&area->lock)); 238 239 if (!as_area_check_access(area, access)) 240 return AS_PF_FAULT; 241 242 if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) 243 return AS_PF_FAULT; 244 245 if (addr >= entry->p_vaddr + entry->p_memsz) 246 return AS_PF_FAULT; 247 248 i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH; 249 base = (uintptr_t) 250 (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE)); 251 252 /* Virtual address of faulting page*/ 253 page = ALIGN_DOWN(addr, PAGE_SIZE); 254 255 /* Virtual address of the end of initialized part of segment */ 256 start_anon = entry->p_vaddr + entry->p_filesz; 257 258 if (area->sh_info) { 259 bool found = false; 260 261 /* 262 * The address space area is shared. 263 */ 264 265 mutex_lock(&area->sh_info->lock); 266 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 267 page - area->base, &leaf); 268 if (!frame) { 269 unsigned int i; 270 271 /* 272 * Workaround for valid NULL address. 273 */ 274 275 for (i = 0; i < leaf->keys; i++) { 276 if (leaf->key[i] == page - area->base) { 277 found = true; 278 break; 279 } 280 } 281 } 282 if (frame || found) { 283 frame_reference_add(ADDR2PFN(frame)); 284 page_mapping_insert(AS, addr, frame, 285 as_area_get_flags(area)); 286 if (!used_space_insert(area, page, 1)) 287 panic("Cannot insert used space."); 288 mutex_unlock(&area->sh_info->lock); 289 return AS_PF_OK; 290 } 291 } 292 293 /* 294 * The area is either not shared or the pagemap does not contain the 295 * mapping. 296 */ 297 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 298 /* 299 * Initialized portion of the segment. The memory is backed 300 * directly by the content of the ELF image. Pages are 301 * only copied if the segment is writable so that there 302 * can be more instantions of the same memory ELF image 303 * used at a time. Note that this could be later done 304 * as COW. 305 */ 306 if (entry->p_flags & PF_W) { 307 frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0); 308 memcpy((void *) PA2KA(frame), 309 (void *) (base + i * FRAME_SIZE), FRAME_SIZE); 310 if (entry->p_flags & PF_X) { 311 smc_coherence_block((void *) PA2KA(frame), 312 FRAME_SIZE); 313 } 314 dirty = true; 315 } else { 316 frame = KA2PA(base + i * FRAME_SIZE); 317 } 318 } else if (page >= start_anon) { 319 /* 320 * This is the uninitialized portion of the segment. 321 * It is not physically present in the ELF image. 322 * To resolve the situation, a frame must be allocated 323 * and cleared. 324 */ 325 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 326 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 327 dirty = true; 328 } else { 329 size_t pad_lo, pad_hi; 330 /* 331 * The mixed case. 332 * 333 * The middle part is backed by the ELF image and 334 * the lower and upper parts are anonymous memory. 335 * (The segment can be and often is shorter than 1 page). 
336 */ 337 if (page < entry->p_vaddr) 338 pad_lo = entry->p_vaddr - page; 339 else 340 pad_lo = 0; 341 342 if (start_anon < page + PAGE_SIZE) 343 pad_hi = page + PAGE_SIZE - start_anon; 344 else 345 pad_hi = 0; 346 347 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 348 memcpy((void *) (PA2KA(frame) + pad_lo), 349 (void *) (base + i * FRAME_SIZE + pad_lo), 350 FRAME_SIZE - pad_lo - pad_hi); 351 if (entry->p_flags & PF_X) { 352 smc_coherence_block((void *) (PA2KA(frame) + pad_lo), 353 FRAME_SIZE - pad_lo - pad_hi); 354 } 355 memsetb((void *) PA2KA(frame), pad_lo, 0); 356 memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi, 357 0); 358 dirty = true; 359 } 360 361 if (dirty && area->sh_info) { 362 frame_reference_add(ADDR2PFN(frame)); 363 btree_insert(&area->sh_info->pagemap, page - area->base, 364 (void *) frame, leaf); 365 } 366 367 if (area->sh_info) 368 mutex_unlock(&area->sh_info->lock); 369 370 page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); 371 if (!used_space_insert(area, page, 1)) 372 panic("Cannot insert used space."); 373 374 return AS_PF_OK; 375 } 376 377 /** Free a frame that is backed by the ELF backend. 378 * 379 * The address space area and page tables must be already locked. 380 * 381 * @param area Pointer to the address space area. 382 * @param page Page that is mapped to frame. Must be aligned to 383 * PAGE_SIZE. 384 * @param frame Frame to be released. 385 * 386 */ 387 void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) 388 { 389 elf_segment_header_t *entry = area->backend_data.segment; 390 uintptr_t start_anon; 391 392 ASSERT(page_table_locked(area->as)); 393 ASSERT(mutex_locked(&area->lock)); 394 395 ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)); 396 ASSERT(page < entry->p_vaddr + entry->p_memsz); 397 398 start_anon = entry->p_vaddr + entry->p_filesz; 399 400 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 401 if (entry->p_flags & PF_W) { 402 /* 403 * Free the frame with the copy of writable segment 404 * data. 405 */ 406 frame_free_noreserve(frame); 407 } 408 } else { 409 /* 410 * The frame is either anonymous memory or the mixed case (i.e. 411 * lower part is backed by the ELF image and the upper is 412 * anonymous). In any case, a frame needs to be freed. 413 */ 414 frame_free_noreserve(frame); 415 } 416 } 417 358 418 /** @} 359 419 */ -
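For orientation, the three cases handled by elf_page_fault() above (sketch):

    /*
     *  p_vaddr          start_anon = p_vaddr + p_filesz       p_vaddr + p_memsz
     *  |--- backed by the ELF image ---|--- zero-filled anonymous ---|
     *
     * - page entirely below start_anon: mapped from the image in place,
     *   or copied via frame_alloc_noreserve() if the segment is writable
     * - page entirely above start_anon: frame_alloc_noreserve() + memsetb()
     * - page straddling start_anon (the "mixed case"): copy the image part,
     *   zero the pad_lo/pad_hi remainders
     */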
kernel/generic/src/mm/backend_phys.c
r72cd53d r3375bd4 48 48 #include <align.h> 49 49 50 static int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access); 51 static void phys_share(as_area_t *area); 50 static bool phys_create(as_area_t *); 51 static void phys_share(as_area_t *); 52 static void phys_destroy(as_area_t *); 53 54 static int phys_page_fault(as_area_t *, uintptr_t, pf_access_t); 52 55 53 56 mem_backend_t phys_backend = { 57 .create = phys_create, 58 .resize = NULL, 59 .share = phys_share, 60 .destroy = phys_destroy, 61 54 62 .page_fault = phys_page_fault, 55 63 .frame_free = NULL, 56 .share = phys_share57 64 }; 65 66 bool phys_create(as_area_t *area) 67 { 68 return true; 69 } 70 71 /** Share address space area backed by physical memory. 72 * 73 * Do actually nothing as sharing of address space areas 74 * that are backed up by physical memory is very easy. 75 * Note that the function must be defined so that 76 * as_area_share() will succeed. 77 */ 78 void phys_share(as_area_t *area) 79 { 80 ASSERT(mutex_locked(&area->as->lock)); 81 ASSERT(mutex_locked(&area->lock)); 82 } 83 84 85 void phys_destroy(as_area_t *area) 86 { 87 /* Nothing to do. */ 88 } 58 89 59 90 /** Service a page fault in the address space area backed by physical memory. … … 88 119 } 89 120 90 /** Share address space area backed by physical memory.91 *92 * Do actually nothing as sharing of address space areas93 * that are backed up by physical memory is very easy.94 * Note that the function must be defined so that95 * as_area_share() will succeed.96 */97 void phys_share(as_area_t *area)98 {99 ASSERT(mutex_locked(&area->as->lock));100 ASSERT(mutex_locked(&area->lock));101 }102 103 121 /** @} 104 122 */ -
kernel/generic/src/mm/frame.c
r72cd53d r3375bd4 45 45 #include <typedefs.h> 46 46 #include <mm/frame.h> 47 #include <mm/reserve.h> 47 48 #include <mm/as.h> 48 49 #include <panic.h> … … 59 60 #include <macros.h> 60 61 #include <config.h> 62 #include <str.h> 61 63 62 64 zones_t zones; … … 180 182 * 181 183 */ 182 #ifdef CONFIG_DEBUG 183 NO_TRACE static size_t total_frames_free(void) 184 NO_TRACE static size_t frame_total_free_get_internal(void) 184 185 { 185 186 size_t total = 0; 186 187 size_t i; 188 187 189 for (i = 0; i < zones.count; i++) 188 190 total += zones.info[i].free_count; … … 190 192 return total; 191 193 } 192 #endif /* CONFIG_DEBUG */ 194 195 NO_TRACE size_t frame_total_free_get(void) 196 { 197 size_t total; 198 199 irq_spinlock_lock(&zones.lock, true); 200 total = frame_total_free_get_internal(); 201 irq_spinlock_unlock(&zones.lock, true); 202 203 return total; 204 } 205 193 206 194 207 /** Find a zone with a given frames. … … 472 485 * @param frame_idx Frame index relative to zone. 473 486 * 474 */ 475 NO_TRACE static void zone_frame_free(zone_t *zone, size_t frame_idx) 487 * @return Number of freed frames. 488 * 489 */ 490 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx) 476 491 { 477 492 ASSERT(zone_flags_available(zone->flags)); 478 493 479 494 frame_t *frame = &zone->frames[frame_idx]; 480 481 /* Remember frame order */ 482 uint8_t order = frame->buddy_order; 495 size_t size = 0; 483 496 484 497 ASSERT(frame->refcount); 485 498 486 499 if (!--frame->refcount) { 487 buddy_system_free(zone->buddy_system, &frame->buddy_link);488 500 size = 1 << frame->buddy_order; 501 buddy_system_free(zone->buddy_system, &frame->buddy_link); 489 502 /* Update zone information. */ 490 zone->free_count += (1 << order); 491 zone->busy_count -= (1 << order); 492 } 503 zone->free_count += size; 504 zone->busy_count -= size; 505 } 506 507 return size; 493 508 } 494 509 … … 516 531 ASSERT(link); 517 532 zone->free_count--; 533 reserve_force_alloc(1); 518 534 } 519 535 … … 645 661 for (i = 0; i < cframes; i++) { 646 662 zones.info[znum].busy_count++; 647 zone_frame_free(&zones.info[znum],663 (void) zone_frame_free(&zones.info[znum], 648 664 pfn - zones.info[znum].base + i); 649 665 } … … 683 699 /* Free unneeded frames */ 684 700 for (i = count; i < (size_t) (1 << order); i++) 685 zone_frame_free(&zones.info[znum], i + frame_idx);701 (void) zone_frame_free(&zones.info[znum], i + frame_idx); 686 702 } 687 703 … … 695 711 * not to be 2^order size. Once the allocator is running it is no longer 696 712 * possible, merged configuration data occupies more space :-/ 697 *698 * The function uses699 713 * 700 714 */ … … 999 1013 size_t hint = pzone ? (*pzone) : 0; 1000 1014 1015 /* 1016 * If not told otherwise, we must first reserve the memory. 
1017 */ 1018 if (!(flags & FRAME_NO_RESERVE)) 1019 reserve_force_alloc(size); 1020 1001 1021 loop: 1002 1022 irq_spinlock_lock(&zones.lock, true); … … 1033 1053 if (flags & FRAME_ATOMIC) { 1034 1054 irq_spinlock_unlock(&zones.lock, true); 1055 if (!(flags & FRAME_NO_RESERVE)) 1056 reserve_free(size); 1035 1057 return NULL; 1036 1058 } 1037 1059 1038 1060 #ifdef CONFIG_DEBUG 1039 size_t avail = total_frames_free();1061 size_t avail = frame_total_free_get_internal(); 1040 1062 #endif 1041 1063 … … 1088 1110 } 1089 1111 1112 void *frame_alloc(uint8_t order, frame_flags_t flags) 1113 { 1114 return frame_alloc_generic(order, flags, NULL); 1115 } 1116 1117 void *frame_alloc_noreserve(uint8_t order, frame_flags_t flags) 1118 { 1119 return frame_alloc_generic(order, flags | FRAME_NO_RESERVE, NULL); 1120 } 1121 1090 1122 /** Free a frame. 1091 1123 * … … 1095 1127 * 1096 1128 * @param frame Physical Address of of the frame to be freed. 1097 * 1098 */ 1099 void frame_free(uintptr_t frame) 1100 { 1129 * @param flags Flags to control memory reservation. 1130 * 1131 */ 1132 void frame_free_generic(uintptr_t frame, frame_flags_t flags) 1133 { 1134 size_t size; 1135 1101 1136 irq_spinlock_lock(&zones.lock, true); 1102 1137 … … 1106 1141 pfn_t pfn = ADDR2PFN(frame); 1107 1142 size_t znum = find_zone(pfn, 1, 0); 1143 1108 1144 1109 1145 ASSERT(znum != (size_t) -1); 1110 1146 1111 zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base);1147 size = zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base); 1112 1148 1113 1149 irq_spinlock_unlock(&zones.lock, true); … … 1118 1154 mutex_lock(&mem_avail_mtx); 1119 1155 if (mem_avail_req > 0) 1120 mem_avail_req --;1156 mem_avail_req -= min(mem_avail_req, size); 1121 1157 1122 1158 if (mem_avail_req == 0) { … … 1125 1161 } 1126 1162 mutex_unlock(&mem_avail_mtx); 1163 1164 if (!(flags & FRAME_NO_RESERVE)) 1165 reserve_free(size); 1166 } 1167 1168 void frame_free(uintptr_t frame) 1169 { 1170 frame_free_generic(frame, 0); 1171 } 1172 1173 void frame_free_noreserve(uintptr_t frame) 1174 { 1175 frame_free_generic(frame, FRAME_NO_RESERVE); 1127 1176 } 1128 1177 … … 1355 1404 bool available = zone_flags_available(flags); 1356 1405 1406 uint64_t size; 1407 const char *size_suffix; 1408 bin_order_suffix(FRAMES2SIZE(count), &size, &size_suffix, false); 1409 1357 1410 printf("Zone number: %zu\n", znum); 1358 1411 printf("Zone base address: %p\n", (void *) base); 1359 printf("Zone size: %zu frames (% zu KiB)\n", count,1360 SIZE2KB(FRAMES2SIZE(count)));1412 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count, 1413 size, size_suffix); 1361 1414 printf("Zone flags: %c%c%c\n", 1362 1415 available ? 'A' : ' ', … … 1365 1418 1366 1419 if (available) { 1367 printf("Allocated space: %zu frames (%zu KiB)\n", 1368 busy_count, SIZE2KB(FRAMES2SIZE(busy_count))); 1369 printf("Available space: %zu frames (%zu KiB)\n", 1370 free_count, SIZE2KB(FRAMES2SIZE(free_count))); 1420 bin_order_suffix(FRAMES2SIZE(busy_count), &size, &size_suffix, 1421 false); 1422 printf("Allocated space: %zu frames (%" PRIu64 " %s)\n", 1423 busy_count, size, size_suffix); 1424 bin_order_suffix(FRAMES2SIZE(free_count), &size, &size_suffix, 1425 false); 1426 printf("Available space: %zu frames (%" PRIu64 " %s)\n", 1427 free_count, size, size_suffix); 1371 1428 } 1372 1429 } -
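generic/src/mm/reserve.c is one of the two added files and is not shown in this merge view. Judging purely from the call sites above (reserve_init, reserve_try_alloc, reserve_force_alloc, reserve_free), a plausible reconstruction of its interface is:

    /* mm/reserve.h -- hypothetical sketch inferred from the call sites */
    extern void reserve_init(void);
    extern bool reserve_try_alloc(size_t size);   /* fail instead of sleeping */
    extern void reserve_force_alloc(size_t size); /* may wait for free memory */
    extern void reserve_free(size_t size);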
kernel/generic/src/proc/scheduler.c
r72cd53d r3375bd4 354 354 355 355 /* 356 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM356 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, AS 357 357 * and preemption counter. At this point THE could be coming either 358 358 * from THREAD's or CPU's stack. -
kernel/generic/src/proc/task.c
r72cd53d r3375bd4 534 534 */ 535 535 if (notify) { 536 if (event_is_subscribed(EVENT_FAULT)) { 537 /* Notify the subscriber that a fault occurred. */ 538 event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid), 539 UPPER32(TASK->taskid), (sysarg_t) THREAD); 540 536 /* Notify the subscriber that a fault occurred. */ 537 if (event_notify_3(EVENT_FAULT, false, LOWER32(TASK->taskid), 538 UPPER32(TASK->taskid), (sysarg_t) THREAD) == EOK) { 541 539 #ifdef CONFIG_UDEBUG 542 540 /* Wait for a debugging session. */ -
kernel/generic/src/syscall/syscall.c
r72cd53d r3375bd4 166 166 /* Event notification syscalls. */ 167 167 (syshandler_t) sys_event_subscribe, 168 (syshandler_t) sys_event_unmask, 168 169 169 170 /* Capabilities related syscalls. */