Changeset c621f4aa in mainline for kernel/genarch/src
- Timestamp:
- 2010-07-25T10:11:13Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 377cce8
- Parents:
- 24a2517 (diff), a2da43c (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- kernel/genarch/src
- Files:
- 1 added
- 24 edited
- acpi/acpi.c (modified) (7 diffs)
- acpi/madt.c (modified) (5 diffs)
- drivers/dsrln/dsrlnout.c (modified) (1 diff)
- drivers/ega/ega.c (modified) (8 diffs)
- drivers/i8042/i8042.c (modified) (1 diff)
- drivers/s3c24xx_uart/s3c24xx_uart.c (added)
- drivers/via-cuda/cuda.c (modified) (1 diff)
- fb/fb.c (modified) (2 diffs)
- kbrd/kbrd.c (modified) (1 diff)
- kbrd/kbrd_pl050.c (modified) (1 diff)
- kbrd/scanc_mac.c (modified) (1 diff)
- kbrd/scanc_pc.c (modified) (1 diff)
- kbrd/scanc_pl050.c (modified) (1 diff)
- kbrd/scanc_sun.c (modified) (1 diff)
- mm/as_ht.c (modified) (9 diffs)
- mm/as_pt.c (modified) (8 diffs)
- mm/asid.c (modified) (4 diffs)
- mm/page_ht.c (modified) (14 diffs)
- mm/page_pt.c (modified) (7 diffs)
- multiboot/multiboot.c (modified) (1 diff)
- ofw/ebus.c (modified) (1 diff)
- ofw/fhc.c (modified) (1 diff)
- ofw/ofw_tree.c (modified) (3 diffs)
- ofw/pci.c (modified) (1 diff)
- srln/srln.c (modified) (1 diff)
-
kernel/genarch/src/acpi/acpi.c
r24a2517 rc621f4aa 33 33 /** 34 34 * @file 35 * @brief Advanced Configuration and Power Interface (ACPI) initialization.36 */ 37 35 * @brief Advanced Configuration and Power Interface (ACPI) initialization. 36 */ 37 38 38 #include <genarch/acpi/acpi.h> 39 39 #include <genarch/acpi/madt.h> … … 43 43 #include <print.h> 44 44 45 #define RSDP_SIGNATURE "RSD PTR "46 #define RSDP_REVISION_OFFS 1545 #define RSDP_SIGNATURE "RSD PTR " 46 #define RSDP_REVISION_OFFS 15 47 47 48 48 #define CMP_SIGNATURE(left, right) \ … … 64 64 }; 65 65 66 static int rsdp_check(uint8_t *rsdp) { 67 struct acpi_rsdp *r = (struct acpi_rsdp *) rsdp; 66 static int rsdp_check(uint8_t *_rsdp) { 67 struct acpi_rsdp *rsdp = (struct acpi_rsdp *) _rsdp; 68 uint8_t sum = 0; 69 uint32_t i; 70 71 for (i = 0; i < 20; i++) 72 sum = (uint8_t) (sum + _rsdp[i]); 73 74 if (sum) 75 return 0; /* bad checksum */ 76 77 if (rsdp->revision == 0) 78 return 1; /* ACPI 1.0 */ 79 80 for (; i < rsdp->length; i++) 81 sum = (uint8_t) (sum + _rsdp[i]); 82 83 return !sum; 84 } 85 86 int acpi_sdt_check(uint8_t *sdt) 87 { 88 struct acpi_sdt_header *hdr = (struct acpi_sdt_header *) sdt; 68 89 uint8_t sum = 0; 69 90 unsigned int i; 70 91 71 for (i = 0; i < 20; i++) 72 sum = (uint8_t) (sum + rsdp[i]); 73 74 if (sum) 75 return 0; /* bad checksum */ 76 77 if (r->revision == 0) 78 return 1; /* ACPI 1.0 */ 79 80 for (; i < r->length; i++) 81 sum = (uint8_t) (sum + rsdp[i]); 82 92 for (i = 0; i < hdr->length; i++) 93 sum = (uint8_t) (sum + sdt[i]); 94 83 95 return !sum; 84 85 }86 87 int acpi_sdt_check(uint8_t *sdt)88 {89 struct acpi_sdt_header *h = (struct acpi_sdt_header *) sdt;90 uint8_t sum = 0;91 unsigned int i;92 93 for (i = 0; i < h->length; i++)94 sum = (uint8_t) (sum + sdt[i]);95 96 return !sum;97 96 } 98 97 99 98 static void map_sdt(struct acpi_sdt_header *sdt) 100 99 { 101 page_mapping_insert(AS_KERNEL, (uintptr_t) sdt, (uintptr_t) sdt, PAGE_NOT_CACHEABLE | PAGE_WRITE); 100 page_table_lock(AS_KERNEL, true); 101 page_mapping_insert(AS_KERNEL, (uintptr_t) sdt, (uintptr_t) sdt, 102 PAGE_NOT_CACHEABLE | PAGE_WRITE); 102 103 map_structure((uintptr_t) sdt, sdt->length); 104 page_table_unlock(AS_KERNEL, true); 103 105 } 104 106 105 107 static void configure_via_rsdt(void) 106 108 { 107 unsigned int i, j, cnt = (acpi_rsdt->header.length - sizeof(struct acpi_sdt_header)) / sizeof(uint32_t); 109 size_t i; 110 size_t j; 111 size_t cnt = (acpi_rsdt->header.length - sizeof(struct acpi_sdt_header)) 112 / sizeof(uint32_t); 108 113 109 114 for (i = 0; i < cnt; i++) { 110 for (j = 0; j < sizeof(signature_map) / sizeof(struct acpi_signature_map); j++) { 111 struct acpi_sdt_header *h = (struct acpi_sdt_header *) (unative_t) acpi_rsdt->entry[i]; 112 113 map_sdt(h); 114 if (CMP_SIGNATURE(h->signature, signature_map[j].signature)) { 115 if (!acpi_sdt_check((uint8_t *) h)) 116 goto next; 117 *signature_map[j].sdt_ptr = h; 118 LOG("%p: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description); 115 for (j = 0; j < sizeof(signature_map) 116 / sizeof(struct acpi_signature_map); j++) { 117 struct acpi_sdt_header *hdr = 118 (struct acpi_sdt_header *) (unative_t) acpi_rsdt->entry[i]; 119 120 map_sdt(hdr); 121 if (CMP_SIGNATURE(hdr->signature, signature_map[j].signature)) { 122 if (!acpi_sdt_check((uint8_t *) hdr)) 123 break; 124 125 *signature_map[j].sdt_ptr = hdr; 126 LOG("%p: ACPI %s", *signature_map[j].sdt_ptr, 127 signature_map[j].description); 119 128 } 120 129 } 121 next:122 ;123 130 } 124 131 } … … 126 133 static void configure_via_xsdt(void) 127 134 { 128 
unsigned int i, j, cnt = (acpi_xsdt->header.length - sizeof(struct acpi_sdt_header)) / sizeof(uint64_t); 135 size_t i; 136 size_t j; 137 size_t cnt = (acpi_xsdt->header.length - sizeof(struct acpi_sdt_header)) 138 / sizeof(uint64_t); 129 139 130 140 for (i = 0; i < cnt; i++) { 131 for (j = 0; j < sizeof(signature_map) / sizeof(struct acpi_signature_map); j++) { 132 struct acpi_sdt_header *h = (struct acpi_sdt_header *) ((uintptr_t) acpi_rsdt->entry[i]); 133 134 map_sdt(h); 135 if (CMP_SIGNATURE(h->signature, signature_map[j].signature)) { 136 if (!acpi_sdt_check((uint8_t *) h)) 137 goto next; 138 *signature_map[j].sdt_ptr = h; 139 LOG("%p: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description); 141 for (j = 0; j < sizeof(signature_map) 142 / sizeof(struct acpi_signature_map); j++) { 143 struct acpi_sdt_header *hdr = 144 (struct acpi_sdt_header *) ((uintptr_t) acpi_xsdt->entry[i]); 145 146 map_sdt(hdr); 147 if (CMP_SIGNATURE(hdr->signature, signature_map[j].signature)) { 148 if (!acpi_sdt_check((uint8_t *) hdr)) 149 break; 150 151 *signature_map[j].sdt_ptr = hdr; 152 LOG("%p: ACPI %s", *signature_map[j].sdt_ptr, 153 signature_map[j].description); 140 154 } 141 155 } 142 next: 143 ; 144 } 145 156 } 146 157 } 147 158 … … 149 160 { 150 161 uint8_t *addr[2] = { NULL, (uint8_t *) PA2KA(0xe0000) }; 151 int i, j, length[2] = { 1024, 128*1024 }; 162 unsigned int i; 163 unsigned int j; 164 unsigned int length[2] = { 1024, 128 * 1024 }; 152 165 uint64_t *sig = (uint64_t *) RSDP_SIGNATURE; 153 166 154 167 /* 155 168 * Find Root System Description Pointer … … 157 170 * 2. search 128K starting at 0xe0000 158 171 */ 159 172 160 173 addr[0] = (uint8_t *) PA2KA(ebda); 161 174 for (i = (ebda ? 0 : 1); i < 2; i++) { 162 175 for (j = 0; j < length[i]; j += 16) { 163 if (*((uint64_t *) &addr[i][j]) == *sig && rsdp_check(&addr[i][j])) { 176 if ((*((uint64_t *) &addr[i][j]) == *sig) 177 && (rsdp_check(&addr[i][j]))) { 164 178 acpi_rsdp = (struct acpi_rsdp *) &addr[i][j]; 165 179 goto rsdp_found; … … 167 181 } 168 182 } 169 183 170 184 return; 171 185 172 186 rsdp_found: 173 LOG("%p: ACPI Root System Description Pointer \n", acpi_rsdp);174 175 acpi_rsdt = (struct acpi_rsdt *) ( unative_t) acpi_rsdp->rsdt_address;187 LOG("%p: ACPI Root System Description Pointer", acpi_rsdp); 188 189 acpi_rsdt = (struct acpi_rsdt *) ((uintptr_t) acpi_rsdp->rsdt_address); 176 190 if (acpi_rsdp->revision) 177 191 acpi_xsdt = (struct acpi_xsdt *) ((uintptr_t) acpi_rsdp->xsdt_address); 178 192 179 193 if (acpi_rsdt) 180 194 map_sdt((struct acpi_sdt_header *) acpi_rsdt); 195 181 196 if (acpi_xsdt) 182 197 map_sdt((struct acpi_sdt_header *) acpi_xsdt); 183 184 if ( acpi_rsdt && !acpi_sdt_check((uint8_t *) acpi_rsdt)) {198 199 if ((acpi_rsdt) && (!acpi_sdt_check((uint8_t *) acpi_rsdt))) { 185 200 printf("RSDT: bad checksum\n"); 186 201 return; 187 202 } 188 if (acpi_xsdt && !acpi_sdt_check((uint8_t *) acpi_xsdt)) { 203 204 if ((acpi_xsdt) && (!acpi_sdt_check((uint8_t *) acpi_xsdt))) { 189 205 printf("XSDT: bad checksum\n"); 190 206 return; 191 207 } 192 208 193 209 if (acpi_xsdt) 194 210 configure_via_xsdt(); 195 211 else if (acpi_rsdt) 196 212 configure_via_rsdt(); 197 198 213 } 199 214 -
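The most substantial change in acpi.c above is that map_sdt() now takes the kernel page table lock around the identity mapping it creates, instead of calling page_mapping_insert() bare. A minimal sketch of the resulting pattern follows; the helper name map_identity_locked and the include list are illustrative, not part of the changeset, and the calls shown are the kernel-internal ones visible in this diff.

    #include <mm/page.h>   /* page_mapping_insert(), page_table_lock()/unlock() (assumed location) */
    #include <mm/as.h>     /* AS_KERNEL */

    /* Illustrative helper: identity-map a structure with the page table
     * lock held explicitly, as map_sdt() now does. */
    static void map_identity_locked(uintptr_t addr, size_t size)
    {
        page_table_lock(AS_KERNEL, true);
        page_mapping_insert(AS_KERNEL, addr, addr,
            PAGE_NOT_CACHEABLE | PAGE_WRITE);
        map_structure(addr, size);
        page_table_unlock(AS_KERNEL, true);
    }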
kernel/genarch/src/acpi/madt.c
r24a2517 rc621f4aa 27 27 */ 28 28 29 /** @addtogroup genarch 29 /** @addtogroup genarch 30 30 * @{ 31 31 */ 32 32 /** 33 33 * @file 34 * @brief Multiple APIC Description Table (MADT) parsing.35 */ 36 37 #include < arch/types.h>34 * @brief Multiple APIC Description Table (MADT) parsing. 35 */ 36 37 #include <typedefs.h> 38 38 #include <genarch/acpi/acpi.h> 39 39 #include <genarch/acpi/madt.h> … … 52 52 #ifdef CONFIG_SMP 53 53 54 /** Standard ISA IRQ map; can be overriden by Interrupt Source Override entries of MADT. */ 55 int isa_irq_map[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; 56 57 static void madt_l_apic_entry(struct madt_l_apic *la, uint32_t index); 58 static void madt_io_apic_entry(struct madt_io_apic *ioa, uint32_t index); 59 static void madt_intr_src_ovrd_entry(struct madt_intr_src_ovrd *override, uint32_t index); 60 static int madt_cmp(void * a, void * b); 54 /** 55 * Standard ISA IRQ map; can be overriden by 56 * Interrupt Source Override entries of MADT. 57 */ 58 static int isa_irq_map[] = 59 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; 61 60 62 61 struct madt_l_apic *madt_l_apic_entries = NULL; 63 62 struct madt_io_apic *madt_io_apic_entries = NULL; 64 63 65 size_t madt_l_apic_entry_index = 0; 66 size_t madt_io_apic_entry_index = 0; 67 size_t madt_l_apic_entry_cnt = 0; 68 size_t madt_io_apic_entry_cnt = 0; 69 size_t cpu_count = 0; 70 71 struct madt_apic_header * * madt_entries_index = NULL; 72 unsigned int madt_entries_index_cnt = 0; 73 74 char *entry[] = { 64 static size_t madt_l_apic_entry_index = 0; 65 static size_t madt_io_apic_entry_index = 0; 66 static size_t madt_l_apic_entry_cnt = 0; 67 static size_t madt_io_apic_entry_cnt = 0; 68 69 static struct madt_apic_header **madt_entries_index = NULL; 70 71 const char *entry[] = { 75 72 "L_APIC", 76 73 "IO_APIC", … … 84 81 }; 85 82 86 /* 87 * ACPI MADT Implementation of SMP configuration interface. 88 */ 89 static size_t madt_cpu_count(void); 90 static bool madt_cpu_enabled(size_t i); 91 static bool madt_cpu_bootstrap(size_t i); 92 static uint8_t madt_cpu_apic_id(size_t i); 93 static int madt_irq_to_pin(unsigned int irq); 94 83 static uint8_t madt_cpu_apic_id(size_t i) 84 { 85 ASSERT(i < madt_l_apic_entry_cnt); 86 87 return ((struct madt_l_apic *) 88 madt_entries_index[madt_l_apic_entry_index + i])->apic_id; 89 } 90 91 static bool madt_cpu_enabled(size_t i) 92 { 93 ASSERT(i < madt_l_apic_entry_cnt); 94 95 /* 96 * FIXME: The current local APIC driver limits usable 97 * CPU IDs to 8. 98 * 99 */ 100 if (i > 7) 101 return false; 102 103 return ((struct madt_l_apic *) 104 madt_entries_index[madt_l_apic_entry_index + i])->flags & 0x1; 105 } 106 107 static bool madt_cpu_bootstrap(size_t i) 108 { 109 ASSERT(i < madt_l_apic_entry_cnt); 110 111 return ((struct madt_l_apic *) 112 madt_entries_index[madt_l_apic_entry_index + i])->apic_id == 113 bsp_l_apic; 114 } 115 116 static int madt_irq_to_pin(unsigned int irq) 117 { 118 ASSERT(irq < sizeof(isa_irq_map) / sizeof(int)); 119 120 return isa_irq_map[irq]; 121 } 122 123 /** ACPI MADT Implementation of SMP configuration interface. 
124 * 125 */ 95 126 struct smp_config_operations madt_config_operations = { 96 .cpu_count = madt_cpu_count,97 127 .cpu_enabled = madt_cpu_enabled, 98 128 .cpu_bootstrap = madt_cpu_bootstrap, … … 101 131 }; 102 132 103 size_t madt_cpu_count(void) 104 { 105 return madt_l_apic_entry_cnt; 106 } 107 108 bool madt_cpu_enabled(size_t i) 109 { 110 ASSERT(i < madt_l_apic_entry_cnt); 111 return ((struct madt_l_apic *) madt_entries_index[madt_l_apic_entry_index + i])->flags & 0x1; 112 113 } 114 115 bool madt_cpu_bootstrap(size_t i) 116 { 117 ASSERT(i < madt_l_apic_entry_cnt); 118 return ((struct madt_l_apic *) madt_entries_index[madt_l_apic_entry_index + i])->apic_id == l_apic_id(); 119 } 120 121 uint8_t madt_cpu_apic_id(size_t i) 122 { 123 ASSERT(i < madt_l_apic_entry_cnt); 124 return ((struct madt_l_apic *) madt_entries_index[madt_l_apic_entry_index + i])->apic_id; 125 } 126 127 int madt_irq_to_pin(unsigned int irq) 128 { 129 ASSERT(irq < sizeof(isa_irq_map) / sizeof(int)); 130 return isa_irq_map[irq]; 131 } 132 133 int madt_cmp(void * a, void * b) 134 { 135 return 136 (((struct madt_apic_header *) a)->type > ((struct madt_apic_header *) b)->type) ? 137 1 : 138 ((((struct madt_apic_header *) a)->type < ((struct madt_apic_header *) b)->type) ? -1 : 0); 139 } 140 141 void acpi_madt_parse(void) 142 { 143 struct madt_apic_header *end = (struct madt_apic_header *) (((uint8_t *) acpi_madt) + acpi_madt->header.length); 144 struct madt_apic_header *h; 145 146 l_apic = (uint32_t *) (unative_t) acpi_madt->l_apic_address; 147 148 /* calculate madt entries */ 149 for (h = &acpi_madt->apic_header[0]; h < end; h = (struct madt_apic_header *) (((uint8_t *) h) + h->length)) { 150 madt_entries_index_cnt++; 151 } 152 153 /* create madt apic entries index array */ 154 madt_entries_index = (struct madt_apic_header * *) malloc(madt_entries_index_cnt * sizeof(struct madt_apic_header * *), FRAME_ATOMIC); 155 if (!madt_entries_index) 156 panic("Memory allocation error."); 157 158 uint32_t index = 0; 159 160 for (h = &acpi_madt->apic_header[0]; h < end; h = (struct madt_apic_header *) (((uint8_t *) h) + h->length)) { 161 madt_entries_index[index++] = h; 162 } 163 164 /* Quicksort MADT index structure */ 165 qsort(madt_entries_index, madt_entries_index_cnt, sizeof(uintptr_t), &madt_cmp); 166 167 /* Parse MADT entries */ 168 if (madt_entries_index_cnt > 0) { 169 for (index = 0; index < madt_entries_index_cnt - 1; index++) { 170 h = madt_entries_index[index]; 171 switch (h->type) { 172 case MADT_L_APIC: 173 madt_l_apic_entry((struct madt_l_apic *) h, index); 174 break; 175 case MADT_IO_APIC: 176 madt_io_apic_entry((struct madt_io_apic *) h, index); 177 break; 178 case MADT_INTR_SRC_OVRD: 179 madt_intr_src_ovrd_entry((struct madt_intr_src_ovrd *) h, index); 180 break; 181 case MADT_NMI_SRC: 182 case MADT_L_APIC_NMI: 183 case MADT_L_APIC_ADDR_OVRD: 184 case MADT_IO_SAPIC: 185 case MADT_L_SAPIC: 186 case MADT_PLATFORM_INTR_SRC: 187 printf("MADT: skipping %s entry (type=%" PRIu8 ")\n", entry[h->type], h->type); 188 break; 189 190 default: 191 if (h->type >= MADT_RESERVED_SKIP_BEGIN && h->type <= MADT_RESERVED_SKIP_END) { 192 printf("MADT: skipping reserved entry (type=%" PRIu8 ")\n", h->type); 193 } 194 if (h->type >= MADT_RESERVED_OEM_BEGIN) { 195 printf("MADT: skipping OEM entry (type=%" PRIu8 ")\n", h->type); 196 } 197 break; 198 } 199 } 200 } 201 202 if (cpu_count) 203 config.cpu_count = cpu_count; 204 } 205 206 207 void madt_l_apic_entry(struct madt_l_apic *la, uint32_t index) 208 { 209 if (!madt_l_apic_entry_cnt++) { 210 
madt_l_apic_entry_index = index; 211 } 212 133 static int madt_cmp(void *a, void *b, void *arg) 134 { 135 uint8_t typea = (*((struct madt_apic_header **) a))->type; 136 uint8_t typeb = (*((struct madt_apic_header **) b))->type; 137 138 if (typea > typeb) 139 return 1; 140 141 if (typea < typeb) 142 return -1; 143 144 return 0; 145 } 146 147 static void madt_l_apic_entry(struct madt_l_apic *la, size_t i) 148 { 149 if (madt_l_apic_entry_cnt == 0) 150 madt_l_apic_entry_index = i; 151 152 madt_l_apic_entry_cnt++; 153 213 154 if (!(la->flags & 0x1)) { 214 155 /* Processor is unusable, skip it. */ … … 216 157 } 217 158 218 cpu_count++; 219 apic_id_mask |= 1<<la->apic_id; 220 } 221 222 void madt_io_apic_entry(struct madt_io_apic *ioa, uint32_t index) 223 { 224 if (!madt_io_apic_entry_cnt++) { 225 /* remember index of the first io apic entry */ 226 madt_io_apic_entry_index = index; 159 apic_id_mask |= 1 << la->apic_id; 160 } 161 162 static void madt_io_apic_entry(struct madt_io_apic *ioa, size_t i) 163 { 164 if (madt_io_apic_entry_cnt == 0) { 165 /* Remember index of the first io apic entry */ 166 madt_io_apic_entry_index = i; 227 167 io_apic = (uint32_t *) (unative_t) ioa->io_apic_address; 228 168 } else { 229 /* currently not supported */ 230 return; 231 } 232 } 233 234 void madt_intr_src_ovrd_entry(struct madt_intr_src_ovrd *override, uint32_t index) 169 /* Currently not supported */ 170 } 171 172 madt_io_apic_entry_cnt++; 173 } 174 175 static void madt_intr_src_ovrd_entry(struct madt_intr_src_ovrd *override, 176 size_t i) 235 177 { 236 178 ASSERT(override->source < sizeof(isa_irq_map) / sizeof(int)); 237 printf("MADT: ignoring %s entry: bus=%" PRIu8 ", source=%" PRIu8 ", global_int=%" PRIu32 ", flags=%#" PRIx16 "\n", 238 entry[override->header.type], override->bus, override->source, 239 override->global_int, override->flags); 179 180 printf("MADT: Ignoring %s entry: bus=%" PRIu8 ", source=%" PRIu8 181 ", global_int=%" PRIu32 ", flags=%#" PRIx16 "\n", 182 entry[override->header.type], override->bus, override->source, 183 override->global_int, override->flags); 184 } 185 186 void acpi_madt_parse(void) 187 { 188 struct madt_apic_header *end = (struct madt_apic_header *) 189 (((uint8_t *) acpi_madt) + acpi_madt->header.length); 190 struct madt_apic_header *hdr; 191 192 l_apic = (uint32_t *) (unative_t) acpi_madt->l_apic_address; 193 194 /* Count MADT entries */ 195 unsigned int madt_entries_index_cnt = 0; 196 for (hdr = acpi_madt->apic_header; hdr < end; 197 hdr = (struct madt_apic_header *) (((uint8_t *) hdr) + hdr->length)) 198 madt_entries_index_cnt++; 199 200 /* Create MADT APIC entries index array */ 201 madt_entries_index = (struct madt_apic_header **) 202 malloc(madt_entries_index_cnt * sizeof(struct madt_apic_header *), 203 FRAME_ATOMIC); 204 if (!madt_entries_index) 205 panic("Memory allocation error."); 206 207 size_t i = 0; 208 209 for (hdr = acpi_madt->apic_header; hdr < end; 210 hdr = (struct madt_apic_header *) (((uint8_t *) hdr) + hdr->length)) { 211 madt_entries_index[i] = hdr; 212 i++; 213 } 214 215 /* Sort MADT index structure */ 216 if (!gsort(madt_entries_index, madt_entries_index_cnt, 217 sizeof(struct madt_apic_header *), madt_cmp, NULL)) 218 panic("Sorting error."); 219 220 /* Parse MADT entries */ 221 for (i = 0; i < madt_entries_index_cnt; i++) { 222 hdr = madt_entries_index[i]; 223 224 switch (hdr->type) { 225 case MADT_L_APIC: 226 madt_l_apic_entry((struct madt_l_apic *) hdr, i); 227 break; 228 case MADT_IO_APIC: 229 madt_io_apic_entry((struct madt_io_apic *) hdr, i); 
230 break; 231 case MADT_INTR_SRC_OVRD: 232 madt_intr_src_ovrd_entry((struct madt_intr_src_ovrd *) hdr, i); 233 break; 234 case MADT_NMI_SRC: 235 case MADT_L_APIC_NMI: 236 case MADT_L_APIC_ADDR_OVRD: 237 case MADT_IO_SAPIC: 238 case MADT_L_SAPIC: 239 case MADT_PLATFORM_INTR_SRC: 240 printf("MADT: Skipping %s entry (type=%" PRIu8 ")\n", 241 entry[hdr->type], hdr->type); 242 break; 243 default: 244 if ((hdr->type >= MADT_RESERVED_SKIP_BEGIN) 245 && (hdr->type <= MADT_RESERVED_SKIP_END)) 246 printf("MADT: Skipping reserved entry (type=%" PRIu8 ")\n", 247 hdr->type); 248 249 if (hdr->type >= MADT_RESERVED_OEM_BEGIN) 250 printf("MADT: Skipping OEM entry (type=%" PRIu8 ")\n", 251 hdr->type); 252 253 break; 254 } 255 } 256 257 if (madt_l_apic_entry_cnt > 0) 258 config.cpu_count = madt_l_apic_entry_cnt; 240 259 } 241 260 -
kernel/genarch/src/drivers/dsrln/dsrlnout.c
r24a2517   rc621f4aa
 41   41    #include <console/console.h>
 42   42    #include <sysinfo/sysinfo.h>
 43        -#include <string.h>
      43   +#include <str.h>
 44   44
 45   45    typedef struct {
-
kernel/genarch/src/drivers/ega/ega.c
r24a2517 rc621f4aa 42 42 #include <arch/mm/page.h> 43 43 #include <synch/spinlock.h> 44 #include < arch/types.h>44 #include <typedefs.h> 45 45 #include <arch/asm.h> 46 46 #include <memstr.h> 47 #include <str ing.h>47 #include <str.h> 48 48 #include <console/chardev.h> 49 49 #include <console/console.h> … … 63 63 64 64 typedef struct { 65 SPINLOCK_DECLARE(lock);65 IRQ_SPINLOCK_DECLARE(lock); 66 66 67 67 uint32_t cursor; … … 71 71 } ega_instance_t; 72 72 73 static void ega_putchar(outdev_t * dev, wchar_t ch, bool silent);74 static void ega_redraw(outdev_t * dev);73 static void ega_putchar(outdev_t *, wchar_t, bool); 74 static void ega_redraw(outdev_t *); 75 75 76 76 static outdev_operations_t egadev_ops = { … … 540 540 ega_instance_t *instance = (ega_instance_t *) dev->data; 541 541 542 ipl_t ipl = interrupts_disable(); 543 spinlock_lock(&instance->lock); 542 irq_spinlock_lock(&instance->lock, true); 544 543 545 544 switch (ch) { … … 564 563 ega_move_cursor(instance, silent); 565 564 566 spinlock_unlock(&instance->lock); 567 interrupts_restore(ipl); 565 irq_spinlock_unlock(&instance->lock, true); 568 566 } 569 567 … … 572 570 ega_instance_t *instance = (ega_instance_t *) dev->data; 573 571 574 ipl_t ipl = interrupts_disable(); 575 spinlock_lock(&instance->lock); 572 irq_spinlock_lock(&instance->lock, true); 576 573 577 574 memcpy(instance->addr, instance->backbuf, EGA_VRAM_SIZE); … … 579 576 ega_show_cursor(instance, silent); 580 577 581 spinlock_unlock(&instance->lock); 582 interrupts_restore(ipl); 578 irq_spinlock_unlock(&instance->lock, true); 583 579 } 584 580 … … 598 594 egadev->data = instance; 599 595 600 spinlock_initialize(&instance->lock, "*ega_lock");596 irq_spinlock_initialize(&instance->lock, "*ega.instance.lock"); 601 597 602 598 instance->base = base; -
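The ega.c hunks above (together with the fb, kbrd and cuda lock renames below) show a recurring pattern of this merge: a plain spinlock bracketed by interrupts_disable()/interrupts_restore() becomes an interrupt-aware irq_spinlock. A condensed before/after sketch, using a hypothetical dev_instance_t/dev_op in place of the ega driver's own types:

    #include <synch/spinlock.h>

    typedef struct {
        IRQ_SPINLOCK_DECLARE(lock);   /* was: SPINLOCK_DECLARE(lock); */
        /* ... device state ... */
    } dev_instance_t;

    static void dev_op(dev_instance_t *instance)
    {
        /* Before:
         *   ipl_t ipl = interrupts_disable();
         *   spinlock_lock(&instance->lock);
         *   ...
         *   spinlock_unlock(&instance->lock);
         *   interrupts_restore(ipl);
         *
         * After: the 'true' argument makes the lock save and restore
         * the interrupt state itself. */
        irq_spinlock_lock(&instance->lock, true);
        /* ... touch instance state ... */
        irq_spinlock_unlock(&instance->lock, true);
    }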
kernel/genarch/src/drivers/i8042/i8042.c
r24a2517   rc621f4aa
 67   67    i8042_instance_t *instance = irq->instance;
 68   68    i8042_t *dev = instance->i8042;
 69        -uint8_t status;
 70   69
 71        -if (((status = pio_read_8(&dev->status)) & i8042_BUFFER_FULL_MASK)) {
      70   +if (pio_read_8(&dev->status) & i8042_BUFFER_FULL_MASK) {
 72   71    uint8_t data = pio_read_8(&dev->data);
 73   72    indev_push_character(instance->kbrdin, data);
-
kernel/genarch/src/drivers/via-cuda/cuda.c
r24a2517   rc621f4aa
 99   99    instance->snd_bytes = 0;
100  100
101        -spinlock_initialize(&instance->dev_lock, "cuda_dev");
     101   +spinlock_initialize(&instance->dev_lock, "cuda.instance.dev_lock");
102  102
103  103    /* Disable all interrupts from CUDA. */
-
kernel/genarch/src/fb/fb.c
r24a2517   rc621f4aa
 49   49    #include <bitops.h>
 50   50    #include <print.h>
 51        -#include <string.h>
      51   +#include <str.h>
 52   52    #include <ddi/ddi.h>
 53        -#include <arch/types.h>
      53   +#include <typedefs.h>
 54   54    #include <byteorder.h>
 55   55
… …
554  554    fbdev->data = instance;
555  555
556        -spinlock_initialize(&instance->lock, "*fb_lock");
     556   +spinlock_initialize(&instance->lock, "*fb.instance.lock");
557  557    instance->rgb_conv = rgb_conv;
558  558    instance->pixelbytes = pixelbytes;
-
kernel/genarch/src/kbrd/kbrd.c
r24a2517   rc621f4aa
169  169    indev_initialize("kbrd", &instance->raw, &kbrd_raw_ops);
170  170
171        -spinlock_initialize(&instance->keylock, "instance_keylock");
     171   +spinlock_initialize(&instance->keylock, "kbrd.instance.keylock");
172  172    instance->keyflags = 0;
173  173    instance->lockflags = 0;
-
kernel/genarch/src/kbrd/kbrd_pl050.c
r24a2517   rc621f4aa
181  181    indev_initialize("kbrd", &instance->raw, &kbrd_raw_ops);
182  182
183        -spinlock_initialize(&instance->keylock, "instance_keylock");
     183   +spinlock_initialize(&instance->keylock, "kbrd_pl050.instance.keylock");
184  184    instance->keyflags = 0;
185  185    instance->lockflags = 0;
-
kernel/genarch/src/kbrd/scanc_mac.c
r24a2517   rc621f4aa
 37   37    #include <genarch/kbrd/scanc.h>
 38   38    #include <typedefs.h>
 39        -#include <string.h>
      39   +#include <str.h>
 40   40
 41   41    /** Primary meaning of scancodes. */
-
kernel/genarch/src/kbrd/scanc_pc.c
r24a2517   rc621f4aa
 37   37    #include <genarch/kbrd/scanc.h>
 38   38    #include <typedefs.h>
 39        -#include <string.h>
      39   +#include <str.h>
 40   40
 41   41    /** Primary meaning of scancodes. */
-
kernel/genarch/src/kbrd/scanc_pl050.c
r24a2517   rc621f4aa
 36   36    #include <genarch/kbrd/scanc.h>
 37   37    #include <typedefs.h>
 38        -#include <string.h>
      38   +#include <str.h>
 39   39
 40   40
-
kernel/genarch/src/kbrd/scanc_sun.c
r24a2517   rc621f4aa
 37   37    #include <genarch/kbrd/scanc.h>
 38   38    #include <typedefs.h>
 39        -#include <string.h>
      39   +#include <str.h>
 40   40
 41   41    /** Primary meaning of scancodes. */
-
kernel/genarch/src/mm/as_ht.c
r24a2517 rc621f4aa 30 30 * @{ 31 31 */ 32 32 33 33 /** 34 34 * @file 35 * @brief Address space functions for global page hash table.35 * @brief Address space functions for global page hash table. 36 36 */ 37 37 … … 41 41 #include <mm/as.h> 42 42 #include <mm/frame.h> 43 #include < arch/types.h>43 #include <typedefs.h> 44 44 #include <memstr.h> 45 45 #include <adt/hash_table.h> 46 46 #include <synch/mutex.h> 47 47 48 static pte_t *ht_create( int flags);49 static void ht_destroy(pte_t * page_table);48 static pte_t *ht_create(unsigned int); 49 static void ht_destroy(pte_t *); 50 50 51 static void ht_lock(as_t *as, bool lock); 52 static void ht_unlock(as_t *as, bool unlock); 51 static void ht_lock(as_t *, bool); 52 static void ht_unlock(as_t *, bool); 53 static bool ht_locked(as_t *); 53 54 54 55 as_operations_t as_ht_operations = { … … 57 58 .page_table_lock = ht_lock, 58 59 .page_table_unlock = ht_unlock, 60 .page_table_locked = ht_locked, 59 61 }; 60 62 … … 68 70 * 69 71 * @return Returns NULL. 72 * 70 73 */ 71 pte_t *ht_create( int flags)74 pte_t *ht_create(unsigned int flags) 72 75 { 73 76 if (flags & FLAG_AS_KERNEL) { … … 75 78 mutex_initialize(&page_ht_lock, MUTEX_PASSIVE); 76 79 } 80 77 81 return NULL; 78 82 } … … 83 87 * 84 88 * @param page_table This parameter is ignored. 89 * 85 90 */ 86 91 void ht_destroy(pte_t *page_table) … … 94 99 * Interrupts must be disabled. 95 100 * 96 * @param as Address space.101 * @param as Address space. 97 102 * @param lock If false, do not attempt to lock the address space. 103 * 98 104 */ 99 105 void ht_lock(as_t *as, bool lock) … … 101 107 if (lock) 102 108 mutex_lock(&as->lock); 109 103 110 mutex_lock(&page_ht_lock); 104 111 } … … 109 116 * Interrupts must be disabled. 110 117 * 111 * @param as Address space.118 * @param as Address space. 112 119 * @param unlock If false, do not attempt to lock the address space. 120 * 113 121 */ 114 122 void ht_unlock(as_t *as, bool unlock) 115 123 { 116 124 mutex_unlock(&page_ht_lock); 125 117 126 if (unlock) 118 127 mutex_unlock(&as->lock); 119 128 } 120 129 130 /** Test whether page tables are locked. 131 * 132 * @param as Address space where the page tables belong. 133 * 134 * @return True if the page tables belonging to the address soace 135 * are locked, otherwise false. 136 */ 137 bool ht_locked(as_t *as) 138 { 139 return (mutex_locked(&page_ht_lock) && mutex_locked(&as->lock)); 140 } 141 121 142 /** @} 122 143 */ -
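The as_ht.c hunks above add a new page_table_locked operation to as_operations_t. Reassembled from the diff for readability, the hash table backend implements it by checking both mutexes involved and wires it into the operations table:

    /* Test whether the page tables of 'as' are locked: both the global
     * page hash table mutex and the address space mutex must be held. */
    bool ht_locked(as_t *as)
    {
        return (mutex_locked(&page_ht_lock) && mutex_locked(&as->lock));
    }

    as_operations_t as_ht_operations = {
        .page_table_create = ht_create,
        .page_table_destroy = ht_destroy,
        .page_table_lock = ht_lock,
        .page_table_unlock = ht_unlock,
        .page_table_locked = ht_locked,
    };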
kernel/genarch/src/mm/as_pt.c
r24a2517 rc621f4aa 33 33 /** 34 34 * @file 35 * @brief Address space functions for 4-level hierarchical pagetables.35 * @brief Address space functions for 4-level hierarchical pagetables. 36 36 */ 37 37 … … 43 43 #include <arch/mm/page.h> 44 44 #include <arch/mm/as.h> 45 #include < arch/types.h>45 #include <typedefs.h> 46 46 #include <memstr.h> 47 47 #include <arch.h> 48 48 49 static pte_t *ptl0_create( int flags);50 static void ptl0_destroy(pte_t * page_table);49 static pte_t *ptl0_create(unsigned int); 50 static void ptl0_destroy(pte_t *); 51 51 52 static void pt_lock(as_t *as, bool lock); 53 static void pt_unlock(as_t *as, bool unlock); 52 static void pt_lock(as_t *, bool); 53 static void pt_unlock(as_t *, bool); 54 static bool pt_locked(as_t *); 54 55 55 56 as_operations_t as_pt_operations = { … … 57 58 .page_table_destroy = ptl0_destroy, 58 59 .page_table_lock = pt_lock, 59 .page_table_unlock = pt_unlock 60 .page_table_unlock = pt_unlock, 61 .page_table_locked = pt_locked, 60 62 }; 61 63 … … 67 69 * 68 70 * @return New PTL0. 71 * 69 72 */ 70 pte_t *ptl0_create( int flags)73 pte_t *ptl0_create(unsigned int flags) 71 74 { 72 pte_t *src_ptl0, *dst_ptl0; 73 ipl_t ipl; 74 int table_size; 75 76 dst_ptl0 = (pte_t *) frame_alloc(PTL0_SIZE, FRAME_KA); 77 table_size = FRAME_SIZE << PTL0_SIZE; 78 79 if (flags & FLAG_AS_KERNEL) { 75 pte_t *dst_ptl0 = (pte_t *) frame_alloc(PTL0_SIZE, FRAME_KA); 76 size_t table_size = FRAME_SIZE << PTL0_SIZE; 77 78 if (flags & FLAG_AS_KERNEL) 80 79 memsetb(dst_ptl0, table_size, 0); 81 } else { 82 uintptr_t src, dst; 83 80 else { 84 81 /* 85 82 * Copy the kernel address space portion to new PTL0. 83 * 86 84 */ 87 88 ipl = interrupts_disable(); 89 mutex_lock(&AS_KERNEL->lock); 90 src_ptl0 = (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table); 91 92 src = (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)]; 93 dst = (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)]; 94 85 86 mutex_lock(&AS_KERNEL->lock); 87 88 pte_t *src_ptl0 = 89 (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table); 90 91 uintptr_t src = 92 (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)]; 93 uintptr_t dst = 94 (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)]; 95 95 96 memsetb(dst_ptl0, table_size, 0); 96 memcpy((void *) dst, (void *) src, table_size - (src - (uintptr_t) src_ptl0)); 97 memcpy((void *) dst, (void *) src, 98 table_size - (src - (uintptr_t) src_ptl0)); 99 97 100 mutex_unlock(&AS_KERNEL->lock); 98 interrupts_restore(ipl);99 101 } 100 102 101 103 return (pte_t *) KA2PA((uintptr_t) dst_ptl0); 102 104 } … … 107 109 * 108 110 * @param page_table Physical address of PTL0. 111 * 109 112 */ 110 113 void ptl0_destroy(pte_t *page_table) 111 114 { 112 frame_free((uintptr_t) page_table);115 frame_free((uintptr_t) page_table); 113 116 } 114 117 … … 118 121 * Interrupts must be disabled. 119 122 * 120 * @param as Address space.123 * @param as Address space. 121 124 * @param lock If false, do not attempt to lock the address space. 125 * 122 126 */ 123 127 void pt_lock(as_t *as, bool lock) … … 132 136 * Interrupts must be disabled. 133 137 * 134 * @param as Address space.138 * @param as Address space. 135 139 * @param unlock If false, do not attempt to unlock the address space. 140 * 136 141 */ 137 142 void pt_unlock(as_t *as, bool unlock) … … 141 146 } 142 147 148 /** Test whether page tables are locked. 149 * 150 * @param as Address space where the page tables belong. 
151 * 152 * @return True if the page tables belonging to the address soace 153 * are locked, otherwise false. 154 */ 155 bool pt_locked(as_t *as) 156 { 157 return mutex_locked(&as->lock); 158 } 159 143 160 /** @} 144 161 */ -
kernel/genarch/src/mm/asid.c
r24a2517   rc621f4aa
 70   70    /** Allocate free address space identifier.
 71   71     *
 72         - * Interrupts must be disabled and inactive_as_with_asid_lock must be held
 73         - * prior to this call
 74         - *
 75   72     * @return New ASID.
 76   73     */
… …
 80   77    link_t *tmp;
 81   78    as_t *as;
      79   +
      80   +ASSERT(interrupts_disabled());
      81   +ASSERT(spinlock_locked(&asidlock));
 82   82
 83   83    /*
… …
126  126     * Get the system rid of the stolen ASID.
127  127     */
128        -tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
     128   +ipl_t ipl = tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
129  129    tlb_invalidate_asid(asid);
130        -tlb_shootdown_finalize();
     130   +tlb_shootdown_finalize(ipl);
131  131    } else {
132  132
… …
142  142     * Purge the allocated ASID from TLBs.
143  143     */
144        -tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
     144   +ipl_t ipl = tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
145  145    tlb_invalidate_asid(asid);
146        -tlb_shootdown_finalize();
     146   +tlb_shootdown_finalize(ipl);
147  147
148  148
-
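Extracted from the asid.c hunks above: tlb_shootdown_start() now returns the saved interrupt level, and the caller hands it back to tlb_shootdown_finalize() instead of the shootdown code restoring it implicitly. The fragment below simply restates that calling convention as it appears twice in asid_get():

    /* New calling convention for the TLB shootdown pair. */
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
    tlb_invalidate_asid(asid);
    tlb_shootdown_finalize(ipl);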
kernel/genarch/src/mm/page_ht.c
r24a2517 rc621f4aa 33 33 /** 34 34 * @file 35 * @brief Virtual Address Translation (VAT) for global page hash table.35 * @brief Virtual Address Translation (VAT) for global page hash table. 36 36 */ 37 37 … … 43 43 #include <mm/as.h> 44 44 #include <arch/mm/asid.h> 45 #include < arch/types.h>45 #include <typedefs.h> 46 46 #include <arch/asm.h> 47 47 #include <synch/spinlock.h> … … 52 52 #include <align.h> 53 53 54 static size_t hash(unative_t key[]); 55 static bool compare(unative_t key[], size_t keys, link_t *item); 56 static void remove_callback(link_t *item); 57 58 static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, 59 int flags); 60 static void ht_mapping_remove(as_t *as, uintptr_t page); 61 static pte_t *ht_mapping_find(as_t *as, uintptr_t page); 54 static size_t hash(unative_t[]); 55 static bool compare(unative_t[], size_t, link_t *); 56 static void remove_callback(link_t *); 57 58 static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int); 59 static void ht_mapping_remove(as_t *, uintptr_t); 60 static pte_t *ht_mapping_find(as_t *, uintptr_t); 62 61 63 62 /** … … 65 64 * after address space lock and after any address space area 66 65 * locks. 66 * 67 67 */ 68 68 mutex_t page_ht_lock; 69 69 70 /** 71 * Page hash table.70 /** Page hash table. 71 * 72 72 * The page hash table may be accessed only when page_ht_lock is held. 73 * 73 74 */ 74 75 hash_table_t page_ht; … … 93 94 * 94 95 * @return Index into page hash table. 96 * 95 97 */ 96 98 size_t hash(unative_t key[]) … … 98 100 as_t *as = (as_t *) key[KEY_AS]; 99 101 uintptr_t page = (uintptr_t) key[KEY_PAGE]; 100 size_t index;101 102 102 103 /* … … 104 105 * of occurring. Least significant bits of VPN compose the 105 106 * hash index. 106 */ 107 index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1)); 107 * 108 */ 109 size_t index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1)); 108 110 109 111 /* … … 111 113 * similar addresses. Least significant bits compose the 112 114 * hash index. 115 * 113 116 */ 114 117 index |= ((unative_t) as) & (PAGE_HT_ENTRIES - 1); … … 119 122 /** Compare page hash table item with page and/or address space. 120 123 * 121 * @param key Array of one or two keys (i.e. page and/or address space).124 * @param key Array of one or two keys (i.e. page and/or address space). 122 125 * @param keys Number of keys passed. 123 126 * @param item Item to compare the keys with. 124 127 * 125 128 * @return true on match, false otherwise. 129 * 126 130 */ 127 131 bool compare(unative_t key[], size_t keys, link_t *item) 128 132 { 129 pte_t *t;130 131 133 ASSERT(item); 132 ASSERT((keys > 0) && (keys <= PAGE_HT_KEYS)); 133 134 ASSERT(keys > 0); 135 ASSERT(keys <= PAGE_HT_KEYS); 136 134 137 /* 135 138 * Convert item to PTE. 136 * /137 t = hash_table_get_instance(item, pte_t, link);138 139 if (keys == PAGE_HT_KEYS) {140 return (key[KEY_AS] == (uintptr_t) t->as) &&141 (key[KEY_PAGE] == t->page);142 } else {143 return (key[KEY_AS] == (uintptr_t) t->as);144 }139 * 140 */ 141 pte_t *pte = hash_table_get_instance(item, pte_t, link); 142 143 if (keys == PAGE_HT_KEYS) 144 return (key[KEY_AS] == (uintptr_t) pte->as) && 145 (key[KEY_PAGE] == pte->page); 146 147 return (key[KEY_AS] == (uintptr_t) pte->as); 145 148 } 146 149 … … 148 151 * 149 152 * @param item Page hash table item being removed. 153 * 150 154 */ 151 155 void remove_callback(link_t *item) 152 156 { 153 pte_t *t;154 155 157 ASSERT(item); 156 158 157 159 /* 158 160 * Convert item to PTE. 
159 */ 160 t = hash_table_get_instance(item, pte_t, link); 161 162 free(t); 161 * 162 */ 163 pte_t *pte = hash_table_get_instance(item, pte_t, link); 164 165 free(pte); 163 166 } 164 167 … … 166 169 * 167 170 * Map virtual address page to physical address frame 168 * using flags. 169 * 170 * The page table must be locked and interrupts must be disabled. 171 * 172 * @param as Address space to which page belongs. 173 * @param page Virtual address of the page to be mapped. 171 * using flags. 172 * 173 * @param as Address space to which page belongs. 174 * @param page Virtual address of the page to be mapped. 174 175 * @param frame Physical address of memory frame to which the mapping is done. 175 176 * @param flags Flags to be used for mapping. 176 */ 177 void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags) 178 { 179 pte_t *t; 177 * 178 */ 179 void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, 180 unsigned int flags) 181 { 180 182 unative_t key[2] = { 181 183 (uintptr_t) as, 182 184 page = ALIGN_DOWN(page, PAGE_SIZE) 183 185 }; 186 187 ASSERT(page_table_locked(as)); 184 188 185 189 if (!hash_table_find(&page_ht, key)) { 186 t= (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);187 ASSERT( t!= NULL);188 189 t->g = (flags & PAGE_GLOBAL) != 0;190 t->x = (flags & PAGE_EXEC) != 0;191 t->w = (flags & PAGE_WRITE) != 0;192 t->k = !(flags & PAGE_USER);193 t->c = (flags & PAGE_CACHEABLE) != 0;194 t->p = !(flags & PAGE_NOT_PRESENT);195 t->a = false;196 t->d = false;197 198 t->as = as;199 t->page = ALIGN_DOWN(page, PAGE_SIZE);200 t->frame = ALIGN_DOWN(frame, FRAME_SIZE);201 202 hash_table_insert(&page_ht, key, & t->link);190 pte_t *pte = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC); 191 ASSERT(pte != NULL); 192 193 pte->g = (flags & PAGE_GLOBAL) != 0; 194 pte->x = (flags & PAGE_EXEC) != 0; 195 pte->w = (flags & PAGE_WRITE) != 0; 196 pte->k = !(flags & PAGE_USER); 197 pte->c = (flags & PAGE_CACHEABLE) != 0; 198 pte->p = !(flags & PAGE_NOT_PRESENT); 199 pte->a = false; 200 pte->d = false; 201 202 pte->as = as; 203 pte->page = ALIGN_DOWN(page, PAGE_SIZE); 204 pte->frame = ALIGN_DOWN(frame, FRAME_SIZE); 205 206 hash_table_insert(&page_ht, key, &pte->link); 203 207 } 204 208 } … … 210 214 * this call visible. 211 215 * 212 * The page table must be locked and interrupts must be disabled. 213 * 214 * @param as Address space to wich page belongs. 216 * @param as Address space to wich page belongs. 215 217 * @param page Virtual address of the page to be demapped. 218 * 216 219 */ 217 220 void ht_mapping_remove(as_t *as, uintptr_t page) … … 221 224 page = ALIGN_DOWN(page, PAGE_SIZE) 222 225 }; 226 227 ASSERT(page_table_locked(as)); 223 228 224 229 /* … … 234 239 * Find mapping for virtual page. 235 240 * 236 * The page table must be locked and interrupts must be disabled. 237 * 238 * @param as Address space to wich page belongs. 241 * @param as Address space to wich page belongs. 239 242 * @param page Virtual page. 240 243 * 241 244 * @return NULL if there is no such mapping; requested mapping otherwise. 
245 * 242 246 */ 243 247 pte_t *ht_mapping_find(as_t *as, uintptr_t page) 244 248 { 245 link_t *hlp;246 pte_t *t = NULL;247 249 unative_t key[2] = { 248 250 (uintptr_t) as, 249 251 page = ALIGN_DOWN(page, PAGE_SIZE) 250 252 }; 251 252 hlp = hash_table_find(&page_ht, key); 253 if (hlp) 254 t = hash_table_get_instance(hlp, pte_t, link); 255 256 return t; 253 254 ASSERT(page_table_locked(as)); 255 256 link_t *cur = hash_table_find(&page_ht, key); 257 if (cur) 258 return hash_table_get_instance(cur, pte_t, link); 259 260 return NULL; 257 261 } 258 262 -
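The page_ht.c changes above replace the "page table must be locked and interrupts must be disabled" comments with runtime checks: each mapping operation now begins with ASSERT(page_table_locked(as)). From the caller's side the contract looks like the sketch below, where as, page and frame are placeholders and page_mapping_insert() is the generic wrapper already seen in the acpi.c hunk:

    /* Callers take the page table lock explicitly; the hash table backend
     * only asserts it via ASSERT(page_table_locked(as)). */
    page_table_lock(as, true);
    page_mapping_insert(as, page, frame, PAGE_WRITE | PAGE_CACHEABLE);
    page_table_unlock(as, true);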
kernel/genarch/src/mm/page_pt.c
r24a2517 rc621f4aa 33 33 /** 34 34 * @file 35 * @brief Virtual Address Translation for hierarchical 4-level page tables.35 * @brief Virtual Address Translation for hierarchical 4-level page tables. 36 36 */ 37 37 … … 42 42 #include <arch/mm/page.h> 43 43 #include <arch/mm/as.h> 44 #include < arch/types.h>44 #include <typedefs.h> 45 45 #include <arch/asm.h> 46 46 #include <memstr.h> 47 47 48 static void pt_mapping_insert(as_t * as, uintptr_t page, uintptr_t frame, int flags);49 static void pt_mapping_remove(as_t * as, uintptr_t page);50 static pte_t *pt_mapping_find(as_t * as, uintptr_t page);48 static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int); 49 static void pt_mapping_remove(as_t *, uintptr_t); 50 static pte_t *pt_mapping_find(as_t *, uintptr_t); 51 51 52 52 page_mapping_operations_t pt_mapping_operations = { … … 61 61 * using flags. 62 62 * 63 * The page table must be locked and interrupts must be disabled. 64 * 65 * @param as Address space to wich page belongs. 66 * @param page Virtual address of the page to be mapped. 63 * @param as Address space to wich page belongs. 64 * @param page Virtual address of the page to be mapped. 67 65 * @param frame Physical address of memory frame to which the mapping is done. 68 66 * @param flags Flags to be used for mapping. 69 */ 70 void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags) 67 * 68 */ 69 void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, 70 unsigned int flags) 71 71 { 72 pte_t *ptl0, *ptl1, *ptl2, *ptl3; 73 pte_t *newpt; 74 75 ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 76 72 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 73 74 ASSERT(page_table_locked(as)); 75 77 76 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) { 78 newpt = (pte_t *)frame_alloc(PTL1_SIZE, FRAME_KA);77 pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE, FRAME_KA); 79 78 memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0); 80 79 SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt)); 81 80 SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); 82 81 } 83 84 pt l1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));85 82 83 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page))); 84 86 85 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) { 87 newpt = (pte_t *)frame_alloc(PTL2_SIZE, FRAME_KA);86 pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE, FRAME_KA); 88 87 memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0); 89 88 SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt)); 90 89 SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); 91 90 } 92 93 pt l2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));94 91 92 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page))); 93 95 94 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) { 96 newpt = (pte_t *)frame_alloc(PTL3_SIZE, FRAME_KA);95 pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE, FRAME_KA); 97 96 memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0); 98 97 SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt)); 99 98 SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); 100 99 } 101 102 pt l3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));103 100 101 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page))); 102 104 103 SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame); 105 104 
SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags); … … 114 113 * Empty page tables except PTL0 are freed. 115 114 * 116 * The page table must be locked and interrupts must be disabled. 117 * 118 * @param as Address space to wich page belongs. 115 * @param as Address space to wich page belongs. 119 116 * @param page Virtual address of the page to be demapped. 117 * 120 118 */ 121 119 void pt_mapping_remove(as_t *as, uintptr_t page) 122 120 { 123 pte_t *ptl0, *ptl1, *ptl2, *ptl3; 124 bool empty = true; 125 int i; 121 ASSERT(page_table_locked(as)); 126 122 127 123 /* 128 124 * First, remove the mapping, if it exists. 125 * 129 126 */ 130 131 ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 132 127 128 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 133 129 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) 134 130 return; 135 136 ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page))); 137 131 132 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page))); 138 133 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) 139 134 return; 140 141 ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page))); 142 135 136 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page))); 143 137 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) 144 138 return; 145 146 pt l3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));147 139 140 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page))); 141 148 142 /* Destroy the mapping. Setting to PAGE_NOT_PRESENT is not sufficient. */ 149 143 memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0); 150 144 151 145 /* 152 146 * Second, free all empty tables along the way from PTL3 down to PTL0. 147 * 153 148 */ 154 149 155 /* check PTL3 */ 150 /* Check PTL3 */ 151 bool empty = true; 152 153 unsigned int i; 156 154 for (i = 0; i < PTL3_ENTRIES; i++) { 157 155 if (PTE_VALID(&ptl3[i])) { … … 160 158 } 161 159 } 160 162 161 if (empty) { 163 162 /* 164 163 * PTL3 is empty. 165 164 * Release the frame and remove PTL3 pointer from preceding table. 165 * 166 166 */ 167 167 frame_free(KA2PA((uintptr_t) ptl3)); 168 if (PTL2_ENTRIES) 169 memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0); 170 else if (PTL1_ENTRIES) 171 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); 172 else 173 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 168 #if (PTL2_ENTRIES != 0) 169 memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0); 170 #elif (PTL1_ENTRIES != 0) 171 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); 172 #else 173 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 174 #endif 174 175 } else { 175 176 /* … … 177 178 * Therefore, there must be a path from PTL0 to PTL3 and 178 179 * thus nothing to free in higher levels. 179 * /180 return;181 }182 183 /* check PTL2, empty is still true */184 if (PTL2_ENTRIES) {185 for (i = 0; i < PTL2_ENTRIES; i++) { 186 if (PTE_VALID(&ptl2[i])) {187 empty = false;188 break;189 }180 * 181 */ 182 return; 183 } 184 185 /* Check PTL2, empty is still true */ 186 #if (PTL2_ENTRIES != 0) 187 for (i = 0; i < PTL2_ENTRIES; i++) { 188 if (PTE_VALID(&ptl2[i])) { 189 empty = false; 190 break; 190 191 } 191 if (empty) { 192 /* 193 * PTL2 is empty. 194 * Release the frame and remove PTL2 pointer from preceding table. 
195 */ 196 frame_free(KA2PA((uintptr_t) ptl2)); 197 if (PTL1_ENTRIES) 198 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); 199 else 200 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 192 } 193 194 if (empty) { 195 /* 196 * PTL2 is empty. 197 * Release the frame and remove PTL2 pointer from preceding table. 198 * 199 */ 200 frame_free(KA2PA((uintptr_t) ptl2)); 201 #if (PTL1_ENTRIES != 0) 202 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); 203 #else 204 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 205 #endif 206 } else { 207 /* 208 * PTL2 is not empty. 209 * Therefore, there must be a path from PTL0 to PTL2 and 210 * thus nothing to free in higher levels. 211 * 212 */ 213 return; 214 } 215 #endif /* PTL2_ENTRIES != 0 */ 216 217 /* check PTL1, empty is still true */ 218 #if (PTL1_ENTRIES != 0) 219 for (i = 0; i < PTL1_ENTRIES; i++) { 220 if (PTE_VALID(&ptl1[i])) { 221 empty = false; 222 break; 201 223 } 202 else { 203 /* 204 * PTL2 is not empty. 205 * Therefore, there must be a path from PTL0 to PTL2 and 206 * thus nothing to free in higher levels. 207 */ 208 return; 209 } 210 } 211 212 /* check PTL1, empty is still true */ 213 if (PTL1_ENTRIES) { 214 for (i = 0; i < PTL1_ENTRIES; i++) { 215 if (PTE_VALID(&ptl1[i])) { 216 empty = false; 217 break; 218 } 219 } 220 if (empty) { 221 /* 222 * PTL1 is empty. 223 * Release the frame and remove PTL1 pointer from preceding table. 224 */ 225 frame_free(KA2PA((uintptr_t) ptl1)); 226 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 227 } 228 } 229 224 } 225 226 if (empty) { 227 /* 228 * PTL1 is empty. 229 * Release the frame and remove PTL1 pointer from preceding table. 230 * 231 */ 232 frame_free(KA2PA((uintptr_t) ptl1)); 233 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 234 } 235 #endif /* PTL1_ENTRIES != 0 */ 230 236 } 231 237 … … 234 240 * Find mapping for virtual page. 235 241 * 236 * The page table must be locked and interrupts must be disabled. 237 * 238 * @param as Address space to which page belongs. 242 * @param as Address space to which page belongs. 239 243 * @param page Virtual page. 240 244 * 241 * @return NULL if there is no such mapping; entry from PTL3 describing the mapping otherwise. 245 * @return NULL if there is no such mapping; entry from PTL3 describing 246 * the mapping otherwise. 247 * 242 248 */ 243 249 pte_t *pt_mapping_find(as_t *as, uintptr_t page) 244 250 { 245 pte_t *ptl0, *ptl1, *ptl2, *ptl3; 246 247 ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 248 251 ASSERT(page_table_locked(as)); 252 253 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); 249 254 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) 250 255 return NULL; 251 252 ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page))); 253 256 257 pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page))); 254 258 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) 255 259 return NULL; 256 257 ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page))); 258 260 261 pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page))); 259 262 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) 260 263 return NULL; 261 262 pt l3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));263 264 265 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page))); 266 264 267 return &ptl3[PTL3_INDEX(page)]; 265 268 } -
kernel/genarch/src/multiboot/multiboot.c
r24a2517   rc621f4aa
 34   34
 35   35    #include <genarch/multiboot/multiboot.h>
 36        -#include <arch/types.h>
 37   36    #include <typedefs.h>
 38   37    #include <config.h>
 39        -#include <string.h>
      38   +#include <str.h>
 40   39    #include <macros.h>
 41   40
-
kernel/genarch/src/ofw/ebus.c
r24a2517   rc621f4aa
 40   40    #include <genarch/ofw/pci.h>
 41   41    #include <arch/memstr.h>
 42        -#include <string.h>
      42   +#include <str.h>
 43   43    #include <panic.h>
 44   44    #include <debug.h>
-
kernel/genarch/src/ofw/fhc.c
r24a2517   rc621f4aa
 40   40    #include <arch/drivers/fhc.h>
 41   41    #include <arch/memstr.h>
 42        -#include <string.h>
      42   +#include <str.h>
 43   43    #include <panic.h>
 44   44    #include <macros.h>
-
kernel/genarch/src/ofw/ofw_tree.c
r24a2517   rc621f4aa
 39   39    #include <arch/memstr.h>
 40   40    #include <mm/slab.h>
 41        -#include <string.h>
      41   +#include <str.h>
 42   42    #include <panic.h>
 43   43    #include <print.h>
… …
 65   65        const char *name)
 66   66    {
 67        -unsigned int i;
      67   +size_t i;
 68   68
 69   69    for (i = 0; i < node->properties; i++) {
… …
170  170     */
171  171    ofw_tree_node_t *ofw_tree_find_node_by_handle(ofw_tree_node_t *root,
172        -    uint32_t handle)
     172   +    phandle handle)
173  173    {
174  174    ofw_tree_node_t *cur;
-
kernel/genarch/src/ofw/pci.c
r24a2517   rc621f4aa
 41   41    #include <arch/trap/interrupt.h>
 42   42    #include <arch/memstr.h>
 43        -#include <string.h>
      43   +#include <str.h>
 44   44    #include <panic.h>
 45   45    #include <macros.h>
 46   46
 47   47    #define PCI_SPACE_MASK 0x03000000
 48        -#define PCI_ABS_MASK 0x80000000 
      48   +#define PCI_ABS_MASK 0x80000000
 49   49    #define PCI_REG_MASK 0x000000ff
 50   50
-
kernel/genarch/src/srln/srln.c
r24a2517   rc621f4aa
 40   40    #include <proc/thread.h>
 41   41    #include <arch.h>
 42        -#include <string.h>
      42   +#include <str.h>
 43   43
 44   44    static indev_operations_t srln_raw_ops = {