source: mainline/kernel/arch/ia32/src/smp/smp.c

Last change on this file was b169619, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 20 months ago

Deduplicate mem functions

There are a number of functions which are copied between the
kernel, libc, and potentially boot too. The mem*() functions
are the first such offenders. All this duplicate code will
be moved to the directory 'common'.

  • Property mode set to 100644
File size: 5.4 KB
RevLine 
/*
 * Copyright (c) 2008 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
28
/** @addtogroup kernel_ia32
 * @{
 */
/** @file
 */
[ed0dd65]35#include <smp/smp.h>
[a26ddd1]36#include <arch/smp/smp.h>
37#include <arch/smp/mps.h>
38#include <arch/smp/ap.h>
[66def8d]39#include <arch/boot/boot.h>
[63e27ef]40#include <assert.h>
[897fd8f1]41#include <errno.h>
[e16e036a]42#include <genarch/acpi/acpi.h>
43#include <genarch/acpi/madt.h>
[ed0dd65]44#include <config.h>
[a26ddd1]45#include <synch/waitq.h>
46#include <arch/pm.h>
[b2e121a]47#include <halt.h>
[a26ddd1]48#include <panic.h>
49#include <arch/asm.h>
50#include <mm/page.h>
[d4673296]51#include <mm/frame.h>
52#include <mm/km.h>
[aafed15]53#include <stdlib.h>
[fc1e4f6]54#include <mm/as.h>
[b2fa1204]55#include <log.h>
[b169619]56#include <memw.h>
[87a5796]57#include <genarch/drivers/i8259/i8259.h>
[49e6c6b4]58#include <cpu.h>
[ed0dd65]59
[5f85c91]60#ifdef CONFIG_SMP
[ed0dd65]61
/*
 * Operations of the SMP configuration mechanism in use. Set by smp_init()
 * to either the ACPI MADT or the Intel MPS backend; NULL until then.
 */
static struct smp_config_operations *ops = NULL;
63
[ed0dd65]64void smp_init(void)
65{
66 if (acpi_madt) {
67 acpi_madt_parse();
[232e3ec7]68 ops = &madt_config_operations;
[ed0dd65]69 }
[a35b458]70
[a26ddd1]71 if (config.cpu_count == 1) {
[ed0dd65]72 mps_init();
[a26ddd1]73 ops = &mps_config_operations;
74 }
[a35b458]75
[e3ce39b]76 if (config.cpu_count > 1) {
[adec5b45]77 l_apic = (uint32_t *) km_map((uintptr_t) l_apic, PAGE_SIZE,
[a1b9f63]78 PAGE_SIZE, PAGE_WRITE | PAGE_NOT_CACHEABLE);
[adec5b45]79 io_apic = (uint32_t *) km_map((uintptr_t) io_apic, PAGE_SIZE,
[a1b9f63]80 PAGE_SIZE, PAGE_WRITE | PAGE_NOT_CACHEABLE);
[7cb567cd]81 }
[ed0dd65]82}
83
[49e6c6b4]84static void cpu_arch_id_init(void)
85{
[63e27ef]86 assert(ops != NULL);
87 assert(cpus != NULL);
[a35b458]88
[49e6c6b4]89 for (unsigned int i = 0; i < config.cpu_count; ++i) {
90 cpus[i].arch.id = ops->cpu_apic_id(i);
91 }
92}
93
/*
 * Kernel thread for bringing up application processors. It becomes clear
 * that we need an arrangement like this (AP's being initialized by a kernel
 * thread), for a thread has its dedicated stack. (The stack used during the
 * BSP initialization (prior the very first call to scheduler()) will be used
 * as an initialization stack for each AP.)
 *
 * APs are started strictly one at a time: after each INIT IPI, this thread
 * blocks on ap_completion_semaphore until the AP signals it is up (or a
 * one-second timeout expires).
 */
void kmp(void *arg __attribute__((unused)))
{
	unsigned int i;

	/* SMP configuration must have been detected by smp_init(). */
	assert(ops != NULL);

	/*
	 * SMP initialized, cpus array allocated. Assign each CPU its
	 * physical APIC ID.
	 */
	cpu_arch_id_init();

	/*
	 * We need to access data in frame 0.
	 * We boldly make use of kernel address space mapping.
	 */

	/*
	 * Set the warm-reset vector to the real-mode address of 4K-aligned
	 * ap_boot(). The vector lives at physical 0x40:0x67; the segment is
	 * the code's physical address >> 4, the offset is zero.
	 */
	*((uint16_t *) (PA2KA(0x467 + 0))) =
	    (uint16_t) (((uintptr_t) ap_boot) >> 4);	/* segment */
	*((uint16_t *) (PA2KA(0x467 + 2))) = 0;	/* offset */

	/*
	 * Save 0xa to address 0xf of the CMOS RAM (ports 0x70/0x71 are the
	 * CMOS index/data pair). BIOS will not do the POST after the INIT
	 * signal; it jumps via the warm-reset vector instead.
	 */
	pio_write_8((ioport8_t *) 0x70, 0xf);
	pio_write_8((ioport8_t *) 0x71, 0xa);

	/* Mask legacy PIC interrupts and switch over to the APIC. */
	i8259_disable_irqs(0xffff);
	apic_init();

	for (i = 0; i < config.cpu_count; i++) {
		/*
		 * Skip processors marked unusable.
		 */
		if (!ops->cpu_enabled(i))
			continue;

		/*
		 * The bootstrap processor is already up.
		 */
		if (ops->cpu_bootstrap(i))
			continue;

		/* Guard against a config entry that aliases the BSP. */
		if (ops->cpu_apic_id(i) == bsp_l_apic) {
			log(LF_ARCH, LVL_ERROR, "kmp: bad processor entry #%u, "
			    "will not send IPI to myself", i);
			continue;
		}

		/*
		 * Prepare new GDT for CPU in question.
		 */

		/*
		 * XXX Flag FRAME_LOW_4_GiB was removed temporarily,
		 * it needs to be replaced by a generic fuctionality of
		 * the memory subsystem
		 */
		descriptor_t *gdt_new =
		    (descriptor_t *) malloc(GDT_ITEMS * sizeof(descriptor_t));
		if (!gdt_new)
			panic("Cannot allocate memory for GDT.");

		/* Copy the BSP's GDT and clear the (per-CPU) TSS descriptor. */
		memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(descriptor_t));
		memsetb(&gdt_new[TSS_DES], sizeof(descriptor_t), 0);
		/* The AP starts in real mode, so its GDTR base is physical. */
		protected_ap_gdtr.limit = GDT_ITEMS * sizeof(descriptor_t);
		protected_ap_gdtr.base = KA2PA((uintptr_t) gdt_new);
		gdtr.base = (uintptr_t) gdt_new;

		if (l_apic_send_init_ipi(ops->cpu_apic_id(i))) {
			/*
			 * There may be just one AP being initialized at
			 * the time. After it comes completely up, it is
			 * supposed to wake us up.
			 */
			if (semaphore_down_timeout(&ap_completion_semaphore, 1000000) != EOK) {
				log(LF_ARCH, LVL_NOTE, "%s: waiting for cpu%u "
				    "(APIC ID = %d) timed out", __FUNCTION__,
				    i, ops->cpu_apic_id(i));
			}
		} else
			log(LF_ARCH, LVL_ERROR, "INIT IPI for l_apic%d failed",
			    ops->cpu_apic_id(i));
	}
}
[ed0dd65]190
[623b49f1]191int smp_irq_to_pin(unsigned int irq)
[a83a802]192{
[63e27ef]193 assert(ops != NULL);
[a83a802]194 return ops->irq_to_pin(irq);
195}
196
[5f85c91]197#endif /* CONFIG_SMP */
[b45c443]198
[06e1e95]199/** @}
[b45c443]200 */
Note: See TracBrowser for help on using the repository browser.