Changeset b1c57a8 in mainline for kernel/arch


Ignore:
Timestamp:
2014-10-09T15:03:55Z (11 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
e367939c
Parents:
21799398 (diff), 207e8880 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge from lp:~adam-hraska+lp/helenos/rcu/.

Only merge from the feature branch and resolve all conflicts.

Location:
kernel/arch
Files:
7 added
25 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/amd64/Makefile.inc

    r21799398 rb1c57a8  
    8484                arch/$(KARCH)/src/smp/ipi.c \
    8585                arch/$(KARCH)/src/smp/mps.c \
     86                arch/$(KARCH)/src/smp/smp_call.c \
    8687                arch/$(KARCH)/src/smp/smp.c
    8788endif
  • kernel/arch/amd64/include/arch/atomic.h

    r21799398 rb1c57a8  
    11/*
    22 * Copyright (c) 2001-2004 Jakub Jermar
     3 * Copyright (c) 2012      Adam Hraska
    34 * All rights reserved.
    45 *
     
    140141}
    141142
     143
     144#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
     145({ \
     146        switch (sizeof(typeof(*(pptr)))) { \
     147        case 1: \
     148                asm volatile ( \
     149                        prefix " cmpxchgb %[newval], %[ptr]\n" \
     150                        : /* Output operands. */ \
     151                        /* Old/current value is returned in eax. */ \
     152                        [oldval] "=a" (old_val), \
     153                        /* (*ptr) will be read and written to, hence "+" */ \
     154                        [ptr] "+m" (*pptr) \
     155                        : /* Input operands. */ \
     156                        /* Expected value must be in eax. */ \
     157                        [expval] "a" (exp_val), \
     158                        /* The new value may be in any register. */ \
     159                        [newval] "r" (new_val) \
     160                        : "memory" \
     161                ); \
     162                break; \
     163        case 2: \
     164                asm volatile ( \
     165                        prefix " cmpxchgw %[newval], %[ptr]\n" \
     166                        : /* Output operands. */ \
     167                        /* Old/current value is returned in eax. */ \
     168                        [oldval] "=a" (old_val), \
     169                        /* (*ptr) will be read and written to, hence "+" */ \
     170                        [ptr] "+m" (*pptr) \
     171                        : /* Input operands. */ \
     172                        /* Expected value must be in eax. */ \
     173                        [expval] "a" (exp_val), \
     174                        /* The new value may be in any register. */ \
     175                        [newval] "r" (new_val) \
     176                        : "memory" \
     177                ); \
     178                break; \
     179        case 4: \
     180                asm volatile ( \
     181                        prefix " cmpxchgl %[newval], %[ptr]\n" \
     182                        : /* Output operands. */ \
     183                        /* Old/current value is returned in eax. */ \
     184                        [oldval] "=a" (old_val), \
     185                        /* (*ptr) will be read and written to, hence "+" */ \
     186                        [ptr] "+m" (*pptr) \
     187                        : /* Input operands. */ \
     188                        /* Expected value must be in eax. */ \
     189                        [expval] "a" (exp_val), \
     190                        /* The new value may be in any register. */ \
     191                        [newval] "r" (new_val) \
     192                        : "memory" \
     193                ); \
     194                break; \
     195        case 8: \
     196                asm volatile ( \
     197                        prefix " cmpxchgq %[newval], %[ptr]\n" \
     198                        : /* Output operands. */ \
     199                        /* Old/current value is returned in eax. */ \
     200                        [oldval] "=a" (old_val), \
     201                        /* (*ptr) will be read and written to, hence "+" */ \
     202                        [ptr] "+m" (*pptr) \
     203                        : /* Input operands. */ \
     204                        /* Expected value must be in eax. */ \
     205                        [expval] "a" (exp_val), \
     206                        /* The new value may be in any register. */ \
     207                        [newval] "r" (new_val) \
     208                        : "memory" \
     209                ); \
     210                break; \
     211        } \
     212})
     213
     214
     215#ifndef local_atomic_cas
     216
     217#define local_atomic_cas(pptr, exp_val, new_val) \
     218({ \
     219        /* Use proper types and avoid name clashes */ \
     220        typeof(*(pptr)) _old_val_cas; \
     221        typeof(*(pptr)) _exp_val_cas = exp_val; \
     222        typeof(*(pptr)) _new_val_cas = new_val; \
     223        _atomic_cas_impl(pptr, _exp_val_cas, _new_val_cas, _old_val_cas, ""); \
     224        \
     225        _old_val_cas; \
     226})
     227
     228#else
     229/* Check that arch/atomic.h does not accidentally include /atomic.h. */
     230#error Architecture specific cpu local atomics already defined! Check your includes.
    142231#endif
    143232
     233
     234#ifndef local_atomic_exchange
     235/*
     236 * Issuing a xchg instruction always implies lock prefix semantics.
     237 * Therefore, it is cheaper to use a cmpxchg without a lock prefix
     238 * in a loop.
     239 */
     240#define local_atomic_exchange(pptr, new_val) \
     241({ \
     242        /* Use proper types and avoid name clashes */ \
     243        typeof(*(pptr)) _exp_val_x; \
     244        typeof(*(pptr)) _old_val_x; \
     245        typeof(*(pptr)) _new_val_x = new_val; \
     246        \
     247        do { \
     248                _exp_val_x = *pptr; \
     249                _old_val_x = local_atomic_cas(pptr, _exp_val_x, _new_val_x); \
     250        } while (_old_val_x != _exp_val_x); \
     251        \
     252        _old_val_x; \
     253})
     254
     255#else
      256/* Check that arch/atomic.h does not accidentally include /atomic.h. */
     257#error Architecture specific cpu local atomics already defined! Check your includes.
     258#endif
     259
     260
     261#endif
     262
    144263/** @}
    145264 */
  • kernel/arch/amd64/include/arch/cpu.h

    r21799398 rb1c57a8  
    7373        tss_t *tss;
    7474       
      75        unsigned int id; /** CPU's local, i.e. physical, APIC ID. */
     76       
    7577        size_t iomapver_copy;  /** Copy of TASK's I/O Permission bitmap generation count. */
    7678} cpu_arch_t;
  • kernel/arch/amd64/include/arch/interrupt.h

    r21799398 rb1c57a8  
    6969#define VECTOR_TLB_SHOOTDOWN_IPI  (IVT_FREEBASE + 1)
    7070#define VECTOR_DEBUG_IPI          (IVT_FREEBASE + 2)
     71#define VECTOR_SMP_CALL_IPI       (IVT_FREEBASE + 3)
    7172
    7273extern void (* disable_irqs_function)(uint16_t);
  • kernel/arch/amd64/src/amd64.c

    r21799398 rb1c57a8  
    168168}
    169169
    170 void arch_post_cpu_init()
     170void arch_post_cpu_init(void)
    171171{
    172172#ifdef CONFIG_SMP
  • kernel/arch/amd64/src/cpu/cpu.c

    r21799398 rb1c57a8  
    158158void cpu_print_report(cpu_t* m)
    159159{
    160         printf("cpu%d: (%s family=%d model=%d stepping=%d) %dMHz\n",
     160        printf("cpu%d: (%s family=%d model=%d stepping=%d apicid=%u) %dMHz\n",
    161161            m->id, vendor_str[m->arch.vendor], m->arch.family, m->arch.model,
    162             m->arch.stepping, m->frequency_mhz);
     162            m->arch.stepping, m->arch.id, m->frequency_mhz);
    163163}
    164164
  • kernel/arch/amd64/src/interrupt.c

    r21799398 rb1c57a8  
    5555#include <symtab.h>
    5656#include <stacktrace.h>
     57#include <smp/smp_call.h>
    5758
    5859/*
     
    161162        trap_virtual_eoi();
    162163        tlb_shootdown_ipi_recv();
     164}
     165
     166static void arch_smp_call_ipi_recv(unsigned int n, istate_t *istate)
     167{
     168        trap_virtual_eoi();
     169        smp_call_ipi_recv();
    163170}
    164171#endif
     
    224231        exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", true,
    225232            (iroutine_t) tlb_shootdown_ipi);
     233        exc_register(VECTOR_SMP_CALL_IPI, "smp_call", true,
     234                (iroutine_t) arch_smp_call_ipi_recv);
    226235#endif
    227236}
  • kernel/arch/arm32/Makefile.inc

    r21799398 rb1c57a8  
    6565        arch/$(KARCH)/src/mm/tlb.c \
    6666        arch/$(KARCH)/src/mm/page_fault.c \
     67        arch/$(KARCH)/src/atomic.c \
    6768        arch/$(KARCH)/src/ras.c
    6869
  • kernel/arch/ia32/Makefile.inc

    r21799398 rb1c57a8  
    8585        arch/$(KARCH)/src/smp/mps.c \
    8686        arch/$(KARCH)/src/smp/smp.c \
     87        arch/$(KARCH)/src/smp/smp_call.c \
    8788        arch/$(KARCH)/src/atomic.S \
    8889        arch/$(KARCH)/src/smp/ipi.c \
  • kernel/arch/ia32/include/arch/atomic.h

    r21799398 rb1c57a8  
    11/*
    22 * Copyright (c) 2001-2004 Jakub Jermar
     3 * Copyright (c) 2012      Adam Hraska
    34 * All rights reserved.
    45 *
     
    113114}
    114115
     116
    115117/** ia32 specific fast spinlock */
    116118NO_TRACE static inline void atomic_lock_arch(atomic_t *val)
     
    142144}
    143145
     146
     147#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
     148({ \
     149        switch (sizeof(typeof(*(pptr)))) { \
     150        case 1: \
     151                asm volatile ( \
     152                        prefix " cmpxchgb %[newval], %[ptr]\n" \
     153                        : /* Output operands. */ \
     154                        /* Old/current value is returned in eax. */ \
     155                        [oldval] "=a" (old_val), \
     156                        /* (*ptr) will be read and written to, hence "+" */ \
     157                        [ptr] "+m" (*pptr) \
     158                        : /* Input operands. */ \
     159                        /* Expected value must be in eax. */ \
     160                        [expval] "a" (exp_val), \
     161                        /* The new value may be in any register. */ \
     162                        [newval] "r" (new_val) \
     163                        : "memory" \
     164                ); \
     165                break; \
     166        case 2: \
     167                asm volatile ( \
     168                        prefix " cmpxchgw %[newval], %[ptr]\n" \
     169                        : /* Output operands. */ \
     170                        /* Old/current value is returned in eax. */ \
     171                        [oldval] "=a" (old_val), \
     172                        /* (*ptr) will be read and written to, hence "+" */ \
     173                        [ptr] "+m" (*pptr) \
     174                        : /* Input operands. */ \
     175                        /* Expected value must be in eax. */ \
     176                        [expval] "a" (exp_val), \
     177                        /* The new value may be in any register. */ \
     178                        [newval] "r" (new_val) \
     179                        : "memory" \
     180                ); \
     181                break; \
     182        case 4: \
     183                asm volatile ( \
     184                        prefix " cmpxchgl %[newval], %[ptr]\n" \
     185                        : /* Output operands. */ \
     186                        /* Old/current value is returned in eax. */ \
     187                        [oldval] "=a" (old_val), \
     188                        /* (*ptr) will be read and written to, hence "+" */ \
     189                        [ptr] "+m" (*pptr) \
     190                        : /* Input operands. */ \
     191                        /* Expected value must be in eax. */ \
     192                        [expval] "a" (exp_val), \
     193                        /* The new value may be in any register. */ \
     194                        [newval] "r" (new_val) \
     195                        : "memory" \
     196                ); \
     197                break; \
     198        } \
     199})
     200
     201
     202#ifndef local_atomic_cas
     203
     204#define local_atomic_cas(pptr, exp_val, new_val) \
     205({ \
     206        /* Use proper types and avoid name clashes */ \
     207        typeof(*(pptr)) _old_val_cas; \
     208        typeof(*(pptr)) _exp_val_cas = exp_val; \
     209        typeof(*(pptr)) _new_val_cas = new_val; \
     210        _atomic_cas_impl(pptr, _exp_val_cas, _new_val_cas, _old_val_cas, ""); \
     211        \
     212        _old_val_cas; \
     213})
     214
     215#else
      216/* Check that arch/atomic.h does not accidentally include /atomic.h. */
     217#error Architecture specific cpu local atomics already defined! Check your includes.
     218#endif
     219
     220
     221#ifndef local_atomic_exchange
     222/*
     223 * Issuing a xchg instruction always implies lock prefix semantics.
     224 * Therefore, it is cheaper to use a cmpxchg without a lock prefix
     225 * in a loop.
     226 */
     227#define local_atomic_exchange(pptr, new_val) \
     228({ \
     229        /* Use proper types and avoid name clashes */ \
     230        typeof(*(pptr)) _exp_val_x; \
     231        typeof(*(pptr)) _old_val_x; \
     232        typeof(*(pptr)) _new_val_x = new_val; \
     233        \
     234        do { \
     235                _exp_val_x = *pptr; \
     236                _old_val_x = local_atomic_cas(pptr, _exp_val_x, _new_val_x); \
     237        } while (_old_val_x != _exp_val_x); \
     238        \
     239        _old_val_x; \
     240})
     241
     242#else
      244/* Check that arch/atomic.h does not accidentally include /atomic.h. */
     244#error Architecture specific cpu local atomics already defined! Check your includes.
     245#endif
     246
     247
    144248#endif
    145249
  • kernel/arch/ia32/include/arch/cpu.h

    r21799398 rb1c57a8  
    6161        unsigned int stepping;
    6262        cpuid_feature_info_t fi;
    63        
     63
      64        unsigned int id; /** CPU's local, i.e. physical, APIC ID. */
     65
    6466        tss_t *tss;
    6567       
  • kernel/arch/ia32/include/arch/interrupt.h

    r21799398 rb1c57a8  
    6969#define VECTOR_TLB_SHOOTDOWN_IPI  (IVT_FREEBASE + 1)
    7070#define VECTOR_DEBUG_IPI          (IVT_FREEBASE + 2)
     71#define VECTOR_SMP_CALL_IPI       (IVT_FREEBASE + 3)
    7172
    7273extern void (* disable_irqs_function)(uint16_t);
  • kernel/arch/ia32/include/arch/smp/apic.h

    r21799398 rb1c57a8  
    353353extern void l_apic_init(void);
    354354extern void l_apic_eoi(void);
     355extern int l_apic_send_custom_ipi(uint8_t, uint8_t);
    355356extern int l_apic_broadcast_custom_ipi(uint8_t);
    356357extern int l_apic_send_init_ipi(uint8_t);
  • kernel/arch/ia32/src/cpu/cpu.c

    r21799398 rb1c57a8  
    160160void cpu_print_report(cpu_t* cpu)
    161161{
    162         printf("cpu%u: (%s family=%u model=%u stepping=%u) %" PRIu16 " MHz\n",
    163                 cpu->id, vendor_str[cpu->arch.vendor], cpu->arch.family,
    164                 cpu->arch.model, cpu->arch.stepping, cpu->frequency_mhz);
     162        printf("cpu%u: (%s family=%u model=%u stepping=%u apicid=%u) %" PRIu16
     163                " MHz\n", cpu->id, vendor_str[cpu->arch.vendor], cpu->arch.family,
     164                cpu->arch.model, cpu->arch.stepping, cpu->arch.id, cpu->frequency_mhz);
    165165}
    166166
  • kernel/arch/ia32/src/ia32.c

    r21799398 rb1c57a8  
    122122}
    123123
    124 void arch_post_cpu_init()
     124void arch_post_cpu_init(void)
    125125{
    126126#ifdef CONFIG_SMP
  • kernel/arch/ia32/src/interrupt.c

    r21799398 rb1c57a8  
    5454#include <symtab.h>
    5555#include <stacktrace.h>
     56#include <smp/smp_call.h>
     57#include <proc/task.h>
    5658
    5759/*
     
    170172        tlb_shootdown_ipi_recv();
    171173}
     174
     175static void arch_smp_call_ipi_recv(unsigned int n, istate_t *istate)
     176{
     177        trap_virtual_eoi();
     178        smp_call_ipi_recv();
     179}
    172180#endif
    173181
     
    230238        exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", true,
    231239            (iroutine_t) tlb_shootdown_ipi);
     240        exc_register(VECTOR_SMP_CALL_IPI, "smp_call", true,
     241            (iroutine_t) arch_smp_call_ipi_recv);
    232242#endif
    233243}
  • kernel/arch/ia32/src/smp/apic.c

    r21799398 rb1c57a8  
    264264}
    265265
    266 #define DELIVS_PENDING_SILENT_RETRIES   4       
    267 
     266/* Waits for the destination cpu to accept the previous ipi. */
    268267static void l_apic_wait_for_delivery(void)
    269268{
    270269        icr_t icr;
    271         unsigned retries = 0;
    272 
     270       
    273271        do {
    274                 if (retries++ > DELIVS_PENDING_SILENT_RETRIES) {
    275                         retries = 0;
    276 #ifdef CONFIG_DEBUG
    277                         log(LF_ARCH, LVL_DEBUG, "IPI is pending.");
    278 #endif
    279                         delay(20);
    280                 }
    281272                icr.lo = l_apic[ICRlo];
    282         } while (icr.delivs == DELIVS_PENDING);
    283        
     273        } while (icr.delivs != DELIVS_IDLE);
     274}
     275
     276/** Send one CPU an IPI vector.
     277 *
     278 * @param apicid Physical APIC ID of the destination CPU.
     279 * @param vector Interrupt vector to be sent.
     280 *
     281 * @return 0 on failure, 1 on success.
     282 */
     283int l_apic_send_custom_ipi(uint8_t apicid, uint8_t vector)
     284{
     285        icr_t icr;
     286
     287        /* Wait for a destination cpu to accept our previous ipi. */
     288        l_apic_wait_for_delivery();
     289       
     290        icr.lo = l_apic[ICRlo];
     291        icr.hi = l_apic[ICRhi];
     292       
     293        icr.delmod = DELMOD_FIXED;
     294        icr.destmod = DESTMOD_PHYS;
     295        icr.level = LEVEL_ASSERT;
     296        icr.shorthand = SHORTHAND_NONE;
     297        icr.trigger_mode = TRIGMOD_LEVEL;
     298        icr.vector = vector;
     299        icr.dest = apicid;
     300
     301        /* Send the IPI by writing to l_apic[ICRlo]. */
     302        l_apic[ICRhi] = icr.hi;
     303        l_apic[ICRlo] = icr.lo;
     304       
     305        return apic_poll_errors();
    284306}
    285307
     
    294316{
    295317        icr_t icr;
     318
     319        /* Wait for a destination cpu to accept our previous ipi. */
     320        l_apic_wait_for_delivery();
    296321       
    297322        icr.lo = l_apic[ICRlo];
     
    304329       
    305330        l_apic[ICRlo] = icr.lo;
    306 
    307         l_apic_wait_for_delivery();
    308331       
    309332        return apic_poll_errors();
  • kernel/arch/ia32/src/smp/smp.c

    r21799398 rb1c57a8  
    5555#include <memstr.h>
    5656#include <arch/drivers/i8259.h>
     57#include <cpu.h>
    5758
    5859#ifdef CONFIG_SMP
     
    7778                io_apic = (uint32_t *) km_map((uintptr_t) io_apic, PAGE_SIZE,
    7879                    PAGE_WRITE | PAGE_NOT_CACHEABLE);
     80        }
     81}
     82
     83static void cpu_arch_id_init(void)
     84{
     85        ASSERT(ops != NULL);
     86        ASSERT(cpus != NULL);
     87       
     88        for (unsigned int i = 0; i < config.cpu_count; ++i) {
     89                cpus[i].arch.id = ops->cpu_apic_id(i);
    7990        }
    8091}
     
    92103       
    93104        ASSERT(ops != NULL);
     105
     106        /*
     107         * SMP initialized, cpus array allocated. Assign each CPU its
     108         * physical APIC ID.
     109         */
     110        cpu_arch_id_init();
    94111       
    95112        /*
  • kernel/arch/ia64/Makefile.inc

    r21799398 rb1c57a8  
    6161        arch/$(KARCH)/src/ddi/ddi.c \
    6262        arch/$(KARCH)/src/smp/smp.c \
     63        arch/$(KARCH)/src/smp/smp_call.c \
    6364        arch/$(KARCH)/src/drivers/it.c
    6465
  • kernel/arch/mips32/Makefile.inc

    r21799398 rb1c57a8  
    7171        arch/$(KARCH)/src/fpu_context.c \
    7272        arch/$(KARCH)/src/smp/smp.c \
     73        arch/$(KARCH)/src/smp/smp_call.c \
    7374        arch/$(KARCH)/src/machine_func.c
    7475
  • kernel/arch/sparc64/Makefile.inc

    r21799398 rb1c57a8  
    100100        ARCH_SOURCES += \
    101101                arch/$(KARCH)/src/smp/$(USARCH)/smp.c \
     102                arch/$(KARCH)/src/smp/$(USARCH)/smp_call.c \
    102103                arch/$(KARCH)/src/smp/$(USARCH)/ipi.c
    103104endif
  • kernel/arch/sparc64/include/arch/barrier.h

    r21799398 rb1c57a8  
    3737
    3838#include <trace.h>
     39
     40#ifdef KERNEL
     41#include <arch/common.h>
     42#else
     43#include <libarch/common.h>
     44#endif
    3945
    4046/*
  • kernel/arch/sparc64/include/arch/interrupt.h

    r21799398 rb1c57a8  
    4747
    4848enum {
    49         IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI
     49        IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI,
     50        IPI_SMP_CALL
    5051};
    5152
  • kernel/arch/sparc64/src/debug/stacktrace.c

    r21799398 rb1c57a8  
    3636#include <syscall/copy.h>
    3737#include <typedefs.h>
     38#include <proc/thread.h>
    3839
    3940#include <arch.h>
  • kernel/arch/sparc64/src/smp/sun4u/ipi.c

    r21799398 rb1c57a8  
    3434
    3535#include <smp/ipi.h>
     36#include <arch/smp/sun4u/ipi.h>
    3637#include <cpu.h>
    3738#include <arch.h>
     
    4041#include <config.h>
    4142#include <mm/tlb.h>
     43#include <smp/smp_call.h>
    4244#include <arch/interrupt.h>
    4345#include <arch/trap/interrupt.h>
     
    171173}
    172174
     175
     176/*
      177 * Deliver an IPI to the specified processor (other than the current one).
     178 *
     179 * Interrupts must be disabled.
     180 *
     181 * @param cpu_id Destination cpu id (index into cpus array). Must not
     182 *               be the current cpu.
     183 * @param ipi    IPI number.
     184 */
     185void ipi_unicast_arch(unsigned int cpu_id, int ipi)
     186{
     187        ASSERT(&cpus[cpu_id] != CPU);
     188       
     189        if (ipi == IPI_SMP_CALL) {
     190                cross_call(cpus[cpu_id].arch.mid, smp_call_ipi_recv);
     191        } else {
     192                panic("Unknown IPI (%d).\n", ipi);
     193                return;
     194        }
     195}
     196
    173197/** @}
    174198 */
Note: See TracChangeset for help on using the changeset viewer.