Changeset b1c57a8 in mainline for kernel/arch/amd64


Timestamp: 2014-10-09T15:03:55Z
Author: Jakub Jermar <jakub@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: e367939c
Parents: 21799398 (diff), 207e8880 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge from lp:~adam-hraska+lp/helenos/rcu/.

Only merge from the feature branch and resolve all conflicts.

Location: kernel/arch/amd64
Files: 1 added, 7 edited

  • kernel/arch/amd64/Makefile.inc

    r21799398 rb1c57a8
             arch/$(KARCH)/src/smp/ipi.c \
             arch/$(KARCH)/src/smp/mps.c \
    +        arch/$(KARCH)/src/smp/smp_call.c \
             arch/$(KARCH)/src/smp/smp.c
     endif
  • kernel/arch/amd64/include/arch/atomic.h

    r21799398 rb1c57a8
     /*
      * Copyright (c) 2001-2004 Jakub Jermar
    + * Copyright (c) 2012      Adam Hraska
      * All rights reserved.
      *
    …
     }

    +
    +#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
    +({ \
    +        switch (sizeof(typeof(*(pptr)))) { \
    +        case 1: \
    +                asm volatile ( \
    +                        prefix " cmpxchgb %[newval], %[ptr]\n" \
    +                        : /* Output operands. */ \
    +                        /* Old/current value is returned in eax. */ \
    +                        [oldval] "=a" (old_val), \
    +                        /* (*ptr) will be read and written to, hence "+" */ \
    +                        [ptr] "+m" (*pptr) \
    +                        : /* Input operands. */ \
    +                        /* Expected value must be in eax. */ \
    +                        [expval] "a" (exp_val), \
    +                        /* The new value may be in any register. */ \
    +                        [newval] "r" (new_val) \
    +                        : "memory" \
    +                ); \
    +                break; \
    +        case 2: \
    +                asm volatile ( \
    +                        prefix " cmpxchgw %[newval], %[ptr]\n" \
    +                        : /* Output operands. */ \
    +                        /* Old/current value is returned in eax. */ \
    +                        [oldval] "=a" (old_val), \
    +                        /* (*ptr) will be read and written to, hence "+" */ \
    +                        [ptr] "+m" (*pptr) \
    +                        : /* Input operands. */ \
    +                        /* Expected value must be in eax. */ \
    +                        [expval] "a" (exp_val), \
    +                        /* The new value may be in any register. */ \
    +                        [newval] "r" (new_val) \
    +                        : "memory" \
    +                ); \
    +                break; \
    +        case 4: \
    +                asm volatile ( \
    +                        prefix " cmpxchgl %[newval], %[ptr]\n" \
    +                        : /* Output operands. */ \
    +                        /* Old/current value is returned in eax. */ \
    +                        [oldval] "=a" (old_val), \
    +                        /* (*ptr) will be read and written to, hence "+" */ \
    +                        [ptr] "+m" (*pptr) \
    +                        : /* Input operands. */ \
    +                        /* Expected value must be in eax. */ \
    +                        [expval] "a" (exp_val), \
    +                        /* The new value may be in any register. */ \
    +                        [newval] "r" (new_val) \
    +                        : "memory" \
    +                ); \
    +                break; \
    +        case 8: \
    +                asm volatile ( \
    +                        prefix " cmpxchgq %[newval], %[ptr]\n" \
    +                        : /* Output operands. */ \
    +                        /* Old/current value is returned in eax. */ \
    +                        [oldval] "=a" (old_val), \
    +                        /* (*ptr) will be read and written to, hence "+" */ \
    +                        [ptr] "+m" (*pptr) \
    +                        : /* Input operands. */ \
    +                        /* Expected value must be in eax. */ \
    +                        [expval] "a" (exp_val), \
    +                        /* The new value may be in any register. */ \
    +                        [newval] "r" (new_val) \
    +                        : "memory" \
    +                ); \
    +                break; \
    +        } \
    +})
    +
    +
    +#ifndef local_atomic_cas
    +
    +#define local_atomic_cas(pptr, exp_val, new_val) \
    +({ \
    +        /* Use proper types and avoid name clashes */ \
    +        typeof(*(pptr)) _old_val_cas; \
    +        typeof(*(pptr)) _exp_val_cas = exp_val; \
    +        typeof(*(pptr)) _new_val_cas = new_val; \
    +        _atomic_cas_impl(pptr, _exp_val_cas, _new_val_cas, _old_val_cas, ""); \
    +        \
    +        _old_val_cas; \
    +})
    +
    +#else
    +/* Check if arch/atomic.h does not accidentally include /atomic.h .*/
    +#error Architecture specific cpu local atomics already defined! Check your includes.
     #endif

    +
    +#ifndef local_atomic_exchange
    +/*
    + * Issuing a xchg instruction always implies lock prefix semantics.
    + * Therefore, it is cheaper to use a cmpxchg without a lock prefix
    + * in a loop.
    + */
    +#define local_atomic_exchange(pptr, new_val) \
    +({ \
    +        /* Use proper types and avoid name clashes */ \
    +        typeof(*(pptr)) _exp_val_x; \
    +        typeof(*(pptr)) _old_val_x; \
    +        typeof(*(pptr)) _new_val_x = new_val; \
    +        \
    +        do { \
    +                _exp_val_x = *pptr; \
    +                _old_val_x = local_atomic_cas(pptr, _exp_val_x, _new_val_x); \
    +        } while (_old_val_x != _exp_val_x); \
    +        \
    +        _old_val_x; \
    +})
    +
    +#else
    +/* Check if arch/atomic.h does not accidentally include /atomic.h .*/
    +#error Architecture specific cpu local atomics already defined! Check your includes.
    +#endif
    +
    +
    +#endif
    +
     /** @}
      */
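Note: the local_atomic_cas()/local_atomic_exchange() macros added above are CPU-local variants of compare-and-swap and exchange. A cmpxchg without the lock prefix is still atomic with respect to interrupts on the same CPU (interrupts are taken only at instruction boundaries), which is all that per-CPU data needs, and it avoids the cost of a locked cache/bus transaction. The following standalone sketch shows the same pattern restricted to the 8-byte case; the names cas64_local() and xchg64_local() are illustrative and do not appear in the changeset.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* CPU-local CAS: cmpxchg without the lock prefix, as _atomic_cas_impl()
     * does when invoked with an empty prefix.  Returns the value *ptr held
     * before the instruction executed. */
    static inline uint64_t cas64_local(volatile uint64_t *ptr, uint64_t exp, uint64_t newval)
    {
            uint64_t old;
            asm volatile (
                    "cmpxchgq %[newval], %[ptr]\n"
                    : [oldval] "=a" (old),    /* old value comes back in rax */
                      [ptr] "+m" (*ptr)       /* memory operand is both read and written */
                    : [expval] "a" (exp),     /* expected value must be preloaded into rax */
                      [newval] "r" (newval)   /* new value may live in any register */
                    : "memory"
            );
            return old;
    }

    /* CPU-local exchange built from a CAS loop, mirroring local_atomic_exchange(). */
    static inline uint64_t xchg64_local(volatile uint64_t *ptr, uint64_t newval)
    {
            uint64_t exp, old;
            do {
                    exp = *ptr;
                    old = cas64_local(ptr, exp, newval);
            } while (old != exp);
            return old;
    }

    int main(void)
    {
            volatile uint64_t counter = 5;

            /* CAS succeeds: counter becomes 7 and the old value 5 is returned. */
            printf("cas returned %" PRIu64 "\n", cas64_local(&counter, 5, 7));

            /* Exchange: counter becomes 42 and the old value 7 is returned. */
            printf("xchg returned %" PRIu64 "\n", xchg64_local(&counter, 42));
            printf("counter is now %" PRIu64 "\n", counter);
            return 0;
    }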
  • kernel/arch/amd64/include/arch/cpu.h

    r21799398 rb1c57a8
             tss_t *tss;

    +        unsigned int id; /** CPU's local, ie physical, APIC ID. */
    +
             size_t iomapver_copy;  /** Copy of TASK's I/O Permission bitmap generation count. */
     } cpu_arch_t;
  • kernel/arch/amd64/include/arch/interrupt.h

    r21799398 rb1c57a8
     #define VECTOR_TLB_SHOOTDOWN_IPI  (IVT_FREEBASE + 1)
     #define VECTOR_DEBUG_IPI          (IVT_FREEBASE + 2)
    +#define VECTOR_SMP_CALL_IPI       (IVT_FREEBASE + 3)

     extern void (* disable_irqs_function)(uint16_t);
  • kernel/arch/amd64/src/amd64.c

    r21799398 rb1c57a8
     }

    -void arch_post_cpu_init()
    +void arch_post_cpu_init(void)
     {
     #ifdef CONFIG_SMP
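Note: the change from arch_post_cpu_init() to arch_post_cpu_init(void) is more than style. In C (prior to C23) an empty parameter list leaves the parameters unspecified, whereas (void) is a prototype taking no arguments, so the compiler can reject calls that accidentally pass arguments. A minimal illustration (f and g are placeholders, not functions from this changeset):

    void f() { }        /* old-style definition: no prototype, parameters unspecified */
    void g(void) { }    /* prototype: takes no arguments */

    int main(void)
    {
            f(1, 2);    /* still compiles, even though the mismatched call is undefined behavior */
            /* g(1); */ /* would not compile: too many arguments to function 'g' */
            g();
            return 0;
    }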
  • kernel/arch/amd64/src/cpu/cpu.c

    r21799398 rb1c57a8
     void cpu_print_report(cpu_t* m)
     {
    -        printf("cpu%d: (%s family=%d model=%d stepping=%d) %dMHz\n",
    +        printf("cpu%d: (%s family=%d model=%d stepping=%d apicid=%u) %dMHz\n",
                 m->id, vendor_str[m->arch.vendor], m->arch.family, m->arch.model,
    -            m->arch.stepping, m->frequency_mhz);
    +            m->arch.stepping, m->arch.id, m->frequency_mhz);
     }

  • kernel/arch/amd64/src/interrupt.c

    r21799398 rb1c57a8
     #include <symtab.h>
     #include <stacktrace.h>
    +#include <smp/smp_call.h>

     /*
    …
             trap_virtual_eoi();
             tlb_shootdown_ipi_recv();
    +}
    +
    +static void arch_smp_call_ipi_recv(unsigned int n, istate_t *istate)
    +{
    +        trap_virtual_eoi();
    +        smp_call_ipi_recv();
     }
     #endif
    …
             exc_register(VECTOR_TLB_SHOOTDOWN_IPI, "tlb_shootdown", true,
                 (iroutine_t) tlb_shootdown_ipi);
    +        exc_register(VECTOR_SMP_CALL_IPI, "smp_call", true,
    +                (iroutine_t) arch_smp_call_ipi_recv);
     #endif
     }
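Note: arch_smp_call_ipi_recv() follows the same shape as the TLB shootdown handler above it: acknowledge the interrupt with trap_virtual_eoi(), then hand the work off to the generic layer via smp_call_ipi_recv(). As a rough user-space analogue of what such a cross-CPU call mechanism does, the sketch below uses a pthread to stand in for the remote CPU and a condition variable to stand in for the IPI; none of the identifiers (remote_cpu_t, smp_call_sketch, ...) come from HelenOS.

    /* Build with: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdio.h>

    typedef void (*call_func_t)(void *);

    typedef struct {
            pthread_mutex_t lock;
            pthread_cond_t ipi;     /* "interrupt" used to wake the remote CPU */
            call_func_t func;       /* queued function, NULL when idle */
            void *arg;
            int done;
            int shutdown;
    } remote_cpu_t;

    static void *remote_cpu_loop(void *p)
    {
            remote_cpu_t *cpu = p;
            pthread_mutex_lock(&cpu->lock);
            for (;;) {
                    while (cpu->func == NULL && !cpu->shutdown)
                            pthread_cond_wait(&cpu->ipi, &cpu->lock);  /* wait for the "IPI" */
                    if (cpu->shutdown)
                            break;
                    cpu->func(cpu->arg);    /* dispatch the queued call */
                    cpu->func = NULL;
                    cpu->done = 1;
                    pthread_cond_broadcast(&cpu->ipi);
            }
            pthread_mutex_unlock(&cpu->lock);
            return NULL;
    }

    /* Queue func(arg) on the remote CPU, raise the "IPI", wait for completion. */
    static void smp_call_sketch(remote_cpu_t *cpu, call_func_t func, void *arg)
    {
            pthread_mutex_lock(&cpu->lock);
            cpu->func = func;
            cpu->arg = arg;
            cpu->done = 0;
            pthread_cond_broadcast(&cpu->ipi);
            while (!cpu->done)
                    pthread_cond_wait(&cpu->ipi, &cpu->lock);
            pthread_mutex_unlock(&cpu->lock);
    }

    static void say_hello(void *arg)
    {
            printf("remote call running with arg %s\n", (char *) arg);
    }

    int main(void)
    {
            remote_cpu_t cpu = {
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .ipi = PTHREAD_COND_INITIALIZER,
            };
            pthread_t t;

            pthread_create(&t, NULL, remote_cpu_loop, &cpu);
            smp_call_sketch(&cpu, say_hello, "42");

            pthread_mutex_lock(&cpu.lock);
            cpu.shutdown = 1;
            pthread_cond_broadcast(&cpu.ipi);
            pthread_mutex_unlock(&cpu.lock);
            pthread_join(t, NULL);
            return 0;
    }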