Index: kernel/arch/amd64/include/arch/atomic.h
===================================================================
--- kernel/arch/amd64/include/arch/atomic.h	(revision 0aee9b4c5afa08b0cd8ed8c15949d5a7208167d5)
+++ kernel/arch/amd64/include/arch/atomic.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2001-2004 Jakub Jermar
+ * Copyright (c) 2012      Adam Hraska
  * All rights reserved.
  *
@@ -140,6 +141,124 @@
 }
 
+
+#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
+({ \
+	switch (sizeof(typeof(*(pptr)))) { \
+	case 1: \
+		asm volatile ( \
+			prefix " cmpxchgb %[newval], %[ptr]\n" \
+			: /* Output operands. */ \
+			/* Old/current value is returned in al (1-byte cmpxchg). */ \
+			[oldval] "=a" (old_val), \
+			/* (*ptr) will be read and written to, hence "+" */ \
+			[ptr] "+m" (*pptr) \
+			: /* Input operands. */ \
+			/* Expected value must be in al. */ \
+			[expval] "a" (exp_val), \
+			/* The new value may be in any register. */ \
+			[newval] "r" (new_val) \
+			: "memory" \
+		); \
+		break; \
+	case 2: \
+		asm volatile ( \
+			prefix " cmpxchgw %[newval], %[ptr]\n" \
+			: /* Output operands. */ \
+			/* Old/current value is returned in ax (2-byte cmpxchg). */ \
+			[oldval] "=a" (old_val), \
+			/* (*ptr) will be read and written to, hence "+" */ \
+			[ptr] "+m" (*pptr) \
+			: /* Input operands. */ \
+			/* Expected value must be in ax. */ \
+			[expval] "a" (exp_val), \
+			/* The new value may be in any register. */ \
+			[newval] "r" (new_val) \
+			: "memory" \
+		); \
+		break; \
+	case 4: \
+		asm volatile ( \
+			prefix " cmpxchgl %[newval], %[ptr]\n" \
+			: /* Output operands. */ \
+			/* Old/current value is returned in eax (4-byte cmpxchg). */ \
+			[oldval] "=a" (old_val), \
+			/* (*ptr) will be read and written to, hence "+" */ \
+			[ptr] "+m" (*pptr) \
+			: /* Input operands. */ \
+			/* Expected value must be in eax. */ \
+			[expval] "a" (exp_val), \
+			/* The new value may be in any register. */ \
+			[newval] "r" (new_val) \
+			: "memory" \
+		); \
+		break; \
+	case 8: \
+		asm volatile ( \
+			prefix " cmpxchgq %[newval], %[ptr]\n" \
+			: /* Output operands. */ \
+			/* Old/current value is returned in rax (8-byte cmpxchg). */ \
+			[oldval] "=a" (old_val), \
+			/* (*ptr) will be read and written to, hence "+" */ \
+			[ptr] "+m" (*pptr) \
+			: /* Input operands. */ \
+			/* Expected value must be in rax. */ \
+			[expval] "a" (exp_val), \
+			/* The new value may be in any register. */ \
+			[newval] "r" (new_val) \
+			: "memory" \
+		); \
+		break; \
+	} \
+})
+
+
+#ifndef local_atomic_cas
+
+#define local_atomic_cas(pptr, exp_val, new_val) \
+({ \
+	/* Evaluate macro arguments exactly once; unique names avoid clashes. */ \
+	typeof(*(pptr)) _old_val_cas; \
+	typeof(*(pptr)) _exp_val_cas = exp_val; \
+	typeof(*(pptr)) _new_val_cas = new_val; \
+	_atomic_cas_impl(pptr, _exp_val_cas, _new_val_cas, _old_val_cas, ""); \
+	\
+	_old_val_cas; \
+})
+
+#else
+/* If already defined, the generic <atomic.h> was probably included by mistake. */
+#error Architecture specific cpu local atomics already defined! Check your includes.
 #endif
 
+
+#ifndef local_atomic_exchange
+/*
+ * Issuing an xchg instruction always implies lock prefix semantics.
+ * Therefore, it is cheaper to use a cmpxchg without a lock prefix
+ * in a loop.
+ */
+#define local_atomic_exchange(pptr, new_val) \
+({ \
+	/* Evaluate macro arguments exactly once; unique names avoid clashes. */ \
+	typeof(*(pptr)) _exp_val_x; \
+	typeof(*(pptr)) _old_val_x; \
+	typeof(*(pptr)) _new_val_x = new_val; \
+	\
+	do { \
+		_exp_val_x = *pptr; \
+		_old_val_x = local_atomic_cas(pptr, _exp_val_x, _new_val_x); \
+	} while (_old_val_x != _exp_val_x); \
+	\
+	_old_val_x; \
+})
+
+#else
+/* If already defined, the generic <atomic.h> was probably included by mistake. */
+#error Architecture specific cpu local atomics already defined! Check your includes.
+#endif
+
+
+#endif
+
 /** @}
  */
Index: kernel/arch/amd64/include/arch/cpu.h
===================================================================
--- kernel/arch/amd64/include/arch/cpu.h	(revision 0aee9b4c5afa08b0cd8ed8c15949d5a7208167d5)
+++ kernel/arch/amd64/include/arch/cpu.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
@@ -73,4 +73,6 @@
 	tss_t *tss;
 	
+	unsigned int id; /** CPU's local, ie physical, APIC ID. */
+	
 	size_t iomapver_copy;  /** Copy of TASK's I/O Permission bitmap generation count. */
 } cpu_arch_t;
Index: kernel/arch/amd64/include/arch/interrupt.h
===================================================================
--- kernel/arch/amd64/include/arch/interrupt.h	(revision 0aee9b4c5afa08b0cd8ed8c15949d5a7208167d5)
+++ kernel/arch/amd64/include/arch/interrupt.h	(revision 5e3fa9dbfffc978cdcfdaaea9fc6a50dbb0bca1a)
@@ -69,4 +69,5 @@
 #define VECTOR_TLB_SHOOTDOWN_IPI  (IVT_FREEBASE + 1)
 #define VECTOR_DEBUG_IPI          (IVT_FREEBASE + 2)
+#define VECTOR_SMP_CALL_IPI       (IVT_FREEBASE + 3)
 
 extern void (* disable_irqs_function)(uint16_t);
