Index: kernel/arch/amd64/include/atomic.h
===================================================================
--- kernel/arch/amd64/include/atomic.h	(revision f1c7755075766af5ee998c23d3234987a8f09572)
+++ kernel/arch/amd64/include/atomic.h	(revision bc216a07a86e40d74455b7f4d6fb8f65e195443d)
@@ -141,5 +141,5 @@
 
 
-#define _atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, prefix) \
+#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
 	asm volatile ( \
 		prefix " cmpxchgq %[newval], %[ptr]\n" \
@@ -162,5 +162,5 @@
 {
 	void *old_val;
-	_atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "lock\n");
+	_atomic_cas_impl(pptr, exp_val, new_val, old_val, "lock\n");
 	return old_val;
 }
@@ -174,20 +174,44 @@
 {
 	void *old_val;
-	_atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "");
+	_atomic_cas_impl(pptr, exp_val, new_val, old_val, "");
 	return old_val;
 }
 
-/** Atomicaly sets *ptr to new_val and returns the previous value. */
-NO_TRACE static inline void * atomic_swap_ptr(void **pptr, void *new_val)
-{
-	void *new_in_old_out = new_val;
-	
-	asm volatile (
-		"xchgq %[val], %[pptr]\n"
-		: [val] "+r" (new_in_old_out),
-		  [pptr] "+m" (*pptr)
-	);
-	
-	return new_in_old_out;
+
+#define _atomic_swap_impl(pptr, new_val) \
+({ \
+	typeof(*(pptr)) new_in_old_out = new_val; \
+	asm volatile ( \
+		"xchgq %[val], %[p_ptr]\n" \
+		: [val] "+r" (new_in_old_out), \
+		  [p_ptr] "+m" (*pptr) \
+	); \
+	\
+	new_in_old_out; \
+})
+
+/*
+ * Issuing a xchg instruction always implies lock prefix semantics.
+ * Therefore, it is cheaper to use a cmpxchg without a lock prefix
+ * in a loop.
+ */
+#define _atomic_swap_local_impl(pptr, new_val) \
+({ \
+	typeof(*(pptr)) exp_val; \
+	typeof(*(pptr)) old_val; \
+	\
+	do { \
+		exp_val = *pptr; \
+		_atomic_cas_impl(pptr, exp_val, new_val, old_val, ""); \
+	} while (old_val != exp_val); \
+	\
+	old_val; \
+})
+
+
+/** Atomically sets *ptr to val and returns the previous value. */
+NO_TRACE static inline void * atomic_set_return_ptr(void **pptr, void *val)
+{
+	return _atomic_swap_impl(pptr, val);
 }
 
@@ -197,25 +221,32 @@
  * NOT atomic wrt to other cpus.
  */
-NO_TRACE static inline void * atomic_swap_ptr_local(void **pptr, void *new_val)
-{
-	/* 
-	 * Issuing a xchg instruction always implies lock prefix semantics.
-	 * Therefore, it is cheaper to use a cmpxchg without a lock prefix 
-	 * in a loop.
-	 */
-	void *exp_val;
-	void *old_val;
-	
-	do {
-		exp_val = *pptr;
-		old_val = atomic_cas_ptr_local(pptr, exp_val, new_val);
-	} while (old_val != exp_val);
-	
-	return old_val;
+NO_TRACE static inline void * atomic_set_return_ptr_local(
+	void **pptr, void *new_val)
+{
+	return _atomic_swap_local_impl(pptr, new_val);
+}
+
+/** Atomically sets *ptr to val and returns the previous value. */
+NO_TRACE static inline native_t atomic_set_return_native_t(
+	native_t *p, native_t val)
+{
+	return _atomic_swap_impl(p, val);
+}
+
+/** Sets *ptr to new_val and returns the previous value. NOT smp safe.
+ *
+ * This function is only atomic wrt to local interrupts and it is
+ * NOT atomic wrt to other cpus.
+ */
+NO_TRACE static inline native_t atomic_set_return_native_t_local(
+	native_t *p, native_t new_val)
+{
+	return _atomic_swap_local_impl(p, new_val);
 }
 
 
-#undef _atomic_cas_ptr_impl
+#undef _atomic_cas_impl
-
+#undef _atomic_swap_impl
+#undef _atomic_swap_local_impl
 
 #endif
Index: kernel/arch/ia32/include/atomic.h
===================================================================
--- kernel/arch/ia32/include/atomic.h	(revision f1c7755075766af5ee998c23d3234987a8f09572)
+++ kernel/arch/ia32/include/atomic.h	(revision bc216a07a86e40d74455b7f4d6fb8f65e195443d)
@@ -144,5 +144,5 @@
 
 
-#define _atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, prefix) \
+#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
 	asm volatile ( \
 		prefix " cmpxchgl %[newval], %[ptr]\n" \
@@ -165,5 +165,5 @@
 {
 	void *old_val;
-	_atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "lock\n");
+	_atomic_cas_impl(pptr, exp_val, new_val, old_val, "lock\n");
 	return old_val;
 }
@@ -177,21 +177,44 @@
 {
 	void *old_val;
-	_atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "");
+	_atomic_cas_impl(pptr, exp_val, new_val, old_val, "");
 	return old_val;
 }
 
 
-/** Atomicaly sets *ptr to new_val and returns the previous value. */
-NO_TRACE static inline void * atomic_swap_ptr(void **pptr, void *new_val)
-{
-	void *new_in_old_out = new_val;
-	
-	asm volatile (
-		"xchgl %[val], %[pptr]\n"
-		: [val] "+r" (new_in_old_out),
-		  [pptr] "+m" (*pptr)
-	);
-	
-	return new_in_old_out;
+#define _atomic_swap_impl(pptr, new_val) \
+({ \
+	typeof(*(pptr)) new_in_old_out = new_val; \
+	asm volatile ( \
+		"xchgl %[val], %[p_ptr]\n" \
+		: [val] "+r" (new_in_old_out), \
+		  [p_ptr] "+m" (*pptr) \
+	); \
+	\
+	new_in_old_out; \
+})
+
+/*
+ * Issuing a xchg instruction always implies lock prefix semantics.
+ * Therefore, it is cheaper to use a cmpxchg without a lock prefix
+ * in a loop.
+ */
+#define _atomic_swap_local_impl(pptr, new_val) \
+({ \
+	typeof(*(pptr)) exp_val; \
+	typeof(*(pptr)) old_val; \
+	\
+	do { \
+		exp_val = *pptr; \
+		_atomic_cas_impl(pptr, exp_val, new_val, old_val, ""); \
+	} while (old_val != exp_val); \
+	\
+	old_val; \
+})
+
+
+/** Atomically sets *ptr to val and returns the previous value. */
+NO_TRACE static inline void * atomic_set_return_ptr(void **pptr, void *val)
+{
+	return _atomic_swap_impl(pptr, val);
 }
 
@@ -201,23 +224,32 @@
  * NOT atomic wrt to other cpus.
  */
-NO_TRACE static inline void * atomic_swap_ptr_local(void **pptr, void *new_val)
-{
-	/* 
-	 * Issuing a xchg instruction always implies lock prefix semantics.
-	 * Therefore, it is cheaper to use a cmpxchg without a lock prefix 
-	 * in a loop.
-	 */
-	void *exp_val;
-	void *old_val;
-	
-	do {
-		exp_val = *pptr;
-		old_val = atomic_cas_ptr_local(pptr, exp_val, new_val);
-	} while (old_val != exp_val);
-	
-	return old_val;
-}
+NO_TRACE static inline void * atomic_set_return_ptr_local(
+	void **pptr, void *new_val)
+{
+	return _atomic_swap_local_impl(pptr, new_val);
+}
+
+/** Atomically sets *ptr to val and returns the previous value. */
+NO_TRACE static inline native_t atomic_set_return_native_t(
+	native_t *p, native_t val)
+{
+	return _atomic_swap_impl(p, val);
+}
+
+/** Sets *ptr to new_val and returns the previous value. NOT smp safe.
+ *
+ * This function is only atomic wrt to local interrupts and it is
+ * NOT atomic wrt to other cpus.
+ */
+NO_TRACE static inline native_t atomic_set_return_native_t_local(
+	native_t *p, native_t new_val)
+{
+	return _atomic_swap_local_impl(p, new_val);
+}
+
 
-#undef _atomic_cas_ptr_impl
+#undef _atomic_cas_impl
+#undef _atomic_swap_impl
+#undef _atomic_swap_local_impl
 
 #endif
Index: kernel/test/atomic/atomic1.c
===================================================================
--- kernel/test/atomic/atomic1.c	(revision f1c7755075766af5ee998c23d3234987a8f09572)
+++ kernel/test/atomic/atomic1.c	(revision bc216a07a86e40d74455b7f4d6fb8f65e195443d)
@@ -72,8 +72,8 @@
 	
 	ptr = 0;
-	if (atomic_swap_ptr(&ptr, a_ptr) != 0) 
-		return "Failed atomic_swap_ptr()";
-	if (atomic_swap_ptr_local(&ptr, 0) != a_ptr || ptr != 0) 
-		return "Failed atomic_swap_ptr_local()";
+	if (atomic_set_return_ptr(&ptr, a_ptr) != 0) 
+		return "Failed atomic_set_return_ptr()";
+	if (atomic_set_return_ptr_local(&ptr, 0) != a_ptr || ptr != 0) 
+		return "Failed atomic_set_return_ptr_local()";
 	
 	return NULL;
