patch-2.3.30 linux/include/asm-i386/system.h

diff -u --recursive --new-file v2.3.29/linux/include/asm-i386/system.h linux/include/asm-i386/system.h
@@ -122,11 +122,61 @@
 
 #define nop() __asm__ __volatile__ ("nop")
 
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
 #define tas(ptr) (xchg((ptr),1))
 
 struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+/*
+ * The semantics of CMPXCHG8B are a bit strange; this is why
+ * there is a loop and why the loads of %%eax and %%edx have
+ * to be inside it. This inlines well in most cases; the
+ * cached cost is around 38 cycles. (In the future we might
+ * want to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but
+ * that might carry an implicit FPU save as a cost, so it is
+ * not clear which path to take.)
+ */
+extern inline void __set_64bit (unsigned long long * ptr,
+		unsigned int low, unsigned int high)
+{
+__asm__ __volatile__ (
+	"1:	movl (%0), %%eax;
+		movl 4(%0), %%edx;
+		cmpxchg8b (%0);
+		jnz 1b"
+	::		"D"(ptr),
+			"b"(low),
+			"c"(high)
+	:
+			"ax","dx","memory");
+}
+
+extern inline void __set_64bit_constant (unsigned long long *ptr,
+						 unsigned long long value)
+{
+	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+}
+#define ll_low(x)	*(((unsigned int*)&(x))+0)
+#define ll_high(x)	*(((unsigned int*)&(x))+1)
+
+extern inline void __set_64bit_var (unsigned long long *ptr,
+			 unsigned long long value)
+{
+	__set_64bit(ptr,ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit_constant(ptr, value) : \
+ __set_64bit_var(ptr, value) )
+
+#define _set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
 
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway

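The retry loop in __set_64bit() is the heart of the technique: CMPXCHG8B
stores ECX:EBX into the quadword only if EDX:EAX still matches what is in
memory, so the routine keeps reloading EDX:EAX and retrying until no other
writer slips in between the load and the exchange. Below is a minimal
stand-alone sketch of the same loop, assuming a 32-bit x86 target (build
with gcc -m32); the function name, the lock prefix, and the demo in main()
are user-space additions and are not part of the patch.

#include <stdio.h>

/* Same CMPXCHG8B retry loop as __set_64bit() above, in user space.
 * EDX:EAX must hold the current memory contents for the compare to
 * succeed; ECX:EBX carries the new value (high:low). The lock
 * prefix is added here for SMP safety outside the kernel. */
static void set_64bit_sketch(unsigned long long *ptr,
			     unsigned int low, unsigned int high)
{
	__asm__ __volatile__(
		"1:	movl (%0), %%eax\n"
		"	movl 4(%0), %%edx\n"
		"	lock; cmpxchg8b (%0)\n"
		"	jnz 1b"
		: /* no outputs; memory is modified through %0 */
		: "D" (ptr), "b" (low), "c" (high)
		: "eax", "edx", "memory", "cc");
}

int main(void)
{
	unsigned long long q = 0;

	set_64bit_sketch(&q, 0xdeadbeef, 0x00000001);
	printf("%llx\n", q);	/* prints 1deadbeef */
	return 0;
}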
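The set_64bit() macro itself is a compile-time dispatch:
__builtin_constant_p(value) folds to 1 for a literal argument, so the >>32
split is done by the compiler and the two halves arrive as immediates,
while run-time values take the ll_low()/ll_high() path, which simply reads
the two 32-bit words of the little-endian in-memory layout instead of
shifting a 64-bit value on a 32-bit CPU. A hypothetical caller (the names
below are illustrative, not from the patch):

unsigned long long shared_count;	/* illustrative variable */

void store_constant(void)
{
	/* Literal argument: __builtin_constant_p() is 1, the
	 * __set_64bit_constant() path is taken, and low/high
	 * become the immediates 0x0 and 0x1. */
	set_64bit(&shared_count, 0x100000000ULL);
}

void store_variable(unsigned long long v)
{
	/* Run-time argument: the __set_64bit_var() path reads
	 * ll_low(v) and ll_high(v) directly. */
	set_64bit(&shared_count, v);
}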
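For comparison, the effect of set_64bit(), an atomic 64-bit store on a
32-bit CPU, is what today's compilers generate for the __atomic_store_n()
builtin: on i386 with cmpxchg8b available (-march=pentium or later), GCC
emits the same kind of loop, or a 64-bit FPU/SSE move, much as the comment
above anticipates. A sketch using the modern GCC/Clang builtin, which a
2.3-era kernel did not have:

#include <stdint.h>

/* Modern equivalent of set_64bit(): the compiler selects the atomic
 * 64-bit store sequence for the target, a cmpxchg8b loop on plain
 * i386. The function name is illustrative. */
static inline void set_64bit_atomic(uint64_t *ptr, uint64_t value)
{
	__atomic_store_n(ptr, value, __ATOMIC_SEQ_CST);
}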