patch-1.3.48 linux/include/asm-mips/system.h
Next file: linux/include/asm-mips/termbits.h
Previous file: linux/include/asm-mips/sysmips.h
Back to the patch index
Back to the overall index
- Lines: 290
- Date: Wed Dec 13 12:39:47 1995
- Orig file: v1.3.47/linux/include/asm-mips/system.h
- Orig date: Fri Jan 13 20:37:06 1995
diff -u --recursive --new-file v1.3.47/linux/include/asm-mips/system.h linux/include/asm-mips/system.h
@@ -5,81 +5,108 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994 by Ralf Baechle
+ * Copyright (C) 1994, 1995 by Ralf Baechle
*/
-
#ifndef __ASM_MIPS_SYSTEM_H
#define __ASM_MIPS_SYSTEM_H
-#include <linux/types.h>
-#include <asm/segment.h>
-#include <asm/mipsregs.h>
-#include <asm/mipsconfig.h>
+#include <linux/kernel.h>
-/*
- * (Currently empty to support debugging)
- */
-#define move_to_user_mode() \
-__asm__ __volatile__ ( \
+#if defined (__R4000__)
+#define sti() \
+__asm__ __volatile__( \
".set\tnoreorder\n\t" \
".set\tnoat\n\t" \
- "la\t$1,1f\n\t" \
- "subu\t$1,$1,%0\n\t" \
- "jr\t$1\n\t" \
"mfc0\t$1,$12\n\t" \
- "1:ori\t$1,0x00\n\t" \
+ "ori\t$1,0x1f\n\t" \
+ "xori\t$1,0x1e\n\t" \
"mtc0\t$1,$12\n\t" \
- "subu\t$29,%0\n\t" \
".set\tat\n\t" \
".set\treorder" \
: /* no outputs */ \
- : "r" (KERNELBASE));
+ : /* no inputs */ \
+ : "$1")
-#if defined (__R4000__)
-#define sti() \
+#define cli() \
__asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
".set\tnoat\n\t" \
- "mfc0\t$1,"STR(CP0_STATUS)"\n\t" \
- "ori\t$1,$1,0x1f\n\t" \
- "xori\t$1,$1,0x1e\n\t" \
- "mtc0\t$1,"STR(CP0_STATUS)"\n\t" \
- ".set\tat" \
+ "mfc0\t$1,$12\n\t" \
+ "ori\t$1,1\n\t" \
+ "xori\t$1,1\n\t" \
+ "mtc0\t$1,$12\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
: /* no outputs */ \
: /* no inputs */ \
: "$1")
-#define cli() \
+#else /* !defined (__R4000__) */
+/*
+ * Untested goodies for the R3000 based DECstation et al.
+ */
+#define sti() \
__asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
".set\tnoat\n\t" \
- "mfc0\t$1,"STR(CP0_STATUS)"\n\t" \
- "ori\t$1,$1,1\n\t" \
- "xori\t$1,$1,1\n\t" \
- "mtc0\t$1,"STR(CP0_STATUS)"\n\t" \
- ".set\tat" \
+ "mfc0\t$1,$12\n\t" \
+ "ori\t$1,0x01\n\t" \
+ "mtc0\t$1,$12\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
+ : /* no outputs */ \
+ : /* no inputs */ \
+ : "$1")
+
+#define cli() \
+__asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ ".set\tnoat\n\t" \
+ "mfc0\t$1,$12\n\t" \
+ "ori\t$1,1\n\t" \
+ "xori\t$1,1\n\t" \
+ "mtc0\t$1,$12\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
: /* no outputs */ \
: /* no inputs */ \
: "$1")
-#else /* !defined (__R4000__) */
-/*
- * Cheese - I don't have a R3000 manual
- */
-#error "Yikes - write cli()/sti() macros for R3000!"
#endif /* !defined (__R4000__) */
#define nop() __asm__ __volatile__ ("nop")
#define save_flags(x) \
__asm__ __volatile__( \
- "mfc0\t%0,$12" \
+ ".set\tnoreorder\n\t" \
+ "mfc0\t%0,$12\n\t" \
+ ".set\treorder" \
: "=r" (x)) \
#define restore_flags(x) \
__asm__ __volatile__( \
- "mtc0\t%0,$12" \
+ ".set\tnoreorder\n\t" \
+ "mtc0\t%0,$12\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".set\treorder" \
: /* no output */ \
: "r" (x)) \
-extern inline unsigned long xchg_u8(char * m, unsigned long val)
+#define sync_mem() \
+__asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ "sync\n\t" \
+ ".set\treorder") \
+
+/*
+ * The 8 and 16 bit variants have to disable interrupts temporarily.
+ * Both are currently unused.
+ */
+extern inline unsigned long xchg_u8(volatile char * m, unsigned long val)
{
unsigned long flags, retval;
@@ -92,7 +119,7 @@
return retval;
}
-extern inline unsigned long xchg_u16(short * m, unsigned long val)
+extern inline unsigned long xchg_u16(volatile short * m, unsigned long val)
{
unsigned long flags, retval;
@@ -107,9 +134,9 @@
/*
* For 32 and 64 bit operands we can take advantage of ll and sc.
- * The later isn't currently being used.
+ * FIXME: This doesn't work for R3000 machines.
*/
-extern inline unsigned long xchg_u32(int * m, unsigned long val)
+extern inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
unsigned long dummy;
@@ -119,17 +146,20 @@
"ll\t%0,(%1)\n"
"1:\tmove\t$1,%2\n\t"
"sc\t$1,(%1)\n\t"
- "beqzl\t%3,1b\n\t"
+ "beqzl\t$1,1b\n\t"
"ll\t%0,(%1)\n\t"
".set\tat\n\t"
".set\treorder"
: "=r" (val), "=r" (m), "=r" (dummy)
- : "1" (*m), "2" (val));
+ : "1" (m), "2" (val));
return val;
}
-extern inline unsigned long xchg_u64(long * m, unsigned long val)
+/*
+ * Only used for 64 bit kernel.
+ */
+extern inline unsigned long xchg_u64(volatile long * m, unsigned long val)
{
unsigned long dummy;
@@ -139,65 +169,52 @@
"lld\t%0,(%1)\n"
"1:\tmove\t$1,%2\n\t"
"scd\t$1,(%1)\n\t"
- "beqzl\t%3,1b\n\t"
+ "beqzl\t$1,1b\n\t"
"ll\t%0,(%1)\n\t"
".set\tat\n\t"
".set\treorder"
: "=r" (val), "=r" (m), "=r" (dummy)
- : "1" (*m), "2" (val));
+ : "1" (m), "2" (val));
return val;
}
-#if 0
-extern inline int tas(char * m)
-{
- return xchg_u8(m,1);
-}
-#endif
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define tas(ptr) (xchg((ptr),1))
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ *
+ * This only works if the compiler isn't horribly bad at optimizing.
+ * gcc-2.5.8 reportedly can't handle this, but I define that one to
+ * be dead anyway.
+ */
+extern void __xchg_called_with_bad_pointer(void);
-extern inline void * xchg_ptr(void * m, void * val)
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
- return (void *) xchg_u32(m, (unsigned long) val);
+ switch (size) {
+ case 1:
+ return xchg_u8(ptr, x);
+ case 2:
+ return xchg_u16(ptr, x);
+ case 4:
+ return xchg_u32(ptr, x);
+ case 8:
+ return xchg_u64(ptr, x);
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
}
-extern ulong IRQ_vectors[256];
-extern ulong exception_handlers[256];
+extern unsigned long IRQ_vectors[16];
+extern unsigned long exception_handlers[32];
-#define set_intr_gate(n,addr) \
- IRQ_vectors[n] = (ulong) (addr)
+#define set_int_vector(n,addr) \
+ IRQ_vectors[n] = (unsigned long) (addr)
#define set_except_vector(n,addr) \
- exception_handlers[n] = (ulong) (addr)
-
-/*
- * atomic exchange of one word
- */
-#if defined (__R4000__)
-#define atomic_exchange(m,r) \
- __asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- "ll\t%0,(%2)\n" \
- "1:\tmove\t$8,%1\n\t" \
- "sc\t$8,(%2)\n\t" \
- "beql\t$0,$8,1b\n\t" \
- "ll\t%0,(%2)\n\t" \
- ".set\treorder" \
- : "=r" (r) \
- : "r" (r), "r" (&(m)) \
- : "$8","memory")
-#else
-#define atomic_exchange(m,r) \
- { \
- unsigned long flags; \
- unsigned long tmp; \
- save_flags(flags); \
- cli(); \
- tmp = (m); \
- (m) = (r); \
- (r) = tmp; \
- restore_flags(flags); \
- }
-#endif
+ exception_handlers[n] = (unsigned long) (addr)
#endif /* __ASM_MIPS_SYSTEM_H */
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov
with Sam's (original) version of this