patch-2.3.23 linux/include/asm-sh/io.h

diff -u --recursive --new-file v2.3.22/linux/include/asm-sh/io.h linux/include/asm-sh/io.h
@@ -1,6 +1,18 @@
 #ifndef __ASM_SH_IO_H
 #define __ASM_SH_IO_H
-/* XXXXXXXXXXXXXXXXX */
+
+/*
+ * Convention:
+ *    read{b,w,l}/write{b,w,l} are for PCI,
+ *    while in{b,w,l}/out{b,w,l} are for ISA
+ * These may (will) be platform-specific functions.
+ *
+ * In addition, we have
+ *   ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH-specific I/O,
+ *   which are processor specific.
+ */
+
+#include <asm/cache.h>
 
 #define virt_to_bus virt_to_phys
 #define bus_to_virt phys_to_virt
@@ -20,7 +32,7 @@
        return *(volatile unsigned long*)addr;
 }
 
-extern __inline__ void writeb(unsigned short b, unsigned long addr)
+extern __inline__ void writeb(unsigned char b, unsigned long addr)
 {
        *(volatile unsigned char*)addr = b;
 }
@@ -75,6 +87,36 @@
        return writel(b,addr);
 }
 
+extern __inline__ unsigned long ctrl_inb(unsigned long addr)
+{
+       return *(volatile unsigned char*)addr;
+}
+
+extern __inline__ unsigned long ctrl_inw(unsigned long addr)
+{
+       return *(volatile unsigned short*)addr;
+}
+
+extern __inline__ unsigned long ctrl_inl(unsigned long addr)
+{
+       return *(volatile unsigned long*)addr;
+}
+
+extern __inline__ void ctrl_outb(unsigned char b, unsigned long addr)
+{
+       *(volatile unsigned char*)addr = b;
+}
+
+extern __inline__ void ctrl_outw(unsigned short b, unsigned long addr)
+{
+       *(volatile unsigned short*)addr = b;
+}
+
+extern __inline__ void ctrl_outl(unsigned int b, unsigned long addr)
+{
+       *(volatile unsigned long*)addr = b;
+}
+
 #define inb_p inb
 #define outb_p outb
 
@@ -93,7 +135,7 @@
 
 extern __inline__ void * phys_to_virt(unsigned long address)
 {
-	return (void *)KSEG0ADDR(address);
+	return (void *)P1SEGADDR(address);
 }
 
 extern void * ioremap(unsigned long phys_addr, unsigned long size);
@@ -115,7 +157,7 @@
  */
 extern __inline__ void * ioremap(unsigned long offset, unsigned long size)
 {
-	return (void *) KSEG1ADDR(offset);
+	return (void *) P2SEGADDR(offset);
 }
 
 /*
@@ -125,7 +167,7 @@
  */
 extern __inline__ void * ioremap_nocache (unsigned long offset, unsigned long size)
 {
-	return (void *) KSEG1ADDR(offset);
+	return (void *) P2SEGADDR(offset);
 }
 
 extern __inline__ void iounmap(void *addr)
@@ -148,11 +190,30 @@
 	return retval;
 }
 
-/* Nothing to do */
+/*
+ * The caches on some architectures aren't dma-coherent, so coherency
+ * has to be handled in software.  There are three types of operations
+ * that can be applied to dma buffers.
+ *
+ *  - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
+ *    writing the content of the caches back to memory, if necessary.
+ *    The function also invalidates the affected parts of the caches as
+ *    necessary before DMA transfers from outside to memory.
+ *  - dma_cache_inv(start, size) invalidates the affected parts of the
+ *    caches.  Dirty lines of the caches may be written back or simply
+ *    be discarded.  This operation is necessary before dma operations
+ *    to the memory.
+ *  - dma_cache_wback(start, size) writes back any dirty lines but does
+ *    not invalidate the cache.  This can be used before DMA reads from
+ *    memory.
+ */
 
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
+#define dma_cache_wback_inv(_start,_size) \
+    cache_flush_area((unsigned long)(_start),((unsigned long)(_start)+(_size)))
+#define dma_cache_inv(_start,_size) \
+    cache_purge_area((unsigned long)(_start),((unsigned long)(_start)+(_size)))
+#define dma_cache_wback(_start,_size) \
+    cache_wback_area((unsigned long)(_start),((unsigned long)(_start)+(_size)))
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_IO_H */
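
The comment at the top of the new header describes a convention rather
than a single implementation, so here is a minimal sketch of how it
plays out in a driver.  The device and its register addresses are
invented for illustration; only the accessor names come from the
header above.

    #include <asm/io.h>

    /* Hypothetical device registers, for illustration only. */
    #define MYDEV_ISA_STATUS  0x1f7        /* ISA port number */
    #define MYDEV_PCI_CSR     0xb8000000   /* address in a PCI memory window */
    #define MYDEV_SH_TCR      0xfffffeb0   /* SuperH on-chip register */

    static void mydev_touch(void)
    {
            unsigned char status = inb(MYDEV_ISA_STATUS);   /* ISA: in{b,w,l} */
            unsigned long csr    = readl(MYDEV_PCI_CSR);    /* PCI: read{b,w,l} */
            unsigned long tcr    = ctrl_inb(MYDEV_SH_TCR);  /* on-chip: ctrl_in* */

            if (status & 0x80)
                    outb(0x01, MYDEV_ISA_STATUS);           /* ISA: out{b,w,l} */
            writel(csr | 1, MYDEV_PCI_CSR);                 /* PCI: write{b,w,l} */
            ctrl_outb(tcr | 1, MYDEV_SH_TCR);               /* on-chip: ctrl_out* */
    }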
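
The KSEG0ADDR/KSEG1ADDR to P1SEGADDR/P2SEGADDR switch tracks the
SuperH names for its fixed address segments: P1 is the cached,
untranslated mirror of physical memory and P2 is the uncached one,
which is why phys_to_virt() now returns a cacheable P1 address while
ioremap() and ioremap_nocache() return an uncacheable P2 address.  As
a rough sketch, with the segment bases assumed from the SH
architecture manuals rather than taken from this patch, such macros
amount to:

    /* Assumed SuperH segment bases; the real macros live elsewhere in
     * the SH headers. */
    #define P1SEG 0x80000000UL              /* cached, untranslated   */
    #define P2SEG 0xa0000000UL              /* uncached, untranslated */

    /* Mask off any previous segment bits, then OR in the new base. */
    #define P1SEGADDR(a) ((((unsigned long)(a)) & 0x1fffffff) | P1SEG)
    #define P2SEGADDR(a) ((((unsigned long)(a)) & 0x1fffffff) | P2SEG)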
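
Finally, a minimal sketch of how a driver might use the new
dma_cache_* macros around a transfer on a machine with non-coherent
caches.  The buffer size and the commented-out start_dma_* calls are
hypothetical placeholders for real device programming.

    #include <asm/io.h>

    #define BUF_SIZE 4096
    static unsigned char dma_buf[BUF_SIZE];

    static void send_to_device(void)
    {
            /* The CPU filled dma_buf: write dirty lines back to RAM
             * so the device reads current data. */
            dma_cache_wback(dma_buf, BUF_SIZE);
            /* start_dma_read(virt_to_bus(dma_buf), BUF_SIZE); */
    }

    static void receive_from_device(void)
    {
            /* The device will write dma_buf: invalidate any cached
             * copies so the CPU re-reads from RAM afterwards. */
            dma_cache_inv(dma_buf, BUF_SIZE);
            /* start_dma_write(virt_to_bus(dma_buf), BUF_SIZE); */
    }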
