patch-2.4.20 linux-2.4.20/include/asm-mips64/mips64_cache.h

diff -urN linux-2.4.19/include/asm-mips64/mips64_cache.h linux-2.4.20/include/asm-mips64/mips64_cache.h
@@ -0,0 +1,305 @@
+/*
+ * mips64_cache.h
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
+ *
+ * ########################################################################
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ * Inline assembly cache operations.
+ *
+ * This file is the original r4cache.c file with modifications that make
+ * the cache handling more generic.
+ *
+ * FIXME: Handle split L2 caches.
+ *
+ */
+#ifndef _MIPS_MIPS64_CACHE_H
+#define _MIPS_MIPS64_CACHE_H
+
+#include <asm/asm.h>
+#include <asm/cacheops.h>
+
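+/*
+ * The including code is expected to provide icache_size, dcache_size,
+ * scache_size, the ic_lsize/dc_lsize/sc_lsize line sizes and the
+ * mips_cpu descriptor used below.
+ *
+ * Index-type cache ops select a line by address bits rather than by a
+ * tag match, so these indexed helpers repeat the op once per way,
+ * stepping the address by the way size (cache size / number of ways).
+ */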
+static inline void flush_icache_line_indexed(unsigned long addr)
+{
+	unsigned long waystep = icache_size/mips_cpu.icache.ways;
+	unsigned int way;
+
+	for (way = 0; way < mips_cpu.icache.ways; way++)
+	{
+		__asm__ __volatile__(
+			".set noreorder\n\t"
+			"cache %1, (%0)\n\t"
+			".set reorder"
+			:
+			: "r" (addr),
+			"i" (Index_Invalidate_I));
+
+		addr += waystep;
+	}
+}
+
+static inline void flush_dcache_line_indexed(unsigned long addr)
+{
+	unsigned long waystep = dcache_size/mips_cpu.dcache.ways;
+	unsigned int way;
+
+	for (way = 0; way < mips_cpu.dcache.ways; way++)
+	{
+		__asm__ __volatile__(
+			".set noreorder\n\t"
+			"cache %1, (%0)\n\t"
+			".set reorder"
+			:
+			: "r" (addr),
+			"i" (Index_Writeback_Inv_D));
+
+		addr += waystep;
+	}
+}
+
+static inline void flush_scache_line_indexed(unsigned long addr)
+{
+	unsigned long waystep = scache_size/mips_cpu.scache.ways;
+	unsigned int way;
+
+	for (way = 0; way < mips_cpu.scache.ways; way++)
+	{
+		__asm__ __volatile__(
+			".set noreorder\n\t"
+			"cache %1, (%0)\n\t"
+			".set reorder"
+			:
+			: "r" (addr),
+			"i" (Index_Writeback_Inv_SD));
+
+		addr += waystep;
+	}
+}
+
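+/*
+ * Hit-type ops probe the cache for the given virtual address and only
+ * act on the line if it is actually present.
+ */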
+static inline void flush_icache_line(unsigned long addr)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"cache %1, (%0)\n\t"
+		".set reorder"
+		:
+		: "r" (addr),
+		  "i" (Hit_Invalidate_I));
+}
+
+static inline void flush_dcache_line(unsigned long addr)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"cache %1, (%0)\n\t"
+		".set reorder"
+		:
+		: "r" (addr),
+		  "i" (Hit_Writeback_Inv_D));
+}
+
+static inline void invalidate_dcache_line(unsigned long addr)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"cache %1, (%0)\n\t"
+		".set reorder"
+		:
+		: "r" (addr),
+		  "i" (Hit_Invalidate_D));
+}
+
+static inline void invalidate_scache_line(unsigned long addr)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"cache %1, (%0)\n\t"
+		".set reorder"
+		:
+		: "r" (addr),
+		  "i" (Hit_Invalidate_SD));
+}
+
+static inline void flush_scache_line(unsigned long addr)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"cache %1, (%0)\n\t"
+		".set reorder"
+		:
+		: "r" (addr),
+		  "i" (Hit_Writeback_Inv_SD));
+}
+
+/*
+ * The next two are for badland addresses like signal trampolines:
+ * the cache op may take a fault on an unmapped user address, so each
+ * one carries an __ex_table entry that lets the fault handler resume
+ * at the label following the instruction instead of oopsing.
+ */
+static inline void protected_flush_icache_line(unsigned long addr)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"1:\tcache %1,(%0)\n"
+		"2:\t.set reorder\n\t"
+		".section\t__ex_table,\"a\"\n\t"
+		".dword\t1b,2b\n\t"
+		".previous"
+		:
+		: "r" (addr), "i" (Hit_Invalidate_I));
+}
+
+static inline void protected_writeback_dcache_line(unsigned long addr)
+{
+	__asm__ __volatile__(
+		".set noreorder\n\t"
+		"1:\tcache %1,(%0)\n"
+		"2:\t.set reorder\n\t"
+		".section\t__ex_table,\"a\"\n\t"
+		".dword\t1b,2b\n\t"
+		".previous"
+		:
+		: "r" (addr), "i" (Hit_Writeback_D));
+}
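+
+/*
+ * A minimal usage sketch (hypothetical caller, not part of this
+ * patch): write a user signal trampoline back from the D-cache and
+ * invalidate it from the I-cache one line at a time, tolerating
+ * unmapped lines; assumes dc_lsize is no larger than the I-cache
+ * line size:
+ *
+ *	unsigned long a = addr & ~(dc_lsize - 1);
+ *
+ *	while (a < addr + len) {
+ *		protected_writeback_dcache_line(a);
+ *		protected_flush_icache_line(a);
+ *		a += dc_lsize;
+ *	}
+ */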
+
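+/*
+ * Emit one "cache" instruction for the given op at the given base
+ * address; the blast_* routines below step this across whole ranges.
+ */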
+#define cache_unroll(base,op)			\
+	__asm__ __volatile__("			\
+		.set noreorder;			\
+		cache %1, (%0);			\
+		.set reorder"			\
+		:				\
+		: "r" (base),			\
+		  "i" (op));
+
+
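+/*
+ * The blast_* routines walk a range one cache line at a time: the
+ * whole-cache variants use index ops on KSEG0 addresses, the page
+ * variants use hit or index ops over a PAGE_SIZE window.
+ */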
+static inline void blast_dcache(void)
+{
+	unsigned long start = KSEG0;
+	unsigned long end = (start + dcache_size);
+
+	while(start < end) {
+		cache_unroll(start,Index_Writeback_Inv_D);
+		start += dc_lsize;
+	}
+}
+
+static inline void blast_dcache_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = (start + PAGE_SIZE);
+
+	while(start < end) {
+		cache_unroll(start,Hit_Writeback_Inv_D);
+		start += dc_lsize;
+	}
+}
+
+static inline void blast_dcache_page_indexed(unsigned long page)
+{
+	unsigned long start;
+	unsigned long end;
+	unsigned long waystep = dcache_size/mips_cpu.dcache.ways;
+	unsigned int way;
+
+	for (way = 0; way < mips_cpu.dcache.ways; way++) {
+		/* each way gets its own PAGE_SIZE window */
+		start = page + way*waystep;
+		end = start + PAGE_SIZE;
+		while(start < end) {
+			cache_unroll(start,Index_Writeback_Inv_D);
+			start += dc_lsize;
+		}
+	}
+}
+
+static inline void blast_icache(void)
+{
+	unsigned long start = KSEG0;
+	unsigned long end = (start + icache_size);
+
+	while(start < end) {
+		cache_unroll(start,Index_Invalidate_I);
+		start += ic_lsize;
+	}
+}
+
+static inline void blast_icache_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = (start + PAGE_SIZE);
+
+	while(start < end) {
+		cache_unroll(start,Hit_Invalidate_I);
+		start += ic_lsize;
+	}
+}
+
+static inline void blast_icache_page_indexed(unsigned long page)
+{
+	unsigned long start;
+	unsigned long end;
+	unsigned long waystep = icache_size/mips_cpu.icache.ways;
+	unsigned int way;
+
+	for (way = 0; way < mips_cpu.icache.ways; way++) {
+		/* each way gets its own PAGE_SIZE window */
+		start = page + way*waystep;
+		end = start + PAGE_SIZE;
+		while(start < end) {
+			cache_unroll(start,Index_Invalidate_I);
+			start += ic_lsize;
+		}
+	}
+}
+
+static inline void blast_scache(void)
+{
+	unsigned long start = KSEG0;
+	unsigned long end = KSEG0 + scache_size;
+
+	while(start < end) {
+		cache_unroll(start,Index_Writeback_Inv_SD);
+		start += sc_lsize;
+	}
+}
+
+static inline void blast_scache_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = page + PAGE_SIZE;
+
+	while(start < end) {
+		cache_unroll(start,Hit_Writeback_Inv_SD);
+		start += sc_lsize;
+	}
+}
+
+static inline void blast_scache_page_indexed(unsigned long page)
+{
+	unsigned long start;
+	unsigned long end;
+	unsigned long waystep = scache_size/mips_cpu.scache.ways;
+	unsigned int way;
+
+	for (way = 0; way < mips_cpu.scache.ways; way++) {
+		/* each way gets its own PAGE_SIZE window */
+		start = page + way*waystep;
+		end = start + PAGE_SIZE;
+		while(start < end) {
+			cache_unroll(start,Index_Writeback_Inv_SD);
+			start += sc_lsize;
+		}
+	}
+}
+
+#endif /* !(_MIPS_MIPS64_CACHE_H) */
+
+
