patch-2.2.8 linux/arch/sparc/mm/iommu.c
Next file: linux/arch/sparc64/kernel/cpu.c
Previous file: linux/arch/sparc/kernel/sys_sparc.c
Back to the patch index
Back to the overall index
- Lines: 86
- Date: Tue May 11 08:24:31 1999
- Orig file: v2.2.7/linux/arch/sparc/mm/iommu.c
- Orig date: Fri May 8 23:14:46 1998
diff -u --recursive --new-file v2.2.7/linux/arch/sparc/mm/iommu.c linux/arch/sparc/mm/iommu.c
@@ -1,4 +1,4 @@
-/* $Id: iommu.c,v 1.9 1998/04/15 14:58:37 jj Exp $
+/* $Id: iommu.c,v 1.10 1999/05/07 17:03:34 jj Exp $
* iommu.c: IOMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -51,8 +51,7 @@
unsigned long tmp;
struct iommu_struct *iommu;
struct linux_prom_registers iommu_promregs[PROMREG_MAX];
- int i, j, k, l, m;
- struct iommu_alloc { unsigned long addr; int next; } *ia;
+ int i;
iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
prom_getproperty(iommund, "reg", (void *) iommu_promregs,
@@ -97,62 +96,18 @@
ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t);
/* Stupid alignment constraints give me a headache.
- We want to get very large aligned memory area, larger than
- maximum what get_free_pages gives us (128K): we need
- 256K or 512K or 1M or 2M aligned to its size. */
- ia = (struct iommu_alloc *) kmalloc (sizeof(struct iommu_alloc) * 128, GFP_ATOMIC);
- for (i = 0; i < 128; i++) {
- ia[i].addr = 0;
- ia[i].next = -1;
- }
- k = 0;
- for (i = 0; i < 128; i++) {
- ia[i].addr = __get_free_pages(GFP_DMA, 5);
- if (ia[i].addr <= ia[k].addr) {
- if (i) {
- ia[i].next = k;
- k = i;
- }
- } else {
- for (m = k, l = ia[k].next; l != -1; m = l, l = ia[l].next)
- if (ia[i].addr <= ia[l].addr) {
- ia[i].next = l;
- ia[m].next = i;
- }
- if (l == -1)
- ia[m].next = i;
- }
- for (m = -1, j = 0, l = k; l != -1; l = ia[l].next) {
- if (!(ia[l].addr & (ptsize - 1))) {
- tmp = ia[l].addr;
- m = l;
- j = 128 * 1024;
- } else if (m != -1) {
- if (ia[l].addr != tmp + j)
- m = -1;
- else {
- j += 128 * 1024;
- if (j == ptsize) {
- break;
- }
- }
- }
- }
- if (l != -1)
+ We need 256K or 512K or 1M or 2M area aligned to
+ its size and current gfp will fortunately give
+ it to us. */
+ for (i = 6; i < 9; i++)
+ if ((1 << (i + PAGE_SHIFT)) == ptsize)
break;
- }
- if (i == 128) {
+ tmp = __get_free_pages(GFP_DMA, i);
+ if (!tmp) {
prom_printf("Could not allocate iopte of size 0x%08x\n", ptsize);
prom_halt();
}
- for (l = m, j = 0; j < ptsize; j += 128 * 1024, l = ia[l].next)
- ia[l].addr = 0;
- for (l = k; l != -1; l = ia[l].next)
- if (ia[l].addr)
- free_pages(ia[l].addr, 5);
- kfree (ia);
iommu->lowest = iommu->page_table = (iopte_t *)tmp;
-
/* Initialize new table. */
flush_cache_all();
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)