patch-2.4.20 linux-2.4.20/arch/ppc/kernel/idle.c

diff -urN linux-2.4.19/arch/ppc/kernel/idle.c linux-2.4.20/arch/ppc/kernel/idle.c
@@ -1,5 +1,5 @@
 /*
- * BK Id: SCCS/s.idle.c 1.20 03/19/02 15:04:39 benh
+ * BK Id: %F% %I% %G% %U% %#%
  */
 /*
  * Idle daemon for PowerPC.  Idle daemon will handle any action
@@ -33,23 +33,19 @@
 #include <asm/cache.h>
 #include <asm/cputable.h>
 
-void zero_paged(void);
-void power_save(void);
+unsigned long zero_paged_on;
+unsigned long powersave_nap;
+unsigned long powersave_lowspeed;
 
-unsigned long zero_paged_on = 0;
-unsigned long powersave_nap = 0;
-
-unsigned long *zero_cache;    /* head linked list of pre-zero'd pages */
-atomic_t zerototal;      /* # pages zero'd over time */
-atomic_t zeropage_hits;  /* # zero'd pages request that we've done */
-atomic_t zero_sz;	      /* # currently pre-zero'd pages */
-atomic_t zeropage_calls; /* # zero'd pages request that've been made */
+#ifdef CONFIG_6xx
+extern void power_save_6xx(void);
+#endif
 
 int idled(void)
 {
 	int do_power_save = 0;
 
-	/* Check if CPU can powersave */
+	/* Check if CPU can powersave (get rid of that soon!) */
 	if (cur_cpu_spec[smp_processor_id()]->cpu_features &
 		(CPU_FTR_CAN_DOZE | CPU_FTR_CAN_NAP))
 		do_power_save = 1;
@@ -73,8 +69,10 @@
 			}
 		}
 #endif
+#ifdef CONFIG_6xx
 		if (do_power_save && !current->need_resched)
-			power_save();
+			power_save_6xx();
+#endif /* CONFIG_6xx */			
 
 		if (current->need_resched) {
 			schedule();
@@ -93,191 +91,3 @@
 	idled();
 	return 0; 
 }
-
-#if 0
-/*
- * Returns a pre-zero'd page from the list otherwise returns
- * NULL.
- */
-unsigned long get_zero_page_fast(void)
-{
-	unsigned long page = 0;
-
-	atomic_inc(&zero_cache_calls);
-	if ( zero_quicklist )
-	{
-		/* atomically remove this page from the list */
-		register unsigned long tmp;
-		asm (	"101:lwarx  %1,0,%3\n"  /* reserve zero_cache */
-			"    lwz    %0,0(%1)\n" /* get next -- new zero_cache */
-			"    stwcx. %0,0,%3\n"  /* update zero_cache */
-			"    bne-   101b\n"     /* if lost reservation try again */
-			: "=&r" (tmp), "=&r" (page), "+m" (zero_cache)
-			: "r" (&zero_quicklist)
-			: "cc" );
-#ifdef CONFIG_SMP
-		/* if another cpu beat us above this can happen -- Cort */
-		if ( page == 0 ) 
-			return 0;
-#endif /* CONFIG_SMP */		
-		/* we can update zerocount after the fact since it is not
-		 * used for anything but control of a loop which doesn't
-		 * matter since it won't affect anything if it zeros one
-		 * less page -- Cort
-		 */
-		atomic_inc((atomic_t *)&zero_cache_hits);
-		atomic_dec((atomic_t *)&zero_cache_sz);
-		
-		/* zero out the pointer to next in the page */
-		*(unsigned long *)page = 0;
-		return page;
-	}
-	return 0;
-}
-
-/*
- * Experimental stuff to zero out pages in the idle task
- * to speed up get_free_pages(). Zero's out pages until
- * we've reached the limit of zero'd pages.  We handle
- * reschedule()'s in here so when we return we know we've
- * zero'd all we need to for now.
- */
-int zero_cache_water[2] = { 25, 96 }; /* high and low water marks for zero cache */
-void zero_paged(void)
-{
-	unsigned long pageptr = 0;	/* current page being zero'd */
-	unsigned long bytecount = 0;  
-        register unsigned long tmp;
-	pte_t *pte;
-
-	if ( atomic_read(&zero_cache_sz) >= zero_cache_water[0] )
-		return;
-	while ( (atomic_read(&zero_cache_sz) < zero_cache_water[1]) && (!current->need_resched) )
-	{
-		/*
-		 * Mark a page as reserved so we can mess with it
-		 * If we're interrupted we keep this page and our place in it
-		 * since we validly hold it and it's reserved for us.
-		 */
-		pageptr = __get_free_pages(GFP_ATOMIC, 0);
-		if ( !pageptr )
-			return;
-		
-		if ( current->need_resched )
-			schedule();
-		
-		/*
-		 * Make the page no cache so we don't blow our cache with 0's
-		 */
-		pte = find_pte(&init_mm, pageptr);
-		if ( !pte )
-		{
-			printk("pte NULL in zero_paged()\n");
-			return;
-		}
-		
-		pte_uncache(*pte);
-		flush_tlb_page(find_vma(&init_mm,pageptr),pageptr);
-		/*
-		 * Important here to not take time away from real processes.
-		 */
-		for ( bytecount = 0; bytecount < PAGE_SIZE ; bytecount += 4 )
-		{
-			if ( current->need_resched )
-				schedule();
-			*(unsigned long *)(bytecount + pageptr) = 0;
-		}
-		
-		/*
-		 * If we finished zero-ing out a page add this page to
-		 * the zero_cache atomically -- we can't use
-		 * down/up since we can't sleep in idle.
-		 * Disabling interrupts is also a bad idea since we would
-		 * steal time away from real processes.
-		 * We can also have several zero_paged's running
-		 * on different processors so we can't interfere with them.
-		 * So we update the list atomically without locking it.
-		 * -- Cort
-		 */
-		
-		/* turn cache on for this page */
-		pte_cache(*pte);
-		flush_tlb_page(find_vma(&init_mm,pageptr),pageptr);
-		/* atomically add this page to the list */
-		asm (	"101:lwarx  %0,0,%2\n"  /* reserve zero_cache */
-			"    stw    %0,0(%3)\n" /* update *pageptr */
-#ifdef CONFIG_SMP
-			"    sync\n"            /* let store settle */
-#endif			
-			"    stwcx. %3,0,%2\n"  /* update zero_cache in mem */
-			"    bne-   101b\n"     /* if lost reservation try again */
-			: "=&r" (tmp), "+m" (zero_quicklist)
-			: "r" (&zero_quicklist), "r" (pageptr)
-			: "cc" );
-		/*
-		 * This variable is used in the above loop and nowhere
-		 * else so the worst that could happen is we would
-		 * zero out one more or one less page than we want
-		 * per processor on the machine.  This is because
-		 * we could add our page to the list but not have
-		 * zerocount updated yet when another processor
-		 * reads it.  -- Cort
-		 */
-		atomic_inc((atomic_t *)&zero_cache_sz);
-		atomic_inc((atomic_t *)&zero_cache_total);
-	}
-}
-#endif /* 0 */
-
-#define DSSALL		.long	(0x1f<<26)+(0x10<<21)+(0x336<<1)
-
-void power_save(void)
-{
-	unsigned long hid0;
-	int nap = powersave_nap;
-	
-	/* 7450 has no DOZE mode mode, we return if powersave_nap
-	 * isn't enabled
-	 */
-	if (!(nap || (cur_cpu_spec[smp_processor_id()]->cpu_features & CPU_FTR_CAN_DOZE)))
-		return;
-	/*
-	 * Disable interrupts to prevent a lost wakeup
-	 * when going to sleep.  This is necessary even with
-	 * RTLinux since we are not guaranteed an interrupt
-	 * didn't come in and is waiting for a __sti() before
-	 * emulating one.  This way, we really do hard disable.
-	 * 
-	 * We assume that we're sti-ed when we come in here.  We
-	 * are in the idle loop so if we're cli-ed then it's a bug
-	 * anyway.
-	 *  -- Cort
-	 */
-	_nmask_and_or_msr(MSR_EE, 0);
-	if (!current->need_resched)
-	{
-		__asm__ __volatile__("mfspr %0,1008" : "=r" (hid0) :);
-		hid0 &= ~(HID0_NAP | HID0_SLEEP | HID0_DOZE);
-		hid0 |= (powersave_nap? HID0_NAP: HID0_DOZE) | HID0_DPM;
-		__asm__ __volatile__("mtspr 1008,%0" : : "r" (hid0));
-		/* Flush pending data streams, consider this instruction
-		 * exist on all altivec capable CPUs
-		 */
-		__asm__ __volatile__(
-			"98:	" stringify(DSSALL) "\n"
-			"	sync\n"
-			"99:\n"
-			".section __ftr_fixup,\"a\"\n"
-			"	.long %0\n"
-			"	.long %1\n"
-			"	.long 98b\n"
-			"	.long 99b\n"
-			".previous" : : "i" (CPU_FTR_ALTIVEC), "i" (CPU_FTR_ALTIVEC));
-		
-		/* set the POW bit in the MSR, and enable interrupts
-		 * so we wake up sometime! */
-		_nmask_and_or_msr(0, MSR_POW | MSR_EE);
-	}
-	_nmask_and_or_msr(0, MSR_EE);
-}
-
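A note on the zero_paged()/get_zero_page_fast() code removed above: it keeps a lock-free LIFO of pre-zeroed pages, storing each page's "next" link in the page's first word and updating the list head with lwarx/stwcx. reservations, because the idle task may neither sleep nor disable interrupts while it works. The sketch below expresses the same push/pop pattern with C11 atomics; it is illustrative only (the names zp_push and zp_pop and the use of <stdatomic.h> are not part of the patch, and the ABA hazard that the PowerPC reservation scheme sidesteps is ignored here).

#include <stdatomic.h>

static _Atomic(unsigned long) zero_quicklist;	/* head of the pre-zeroed page list */

/* push: link the page to the current head, then publish it as the new head */
static void zp_push(unsigned long page)
{
	unsigned long old = atomic_load(&zero_quicklist);
	do {
		*(unsigned long *)page = old;
	} while (!atomic_compare_exchange_weak(&zero_quicklist, &old, page));
}

/* pop: advance the head to the page's stored "next" link, retrying on races */
static unsigned long zp_pop(void)
{
	unsigned long page = atomic_load(&zero_quicklist);
	while (page &&
	       !atomic_compare_exchange_weak(&zero_quicklist, &page,
					     *(unsigned long *)page))
		;	/* another CPU won the race; page was reloaded, try again */
	if (page)
		*(unsigned long *)page = 0;	/* clear the embedded link, as the original does */
	return page;
}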

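The DSSALL sequence in the removed power_save() is wrapped in a __ftr_fixup record so that it only runs on AltiVec-capable CPUs: the four .long directives emit a feature mask, the value that (cpu_features & mask) must equal, and the start/end labels (98:/99:) of the instruction range, and the boot-time CPU-feature fixup pass overwrites that range with nops on CPUs that do not match. A hypothetical C view of one record, with descriptive field names that do not appear in the kernel source:

struct ftr_fixup_entry {
	unsigned long mask;	/* feature bits to test, e.g. CPU_FTR_ALTIVEC */
	unsigned long value;	/* required value of (cpu_features & mask) */
	unsigned long start;	/* address of label 98: (first instruction kept or nopped) */
	unsigned long end;	/* address of label 99: (one past the patched range) */
};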