patch-2.3.48 linux/arch/mips/kernel/process.c

diff -u --recursive --new-file v2.3.47/linux/arch/mips/kernel/process.c linux/arch/mips/kernel/process.c
@@ -1,10 +1,11 @@
-/* $Id: process.c,v 1.12 1999/06/17 13:25:46 ralf Exp $
+/* $Id: process.c,v 1.18 2000/01/29 01:41:59 ralf Exp $
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1994 - 1998 by Ralf Baechle and others.
+ * Copyright (C) 1994 - 1999 by Ralf Baechle and others.
+ * Copyright (C) 1999 Silicon Graphics, Inc.
  */
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -28,23 +29,30 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/elf.h>
+#include <asm/isadep.h>
 
-struct task_struct *last_task_used_math = NULL;
-
-asmlinkage void ret_from_sys_call(void);
-
-/*
- * Do necessary setup to start up a newly executed thread.
- */
-void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
+void cpu_idle(void)
 {
-	/* New thread looses kernel privileges. */
-	regs->cp0_status = (regs->cp0_status & ~(ST0_CU0|ST0_KSU)) | KSU_USER;
-	regs->cp0_epc = pc;
-	regs->regs[29] = sp;
-	current->tss.current_ds = USER_DS;
+	/* endless idle loop with no priority at all */
+	current->priority = 0;
+	current->counter = -100;
+	init_idle();
+
+	while (1) {
+		while (!current->need_resched)
+			if (wait_available)
+				__asm__(".set\tmips3\n\t"
+					"wait\n\t"
+					".set\tmips0");
+		schedule();
+		check_pgt_cache();
+	}
 }
 
+struct task_struct *last_task_used_math = NULL;
+
+asmlinkage void ret_from_fork(void);
+
 void exit_thread(void)
 {
 	/* Forget lazy fpu state */
@@ -65,21 +73,18 @@
 	}
 }
 
-void release_thread(struct task_struct *dead_task)
-{
-}
-
 int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
                  struct task_struct * p, struct pt_regs * regs)
 {
 	struct pt_regs * childregs;
 	long childksp;
+	extern void save_fp(void*);
 
 	childksp = (unsigned long)p + KERNEL_STACK_SIZE - 32;
 
 	if (last_task_used_math == current) {
 		set_cp0_status(ST0_CU1, ST0_CU1);
-		r4xx0_save_fp(p);
+		save_fp(p);
 	}
 	/* set up new TSS. */
 	childregs = (struct pt_regs *) childksp - 1;
@@ -98,22 +103,21 @@
 	if (childregs->cp0_status & ST0_CU0) {
 		childregs->regs[28] = (unsigned long) p;
 		childregs->regs[29] = childksp;
-		p->tss.current_ds = KERNEL_DS;
+		p->thread.current_ds = KERNEL_DS;
 	} else {
 		childregs->regs[29] = usp;
-		p->tss.current_ds = USER_DS;
+		p->thread.current_ds = USER_DS;
 	}
-	p->tss.reg29 = (unsigned long) childregs;
-	p->tss.reg31 = (unsigned long) ret_from_sys_call;
+	p->thread.reg29 = (unsigned long) childregs;
+	p->thread.reg31 = (unsigned long) ret_from_fork;
 
 	/*
 	 * New tasks loose permission to use the fpu. This accelerates context
 	 * switching for most programs since they don't use the fpu.
 	 */
-	p->tss.cp0_status = read_32bit_cp0_register(CP0_STATUS) &
-                            ~(ST0_CU3|ST0_CU2|ST0_CU1|ST0_KSU);
+	p->thread.cp0_status = read_32bit_cp0_register(CP0_STATUS) &
+                            ~(ST0_CU3|ST0_CU2|ST0_CU1|KU_MASK);
 	childregs->cp0_status &= ~(ST0_CU3|ST0_CU2|ST0_CU1);
-	p->mm->context = 0;
 
 	return 0;
 }
@@ -121,11 +125,11 @@
 /* Fill in the fpu structure for a core dump.. */
 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
 {
-	/* We actually store the FPU info in the task->tss
+	/* We actually store the FPU info in the task->thread
 	 * area.
 	 */
 	if(regs->cp0_status & ST0_CU1) {
-		memcpy(r, &current->tss.fpu, sizeof(current->tss.fpu));
+		memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));
 		return 1;
 	}
 	return 0; /* Task didn't use the fpu at all. */
@@ -143,7 +147,7 @@
 	dump->u_ssize =
 		(current->mm->start_stack - dump->start_stack + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	memcpy(&dump->regs[0], regs, sizeof(struct pt_regs));
-	memcpy(&dump->regs[EF_SIZE/4], &current->tss.fpu, sizeof(current->tss.fpu));
+	memcpy(&dump->regs[EF_SIZE/4], &current->thread.fpu, sizeof(current->thread.fpu));
 }
 
 /*
@@ -195,15 +199,24 @@
 {
 	unsigned long schedule_frame;
 	unsigned long pc;
+
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
-	/*
-	 * The same comment as on the Alpha applies here, too ...
-	 */
-	pc = thread_saved_pc(&p->tss);
-	if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) {
-		schedule_frame = ((unsigned long *)(long)p->tss.reg30)[16];
-		return (unsigned long)((unsigned long *)schedule_frame)[11];
+
+	pc = thread_saved_pc(&p->thread);
+	if (pc == (unsigned long) interruptible_sleep_on
+	    || pc == (unsigned long) sleep_on) {
+		schedule_frame = ((unsigned long *)p->thread.reg30)[9];
+		return ((unsigned long *)schedule_frame)[15];
+	}
+	if (pc == (unsigned long) interruptible_sleep_on_timeout
+	    || pc == (unsigned long) sleep_on_timeout) {
+		schedule_frame = ((unsigned long *)p->thread.reg30)[9];
+		return ((unsigned long *)schedule_frame)[16];
 	}
+	if (pc >= first_sched && pc < last_sched) {
+		printk(KERN_DEBUG "Bug in %s\n", __FUNCTION__);
+	}
+
 	return pc;
 }
