patch-2.4.20 linux-2.4.20/arch/x86_64/kernel/init_task.c

diff -urN linux-2.4.19/arch/x86_64/kernel/init_task.c linux-2.4.20/arch/x86_64/kernel/init_task.c
@@ -0,0 +1,38 @@
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/config.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS;
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+/*
+ * Initial task structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union task_union init_task_union 
+	__attribute__((__section__(".data.init_task"))) =
+		{ INIT_TASK(init_task_union.task) };
+
+/*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data.cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */ 
+struct tss_struct init_tss[NR_CPUS] __cacheline_aligned;
+
+
+#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
+
+pgd_t boot_vmalloc_pgt[512]  ALIGN_TO_4K;
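Two implementation notes on this patch may help readers unfamiliar with the 2.4 task layout. First, the 8192-byte alignment of init_task_union is load-bearing: in 2.4-era kernels the task_struct and the kernel stack share a single 8 KB union, and the running task is recovered by masking the stack pointer. A minimal user-space sketch of the idea (all names here are illustrative, not kernel API):

#include <stdint.h>

#define TASK_UNION_SIZE 8192UL		/* must be a power of two */

struct task_struct_sketch {
	int pid;
	/* ... rest of the task state ... */
};

union task_union_sketch {
	struct task_struct_sketch task;	/* task_struct at the bottom */
	unsigned long stack[TASK_UNION_SIZE / sizeof(unsigned long)];
};

/*
 * Any address inside the kernel stack masks down to the start of the
 * union, i.e. to the task_struct -- but only if every task_union is
 * aligned to TASK_UNION_SIZE. That is what the ".data.init_task"
 * linker section guarantees for init_task_union above.
 */
static inline struct task_struct_sketch *task_from_sp(unsigned long sp)
{
	return (struct task_struct_sketch *)(sp & ~(TASK_UNION_SIZE - 1));
}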

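Second, the __cacheline_aligned on init_tss exists to prevent false sharing: if the TSS entries of two CPUs shared a cache line, stores from one CPU would keep invalidating the line in the other CPU's cache (the "ping-pong" the comment mentions). In spirit the attribute boils down to something like the following sketch (the 64-byte line size is an assumption here; the kernel derives the real value from the configured CPU type):

#define CACHE_LINE_SIZE 64	/* assumed L1 line size */

/*
 * Aligning the struct type pads each element out to a full cache
 * line, so slot i is touched only by CPU i and no line is ever
 * written by two CPUs.
 */
struct cpu_local_sketch {
	unsigned long data;
} __attribute__((__aligned__(CACHE_LINE_SIZE)));

struct cpu_local_sketch per_cpu_sketch[8];	/* one slot per CPU */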
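Finally, ALIGN_TO_4K is somewhat misleadingly named: the attribute aligns nothing by itself, it only places boot_vmalloc_pgt in ".data.init_task". The alignment falls out of the section layout: the x86-64 linker script starts that section on an 8192-byte boundary, and init_task_union occupies exactly 8192 bytes, so the page table that follows it in this file lands page-aligned (512 entries of 8 bytes = 4096 bytes, exactly one page). A more direct way to state the same requirement, shown here only as a sketch, is an explicit alignment attribute:

typedef unsigned long pgd_sketch_t;	/* stand-in for the kernel's pgd_t */

/*
 * 512 eight-byte entries = exactly one 4 KB page, placed on a page
 * boundary without relying on the linker script's section layout.
 */
pgd_sketch_t boot_pgt_sketch[512] __attribute__((__aligned__(4096)));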