--- linux-2.4.25-paolo/arch/i386/kernel/ldt.c	2004-09-11 19:25:37.632440992 +0200
+++ linux-2.4.25-paolo/arch/i386/kernel/ldt.c	2004-08-29 17:21:52.276721288 +0200
@@ -18,6 +18,7 @@
 #include <asm/system.h>
 #include <asm/ldt.h>
 #include <asm/desc.h>
+#include <asm/mmu_context.h>
 
 #ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
 static void flush_ldt(void *mm)
@@ -53,7 +54,7 @@
 	wmb();
 	pc->ldt = newldt;
 	pc->size = mincount;
-	if (reload) {
+	if (reload && (&current->active_mm->context == pc)) {
 		load_LDT(pc);
 #ifdef CONFIG_SMP
 		if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
@@ -86,6 +87,12 @@
  * we do not have to muck with descriptors here, that is
  * done in switch_mm() as needed.
  */
+void init_new_empty_context(struct mm_struct *mm)
+{
+	init_MUTEX(&mm->context.sem);	/* guards this mm's LDT */
+	mm->context.size = 0;		/* no LDT entries allocated yet */
+}
+
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	struct mm_struct * old_mm;
@@ -102,6 +109,26 @@
 	return retval;
 }
 
+int copy_context(struct mm_struct *mm, struct mm_struct *old_mm)
+{
+	int err = 0;
+
+	/* Copy only when the source mm actually has an LDT. */
+	if (old_mm && old_mm->context.size > 0) {
+		mm_context_t *new = &mm->context;
+		mm_context_t *old = &old_mm->context;
+
+		down(&old->sem);
+		err = alloc_ldt(new, old->size, 0);
+		if (err >= 0) {
+			memcpy(new->ldt, old->ldt,
+			       old->size * LDT_ENTRY_SIZE);
+		}
+		up(&old->sem);
+	}
+	return err;
+}
+
 /*
  * No need to lock the MM as we are the last user
  * Do not touch the ldt register, we are already
@@ -118,11 +145,10 @@
 	}
 }
 
-static int read_ldt(void * ptr, unsigned long bytecount)
+static int read_ldt(struct mm_struct * mm, void * ptr, unsigned long bytecount)
 {
 	int err;
 	unsigned long size;
-	struct mm_struct * mm = current->mm;
 
 	if (!mm->context.size)
 		return 0;
@@ -166,9 +192,8 @@
 	return err;
 }
 
-static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
+static int write_ldt(struct mm_struct * mm, void * ptr, unsigned long bytecount, int oldmode)
 {
-	struct mm_struct * mm = current->mm;
 	__u32 entry_1, entry_2, *lp;
 	int error;
 	struct modify_ldt_ldt_s ldt_info;
@@ -192,7 +217,7 @@
 
 	down(&mm->context.sem);
 	if (ldt_info.entry_number >= mm->context.size) {
-		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+		error = alloc_ldt(&mm->context, ldt_info.entry_number+1, 1);
 		if (error < 0)
 			goto out_unlock;
 	}
@@ -243,20 +268,25 @@
-asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
+int modify_ldt(struct mm_struct * mm, int func, void *ptr, unsigned long bytecount)
 {
 	int ret = -ENOSYS;
 
 	switch (func) {
 	case 0:
-		ret = read_ldt(ptr, bytecount);
+		ret = read_ldt(mm, ptr, bytecount);
 		break;
 	case 1:
-		ret = write_ldt(ptr, bytecount, 1);
+		ret = write_ldt(mm, ptr, bytecount, 1);
 		break;
 	case 2:
 		ret = read_default_ldt(ptr, bytecount);
 		break;
 	case 0x11:
-		ret = write_ldt(ptr, bytecount, 0);
+		ret = write_ldt(mm, ptr, bytecount, 0);
 		break;
 	}
 	return ret;
 }
+
+asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
+{
+	return modify_ldt(current->mm, func, ptr, bytecount);
+}
--- linux-2.4.25-paolo/arch/i386/kernel/ptrace.c	2004-08-29 17:21:51.920775400 +0200
+++ linux-2.4.25-paolo/arch/i386/kernel/ptrace.c	2004-08-29 17:21:52.616669608 +0200
@@ -147,6 +147,11 @@
 	put_stack_long(child, EFL_OFFSET, tmp);
 }
 
+extern int modify_ldt(struct mm_struct *mm, int func, void *ptr,
+		      unsigned long bytecount);
+
+extern struct mm_struct *proc_mm_get_mm(int fd);
+
 asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
 {
 	struct task_struct *child;
@@ -270,6 +275,7 @@
 		  }
 		  break;
 
+	case PTRACE_SYSEMU: /* continue and replace next syscall */
 	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
 	case PTRACE_CONT: { /* restart after signal. */
 		long tmp;
@@ -277,6 +283,10 @@
 		ret = -EIO;
 		if ((unsigned long) data > _NSIG)
 			break;
+		if (request == PTRACE_SYSEMU)
+			child->ptrace |= PT_SYSEMU;
+		else
+			child->ptrace &= ~PT_SYSEMU;
 		if (request == PTRACE_SYSCALL)
 			child->ptrace |= PT_TRACESYS;
 		else
@@ -315,7 +325,7 @@
 		ret = -EIO;
 		if ((unsigned long) data > _NSIG)
 			break;
-		child->ptrace &= ~PT_TRACESYS;
+		child->ptrace &= ~(PT_TRACESYS|PT_SYSEMU);
 		if ((child->ptrace & PT_DTRACE) == 0) {
 			/* Spurious delayed TF traps may occur */
 			child->ptrace |= PT_DTRACE;
@@ -419,6 +429,53 @@
 		break;
 	}
 
+	case PTRACE_FAULTINFO: {
+		struct ptrace_faultinfo fault;
+		fault = ((struct ptrace_faultinfo)
+			{ .is_write	= child->thread.error_code,
+			  .addr		= child->thread.cr2 });
+		/* copy_to_user() returns bytes uncopied, not -errno */
+		ret = copy_to_user((unsigned long *) data, &fault,
+				   sizeof(fault)) ? -EIO : 0;
+		break;
+	}
+	case PTRACE_SIGPENDING:
+		ret = copy_to_user((unsigned long *) data,
+				   &child->pending.signal,
+				   sizeof(child->pending.signal)) ? -EIO : 0;
+		break;
+
+	case PTRACE_LDT: {
+		struct ptrace_ldt ldt;
+
+		if(copy_from_user(&ldt, (unsigned long *) data,
+				  sizeof(ldt))){
+			ret = -EIO;
+			break;
+		}
+		ret = modify_ldt(child->mm, ldt.func, ldt.ptr, ldt.bytecount);
+		break;
+	}
+
+#if 1 /* CONFIG_PROC_MM */
+	case PTRACE_SWITCH_MM: {	/* point the child at an mm from /proc/mm */
+		struct mm_struct *old = child->mm;
+		struct mm_struct *new = proc_mm_get_mm(data);	/* NOTE(review): no mm reference held here — confirm the fd cannot be closed concurrently */
+
+		if(IS_ERR(new)){
+			ret = PTR_ERR(new);
+			break;
+		}
+
+		atomic_inc(&new->mm_users);	/* hold the new mm for the child */
+		child->mm = new;
+		child->active_mm = new;
+		mmput(old);	/* drop the child's reference to its old mm */
+		ret = 0;
+		break;
+	}
+#endif
+
 	default:
 		ret = -EIO;
 		break;
@@ -430,6 +487,29 @@
 	return ret;
 }
 
+asmlinkage void syscall_emulate(void)	/* entry.S: stop a PT_SYSEMU child before the syscall runs */
+{
+	if ((current->ptrace & (PT_PTRACED|PT_SYSEMU)) !=
+			(PT_PTRACED|PT_SYSEMU))
+		return;
+	/* the 0x80 provides a way for the tracing parent to distinguish
+	   between a syscall stop and SIGTRAP delivery */
+	current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+					? 0x80 : 0);
+	current->state = TASK_STOPPED;
+	notify_parent(current, SIGCHLD);
+	schedule();	/* wait for the tracer to resume us */
+	/*
+	 * this isn't the same as continuing with a signal, but it will do
+	 * for normal use.  strace only continues with a signal if the
+	 * stopping signal is not SIGTRAP.  -brl
+	 */
+	if (current->exit_code) {
+		send_sig(current->exit_code, current, 1);
+		current->exit_code = 0;
+	}
+}
+
 asmlinkage void syscall_trace(void)
 {
 	if ((current->ptrace & (PT_PTRACED|PT_TRACESYS)) !=
--- linux-2.4.25/arch/i386/kernel/sys_i386.c~host-skas3-2.4.25-no_CONFIG	2004-08-29 17:21:51.100900040 +0200
+++ linux-2.4.25-paolo/arch/i386/kernel/sys_i386.c	2004-08-29 17:21:51.111898368 +0200
@@ -40,7 +40,7 @@ asmlinkage int sys_pipe(unsigned long * 
 }
 
 /* common code for old and new mmaps */
-static inline long do_mmap2(
+long do_mmap2(struct mm_struct *mm,
 	unsigned long addr, unsigned long len,
 	unsigned long prot, unsigned long flags,
 	unsigned long fd, unsigned long pgoff)
@@ -55,9 +55,9 @@ static inline long do_mmap2(
 			goto out;
 	}
 
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
+	down_write(&mm->mmap_sem);
+	error = do_mmap_pgoff(mm, file, addr, len, prot, flags, pgoff);
+	up_write(&mm->mmap_sem);
 
 	if (file)
 		fput(file);
@@ -69,7 +69,7 @@ asmlinkage long sys_mmap2(unsigned long 
 	unsigned long prot, unsigned long flags,
 	unsigned long fd, unsigned long pgoff)
 {
-	return do_mmap2(addr, len, prot, flags, fd, pgoff);
+	return do_mmap2(current->mm, addr, len, prot, flags, fd, pgoff);
 }
 
 /*
@@ -100,7 +100,7 @@ asmlinkage int old_mmap(struct mmap_arg_
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	err = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+	err = do_mmap2(current->mm, a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
 out:
 	return err;
 }
--- linux-2.4.25/include/asm-i386/processor.h~host-skas3-2.4.25-no_CONFIG	2004-08-29 17:21:51.101899888 +0200
+++ linux-2.4.25-paolo/include/asm-i386/processor.h	2004-08-29 17:21:51.111898368 +0200
@@ -433,6 +433,8 @@ extern int arch_kernel_thread(int (*fn)(
 static inline void copy_segments(struct task_struct *p, struct mm_struct * mm) { }
 static inline void release_segments(struct mm_struct * mm) { }
 
+extern int __init_new_context(struct mm_struct *old_mm, struct mm_struct *mm);
+
 /*
  * Return saved PC of a blocked thread.
  */
--- linux-2.4.25/include/asm-i386/ptrace.h~host-skas3-2.4.25-no_CONFIG	2004-08-29 17:21:51.102899736 +0200
+++ linux-2.4.25-paolo/include/asm-i386/ptrace.h	2004-08-29 17:21:51.111898368 +0200
@@ -51,6 +51,22 @@ struct pt_regs {
 
 #define PTRACE_SETOPTIONS         21
 
+struct ptrace_faultinfo {
+	int is_write;
+	unsigned long addr;
+};
+
+struct ptrace_ldt {
+	int func;	/* modify_ldt() func: 0, 1, 2 or 0x11 */
+	void *ptr;	/* buffer in the tracer's address space */
+	unsigned long bytecount;
+};
+
+#define PTRACE_FAULTINFO 52
+#define PTRACE_SIGPENDING 53
+#define PTRACE_LDT 54
+#define PTRACE_SWITCH_MM 55
+
 /* options set using PTRACE_SETOPTIONS */
 #define PTRACE_O_TRACESYSGOOD     0x00000001
 
--- linux-2.4.25/include/linux/mm.h~host-skas3-2.4.25-no_CONFIG	2004-08-29 17:21:51.103899584 +0200
+++ linux-2.4.25-paolo/include/linux/mm.h	2004-08-29 17:21:51.112898216 +0200
@@ -505,6 +505,9 @@ extern int ptrace_check_attach(struct ta
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
 		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
 
+extern long do_mprotect(struct mm_struct *mm, unsigned long start,
+			size_t len, unsigned long prot);
+
 /*
  * On a two-level page table, this ends up being trivial. Thus the
  * inlining and the symmetry break with pte_alloc() that does all
@@ -552,9 +555,10 @@ extern void exit_mmap(struct mm_struct *
 
 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long pgoff);
+extern unsigned long do_mmap_pgoff(struct mm_struct *mm,
+				   struct file *file, unsigned long addr,
+				   unsigned long len, unsigned long prot,
+				   unsigned long flag, unsigned long pgoff);
 
 static inline unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
@@ -564,7 +568,7 @@ static inline unsigned long do_mmap(stru
 	if ((offset + PAGE_ALIGN(len)) < offset)
 		goto out;
 	if (!(offset & ~PAGE_MASK))
-		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+		ret = do_mmap_pgoff(current->mm, file, addr, len, prot, flag, offset >> PAGE_SHIFT);
 out:
 	return ret;
 }
--- /dev/null	2004-06-25 17:47:25.000000000 +0200
+++ linux-2.4.25-paolo/include/linux/proc_mm.h	2004-08-29 17:21:51.112898216 +0200
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __PROC_MM_H
+#define __PROC_MM_H
+
+#define MM_MMAP 54
+#define MM_MUNMAP 55
+#define MM_MPROTECT 56
+#define MM_COPY_SEGMENTS 57
+
+struct mm_mmap {
+	unsigned long addr;
+	unsigned long len;
+	unsigned long prot;
+	unsigned long flags;
+	unsigned long fd;
+	unsigned long offset;
+};
+
+struct mm_munmap {
+	unsigned long addr;
+	unsigned long len;
+};
+
+struct mm_mprotect {
+	unsigned long addr;
+	unsigned long len;
+	unsigned int prot;
+};
+
+struct proc_mm_op {
+	int op;		/* MM_MMAP, MM_MUNMAP, MM_MPROTECT or MM_COPY_SEGMENTS */
+	union {
+		struct mm_mmap mmap;
+		struct mm_munmap munmap;
+		struct mm_mprotect mprotect;
+		int copy_segments;	/* fd of the source /proc/mm file */
+	} u;
+};
+
+#endif
--- linux-2.4.25/mm/Makefile~host-skas3-2.4.25-no_CONFIG	2004-08-29 17:21:51.105899280 +0200
+++ linux-2.4.25-paolo/mm/Makefile	2004-08-29 17:21:51.112898216 +0200
@@ -17,5 +17,6 @@ obj-y	 := memory.o mmap.o filemap.o mpro
 	    shmem.o
 
 obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-y += proc_mm.o
 
 include $(TOPDIR)/Rules.make
--- linux-2.4.25/mm/mmap.c~host-skas3-2.4.25-no_CONFIG	2004-08-29 17:21:51.106899128 +0200
+++ linux-2.4.25-paolo/mm/mmap.c	2004-08-29 17:21:51.112898216 +0200
@@ -391,10 +391,11 @@ static int vma_merge(struct mm_struct * 
 	return 0;
 }
 
-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags, unsigned long pgoff)
+unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file * file,
+			    unsigned long addr, unsigned long len,
+			    unsigned long prot, unsigned long flags,
+			    unsigned long pgoff)
 {
-	struct mm_struct * mm = current->mm;
 	struct vm_area_struct * vma, * prev;
 	unsigned int vm_flags;
 	int correct_wcount = 0;
--- linux-2.4.25-paolo/mm/mprotect.c	2004-08-29 17:21:51.113898064 +0200
+++ linux-2.4.25-paolo/mm/mprotect.c	2004-08-29 17:21:52.974615192 +0200
@@ -71,23 +71,24 @@
 	} while (address && (address < end));
 }
 
-static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
+static void change_protection(struct mm_struct *mm,
+	unsigned long start, unsigned long end, pgprot_t newprot)
 {
 	pgd_t *dir;
 	unsigned long beg = start;
 
-	dir = pgd_offset(current->mm, start);
-	flush_cache_range(current->mm, beg, end);
+	dir = pgd_offset(mm, start);
+	flush_cache_range(mm, beg, end);
 	if (start >= end)
 		BUG();
-	spin_lock(&current->mm->page_table_lock);
+	spin_lock(&mm->page_table_lock);
 	do {
 		change_pmd_range(dir, start, end - start, newprot);
 		start = (start + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (start && (start < end));
-	spin_unlock(&current->mm->page_table_lock);
-	flush_tlb_range(current->mm, beg, end);
+	spin_unlock(&mm->page_table_lock);
+	flush_tlb_range(mm, beg, end);
 	return;
 }
 
@@ -125,6 +126,7 @@
 	int newflags, pgprot_t prot)
 {
 	struct vm_area_struct * n, * prev = *pprev;
+	struct mm_struct * mm = vma->vm_mm;
 
 	*pprev = vma;
 
@@ -153,7 +155,7 @@
 	lock_vma_mappings(vma);
 	spin_lock(&vma->vm_mm->page_table_lock);
 	vma->vm_start = end;
-	__insert_vm_struct(current->mm, n);
+	__insert_vm_struct(mm, n);
 	spin_unlock(&vma->vm_mm->page_table_lock);
 	unlock_vma_mappings(vma);
 
@@ -165,6 +167,7 @@
 	int newflags, pgprot_t prot)
 {
 	struct vm_area_struct * n;
+	struct mm_struct * mm = vma->vm_mm;
 
 	n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!n)
@@ -182,7 +185,7 @@
 	lock_vma_mappings(vma);
 	spin_lock(&vma->vm_mm->page_table_lock);
 	vma->vm_end = start;
-	__insert_vm_struct(current->mm, n);
+	__insert_vm_struct(mm, n);
 	spin_unlock(&vma->vm_mm->page_table_lock);
 	unlock_vma_mappings(vma);
 
@@ -196,6 +199,7 @@
 	int newflags, pgprot_t prot)
 {
 	struct vm_area_struct * left, * right;
+	struct mm_struct * mm = vma->vm_mm;
 
 	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
 	if (!left)
@@ -226,8 +230,8 @@
 	vma->vm_start = start;
 	vma->vm_end = end;
 	vma->vm_flags = newflags;
-	__insert_vm_struct(current->mm, left);
-	__insert_vm_struct(current->mm, right);
+	__insert_vm_struct(mm, left);
+	__insert_vm_struct(mm, right);
 	spin_unlock(&vma->vm_mm->page_table_lock);
 	unlock_vma_mappings(vma);
 
@@ -241,6 +245,7 @@
 {
 	pgprot_t newprot;
 	int error;
+	struct mm_struct * mm = vma->vm_mm;
 
 	if (newflags == vma->vm_flags) {
 		*pprev = vma;
@@ -260,11 +265,12 @@
 	if (error)
 		return error;
 
-	change_protection(start, end, newprot);
+	change_protection(mm, start, end, newprot);
 	return 0;
 }
 
-asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+long do_mprotect(struct mm_struct *mm, unsigned long start, size_t len,
+		 unsigned long prot)
 {
 	unsigned long nstart, end, tmp;
 	struct vm_area_struct * vma, * next, * prev;
@@ -281,9 +287,9 @@
 	if (end == start)
 		return 0;
 
-	down_write(&current->mm->mmap_sem);
+	down_write(&mm->mmap_sem);
 
-	vma = find_vma_prev(current->mm, start, &prev);
+	vma = find_vma_prev(mm, start, &prev);
 	error = -ENOMEM;
 	if (!vma || vma->vm_start > start)
 		goto out;
@@ -335,3 +341,8 @@
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 	return error;
 }
+
+asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+{
+	return do_mprotect(current->mm, start, len, prot);
+}
--- linux-2.4.25-paolo/mm/proc_mm.c	2004-08-29 17:21:52.277721136 +0200
+++ linux-2.4.25-paolo/mm/proc_mm.c	2004-08-29 17:21:52.974615192 +0200
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/proc_fs.h"
+#include "linux/proc_mm.h"
+#include "linux/file.h"
+#include "linux/mman.h"
+#include "asm/uaccess.h"
+#include "asm/mmu_context.h"
+
+static struct file_operations proc_mm_fops;
+
+struct mm_struct *proc_mm_get_mm(int fd)	/* fd -> mm behind /proc/mm, or ERR_PTR */
+{
+	struct mm_struct *ret = ERR_PTR(-EBADF);
+	struct file *file;
+
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	ret = ERR_PTR(-EINVAL);
+	if(file->f_op != &proc_mm_fops)
+		goto out_fput;
+
+	ret = file->private_data;	/* NOTE(review): no mm reference taken — caller must not let the fd be closed before grabbing one; confirm */
+
+ out_fput:
+	fput(file);
+ out:
+	return(ret);
+}
+
+extern long do_mmap2(struct mm_struct *mm, unsigned long addr,
+		     unsigned long len, unsigned long prot,
+		     unsigned long flags, unsigned long fd,
+		     unsigned long pgoff);
+
+static ssize_t write_proc_mm(struct file *file, const char *buffer,
+			     size_t count, loff_t *ppos)	/* one proc_mm_op per write() */
+{
+	struct mm_struct *mm = file->private_data;
+	struct proc_mm_op req;
+	int n, ret;
+
+	if(count > sizeof(req))	/* NOTE(review): count < sizeof(req) leaves req partly uninitialized — confirm */
+		return(-EINVAL);
+
+	n = copy_from_user(&req, buffer, count);
+	if(n != 0)
+		return(-EFAULT);
+
+	ret = count;	/* success: report the whole write consumed */
+	switch(req.op){
+	case MM_MMAP: {
+		struct mm_mmap *map = &req.u.mmap;
+
+		/* Nobody ever noticed it, but do_mmap_pgoff() calls
+		 * get_unmapped_area() which checks current->mm, if
+		 * MAP_FIXED is not set, so mmap() could replace
+		 * an old mapping.
+		 */
+		if (! (map->flags & MAP_FIXED))
+			return(-EINVAL);
+
+		ret = do_mmap2(mm, map->addr, map->len, map->prot,
+			       map->flags, map->fd, map->offset >> PAGE_SHIFT);
+		if((ret & ~PAGE_MASK) == 0)
+			ret = count;
+
+		break;
+	}
+	case MM_MUNMAP: {
+		struct mm_munmap *unmap = &req.u.munmap;
+
+		down_write(&mm->mmap_sem);
+		ret = do_munmap(mm, unmap->addr, unmap->len);
+		up_write(&mm->mmap_sem);
+
+		if(ret == 0)
+			ret = count;
+		break;
+	}
+	case MM_MPROTECT: {
+		struct mm_mprotect *protect = &req.u.mprotect;
+
+		ret = do_mprotect(mm, protect->addr, protect->len,
+				  protect->prot);
+		if(ret == 0)
+			ret = count;
+		break;
+	}
+
+	case MM_COPY_SEGMENTS: {
+		struct mm_struct *from = proc_mm_get_mm(req.u.copy_segments);
+
+		if(IS_ERR(from)){
+			ret = PTR_ERR(from);
+			break;
+		}
+
+		/* duplicate the source mm's LDT into this mm */
+		ret = copy_context(mm, from);
+		if(ret == 0)
+			ret = count;
+		break;
+	}
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return(ret);
+}
+
+static int open_proc_mm(struct inode *inode, struct file *file)	/* open("/proc/mm"): create an empty mm owned by this file */
+{
+	struct mm_struct *mm = mm_alloc();
+	int ret;
+
+	ret = -ENOMEM;
+	if(mm == NULL)
+		goto out_mem;
+
+	init_new_empty_context(mm);	/* no LDT yet; see arch ldt.c */
+
+	spin_lock(&mmlist_lock);
+	list_add(&mm->mmlist, &current->mm->mmlist);	/* NOTE(review): assumes current->mm != NULL (not a kernel thread) — confirm */
+	mmlist_nr++;
+	spin_unlock(&mmlist_lock);
+
+	file->private_data = mm;
+
+	return(0);
+
+ out_mem:
+	return(ret);
+}
+
+static int release_proc_mm(struct inode *inode, struct file *file)
+{
+	struct mm_struct *mm = file->private_data;
+
+	mmput(mm);	/* drop the reference taken by mm_alloc() in open_proc_mm() */
+	return(0);
+}
+
+static struct file_operations proc_mm_fops = {
+	.open		= open_proc_mm,
+	.release	= release_proc_mm,
+	.write		= write_proc_mm,
+};
+
+static int make_proc_mm(void)
+{
+	struct proc_dir_entry *ent;
+
+	ent = create_proc_entry("mm", 0222, &proc_root);
+	if(ent == NULL){
+		printk(KERN_ERR "make_proc_mm : Failed to register /proc/mm\n");
+		return(0);
+	}
+	ent->proc_fops = &proc_mm_fops;
+
+	return(0);
+}
+
+__initcall(make_proc_mm);
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
--- linux-2.4.25-paolo/arch/i386/kernel/entry.S	2004-08-29 17:21:51.920775400 +0200
+++ linux-2.4.25-paolo/arch/i386/kernel/entry.S	2004-08-29 17:21:52.616669608 +0200
@@ -203,6 +203,8 @@
 	pushl %eax			# save orig_eax
 	SAVE_ALL
 	GET_CURRENT(%ebx)
+	testb $0x20,tsk_ptrace(%ebx)	# PT_SYSEMU
+	jne emulatesys
 	testb $0x02,tsk_ptrace(%ebx)	# PT_TRACESYS
 	jne tracesys
 	cmpl $(NR_syscalls),%eax
@@ -237,6 +239,10 @@
 	jmp restore_all
 
 	ALIGN
+emulatesys:
+	call SYMBOL_NAME(syscall_emulate)
+	jmp ret_from_sys_call
+	ALIGN
 tracesys:
 	movl $-ENOSYS,EAX(%esp)
 	call SYMBOL_NAME(syscall_trace)
--- linux-2.4.25-paolo/include/linux/ptrace.h	2004-08-29 17:21:51.920775400 +0200
+++ linux-2.4.25-paolo/include/linux/ptrace.h	2004-08-29 17:21:52.616669608 +0200
@@ -20,6 +20,7 @@
 #define PTRACE_DETACH		0x11
 
 #define PTRACE_SYSCALL		  24
+#define PTRACE_SYSEMU		  31
 
 #include <asm/ptrace.h>
 
--- linux-2.4.25-paolo/include/linux/sched.h	2004-08-29 17:21:51.921775248 +0200
+++ linux-2.4.25-paolo/include/linux/sched.h	2004-08-29 17:21:52.617669456 +0200
@@ -445,6 +445,7 @@
 #define PT_DTRACE	0x00000004	/* delayed trace (used on m68k, i386) */
 #define PT_TRACESYSGOOD	0x00000008
 #define PT_PTRACE_CAP	0x00000010	/* ptracer can follow suid-exec */
+#define PT_SYSEMU	0x00000020	/* syscall emulation for UML */
 
 #define is_dumpable(tsk)    ((tsk)->task_dumpable && (tsk)->mm && (tsk)->mm->dumpable)
 
--- linux-2.4.25/include/asm-i386/mmu_context.h~skas-avoid-ldt	2004-08-29 17:21:52.273721744 +0200
+++ linux-2.4.25-paolo/include/asm-i386/mmu_context.h	2004-08-29 17:21:52.276721288 +0200
@@ -13,6 +13,8 @@
  */
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
+int copy_context(struct mm_struct *mm, struct mm_struct *old_mm);
+void init_new_empty_context(struct mm_struct *mm);
 
 #ifdef CONFIG_SMP
 
