--- /dev/null
+diff -Naur host/arch/i386/config.in host-ptrace/arch/i386/config.in
+--- host/arch/i386/config.in Fri Aug 9 15:57:14 2002
++++ host-ptrace/arch/i386/config.in Sun Nov 10 18:40:09 2002
+@@ -291,6 +291,8 @@
+ bool ' Use real mode APM BIOS call to power off' CONFIG_APM_REAL_MODE_POWER_OFF
+ fi
+
++bool '/proc/mm' CONFIG_PROC_MM
++
+ endmenu
+
+ source drivers/mtd/Config.in
+diff -Naur host/arch/i386/kernel/ldt.c host-ptrace/arch/i386/kernel/ldt.c
+--- host/arch/i386/kernel/ldt.c Fri Oct 26 00:01:41 2001
++++ host-ptrace/arch/i386/kernel/ldt.c Sun Nov 3 18:37:48 2002
+@@ -24,11 +24,12 @@
+ * assured by user-space anyway. Writes are atomic, to protect
+ * the security checks done on new descriptors.
+ */
+-static int read_ldt(void * ptr, unsigned long bytecount)
++static int read_ldt(struct task_struct *task, void * ptr,
++ unsigned long bytecount)
+ {
+ int err;
+ unsigned long size;
+- struct mm_struct * mm = current->mm;
++ struct mm_struct * mm = task->mm;
+
+ err = 0;
+ if (!mm->context.segments)
+@@ -64,9 +65,10 @@
+ return err;
+ }
+
+-static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
++static int write_ldt(struct task_struct *task, void * ptr,
++ unsigned long bytecount, int oldmode)
+ {
+- struct mm_struct * mm = current->mm;
++ struct mm_struct * mm = task->mm;
+ __u32 entry_1, entry_2, *lp;
+ int error;
+ struct modify_ldt_ldt_s ldt_info;
+@@ -148,23 +150,29 @@
+ return error;
+ }
+
+-asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
++int modify_ldt(struct task_struct *task, int func, void *ptr,
++ unsigned long bytecount)
+ {
+ int ret = -ENOSYS;
+
+ switch (func) {
+ case 0:
+- ret = read_ldt(ptr, bytecount);
++ ret = read_ldt(task, ptr, bytecount);
+ break;
+ case 1:
+- ret = write_ldt(ptr, bytecount, 1);
++ ret = write_ldt(task, ptr, bytecount, 1);
+ break;
+ case 2:
+ ret = read_default_ldt(ptr, bytecount);
+ break;
+ case 0x11:
+- ret = write_ldt(ptr, bytecount, 0);
++ ret = write_ldt(task, ptr, bytecount, 0);
+ break;
+ }
+ return ret;
++}
++
++asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
++{
++ return(modify_ldt(current, func, ptr, bytecount));
+ }
+diff -Naur host/arch/i386/kernel/process.c host-ptrace/arch/i386/kernel/process.c
+--- host/arch/i386/kernel/process.c Fri Aug 9 15:57:14 2002
++++ host-ptrace/arch/i386/kernel/process.c Wed Nov 6 22:12:45 2002
+@@ -551,13 +551,11 @@
+ * we do not have to muck with descriptors here, that is
+ * done in switch_mm() as needed.
+ */
+-void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
++void mm_copy_segments(struct mm_struct *old_mm, struct mm_struct *new_mm)
+ {
+- struct mm_struct * old_mm;
+ void *old_ldt, *ldt;
+
+ ldt = NULL;
+- old_mm = current->mm;
+ if (old_mm && (old_ldt = old_mm->context.segments) != NULL) {
+ /*
+ * Completely new LDT, we initialize it from the parent:
+@@ -570,6 +568,16 @@
+ }
+ new_mm->context.segments = ldt;
+ new_mm->context.cpuvalid = ~0UL; /* valid on all CPU's - they can't have stale data */
++}
++
++void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
++{
++ mm_copy_segments(current->mm, new_mm);
++}
++
++void copy_task_segments(struct task_struct *from, struct mm_struct *new_mm)
++{
++ mm_copy_segments(from->mm, new_mm);
+ }
+
+ /*
+diff -Naur host/arch/i386/kernel/ptrace.c host-ptrace/arch/i386/kernel/ptrace.c
+--- host/arch/i386/kernel/ptrace.c Fri Aug 9 15:57:14 2002
++++ host-ptrace/arch/i386/kernel/ptrace.c Mon Nov 11 19:03:38 2002
+@@ -147,6 +147,11 @@
+ put_stack_long(child, EFL_OFFSET, tmp);
+ }
+
++extern int modify_ldt(struct task_struct *task, int func, void *ptr,
++ unsigned long bytecount);
++
++extern struct mm_struct *proc_mm_get_mm(int fd);
++
+ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+ {
+ struct task_struct *child;
+@@ -415,6 +420,53 @@
+ child->ptrace |= PT_TRACESYSGOOD;
+ else
+ child->ptrace &= ~PT_TRACESYSGOOD;
++ ret = 0;
++ break;
++ }
++
++ case PTRACE_FAULTINFO: {
++ struct ptrace_faultinfo fault;
++
++ fault = ((struct ptrace_faultinfo)
++ { .is_write = child->thread.error_code,
++ .addr = child->thread.cr2 });
++ ret = copy_to_user((unsigned long *) data, &fault,
++ sizeof(fault));
++ if(ret)
++ break;
++ break;
++ }
++ case PTRACE_SIGPENDING:
++ ret = copy_to_user((unsigned long *) data,
++ &child->pending.signal,
++ sizeof(child->pending.signal));
++ break;
++
++ case PTRACE_LDT: {
++ struct ptrace_ldt ldt;
++
++ if(copy_from_user(&ldt, (unsigned long *) data,
++ sizeof(ldt))){
++ ret = -EIO;
++ break;
++ }
++ ret = modify_ldt(child, ldt.func, ldt.ptr, ldt.bytecount);
++ break;
++ }
++
++ case PTRACE_SWITCH_MM: {
++ struct mm_struct *old = child->mm;
++ struct mm_struct *new = proc_mm_get_mm(data);
++
++ if(IS_ERR(new)){
++ ret = PTR_ERR(new);
++ break;
++ }
++
++ atomic_inc(&new->mm_users);
++ child->mm = new;
++ child->active_mm = new;
++ mmput(old);
+ ret = 0;
+ break;
+ }
+diff -Naur host/arch/i386/kernel/sys_i386.c host-ptrace/arch/i386/kernel/sys_i386.c
+--- host/arch/i386/kernel/sys_i386.c Mon Mar 19 15:35:09 2001
++++ host-ptrace/arch/i386/kernel/sys_i386.c Mon Nov 11 17:23:25 2002
+@@ -40,7 +40,7 @@
+ }
+
+ /* common code for old and new mmaps */
+-static inline long do_mmap2(
++long do_mmap2(struct mm_struct *mm,
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+@@ -55,9 +55,9 @@
+ goto out;
+ }
+
+- down_write(&current->mm->mmap_sem);
+- error = do_mmap(file, addr, len, prot, flags, pgoff << PAGE_SHIFT);
+- up_write(&current->mm->mmap_sem);
++ down_write(&mm->mmap_sem);
++ error = do_mmap_pgoff(mm, file, addr, len, prot, flags, pgoff);
++ up_write(&mm->mmap_sem);
+
+ if (file)
+ fput(file);
+@@ -69,7 +69,7 @@
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+ {
+- return do_mmap2(addr, len, prot, flags, fd, pgoff);
++ return do_mmap2(current->mm, addr, len, prot, flags, fd, pgoff);
+ }
+
+ /*
+@@ -100,7 +100,7 @@
+ if (a.offset & ~PAGE_MASK)
+ goto out;
+
+- err = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
++ err = do_mmap2(current->mm, a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+ out:
+ return err;
+ }
+diff -Naur host/include/asm-i386/processor.h host-ptrace/include/asm-i386/processor.h
+--- host/include/asm-i386/processor.h Sun Nov 10 18:47:37 2002
++++ host-ptrace/include/asm-i386/processor.h Mon Nov 11 17:33:30 2002
+@@ -436,6 +436,8 @@
+ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+ /* Copy and release all segment info associated with a VM */
++extern void mm_copy_segments(struct mm_struct *old_mm,
++ struct mm_struct *new_mm);
+ extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
+ extern void release_segments(struct mm_struct * mm);
+
+diff -Naur host/include/asm-i386/ptrace.h host-ptrace/include/asm-i386/ptrace.h
+--- host/include/asm-i386/ptrace.h Sun Sep 23 19:20:51 2001
++++ host-ptrace/include/asm-i386/ptrace.h Sun Nov 10 18:36:22 2002
+@@ -51,6 +51,22 @@
+
+ #define PTRACE_SETOPTIONS 21
+
++struct ptrace_faultinfo {
++ int is_write;
++ unsigned long addr;
++};
++
++struct ptrace_ldt {
++ int func;
++ void *ptr;
++ unsigned long bytecount;
++};
++
++#define PTRACE_FAULTINFO 52
++#define PTRACE_SIGPENDING 53
++#define PTRACE_LDT 54
++#define PTRACE_SWITCH_MM 55
++
+ /* options set using PTRACE_SETOPTIONS */
+ #define PTRACE_O_TRACESYSGOOD 0x00000001
+
+diff -Naur host/include/linux/mm.h host-ptrace/include/linux/mm.h
+--- host/include/linux/mm.h Fri Aug 30 15:03:44 2002
++++ host-ptrace/include/linux/mm.h Mon Nov 11 19:08:53 2002
+@@ -492,6 +492,9 @@
+ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
+ int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+
++extern long do_mprotect(struct mm_struct *mm, unsigned long start,
++ size_t len, unsigned long prot);
++
+ /*
+ * On a two-level page table, this ends up being trivial. Thus the
+ * inlining and the symmetry break with pte_alloc() that does all
+@@ -539,9 +542,10 @@
+
+ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+
+-extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+- unsigned long len, unsigned long prot,
+- unsigned long flag, unsigned long pgoff);
++extern unsigned long do_mmap_pgoff(struct mm_struct *mm,
++ struct file *file, unsigned long addr,
++ unsigned long len, unsigned long prot,
++ unsigned long flag, unsigned long pgoff);
+
+ static inline unsigned long do_mmap(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+@@ -551,7 +555,7 @@
+ if ((offset + PAGE_ALIGN(len)) < offset)
+ goto out;
+ if (!(offset & ~PAGE_MASK))
+- ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
++ ret = do_mmap_pgoff(current->mm, file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+
+ #ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+ #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
+diff -Naur host/include/linux/proc_mm.h host-ptrace/include/linux/proc_mm.h
+--- host/include/linux/proc_mm.h Wed Dec 31 19:00:00 1969
++++ host-ptrace/include/linux/proc_mm.h Mon Nov 11 17:41:09 2002
+@@ -0,0 +1,44 @@
++/*
++ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
++ * Licensed under the GPL
++ */
++
++#ifndef __PROC_MM_H
++#define __PROC_MM_H
++
++#define MM_MMAP 54
++#define MM_MUNMAP 55
++#define MM_MPROTECT 56
++#define MM_COPY_SEGMENTS 57
++
++struct mm_mmap {
++ unsigned long addr;
++ unsigned long len;
++ unsigned long prot;
++ unsigned long flags;
++ unsigned long fd;
++ unsigned long offset;
++};
++
++struct mm_munmap {
++ unsigned long addr;
++ unsigned long len;
++};
++
++struct mm_mprotect {
++ unsigned long addr;
++ unsigned long len;
++ unsigned int prot;
++};
++
++struct proc_mm_op {
++ int op;
++ union {
++ struct mm_mmap mmap;
++ struct mm_munmap munmap;
++ struct mm_mprotect mprotect;
++ int copy_segments;
++ } u;
++};
++
++#endif
+diff -Naur host/mm/Makefile host-ptrace/mm/Makefile
+--- host/mm/Makefile Fri Aug 9 15:57:31 2002
++++ host-ptrace/mm/Makefile Sun Nov 10 18:37:33 2002
+@@ -17,5 +17,6 @@
+ shmem.o
+
+ obj-$(CONFIG_HIGHMEM) += highmem.o
++obj-$(CONFIG_PROC_MM) += proc_mm.o
+
+ include $(TOPDIR)/Rules.make
+diff -Naur host/mm/mmap.c host-ptrace/mm/mmap.c
+--- host/mm/mmap.c Fri Aug 9 15:57:31 2002
++++ host-ptrace/mm/mmap.c Mon Nov 11 17:24:18 2002
+@@ -390,10 +390,11 @@
+ return 0;
+ }
+
+-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len,
+- unsigned long prot, unsigned long flags, unsigned long pgoff)
++unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file * file,
++ unsigned long addr, unsigned long len,
++ unsigned long prot, unsigned long flags,
++ unsigned long pgoff)
+ {
+- struct mm_struct * mm = current->mm;
+ struct vm_area_struct * vma, * prev;
+ unsigned int vm_flags;
+ int correct_wcount = 0;
+diff -Naur host/mm/mprotect.c host-ptrace/mm/mprotect.c
+--- host/mm/mprotect.c Fri Aug 9 15:57:31 2002
++++ host-ptrace/mm/mprotect.c Mon Nov 11 17:47:58 2002
+@@ -264,7 +264,8 @@
+ return 0;
+ }
+
+-asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
++long do_mprotect(struct mm_struct *mm, unsigned long start, size_t len,
++ unsigned long prot)
+ {
+ unsigned long nstart, end, tmp;
+ struct vm_area_struct * vma, * next, * prev;
+@@ -281,9 +282,9 @@
+ if (end == start)
+ return 0;
+
+- down_write(&current->mm->mmap_sem);
++ down_write(&mm->mmap_sem);
+
+- vma = find_vma_prev(current->mm, start, &prev);
++ vma = find_vma_prev(mm, start, &prev);
+ error = -ENOMEM;
+ if (!vma || vma->vm_start > start)
+ goto out;
+@@ -332,6 +333,11 @@
+ }
+ out:
+
+- up_write(&current->mm->mmap_sem);
++ up_write(&mm->mmap_sem);
+ return error;
++}
++
++asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
++{
++ return(do_mprotect(current->mm, start, len, prot));
+ }
+diff -Naur host/mm/proc_mm.c host-ptrace/mm/proc_mm.c
+--- host/mm/proc_mm.c Wed Dec 31 19:00:00 1969
++++ host-ptrace/mm/proc_mm.c Mon Nov 11 19:07:52 2002
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
++ * Licensed under the GPL
++ */
++
++#include "linux/proc_fs.h"
++#include "linux/proc_mm.h"
++#include "linux/file.h"
++#include "asm/uaccess.h"
++#include "asm/mmu_context.h"
++
++static struct file_operations proc_mm_fops;
++
++struct mm_struct *proc_mm_get_mm(int fd)
++{
++ struct mm_struct *ret = ERR_PTR(-EBADF);
++ struct file *file;
++
++ file = fget(fd);
++ if (!file)
++ goto out;
++
++ ret = ERR_PTR(-EINVAL);
++ if(file->f_op != &proc_mm_fops)
++ goto out_fput;
++
++ ret = file->private_data;
++
++ out_fput:
++ fput(file);
++ out:
++ return(ret);
++}
++
++extern long do_mmap2(struct mm_struct *mm, unsigned long addr,
++ unsigned long len, unsigned long prot,
++ unsigned long flags, unsigned long fd,
++ unsigned long pgoff);
++
++static ssize_t write_proc_mm(struct file *file, const char *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct mm_struct *mm = file->private_data;
++ struct proc_mm_op req;
++ int n, ret;
++
++ if(count > sizeof(req))
++ return(-EINVAL);
++
++ n = copy_from_user(&req, buffer, count);
++ if(n != 0)
++ return(-EFAULT);
++
++ ret = count;
++ switch(req.op){
++ case MM_MMAP: {
++ struct mm_mmap *map = &req.u.mmap;
++
++ ret = do_mmap2(mm, map->addr, map->len, map->prot,
++ map->flags, map->fd, map->offset >> PAGE_SHIFT);
++ if((ret & ~PAGE_MASK) == 0)
++ ret = count;
++
++ break;
++ }
++ case MM_MUNMAP: {
++ struct mm_munmap *unmap = &req.u.munmap;
++
++ down_write(&mm->mmap_sem);
++ ret = do_munmap(mm, unmap->addr, unmap->len);
++ up_write(&mm->mmap_sem);
++
++ if(ret == 0)
++ ret = count;
++ break;
++ }
++ case MM_MPROTECT: {
++ struct mm_mprotect *protect = &req.u.mprotect;
++
++ ret = do_mprotect(mm, protect->addr, protect->len,
++ protect->prot);
++ if(ret == 0)
++ ret = count;
++ break;
++ }
++
++ case MM_COPY_SEGMENTS: {
++ struct mm_struct *from = proc_mm_get_mm(req.u.copy_segments);
++
++ if(IS_ERR(from)){
++ ret = PTR_ERR(from);
++ break;
++ }
++
++ mm_copy_segments(from, mm);
++ break;
++ }
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ return(ret);
++}
++
++static int open_proc_mm(struct inode *inode, struct file *file)
++{
++ struct mm_struct *mm = mm_alloc();
++ int ret;
++
++ ret = -ENOMEM;
++ if(mm == NULL)
++ goto out_mem;
++
++ ret = init_new_context(current, mm);
++ if(ret)
++ goto out_free;
++
++ spin_lock(&mmlist_lock);
++ list_add(&mm->mmlist, &current->mm->mmlist);
++ mmlist_nr++;
++ spin_unlock(&mmlist_lock);
++
++ file->private_data = mm;
++
++ return(0);
++
++ out_free:
++ mmput(mm);
++ out_mem:
++ return(ret);
++}
++
++static int release_proc_mm(struct inode *inode, struct file *file)
++{
++ struct mm_struct *mm = file->private_data;
++
++ mmput(mm);
++ return(0);
++}
++
++static struct file_operations proc_mm_fops = {
++ .open = open_proc_mm,
++ .release = release_proc_mm,
++ .write = write_proc_mm,
++};
++
++static int make_proc_mm(void)
++{
++ struct proc_dir_entry *ent;
++
++ ent = create_proc_entry("mm", 0222, &proc_root);
++ if(ent == NULL){
++ printk("make_proc_mm : Failed to register /proc/mm\n");
++ return(0);
++ }
++ ent->proc_fops = &proc_mm_fops;
++
++ return(0);
++}
++
++__initcall(make_proc_mm);
++
++/*
++ * Overrides for Emacs so that we follow Linus's tabbing style.
++ * Emacs will notice this stuff at the end of the file and automatically
++ * adjust the settings for this buffer only. This must remain at the end
++ * of the file.
++ * ---------------------------------------------------------------------------
++ * Local variables:
++ * c-file-style: "linux"
++ * End:
++ */