diff -urN linux-2.4.19-pre10-ac2/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
--- linux-2.4.19-pre10-ac2/arch/i386/kernel/entry.S	Thu Jun  6 11:15:44 2002
+++ linux/arch/i386/kernel/entry.S	Tue Jun 18 18:21:39 2002
@@ -639,8 +639,8 @@
  	.long SYMBOL_NAME(sys_tkill)
 	.long SYMBOL_NAME(sys_ni_syscall)	/* reserved for sendfile64 */
 	.long SYMBOL_NAME(sys_ni_syscall)	/* 240 reserved for futex */
-	.long SYMBOL_NAME(sys_ni_syscall)	/* reserved for sched_setaffinity */
-	.long SYMBOL_NAME(sys_ni_syscall)	/* reserved for sched_getaffinity */
+	.long SYMBOL_NAME(sys_sched_setaffinity)
+	.long SYMBOL_NAME(sys_sched_getaffinity)
 
 	.rept NR_syscalls-(.-sys_call_table)/4
 		.long SYMBOL_NAME(sys_ni_syscall)
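
With this change the new entries occupy slots 241 and 242 on i386, the two
lines immediately after the futex reservation at 240.  Until libc grows
wrappers, a patched kernel can be smoke-tested by trapping in through
syscall(2) with the raw numbers; the program below is an illustrative
sketch, and the slot numbers are i386-specific:

	/* pin ourselves to CPU 0, then read the mask back */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_sched_setaffinity
	#define __NR_sched_setaffinity	241	/* i386 slots, per the table above */
	#define __NR_sched_getaffinity	242
	#endif

	int main(void)
	{
		unsigned long mask = 1UL;	/* bit 0 = CPU 0 */

		if (syscall(__NR_sched_setaffinity, getpid(),
			    sizeof(mask), &mask) < 0) {
			perror("sched_setaffinity");
			return 1;
		}
		mask = 0;
		if (syscall(__NR_sched_getaffinity, getpid(),
			    sizeof(mask), &mask) < 0) {
			perror("sched_getaffinity");
			return 1;
		}
		printf("running on mask 0x%08lx\n", mask);
		return 0;
	}
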
diff -urN linux-2.4.19-pre10-ac2/arch/ppc/kernel/misc.S linux/arch/ppc/kernel/misc.S
--- linux-2.4.19-pre10-ac2/arch/ppc/kernel/misc.S	Thu Jun  6 08:55:09 2002
+++ linux/arch/ppc/kernel/misc.S	Tue Jun 18 18:21:39 2002
@@ -1162,6 +1162,22 @@
 	.long sys_mincore
 	.long sys_gettid
 	.long sys_tkill
+	.long sys_ni_syscall	/* 209: pad so the new calls land at 222/223 */
+	.long sys_ni_syscall	/* 210 */
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall	/* 215 */
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall	/* 220 */
+	.long sys_ni_syscall
+	.long sys_sched_setaffinity
+	.long sys_sched_getaffinity
+
 	.rept NR_syscalls-(.-sys_call_table)/4
 		.long sys_ni_syscall
 	.endr
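
The thirteen sys_ni_syscall pads deserve a word: each table entry is a
4-byte pointer, so an entry's syscall number is simply its byte offset into
sys_call_table divided by 4.  With sys_tkill at 208, the pads fill slots
209 through 221 so that the two new pointers land at 222 and 223, matching
the __NR_ values added to <asm-ppc/unistd.h> below; the .rept directive
then fills whatever remains up to NR_syscalls.  The arithmetic, spelled out
as an illustrative C sketch:

	/* slot arithmetic behind the pad count above (illustrative) */
	#define TKILL_SLOT	208
	#define PAD_ENTRIES	13				/* slots 209 .. 221 */
	#define SETAFF_SLOT	(TKILL_SLOT + PAD_ENTRIES + 1)	/* == 222 */
	#define GETAFF_SLOT	(SETAFF_SLOT + 1)		/* == 223 */
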
diff -urN linux-2.4.19-pre10-ac2/include/asm-ppc/unistd.h linux/include/asm-ppc/unistd.h
--- linux-2.4.19-pre10-ac2/include/asm-ppc/unistd.h	Thu Jun  6 08:54:12 2002
+++ linux/include/asm-ppc/unistd.h	Tue Jun 18 18:21:39 2002
@@ -216,6 +216,8 @@
 #define __NR_mincore		206
 #define __NR_gettid		207
 #define __NR_tkill		208
+#define __NR_sched_setaffinity	222
+#define __NR_sched_getaffinity	223
 
 #define __NR(n)	#n
 
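
Once the __NR_ numbers are visible in the patched headers, user space can
generate wrappers with the 2.4-era _syscall3() macro instead of hardcoding
slot numbers; the same source then works on both i386 and ppc, provided the
compiler sees the patched <asm/unistd.h>.  An illustrative sketch:

	/* _syscall3() expands to a stub that traps into the kernel using
	 * the __NR_sched_*affinity numbers from the patched headers */
	#include <errno.h>
	#include <sys/types.h>
	#include <asm/unistd.h>

	_syscall3(int, sched_setaffinity, pid_t, pid,
		  unsigned int, len, unsigned long *, user_mask_ptr)
	_syscall3(int, sched_getaffinity, pid_t, pid,
		  unsigned int, len, unsigned long *, user_mask_ptr)
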
diff -urN linux-2.4.19-pre10-ac2/include/linux/capability.h linux/include/linux/capability.h
--- linux-2.4.19-pre10-ac2/include/linux/capability.h	Thu Jun  6 08:54:06 2002
+++ linux/include/linux/capability.h	Tue Jun 18 18:21:39 2002
@@ -243,6 +243,7 @@
 /* Allow use of FIFO and round-robin (realtime) scheduling on own
    processes and setting the scheduling algorithm used by another
    process. */
+/* Allow setting cpu affinity on other processes */
 
 #define CAP_SYS_NICE         23
 
diff -urN linux-2.4.19-pre10-ac2/kernel/sched.c linux/kernel/sched.c
--- linux-2.4.19-pre10-ac2/kernel/sched.c	Thu Jun  6 11:16:04 2002
+++ linux/kernel/sched.c	Tue Jun 18 18:22:35 2002
@@ -1177,6 +1177,97 @@
 	return retval;
 }
 
+/**
+ * sys_sched_setaffinity - set the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to the new cpu mask
+ */
+asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len,
+				     unsigned long *user_mask_ptr)
+{
+	unsigned long new_mask;
+	task_t *p;
+	int retval;
+
+	if (len < sizeof(new_mask))
+		return -EINVAL;
+
+	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+		return -EFAULT;
+
+	new_mask &= cpu_online_map;
+	if (!new_mask)
+		return -EINVAL;
+
+	/*
+	 * We cannot hold a lock across a call to set_cpus_allowed, but we
+	 * must ensure the task does not slip out from under us.  Since we
+	 * only need its task_struct to stay around, we can pin it here
+	 * and drop the usage count when we are done.
+	 */
+	read_lock(&tasklist_lock);
+
+	p = find_process_by_pid(pid);
+	if (!p) {
+		read_unlock(&tasklist_lock);
+		return -ESRCH;
+	}
+
+	get_task_struct(p);
+	read_unlock(&tasklist_lock);
+
+	retval = -EPERM;
+	if ((current->euid != p->euid) && (current->euid != p->uid) &&
+			!capable(CAP_SYS_NICE))
+		goto out_free;
+
+	retval = 0;
+	set_cpus_allowed(p, new_mask);
+
+out_free:
+	free_task_struct(p);
+	return retval;
+}
+
+/**
+ * sys_sched_getaffinity - get the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ */
+asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len,
+				     unsigned long *user_mask_ptr)
+{
+	unsigned long mask;
+	unsigned int real_len;
+	task_t *p;
+	int retval;
+
+	real_len = sizeof(mask);
+
+	if (len < real_len)
+		return -EINVAL;
+
+	read_lock(&tasklist_lock);
+
+	retval = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+
+	retval = 0;
+	mask = p->cpus_allowed & cpu_online_map;
+
+out_unlock:
+	read_unlock(&tasklist_lock);
+	if (retval)
+		return retval;
+	if (copy_to_user(user_mask_ptr, &mask, real_len))
+		return -EFAULT;
+	return real_len;
+}
+
 asmlinkage long sys_sched_yield(void)
 {
 	runqueue_t *rq;

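
A final note on the API as implemented above: on success,
sys_sched_getaffinity returns real_len, the size in bytes of the kernel's
mask word, rather than 0, and the mask it reports is clipped to
cpu_online_map.  Callers should therefore treat any positive return as
success.  An illustrative sketch (pid 0 resolves to the calling process,
since the 2.4 find_process_by_pid() helper returns current for pid 0):

	/* a positive return is the number of bytes the kernel copied out */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_sched_getaffinity
	#define __NR_sched_getaffinity	242	/* i386 slot */
	#endif

	int main(void)
	{
		unsigned long mask = 0;
		long ret = syscall(__NR_sched_getaffinity, 0 /* current */,
				   sizeof(mask), &mask);

		if (ret > 0) {	/* success: ret == sizeof(unsigned long) */
			printf("%ld-byte mask, online cpus 0x%lx\n", ret, mask);
			return 0;
		}
		perror("sched_getaffinity");
		return 1;
	}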