diff -Nru linux-2.5.0.vanilla/Makefile linux-2.5.0.lats/Makefile
--- linux-2.5.0.vanilla/Makefile	Thu Nov 22 22:23:44 2001
+++ linux-2.5.0.lats/Makefile	Tue Nov 27 10:34:53 2001
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = lats
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
diff -Nru linux-2.5.0.vanilla/drivers/char/Makefile linux-2.5.0.lats/drivers/char/Makefile
--- linux-2.5.0.vanilla/drivers/char/Makefile	Sun Nov 11 10:09:32 2001
+++ linux-2.5.0.lats/drivers/char/Makefile	Tue Nov 27 10:34:22 2001
@@ -16,7 +16,7 @@
 
 O_TARGET := char.o
 
-obj-y	 += mem.o tty_io.o n_tty.o tty_ioctl.o raw.o pty.o misc.o random.o
+obj-y	 += mem.o tty_io.o n_tty.o tty_ioctl.o raw.o pty.o misc.o random.o latsched.o
 
 # All of the (potential) objects that export symbols.
 # This list comes from 'grep -l EXPORT_SYMBOL *.[hc]'.
diff -Nru linux-2.5.0.vanilla/drivers/char/latsched.c linux-2.5.0.lats/drivers/char/latsched.c
--- linux-2.5.0.vanilla/drivers/char/latsched.c	Wed Dec 31 16:00:00 1969
+++ linux-2.5.0.lats/drivers/char/latsched.c	Tue Nov 27 10:34:22 2001
@@ -0,0 +1,177 @@
+/*
+ *  linux/kernel/latsched.c
+ *
+ *  Kernel scheduler latency tester
+ *
+ *  Copyright (C) 2001, Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/random.h>
+#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <asm/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+
+#include <linux/latsched.h>
+
+
+
+
+
+#define DEBUG	0
+#ifdef DEBUG
+#define DPRINTK(x)	printk x
+#define DNPRINTK(n,x)	if (n <= DEBUG) printk x
+#else
+#define DPRINTK(x)
+#define DNPRINTK(n,x)
+#endif
+
+
+/* Per-open private data stored in file->private_data; currently an empty placeholder. */
+struct latsched {
+
+};
+
+
+
+static int open_latsched(struct inode *inode, struct file *file);
+static int close_latsched(struct inode *inode, struct file *file);
+static int ioctl_latsched(struct inode *inode, struct file *file,
+		unsigned int cmd, unsigned long arg);
+
+
+static struct file_operations latsched_fops = {
+	ioctl: ioctl_latsched,
+	open: open_latsched,
+	release: close_latsched
+};
+
+static struct miscdevice latsched = {
+	LATSCHED_MINOR, "latsched", &latsched_fops
+};
+
+
+
+
+
+
+
+static int open_latsched(struct inode *inode, struct file *file)
+{
+	struct latsched *ls;
+
+
+	if (!(ls = kmalloc(sizeof(struct latsched), GFP_KERNEL)))
+		return -ENOMEM;
+
+	memset(ls, 0, sizeof(*ls));
+
+
+	file->private_data = ls;
+
+	MOD_INC_USE_COUNT;
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/latsched: open() ls=%p\n", current, ls));
+	return 0;
+}
+
+
+static int close_latsched(struct inode *inode, struct file *file)
+{
+	struct latsched *ls = (struct latsched *) file->private_data;
+
+	kfree(ls);
+
+	MOD_DEC_USE_COUNT;
+
+	DNPRINTK(3, (KERN_INFO "[%p] /dev/latsched: close() ls=%p\n", current, ls));
+	return 0;
+}
+
+
+static int ioctl_latsched(struct inode *inode, struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	int res;
+	struct latsched *ls = (struct latsched *) file->private_data;
+	struct lsctl_getdata lsgd;
+
+	switch (cmd) {
+	case LS_START:
+		res = latsched_start(1);
+
+		DNPRINTK(3, (KERN_INFO "[%p] /dev/latsched: ioctl(%p, LS_START) == %d\n",
+				current, ls, res));
+		return res;
+
+	case LS_STOP:
+		res = latsched_start(0);
+
+		DNPRINTK(3, (KERN_INFO "[%p] /dev/latsched: ioctl(%p, LS_STOP) == %d\n",
+				current, ls, res));
+		return res;
+
+	case LS_FETCH:
+		if ((res = verify_area(VERIFY_WRITE, (void *) arg, sizeof(struct lsctl_getdata))))
+			return res;
+		__copy_from_user(&lsgd, (void *) arg, sizeof(struct lsctl_getdata));
+		if ((res = verify_area(VERIFY_WRITE, (void *) lsgd.data, lsgd.size * sizeof(struct latsched_sample))))
+			return res;
+
+		if (!(res = latsched_getdata(&lsgd)))
+			__copy_to_user((void *) arg, &lsgd, sizeof(struct lsctl_getdata));
+
+		DNPRINTK(3, (KERN_INFO "[%p] /dev/latsched: ioctl(%p, LS_FETCH, %d) == %d\n",
+				current, ls, lsgd.cpu, res));
+		return res;
+
+	case LS_SAMPLES:
+		res = latsched_setsamples((int) arg);
+
+		DNPRINTK(3, (KERN_INFO "[%p] /dev/latsched: ioctl(%p, LS_SAMPLES, %lu) == %d\n",
+				current, ls, arg, res));
+		return res;
+	}
+
+	return -EINVAL;
+}
+
+
+
+
+int __init init_latsched(void)
+{
+	if (misc_register(&latsched))
+		printk(KERN_ERR "/dev/latsched: misc_register() failed\n");
+	else
+		printk(KERN_INFO "[%p] /dev/latsched: driver installed.\n", current);
+
+	return 0;
+}
+
+
+module_init(init_latsched);
+
diff -Nru linux-2.5.0.vanilla/include/linux/latsched.h linux-2.5.0.lats/include/linux/latsched.h
--- linux-2.5.0.vanilla/include/linux/latsched.h	Wed Dec 31 16:00:00 1969
+++ linux-2.5.0.lats/include/linux/latsched.h	Tue Nov 27 10:34:22 2001
@@ -0,0 +1,41 @@
+/*
+ *  linux/include/linux/latsched.h
+ *
+ *  Kernel scheduler latency tester
+ *
+ *  Copyright (C) 2001, Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#ifndef _LINUX_LATSCHED_H
+#define _LINUX_LATSCHED_H
+
+#include <asm/timex.h>
+
+#define LATSCHED_MINOR	117
+#define STD_LATSCHED_SAMPLES	1024
+
+struct latsched_sample {
+	cycles_t lss_in, lss_out;
+	pid_t lss_pid;
+};
+struct latsched_data {
+	struct latsched_sample *ls_data;
+	int ls_size;
+	int ls_curr;
+};
+struct lsctl_getdata {
+	int cpu;
+	int size;
+	struct latsched_sample *data;
+	int rsize;
+};
+
+#define LS_START	_IO('P', 1)
+#define LS_STOP		_IO('P', 2)
+#define LS_FETCH	_IOWR('P', 3, struct lsctl_getdata)
+#define LS_SAMPLES	_IOR('P', 4, int)
+
+
+#endif	/* #ifndef _LINUX_LATSCHED_H */
+
diff -Nru linux-2.5.0.vanilla/include/linux/sched.h linux-2.5.0.lats/include/linux/sched.h
--- linux-2.5.0.vanilla/include/linux/sched.h	Thu Nov 22 11:46:19 2001
+++ linux-2.5.0.lats/include/linux/sched.h	Tue Nov 27 10:34:22 2001
@@ -26,6 +26,7 @@
 #include <linux/signal.h>
 #include <linux/securebits.h>
 #include <linux/fs_struct.h>
+#include <linux/latsched.h>
 
 struct exec_domain;
 
@@ -142,6 +143,10 @@
 extern spinlock_t runqueue_lock;
 extern spinlock_t mmlist_lock;
 
+extern void latsched_init(void);
+extern int latsched_start(int on);
+extern int latsched_setsamples(int nsamps);
+extern int latsched_getdata(struct lsctl_getdata *lsgd);
 extern void sched_init(void);
 extern void init_idle(void);
 extern void show_state(void);
diff -Nru linux-2.5.0.vanilla/init/main.c linux-2.5.0.lats/init/main.c
--- linux-2.5.0.vanilla/init/main.c	Fri Nov  9 14:15:00 2001
+++ linux-2.5.0.lats/init/main.c	Tue Nov 27 10:34:22 2001
@@ -617,6 +617,7 @@
 	 *	make syscalls (and thus be locked).
 	 */
 	smp_init();
+	latsched_init();
 	rest_init();
 }
 
diff -Nru linux-2.5.0.vanilla/kernel/sched.c linux-2.5.0.lats/kernel/sched.c
--- linux-2.5.0.vanilla/kernel/sched.c	Wed Nov 21 16:25:48 2001
+++ linux-2.5.0.lats/kernel/sched.c	Tue Nov 27 10:36:18 2001
@@ -29,6 +29,8 @@
 #include <linux/completion.h>
 #include <linux/prefetch.h>
 #include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/latsched.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -102,12 +104,33 @@
 	struct schedule_data {
 		struct task_struct * curr;
 		cycles_t last_schedule;
+		struct latsched_data ls;
 	} schedule_data;
 	char __pad [SMP_CACHE_BYTES];
 } aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};
 
+#ifdef CONFIG_SMP
+
 #define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr
 #define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule
+#define latsched_data(cpu) aligned_data[(cpu)].schedule_data.ls.ls_data
+#define latsched_samp(cpu, idx) aligned_data[(cpu)].schedule_data.ls.ls_data[(idx)]
+#define latsched_size(cpu) aligned_data[(cpu)].schedule_data.ls.ls_size
+#define latsched_curr(cpu) aligned_data[(cpu)].schedule_data.ls.ls_curr
+
+#else	/* #ifdef CONFIG_SMP */
+
+#define cpu_curr(cpu) aligned_data[0].schedule_data.curr
+#define last_schedule(cpu) aligned_data[0].schedule_data.last_schedule
+#define latsched_data(cpu) aligned_data[0].schedule_data.ls.ls_data
+#define latsched_samp(cpu, idx) aligned_data[0].schedule_data.ls.ls_data[(idx)]
+#define latsched_size(cpu) aligned_data[0].schedule_data.ls.ls_size
+#define latsched_curr(cpu) aligned_data[0].schedule_data.ls.ls_curr
+
+#endif	/* #ifdef CONFIG_SMP */
+
+/* Non-zero while latency sampling is active; tested at each schedule() entry and exit. */
+static atomic_t lss_enabled = ATOMIC_INIT(0);
 
 struct kernel_stat kstat;
 extern struct task_struct *child_reaper;
@@ -531,7 +554,7 @@
  * tasks can run. It can not be killed, and it cannot sleep. The 'state'
  * information in task[0] is never used.
  */
-asmlinkage void schedule(void)
+static inline void __schedule(void)
 {
 	struct schedule_data * sched_data;
 	struct task_struct *prev, *next, *p;
@@ -1327,3 +1350,143 @@
 	atomic_inc(&init_mm.mm_count);
 	enter_lazy_tlb(&init_mm, current, cpu);
 }
+
+/* Called from init/main.c after smp_init() to allocate the default per-CPU sample buffers. */
+void __init latsched_init(void)
+{
+	int ii, size;
+
+	size = STD_LATSCHED_SAMPLES;
+	for (ii = 0; ii < smp_num_cpus; ii++) {
+		if ((latsched_data(ii) = kmalloc(size * sizeof(struct latsched_sample), GFP_KERNEL)))
+			memset(latsched_data(ii), 0, size * sizeof(struct latsched_sample));
+		latsched_size(ii) = size;
+		latsched_curr(ii) = 0;
+	}
+}
+
+/* Thin wrapper around __schedule(): when sampling is on, stamp the cycle counter on entry and exit into the current CPU's sample ring. */
+asmlinkage void schedule(void)
+{
+	int this_cpu;
+	unsigned long flags;
+	cycles_t cycls;
+
+	if (atomic_read(&lss_enabled)) {
+		local_irq_save(flags);
+		this_cpu = current->processor;
+		latsched_samp(this_cpu, latsched_curr(this_cpu)).lss_pid = -1;
+		latsched_samp(this_cpu, latsched_curr(this_cpu)).lss_in = get_cycles();
+		local_irq_restore(flags);
+	}
+
+	__schedule();
+
+	cycls = get_cycles();
+	if (atomic_read(&lss_enabled)) {
+		local_irq_save(flags);
+		this_cpu = current->processor;
+		if (latsched_samp(this_cpu, latsched_curr(this_cpu)).lss_pid == -1) {
+			latsched_samp(this_cpu, latsched_curr(this_cpu)).lss_out = cycls;
+			latsched_samp(this_cpu, latsched_curr(this_cpu)).lss_pid = current->pid;
+			if (++latsched_curr(this_cpu) >= latsched_size(this_cpu))
+				latsched_curr(this_cpu) = 0;
+		}
+		local_irq_restore(flags);
+	}
+}
+
+
+int latsched_start(int on)
+{
+	int res;
+
+	cli();
+	if (on) {
+		if (!atomic_read(&lss_enabled)) {
+			int ii;
+
+			for (ii = 0; ii < smp_num_cpus; ii++) {
+				res = -ENOMEM;
+				if (!latsched_data(ii) &&
+						!(latsched_data(ii) = kmalloc(latsched_size(ii) * sizeof(struct latsched_sample), GFP_ATOMIC)))
+					goto out;
+				memset(latsched_data(ii), 0, latsched_size(ii) * sizeof(struct latsched_sample));
+				latsched_curr(ii) = 0;
+			}
+			atomic_set(&lss_enabled, 1);
+		}
+	} else
+		atomic_set(&lss_enabled, 0);
+	res = 0;
+out:
+	sti();
+	return res;
+}
+
+
+int latsched_setsamples(int nsamps)
+{
+	int ii, res, size = nsamps;
+
+	cli();
+	res = size > 0 ? -EBUSY : -EINVAL;
+	if (size <= 0 || atomic_read(&lss_enabled))
+		goto out;
+	for (ii = 0; ii < smp_num_cpus; ii++) {
+		if (latsched_data(ii))
+			kfree(latsched_data(ii));
+		res = -ENOMEM;
+		if (!(latsched_data(ii) = kmalloc(size * sizeof(struct latsched_sample), GFP_ATOMIC)))
+			goto out;
+		memset(latsched_data(ii), 0, size * sizeof(struct latsched_sample));
+		latsched_size(ii) = size;
+		latsched_curr(ii) = 0;
+	}
+	res = 0;
+out:
+	sti();
+	return res;
+}
+
+/* Copy one CPU's samples to user space: oldest-first if the ring has wrapped, otherwise from slot 0 up to the current index. */
+int latsched_getdata(struct lsctl_getdata *lsgd)
+{
+	int res;
+
+	cli();
+	res = -EBUSY;
+	if (atomic_read(&lss_enabled))
+		goto out;
+	res = -EINVAL;
+	if (lsgd->cpu < 0 || lsgd->cpu >= smp_num_cpus)
+		goto out;
+	if (latsched_samp(lsgd->cpu, latsched_size(lsgd->cpu) - 1).lss_pid != 0) {
+		int size, csize;
+		struct latsched_sample *data = lsgd->data;
+
+		lsgd->rsize = size = latsched_size(lsgd->cpu);
+		if (lsgd->rsize > lsgd->size)
+			lsgd->rsize = size = lsgd->size;
+		csize = latsched_size(lsgd->cpu) - latsched_curr(lsgd->cpu);
+		if (csize > size)
+			csize = size;
+		if (csize)
+			__copy_to_user(data, &latsched_samp(lsgd->cpu, latsched_curr(lsgd->cpu)),
+					csize * sizeof(struct latsched_sample));
+		data += csize;
+		size -= csize;
+		if (size)
+			__copy_to_user(data, &latsched_samp(lsgd->cpu, 0),
+					size * sizeof(struct latsched_sample));
+	} else {
+		lsgd->rsize = latsched_curr(lsgd->cpu);
+		__copy_to_user(lsgd->data, &latsched_samp(lsgd->cpu, 0),
+				lsgd->rsize * sizeof(struct latsched_sample));
+	}
+	res = 0;
+out:
+	sti();
+	return res;
+}
+
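
For reference, here is a minimal user-space sketch (not part of the patch) showing one way the ioctl interface above could be driven. It assumes the device node has been created by hand with "mknod /dev/latsched c 10 117" (char major 10 for misc devices plus LATSCHED_MINOR), it duplicates the structure layouts and ioctl numbers from include/linux/latsched.h rather than including the kernel header, and it assumes cycles_t is an unsigned long long on the target architecture; only CPU 0 is fetched, a real tool would loop over all CPUs.

/*
 * latsched-test.c - rough sketch, not part of the patch.
 * Sizes the sample buffers, samples for a few seconds, then dumps
 * the per-pass scheduler cycle counts recorded for CPU 0.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

typedef unsigned long long cycles_t;	/* assumed width of the kernel's cycles_t */

struct latsched_sample {
	cycles_t lss_in, lss_out;
	pid_t lss_pid;
};
struct lsctl_getdata {
	int cpu;
	int size;
	struct latsched_sample *data;
	int rsize;
};

#define LS_START	_IO('P', 1)
#define LS_STOP		_IO('P', 2)
#define LS_FETCH	_IOWR('P', 3, struct lsctl_getdata)
#define LS_SAMPLES	_IOR('P', 4, int)

#define NSAMPLES	1024

int main(void)
{
	int fd, i;
	struct lsctl_getdata gd;

	if ((fd = open("/dev/latsched", O_RDONLY)) == -1) {
		perror("open(/dev/latsched)");
		return 1;
	}
	/* size the per-CPU rings, then sample for a few seconds */
	if (ioctl(fd, LS_SAMPLES, NSAMPLES) || ioctl(fd, LS_START, 0)) {
		perror("ioctl(LS_SAMPLES/LS_START)");
		return 1;
	}
	sleep(5);
	ioctl(fd, LS_STOP, 0);

	gd.cpu = 0;
	gd.size = NSAMPLES;
	gd.data = malloc(NSAMPLES * sizeof(struct latsched_sample));
	if (!gd.data || ioctl(fd, LS_FETCH, &gd)) {
		perror("ioctl(LS_FETCH)");
		return 1;
	}
	for (i = 0; i < gd.rsize; i++)
		printf("pid=%d cycles=%llu\n", (int) gd.data[i].lss_pid,
		       (unsigned long long) (gd.data[i].lss_out - gd.data[i].lss_in));

	close(fd);
	return 0;
}

Each printed value is the number of CPU cycles one pass through __schedule() took on that CPU; dividing by the CPU clock rate turns it into a latency in seconds.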
