diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/drivers/block/DAC960.c linux/drivers/block/DAC960.c
--- /opt/kernel/linux-2.3.99-pre7-6/drivers/block/DAC960.c	Sun May  7 02:33:44 2000
+++ linux/drivers/block/DAC960.c	Sat May  6 00:00:46 2000
@@ -1363,9 +1363,8 @@
   Command->SegmentCount = Request->nr_segments;
   Command->BufferHeader = Request->bh;
   RequestBuffer = Request->buffer;
-  Request->rq_status = RQ_INACTIVE;
   blkdev_dequeue_request(Request);
-  wake_up(&wait_for_request);
+  blkdev_release_request(Request);
   if (Command->SegmentCount == 1)
     {
       DAC960_CommandMailbox_T *CommandMailbox = &Command->CommandMailbox;
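
The DAC960 hunk above is the driver-side idiom this whole patch converts to:
instead of poking rq_status and waking the one global wait queue by hand,
finished requests go back through blkdev_release_request(). A minimal sketch
of the pattern (driver_complete_rq() is a hypothetical wrapper, not something
in the tree):

	/* sketch only -- driver_complete_rq() is made up for illustration */
	static void driver_complete_rq(struct request *req)
	{
		blkdev_dequeue_request(req);	/* undo elevator accounting, unlink */
		blkdev_release_request(req);	/* mark RQ_INACTIVE, return to the
						 * queue freelist, wake one sleeper */
	}

The same substitution shows up again in ide.c, scsi_lib.c and the
request-merge path in ll_rw_blk.c below.
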
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/drivers/block/elevator.c linux/drivers/block/elevator.c
--- /opt/kernel/linux-2.3.99-pre7-6/drivers/block/elevator.c	Mon Mar 13 04:32:57 2000
+++ linux/drivers/block/elevator.c	Wed May  3 03:42:37 2000
@@ -4,6 +4,16 @@
  *  Block device elevator/IO-scheduler.
  *
  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ *
+ * 30042000 Jens Axboe <axboe@suse.de> :
+ *
+ * Split the elevator a bit so that it is possible to choose a different
+ * one, or even write a new "plug-in". There are three pieces:
+ * - elevator_fn, inserts a new request in the queue list
+ * - elevator_merge_fn, decides whether a new buffer can be merged with
+ *   an existing request
+ * - elevator_dequeue_fn, called when a request is taken off the active list
+ *
  */
 
 #include <linux/fs.h>
@@ -12,9 +22,9 @@
 #include <linux/blk.h>
 #include <asm/uaccess.h>
 
-static void elevator_default(struct request * req, elevator_t * elevator,
-			     struct list_head * real_head,
-			     struct list_head * head, int orig_latency)
+void elevator_default(struct request *req, elevator_t * elevator,
+		      struct list_head * real_head,
+		      struct list_head * head, int orig_latency)
 {
 	struct list_head * entry = real_head, * point = NULL;
 	struct request * tmp;
@@ -22,6 +32,12 @@
 	int latency = orig_latency -= elevator->nr_segments, pass = 0;
 	int point_latency = 0xbeefbeef;
 
+	if (list_empty(real_head)) {
+		req->elevator_sequence = elevator_sequence(elevator, orig_latency);
+		list_add(&req->queue, real_head);
+		return;
+	}
+
 	while ((entry = entry->prev) != head) {
 		if (!point && latency >= 0) {
 			point = entry;
@@ -49,19 +65,130 @@
 	req->elevator_sequence = elevator_sequence(elevator, latency);
 }
 
+int elevator_default_merge(request_queue_t *q, struct request **req,
+			   struct buffer_head *bh, int rw,
+			   int *max_sectors, int *max_segments)
+{
+	struct list_head *entry, *head = &q->queue_head;
+	unsigned int count = bh->b_size >> 9;
+	elevator_t *elevator = &q->elevator;
+	int orig_latency, latency, sequence, action, starving = 0;
+
+	/*
+	 * Avoid write-bombs, so as not to hurt the interactiveness of reads
+	 */
+	if (rw == WRITE)
+		*max_segments = elevator->max_bomb_segments;
+
+	latency = orig_latency = elevator_request_latency(elevator, rw);
+	sequence = elevator->sequence;
+
+	if (q->head_active && !q->plugged)
+		head = head->next;
+
+	entry = head;
+	while ((entry = entry->prev) != head && !starving) {
+		*req = blkdev_entry_to_request(entry);
+		latency += (*req)->nr_segments;
+		if (elevator_sequence_before((*req)->elevator_sequence, sequence))
+			starving = 1;
+		if (latency < 0)
+			continue;
+		if ((*req)->sem)
+			continue;
+		if ((*req)->cmd != rw)
+			continue;
+		if ((*req)->nr_sectors + count > *max_sectors)
+			continue;
+		if ((*req)->rq_dev != bh->b_rdev)
+			continue;
+		if ((*req)->sector + (*req)->nr_sectors == bh->b_rsector) {
+			if (latency - (*req)->nr_segments < 0)
+				break;
+			action = ELEVATOR_BACK_MERGE;
+		} else if ((*req)->sector - count == bh->b_rsector) {
+			if (starving)
+				break;
+			action = ELEVATOR_FRONT_MERGE;
+		} else {
+			continue;
+		}
+		q->elevator.sequence++;
+		return action;
+	}
+	return ELEVATOR_NO_MERGE;
+}
+
+inline void elevator_default_dequeue(struct request *req)
+{
+	if (req->cmd == READ)
+		req->e->read_pendings--;
+
+	req->e->nr_segments -= req->nr_segments;
+}
+
+/*
+ * No request sorting, just add it to the back of the list
+ */
+void elevator_noop(struct request *req, elevator_t *elevator,
+		   struct list_head *real_head, struct list_head *head,
+		   int orig_latency)
+{
+	list_add_tail(&req->queue, real_head);
+}
+
+/*
+ * See if we can find a request whose buffer this one can be coalesced with.
+ */
+int elevator_noop_merge(request_queue_t *q, struct request **req,
+			struct buffer_head *bh, int rw,
+			int *max_sectors, int *max_segments)
+{
+	struct list_head *entry, *head = &q->queue_head;
+	unsigned int count = bh->b_size >> 9;
+
+	if (q->head_active && !q->plugged)
+		head = head->next;
+
+	entry = head;
+	while ((entry = entry->prev) != head) {
+		*req = blkdev_entry_to_request(entry);
+		if ((*req)->sem)
+			continue;
+		if ((*req)->cmd != rw)
+			continue;
+		if ((*req)->nr_sectors + count > *max_sectors)
+			continue;
+		if ((*req)->rq_dev != bh->b_rdev)
+			continue;
+		if ((*req)->sector + (*req)->nr_sectors == bh->b_rsector)
+			return ELEVATOR_BACK_MERGE;
+		if ((*req)->sector - count == bh->b_rsector)
+			return ELEVATOR_FRONT_MERGE;
+	}
+	return ELEVATOR_NO_MERGE;
+}
+
+/*
+ * The noop "elevator" does not do any accounting
+ */
+void elevator_noop_dequeue(struct request *req) {}
+
 #ifdef ELEVATOR_DEBUG
-void elevator_debug(request_queue_t * q, kdev_t dev)
+void elevator_default_debug(request_queue_t * q, kdev_t dev)
 {
 	int read_pendings = 0, nr_segments = 0;
 	elevator_t * elevator = &q->elevator;
 	struct list_head * entry = &q->queue_head;
 	static int counter;
 
+	if (elevator->elevator_fn != elevator_default)
+		return;
+
 	if (counter++ % 100)
 		return;
 
-	while ((entry = entry->prev) != &q->queue_head)
-	{
+	while ((entry = entry->prev) != &q->queue_head) {
 		struct request * req;
 
 		req = blkdev_entry_to_request(entry);
@@ -81,16 +208,14 @@
 		nr_segments += req->nr_segments;
 	}
 
-	if (read_pendings != elevator->read_pendings)
-	{
+	if (read_pendings != elevator->read_pendings) {
 		printk(KERN_WARNING
 		       "%s: elevator read_pendings %d should be %d\n",
 		       kdevname(dev), elevator->read_pendings,
 		       read_pendings);
 		elevator->read_pendings = read_pendings;
 	}
-	if (nr_segments != elevator->nr_segments)
-	{
+	if (nr_segments != elevator->nr_segments) {
 		printk(KERN_WARNING
 		       "%s: elevator nr_segments %d should be %d\n",
 		       kdevname(dev), elevator->nr_segments,
@@ -102,49 +227,42 @@
 
 int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
 {
-	int ret;
 	blkelv_ioctl_arg_t output;
 
 	output.queue_ID			= elevator;
 	output.read_latency		= elevator->read_latency;
 	output.write_latency		= elevator->write_latency;
 	output.max_bomb_segments	= elevator->max_bomb_segments;
+	strcpy(output.elevator_name, elevator->elevator_name);
 
-	ret = -EFAULT;
 	if (copy_to_user(arg, &output, sizeof(blkelv_ioctl_arg_t)))
-		goto out;
-	ret = 0;
- out:
-	return ret;
+		return -EFAULT;
+
+	return 0;
 }
 
 int blkelvset_ioctl(elevator_t * elevator, const blkelv_ioctl_arg_t * arg)
 {
 	blkelv_ioctl_arg_t input;
-	int ret;
 
-	ret = -EFAULT;
 	if (copy_from_user(&input, arg, sizeof(blkelv_ioctl_arg_t)))
-		goto out;
+		return -EFAULT;
 
-	ret = -EINVAL;
 	if (input.read_latency < 0)
-		goto out;
+		return -EINVAL;
 	if (input.write_latency < 0)
-		goto out;
+		return -EINVAL;
 	if (input.max_bomb_segments <= 0)
-		goto out;
+		return -EINVAL;
 
 	elevator->read_latency		= input.read_latency;
 	elevator->write_latency		= input.write_latency;
 	elevator->max_bomb_segments	= input.max_bomb_segments;
 
-	ret = 0;
- out:
-	return ret;
+	return 0;
 }
 
-void elevator_init(elevator_t * elevator)
+void elevator_init(elevator_t * elevator, elevator_t type)
 {
-	*elevator = ELEVATOR_DEFAULTS;
+	*elevator = type;
 }
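
Since BLKELVGET now also reports the elevator name, a user-space query makes
a handy smoke test. A self-contained sketch (the struct layout and ioctl
number are copied from this patch's elevator.h; /dev/hda is just an example
device):

	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>

	#define BLKELVGET _IO(0x12,106)	/* as in linux/elevator.h */

	typedef struct {
		void *queue_ID;
		int read_latency, write_latency, max_bomb_segments;
		char elevator_name[16];
	} blkelv_ioctl_arg_t;

	int main(void)
	{
		blkelv_ioctl_arg_t arg;
		int fd = open("/dev/hda", O_RDONLY);

		if (fd < 0 || ioctl(fd, BLKELVGET, &arg) < 0) {
			perror("BLKELVGET");
			return 1;
		}
		printf("%s: read_latency %d write_latency %d bomb %d\n",
		       arg.elevator_name, arg.read_latency,
		       arg.write_latency, arg.max_bomb_segments);
		return 0;
	}

Note the ABI change: elevator_name[16] grows blkelv_ioctl_arg_t, so old
binaries pass BLKELVGET a buffer that is now too small.
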
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c
--- /opt/kernel/linux-2.3.99-pre7-6/drivers/block/ll_rw_blk.c	Thu May  4 15:42:31 2000
+++ linux/drivers/block/ll_rw_blk.c	Sat May  6 18:12:24 2000
@@ -4,6 +4,7 @@
  * Copyright (C) 1991, 1992 Linus Torvalds
  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
+ * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
  */
 
 /*
@@ -37,14 +38,16 @@
 #endif
 
 /*
- * The request-struct contains all necessary data
- * to load a nr of sectors into memory
+ * For the allocated request tables
  */
-static struct request all_requests[NR_REQUEST];
+static kmem_cache_t *request_cachep;
 
 /*
  * The "disk" task queue is used to start the actual requests
- * after a plug
+ * after a plug.
+ *
+ * Nowadays it is mainly used when memory pressure gets too high; when
+ * we can, we run the individual queues instead.
  */
 DECLARE_TASK_QUEUE(tq_disk);
 
@@ -62,11 +65,6 @@
  */
 spinlock_t io_request_lock = SPIN_LOCK_UNLOCKED;
 
-/*
- * used to wait on when there are no free requests
- */
-DECLARE_WAIT_QUEUE_HEAD(wait_for_request);
-
 /* This specifies how many sectors to read ahead on the disk. */
 
 int read_ahead[MAX_BLKDEV];
@@ -148,8 +146,22 @@
 	return ret;
 }
 
+/*
+ * Hopefully the low-level driver has finished any outstanding
+ * requests on this queue first...
+ */
 void blk_cleanup_queue(request_queue_t * q)
 {
+	struct request *rq;
+	int i = 0;
+
+	/*
+	 * unlink each request before freeing it, so we never step on
+	 * memory that has already gone back to the slab
+	 */
+	while (!list_empty(&q->request_freelist)) {
+		rq = list_entry(q->request_freelist.next, struct request, table);
+		list_del(&rq->table);
+		kmem_cache_free(request_cachep, rq);
+		i++;
+	}
 	memset(q, 0, sizeof(*q));
 }
 
@@ -237,10 +249,33 @@
 	queue_task(&q->plug_tq, &tq_disk);
 }
 
+static void blk_init_free_list(request_queue_t *q)
+{
+	struct request *rq;
+	int i;
+
+	/*
+	 * All requests for a queue live on a single freelist; writes may
+	 * hold at most QUEUE_WRITES_MAX of them at once, so reads always
+	 * find a free request (the old global table instead reserved the
+	 * top 1/3 of entries for reads).
+	 */
+	for (i = 0; i < QUEUE_NR_REQUESTS; i++) {
+		rq = kmem_cache_alloc(request_cachep, SLAB_KERNEL);
+		rq->rq_status = RQ_INACTIVE;
+		list_add(&rq->table, &q->request_freelist);
+	}
+
+	q->queue_requests = 0;
+	init_waitqueue_head(&q->wait_for_request);
+	spin_lock_init(&q->request_lock);
+}
+
 void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
 {
 	INIT_LIST_HEAD(&q->queue_head);
-	elevator_init(&q->elevator);
+	INIT_LIST_HEAD(&q->request_freelist);
+	elevator_init(&q->elevator, ELEVATOR_DEFAULT);
+	blk_init_free_list(q);
 	q->request_fn     	= rfn;
 	q->back_merge_fn       	= ll_back_merge_fn;
 	q->front_merge_fn      	= ll_front_merge_fn;
@@ -268,84 +303,73 @@
 	request_queue_t * q = (request_queue_t *) data;
 	unsigned long flags;
 
-	spin_lock_irqsave(&io_request_lock,flags);
+	spin_lock_irqsave(&io_request_lock, flags);
 	if (q->plugged) {
 		q->plugged = 0;
 		if (!list_empty(&q->queue_head))
 			(q->request_fn)(q);
 	}
-	spin_unlock_irqrestore(&io_request_lock,flags);
+	spin_unlock_irqrestore(&io_request_lock, flags);
 }
 
+#define blkdev_free_rq(list) list_entry((list)->next, struct request, table)
 /*
- * look for a free request in the first N entries.
- * NOTE: interrupts must be disabled on the way in (on SMP the request queue
- * spinlock has to be aquired), and will still be disabled on the way out.
+ * Get a free request. io_request_lock must be held and interrupts
+ * disabled on the way in.
  */
-static inline struct request * get_request(int n, kdev_t dev)
+static inline struct request *get_request(request_queue_t *q, int rw)
 {
-	static struct request *prev_found = NULL, *prev_limit = NULL;
-	register struct request *req, *limit;
-
-	if (n <= 0)
-		panic("get_request(%d): impossible!\n", n);
+	struct request *rq;
 
-	limit = all_requests + n;
-	if (limit != prev_limit) {
-		prev_limit = limit;
-		prev_found = all_requests;
-	}
-	req = prev_found;
-	for (;;) {
-		req = ((req > all_requests) ? req : limit) - 1;
-		if (req->rq_status == RQ_INACTIVE)
-			break;
-		if (req == prev_found)
-			return NULL;
-	}
-	prev_found = req;
-	req->rq_status = RQ_ACTIVE;
-	req->rq_dev = dev;
-	req->special = NULL;
-	return req;
+	if (list_empty(&q->request_freelist))
+		return NULL;
+
+	if ((q->queue_requests > QUEUE_WRITES_MAX) && (rw == WRITE))
+		return NULL;
+
+	rq = blkdev_free_rq(&q->request_freelist);
+	list_del(&rq->table);
+	rq->rq_status = RQ_ACTIVE;
+	rq->special = NULL;
+	rq->q = q;
+	q->queue_requests++;
+	return rq;
 }
 
 /*
- * wait until a free request in the first N entries is available.
+ * No requests free on this queue: unplug the device and wait for one.
  */
-static struct request * __get_request_wait(int n, kdev_t dev)
+static struct request *__get_request_wait(request_queue_t *q, int rw)
 {
-	register struct request *req;
+	register struct request *rq;
 	DECLARE_WAITQUEUE(wait, current);
-	unsigned long flags;
 
-	add_wait_queue_exclusive(&wait_for_request, &wait);
+	add_wait_queue_exclusive(&q->wait_for_request, &wait);
 	for (;;) {
-		__set_current_state(TASK_UNINTERRUPTIBLE|TASK_EXCLUSIVE);
-		spin_lock_irqsave(&io_request_lock,flags);
-		req = get_request(n, dev);
-		spin_unlock_irqrestore(&io_request_lock,flags);
-		if (req)
+		__set_current_state(TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+		spin_lock_irq(&io_request_lock);
+		rq = get_request(q, rw);
+		spin_unlock_irq(&io_request_lock);
+		if (rq)
 			break;
-		run_task_queue(&tq_disk);
+		generic_unplug_device(q);
 		schedule();
 	}
-	remove_wait_queue(&wait_for_request, &wait);
+	remove_wait_queue(&q->wait_for_request, &wait);
 	current->state = TASK_RUNNING;
-	return req;
+	return rq;
 }
 
-static inline struct request * get_request_wait(int n, kdev_t dev)
+static inline struct request *get_request_wait(request_queue_t *q, int rw)
 {
-	register struct request *req;
-	unsigned long flags;
+	register struct request *rq;
 
-	spin_lock_irqsave(&io_request_lock,flags);
-	req = get_request(n, dev);
-	spin_unlock_irqrestore(&io_request_lock,flags);
-	if (req)
-		return req;
-	return __get_request_wait(n, dev);
+	spin_lock_irq(&io_request_lock);
+	rq = get_request(q, rw);
+	spin_unlock_irq(&io_request_lock);
+	if (rq)
+		return rq;
+	return __get_request_wait(q, rw);
 }
 
 /* RO fail safe mechanism */
@@ -422,35 +446,41 @@
  */
 
 static inline void add_request(request_queue_t * q, struct request * req,
-			       struct list_head * head, int latency)
+			       struct list_head *head, int lat)
 {
 	int major;
 
 	drive_stat_acct(req, req->nr_sectors, 1);
-
-	if (list_empty(head)) {
-		req->elevator_sequence = elevator_sequence(&q->elevator, latency);
-		list_add(&req->queue, &q->queue_head);
-		return;
-	}
-	q->elevator.elevator_fn(req, &q->elevator, &q->queue_head, head, latency);
-
 	/*
+	 * let selected elevator insert the request
+	 */
+	q->elevator.elevator_fn(req, &q->elevator, &q->queue_head, head, lat);
+
+	/*
 	 * FIXME(eric) I don't understand why there is a need for this
 	 * special case code.  It clearly doesn't fit any more with
 	 * the new queueing architecture, and it got added in 2.3.10.
 	 * I am leaving this in here until I hear back from the COMPAQ
 	 * people.
-	 */
+	 */
 	major = MAJOR(req->rq_dev);
 	if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7)
-	{
 		(q->request_fn)(q);
-	}
-
 	if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7)
-	{
 		(q->request_fn)(q);
+}
+
+inline void blkdev_release_request(struct request *req)
+{
+	req->rq_status = RQ_INACTIVE;
+
+	/*
+	 * Request may not have originated from ll_rw_blk
+	 */
+	if (req->q) {
+		list_add(&req->table, &req->q->request_freelist);
+		req->q->queue_requests--;
+		wake_up(&req->q->wait_for_request);
 	}
 }
 
@@ -478,13 +508,12 @@
 	if(!(q->merge_requests_fn)(q, req, next, max_segments))
 		return;
 
-	elevator_merge_requests(&q->elevator, req, next);
+	elevator_merge_requests(req, next);
 	req->bhtail->b_reqnext = next->bh;
 	req->bhtail = next->bhtail;
 	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
-	next->rq_status = RQ_INACTIVE;
 	list_del(&next->queue);
-	wake_up (&wait_for_request);
+	blkdev_release_request(next);
 }
 
 static inline void attempt_back_merge(request_queue_t * q,
@@ -512,18 +541,16 @@
 }
 
 static inline void __make_request(request_queue_t * q, int rw,
-			   struct buffer_head * bh)
+				  struct buffer_head * bh)
 {
 	int major = MAJOR(bh->b_rdev);
 	unsigned int sector, count;
 	int max_segments = MAX_SEGMENTS;
-	struct request * req;
-	int rw_ahead, max_req, max_sectors;
-	unsigned long flags;
-
-	int orig_latency, latency, starving, sequence;
-	struct list_head * entry, * head = &q->queue_head;
-	elevator_t * elevator;
+	struct request * req = NULL;
+	int rw_ahead, max_sectors, el_ret;
+	struct list_head *head = &q->queue_head;
+	int latency;
+	elevator_t *elevator = &q->elevator;
 
 	count = bh->b_size >> 9;
 	sector = bh->b_rsector;
@@ -557,7 +584,6 @@
 			if (buffer_uptodate(bh)) /* Hmmph! Already have it */
 				goto end_io;
 			kstat.pgpgin++;
-			max_req = NR_REQUEST;	/* reads take precedence */
 			break;
 		case WRITERAW:
 			rw = WRITE;
@@ -574,7 +600,6 @@
 			 * requests are only for reads.
 			 */
 			kstat.pgpgout++;
-			max_req = (NR_REQUEST * 2) / 3;
 			break;
 		default:
 			BUG();
@@ -599,153 +624,80 @@
 
 /* look for a free request. */
 	/*
-	 * Loop uses two requests, 1 for loop and 1 for the real device.
-	 * Cut max_req in half to avoid running out and deadlocking.
-	 */
-	 if ((major == LOOP_MAJOR) || (major == NBD_MAJOR))
-		max_req >>= 1;
-
-	/*
 	 * Try to coalesce the new request with old requests
 	 */
 	max_sectors = get_max_sectors(bh->b_rdev);
 
-	elevator = &q->elevator;
-	orig_latency = elevator_request_latency(elevator, rw);
+	latency = elevator_request_latency(elevator, rw);
 
 	/*
 	 * Now we acquire the request spinlock, we have to be mega careful
 	 * not to schedule or do something nonatomic
 	 */
-	spin_lock_irqsave(&io_request_lock,flags);
-	elevator_debug(q, bh->b_rdev);
+	spin_lock_irq(&io_request_lock);
+	elevator_default_debug(q, bh->b_rdev);
 
 	if (list_empty(head)) {
 		q->plug_device_fn(q, bh->b_rdev); /* is atomic */
 		goto get_rq;
 	}
 
-	/* avoid write-bombs to not hurt iteractiveness of reads */
-	if (rw != READ && elevator->read_pendings)
-		max_segments = elevator->max_bomb_segments;
-
-	sequence = elevator->sequence;
-	latency = orig_latency - elevator->nr_segments;
-	starving = 0;
-	entry = head;
+	el_ret = elevator->elevator_merge_fn(q, &req, bh, rw, &max_sectors, &max_segments);
+	switch (el_ret) {
 
-	/*
-	 * The scsi disk and cdrom drivers completely remove the request
-	 * from the queue when they start processing an entry.  For this
-	 * reason it is safe to continue to add links to the top entry
-	 * for those devices.
-	 *
-	 * All other drivers need to jump over the first entry, as that
-	 * entry may be busy being processed and we thus can't change
-	 * it.
-	 */
-	if (q->head_active && !q->plugged)
-		head = head->next;
-
-	while ((entry = entry->prev) != head && !starving) {
-		req = blkdev_entry_to_request(entry);
-		if (!req->q)
-			break;
-		latency += req->nr_segments;
-		if (elevator_sequence_before(req->elevator_sequence, sequence))
-			starving = 1;
-		if (latency < 0)
-			continue;
-
-		if (req->sem)
-			continue;
-		if (req->cmd != rw)
-			continue;
-		if (req->nr_sectors + count > max_sectors)
-			continue;
-		if (req->rq_dev != bh->b_rdev)
-			continue;
-		/* Can we add it to the end of this request? */
-		if (req->sector + req->nr_sectors == sector) {
-			if (latency - req->nr_segments < 0)
-				break;
-			/*
-			 * The merge_fn is a more advanced way
-			 * of accomplishing the same task.  Instead
-			 * of applying a fixed limit of some sort
-			 * we instead define a function which can
-			 * determine whether or not it is safe to
-			 * merge the request or not.
-			 *
-			 * See if this queue has rules that
-			 * may suggest that we shouldn't merge
-			 * this 
-			 */
-			if(!(q->back_merge_fn)(q, req, bh, max_segments))
+		case ELEVATOR_BACK_MERGE:
+			if (!q->back_merge_fn(q, req, bh, max_segments))
 				break;
 			req->bhtail->b_reqnext = bh;
 			req->bhtail = bh;
-		    	req->nr_sectors = req->hard_nr_sectors += count;
+			req->nr_sectors = req->hard_nr_sectors += count;
+			req->e = elevator;
 			drive_stat_acct(req, count, 0);
-
-			elevator_merge_after(elevator, req, latency);
-
-			/* Can we now merge this req with the next? */
 			attempt_back_merge(q, req, max_sectors, max_segments);
-		/* or to the beginning? */
-		} else if (req->sector - count == sector) {
-			if (starving)
-				break;
-			/*
-			 * The merge_fn is a more advanced way
-			 * of accomplishing the same task.  Instead
-			 * of applying a fixed limit of some sort
-			 * we instead define a function which can
-			 * determine whether or not it is safe to
-			 * merge the request or not.
-			 *
-			 * See if this queue has rules that
-			 * may suggest that we shouldn't merge
-			 * this 
-			 */
-			if(!(q->front_merge_fn)(q, req, bh, max_segments))
+			goto out;
+
+		case ELEVATOR_FRONT_MERGE:
+			if (!q->front_merge_fn(q, req, bh, max_segments))
 				break;
-		    	bh->b_reqnext = req->bh;
-		    	req->bh = bh;
-		    	req->buffer = bh->b_data;
-		    	req->current_nr_sectors = count;
-		    	req->sector = req->hard_sector = sector;
-		    	req->nr_sectors = req->hard_nr_sectors += count;
+			bh->b_reqnext = req->bh;
+			req->bh = bh;
+			req->buffer = bh->b_data;
+			req->current_nr_sectors = count;
+			req->sector = req->hard_sector = sector;
+			req->nr_sectors = req->hard_nr_sectors += count;
+			req->e = elevator;
 			drive_stat_acct(req, count, 0);
-
-			elevator_merge_before(elevator, req, latency);
-
 			attempt_front_merge(q, head, req, max_sectors, max_segments);
-		} else
-			continue;
-
-		q->elevator.sequence++;
-		spin_unlock_irqrestore(&io_request_lock,flags);
-	    	return;
+			goto out;
+		/*
+		 * elevator says don't/can't merge. get new request
+		 */
+		case ELEVATOR_NO_MERGE:
+			break;
 
+		default:
+			printk(KERN_ERR "elevator returned crap (%d)\n", el_ret);
+			BUG();
 	}
-
-/* find an unused request. */
-get_rq:
-	req = get_request(max_req, bh->b_rdev);
-
+
 	/*
-	 * if no request available: if rw_ahead, forget it,
-	 * otherwise try again blocking..
+	 * Grab a free request from the queue's freelist. If none is
+	 * available (or the write cap is hit), read-aheads are simply
+	 * dropped and everything else sleeps in __get_request_wait().
 	 */
-	if (!req) {
-		spin_unlock_irqrestore(&io_request_lock,flags);
+get_rq:
+	if ((req = get_request(q, rw)) == NULL) {
+		spin_unlock_irq(&io_request_lock);
 		if (rw_ahead)
 			goto end_io;
-		req = __get_request_wait(max_req, bh->b_rdev);
-		spin_lock_irqsave(&io_request_lock,flags);
 
-		/* revalidate elevator */
+		req = __get_request_wait(q, rw);
+		spin_lock_irq(&io_request_lock);
+
+		/*
+		 * revalidate the queue head; io_request_lock was dropped while we slept
+		 */
 		head = &q->queue_head;
 		if (q->head_active && !q->plugged)
 			head = head->next;
@@ -763,13 +715,13 @@
 	req->sem = NULL;
 	req->bh = bh;
 	req->bhtail = bh;
-	req->q = q;
-	add_request(q, req, head, orig_latency);
-	elevator_account_request(elevator, req);
-
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	req->rq_dev = bh->b_rdev;
+	req->e = elevator;
+	add_request(q, req, head, latency);
+	elevator_account_request(req);
+out:
+	spin_unlock_irq(&io_request_lock);
 	return;
-
 end_io:
 	bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
 }
@@ -785,7 +737,6 @@
 
 int generic_make_request (request_queue_t *q, int rw, struct buffer_head * bh)
 {
-	unsigned long flags;
 	int ret;
 
 	/*
@@ -793,7 +744,6 @@
 	 * still free to implement/resolve their own stacking
 	 * by explicitly returning 0)
 	 */
-
 	while (q->make_request_fn) {
 		ret = q->make_request_fn(q, rw, bh);
 		if (ret > 0) {
@@ -807,10 +757,10 @@
 	 * the IO request? (normal case)
 	 */
 	__make_request(q, rw, bh);
-	spin_lock_irqsave(&io_request_lock,flags);
+	spin_lock_irq(&io_request_lock);
 	if (q && !q->plugged)
 		(q->request_fn)(q);
-	spin_unlock_irqrestore(&io_request_lock,flags);
+	spin_unlock_irq(&io_request_lock);
 
 	return 0;
 }
@@ -949,31 +899,31 @@
 
 void end_that_request_last(struct request *req)
 {
-	if (req->q)
+	if (req->e) {
+		printk(KERN_ERR "end_that_request_last called with non-dequeued req\n");
 		BUG();
+	}
 	if (req->sem != NULL)
 		up(req->sem);
-	req->rq_status = RQ_INACTIVE;
-	wake_up(&wait_for_request);
+
+	blkdev_release_request(req);
 }
 
 int __init blk_dev_init(void)
 {
-	struct request * req;
 	struct blk_dev_struct *dev;
 
-	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
+	request_cachep = kmem_cache_create("blkdev_requests",
+					   sizeof(struct request),
+					   0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;)
 		dev->queue = NULL;
-		blk_init_queue(&dev->request_queue, NULL);
-	}
 
-	req = all_requests + NR_REQUEST;
-	while (--req >= all_requests) {
-		req->rq_status = RQ_INACTIVE;
-	}
 	memset(ro_bits,0,sizeof(ro_bits));
 	memset(max_readahead, 0, sizeof(max_readahead));
 	memset(max_sectors, 0, sizeof(max_sectors));
+
 #ifdef CONFIG_AMIGA_Z2RAM
 	z2_init();
 #endif
@@ -1095,3 +1045,4 @@
 EXPORT_SYMBOL(blk_queue_pluggable);
 EXPORT_SYMBOL(blk_queue_make_request);
 EXPORT_SYMBOL(generic_make_request);
+EXPORT_SYMBOL(blkdev_release_request);
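
Taken together, get_request(), __get_request_wait() and
blkdev_release_request() implement a per-queue freelist with a cap on writes.
A user-space miniature of just that logic (plain C, a singly linked list
standing in for list_head; nothing below is kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	#define NR_REQUESTS	8			/* QUEUE_NR_REQUESTS, scaled down */
	#define WRITES_MAX	((2 * NR_REQUESTS) / 3)	/* QUEUE_WRITES_MAX */

	struct request { struct request *next; };

	static struct request *freelist;
	static int queue_requests;		/* requests handed out */

	static struct request *get_request(int is_write)
	{
		struct request *rq;

		if (!freelist)
			return NULL;
		if (is_write && queue_requests > WRITES_MAX)
			return NULL;	/* writes capped; reads may still take these */
		rq = freelist;
		freelist = rq->next;
		queue_requests++;
		return rq;
	}

	static void release_request(struct request *rq)
	{
		rq->next = freelist;
		freelist = rq;
		queue_requests--;	/* kernel version also wakes a sleeper */
	}

	int main(void)
	{
		struct request *rq, *last = NULL;
		int i;

		for (i = 0; i < NR_REQUESTS; i++) {
			rq = malloc(sizeof(*rq));
			rq->next = freelist;
			freelist = rq;
		}
		for (i = 0; i < NR_REQUESTS; i++) {
			rq = get_request(1);
			printf("write %d: %s\n", i, rq ? "granted" : "must wait");
			if (rq)
				last = rq;
		}
		release_request(last);	/* completion frees one up again */
		printf("after release: %s\n", get_request(1) ? "granted" : "must wait");
		return 0;
	}

Run, this grants six of the eight writes (WRITES_MAX is 5, checked before
each grant); the last two wait until a request is released, which is exactly
the read-reserve behaviour of the real freelist.
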
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/drivers/block/loop.c linux/drivers/block/loop.c
--- /opt/kernel/linux-2.3.99-pre7-6/drivers/block/loop.c	Sun May  7 02:33:45 2000
+++ linux/drivers/block/loop.c	Sat May  6 00:00:46 2000
@@ -818,6 +818,7 @@
 	if (devfs_unregister_blkdev(MAJOR_NR, "loop") != 0)
 		printk(KERN_WARNING "loop: cannot unregister blkdev\n");
 
+	blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
 	kfree (loop_dev);
 	kfree (loop_sizes);
 	kfree (loop_blksizes);
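
blk_init_queue() now allocates QUEUE_NR_REQUESTS slab objects per queue, so
every user gains a matching blk_cleanup_queue() on teardown; that is all the
loop.c (and, below, scsi_scan.c) hunks do. For a hypothetical driver on
MY_MAJOR the unload path would pair up like:

	static void __exit mydrv_exit(void)	/* mydrv/MY_MAJOR are placeholders */
	{
		unregister_blkdev(MY_MAJOR, "mydrv");
		blk_cleanup_queue(BLK_DEFAULT_QUEUE(MY_MAJOR));	/* frees the request table */
	}
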
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/drivers/ide/ide-disk.c linux/drivers/ide/ide-disk.c
--- /opt/kernel/linux-2.3.99-pre7-6/drivers/ide/ide-disk.c	Thu May  4 15:42:32 2000
+++ linux/drivers/ide/ide-disk.c	Thu May  4 16:32:18 2000
@@ -688,13 +688,12 @@
 
 static int set_nowerr(ide_drive_t *drive, int arg)
 {
-	unsigned long flags;
-
-	if (ide_spin_wait_hwgroup(drive, &flags))
+	if (ide_spin_wait_hwgroup(drive))
 		return -EBUSY;
+
 	drive->nowerr = arg;
 	drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	spin_unlock_irq(&io_request_lock);
 	return 0;
 }
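
Dropping the flags argument changes the ide_spin_wait_hwgroup() contract: on
success it returns 0 with io_request_lock held and local interrupts disabled,
and the caller must drop the lock itself; on timeout it returns -EBUSY with
the lock already released. Every caller now follows the shape set_nowerr()
shows above:

	if (ide_spin_wait_hwgroup(drive))
		return -EBUSY;		/* lock is not held on failure */
	/* ... touch drive state under io_request_lock ... */
	spin_unlock_irq(&io_request_lock);
	return 0;
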
 
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/drivers/ide/ide-probe.c linux/drivers/ide/ide-probe.c
--- /opt/kernel/linux-2.3.99-pre7-6/drivers/ide/ide-probe.c	Thu May  4 15:42:32 2000
+++ linux/drivers/ide/ide-probe.c	Sat May  6 15:48:26 2000
@@ -118,6 +118,7 @@
 				type = ide_cdrom;	/* Early cdrom models used zero */
 			case ide_cdrom:
 				drive->removable = 1;
+				printk("with%s notify ", id->command_set_1 & 8 ? "" : "out");
 #ifdef CONFIG_PPC
 				/* kludge for Apple PowerBook internal zip */
 				if (!strstr(id->model, "CD-ROM") && strstr(id->model, "ZIP")) {
@@ -775,8 +776,7 @@
 
 static int hwif_init (ide_hwif_t *hwif)
 {
-	ide_drive_t *drive;
-	void (*rfn)(request_queue_t *);
+	request_queue_t *q;
 	
 	if (!hwif->present)
 		return 0;
@@ -795,39 +795,7 @@
 #endif /* CONFIG_BLK_DEV_HD */
 	
 	hwif->present = 0; /* we set it back to 1 if all is ok below */
-	switch (hwif->major) {
-	case IDE0_MAJOR: rfn = &do_ide0_request; break;
-#if MAX_HWIFS > 1
-	case IDE1_MAJOR: rfn = &do_ide1_request; break;
-#endif
-#if MAX_HWIFS > 2
-	case IDE2_MAJOR: rfn = &do_ide2_request; break;
-#endif
-#if MAX_HWIFS > 3
-	case IDE3_MAJOR: rfn = &do_ide3_request; break;
-#endif
-#if MAX_HWIFS > 4
-	case IDE4_MAJOR: rfn = &do_ide4_request; break;
-#endif
-#if MAX_HWIFS > 5
-	case IDE5_MAJOR: rfn = &do_ide5_request; break;
-#endif
-#if MAX_HWIFS > 6
-	case IDE6_MAJOR: rfn = &do_ide6_request; break;
-#endif
-#if MAX_HWIFS > 7
-	case IDE7_MAJOR: rfn = &do_ide7_request; break;
-#endif
-#if MAX_HWIFS > 8
-	case IDE8_MAJOR: rfn = &do_ide8_request; break;
-#endif
-#if MAX_HWIFS > 9
-	case IDE9_MAJOR: rfn = &do_ide9_request; break;
-#endif
-	default:
-		printk("%s: request_fn NOT DEFINED\n", hwif->name);
-		return (hwif->present = 0);
-	}
+
 	if (devfs_register_blkdev (hwif->major, hwif->name, ide_fops)) {
 		printk("%s: UNABLE TO GET MAJOR NUMBER %d\n", hwif->name, hwif->major);
 		return (hwif->present = 0);
@@ -860,19 +828,13 @@
 	read_ahead[hwif->major] = 8;	/* (4kB) */
 	hwif->present = 1;	/* success */
 
-	/*
-	 * FIXME(eric) - This needs to be tested.  I *think* that this
-	 * is correct.   Also, I believe that there is no longer any
-	 * reason to have multiple functions (do_ide[0-7]_request)
-	 * functions - the queuedata field could be used to indicate
-	 * the correct hardware group - either this, or we could add
-	 * a new field to request_queue_t to hold this information.
-	 */
-	drive = &hwif->drives[0];
-	blk_init_queue(&drive->queue, rfn);
-
-	drive = &hwif->drives[1];
-	blk_init_queue(&drive->queue, rfn);
+	q = &hwif->drives[0].queue;
+	q->queuedata = hwif->hwgroup;
+	blk_init_queue(q, do_ide_request);
+
+	q = &hwif->drives[1].queue;
+	q->queuedata = hwif->hwgroup;
+	blk_init_queue(q, do_ide_request);
 
 #if (DEBUG_SPINLOCK > 0)
 {
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/drivers/ide/ide.c linux/drivers/ide/ide.c
--- /opt/kernel/linux-2.3.99-pre7-6/drivers/ide/ide.c	Sun May  7 02:33:45 2000
+++ linux/drivers/ide/ide.c	Sat May  6 00:00:46 2000
@@ -771,7 +771,7 @@
 	spin_lock_irqsave(&io_request_lock, flags);
 	blkdev_dequeue_request(rq);
 	HWGROUP(drive)->rq = NULL;
-	rq->rq_status = RQ_INACTIVE;
+	blkdev_release_request(rq);
 	spin_unlock_irqrestore(&io_request_lock, flags);
 	if (rq->sem != NULL)
 		up(rq->sem);	/* inform originator that rq has been serviced */
@@ -1225,7 +1225,7 @@
  * the driver.  This makes the driver much more friendlier to shared IRQs
  * than previous designs, while remaining 100% (?) SMP safe and capable.
  */
-static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
+static void ide_do_request(ide_hwgroup_t *hwgroup, int masked_irq)
 {
 	ide_drive_t	*drive;
 	ide_hwif_t	*hwif;
@@ -1313,73 +1313,13 @@
 	return &hwif->drives[DEVICE_NR(dev) & 1].queue;
 }
 
-void do_ide0_request (request_queue_t *q)
-{
-	ide_do_request (ide_hwifs[0].hwgroup, 0);
-}
-
-#if MAX_HWIFS > 1
-void do_ide1_request (request_queue_t *q)
-{
-	ide_do_request (ide_hwifs[1].hwgroup, 0);
-}
-#endif /* MAX_HWIFS > 1 */
-
-#if MAX_HWIFS > 2
-void do_ide2_request (request_queue_t *q)
-{
-	ide_do_request (ide_hwifs[2].hwgroup, 0);
-}
-#endif /* MAX_HWIFS > 2 */
-
-#if MAX_HWIFS > 3
-void do_ide3_request (request_queue_t *q)
-{
-	ide_do_request (ide_hwifs[3].hwgroup, 0);
-}
-#endif /* MAX_HWIFS > 3 */
-
-#if MAX_HWIFS > 4
-void do_ide4_request (request_queue_t *q)
-{
-	ide_do_request (ide_hwifs[4].hwgroup, 0);
-}
-#endif /* MAX_HWIFS > 4 */
-
-#if MAX_HWIFS > 5
-void do_ide5_request (request_queue_t *q)
-{
-	ide_do_request (ide_hwifs[5].hwgroup, 0);
-}
-#endif /* MAX_HWIFS > 5 */
-
-#if MAX_HWIFS > 6
-void do_ide6_request (request_queue_t *q)
-{
-	ide_do_request (ide_hwifs[6].hwgroup, 0);
-}
-#endif /* MAX_HWIFS > 6 */
-
-#if MAX_HWIFS > 7
-void do_ide7_request (request_queue_t *q)
-{
-	ide_do_request (ide_hwifs[7].hwgroup, 0);
-}
-#endif /* MAX_HWIFS > 7 */
-
-#if MAX_HWIFS > 8
-void do_ide8_request (request_queue_t *q)
-{
-	ide_do_request (ide_hwifs[8].hwgroup, 0);
-}
-#endif /* MAX_HWIFS > 8 */
-
-#if MAX_HWIFS > 9
-void do_ide9_request (request_queue_t *q)
+/*
+ * Passes the request on to ide_do_request(); the hwgroup lives in q->queuedata
+ */
+void do_ide_request(request_queue_t *q)
 {
-	ide_do_request (ide_hwifs[9].hwgroup, 0);
+	ide_do_request(q->queuedata, 0);
 }
-#endif /* MAX_HWIFS > 9 */
 
 /*
  * ide_timer_expiry() is our timeout function for all drive operations.
@@ -1656,16 +1596,8 @@
  */
 void ide_init_drive_cmd (struct request *rq)
 {
-	rq->buffer = NULL;
+	memset(rq, 0, sizeof(*rq));
 	rq->cmd = IDE_DRIVE_CMD;
-	rq->sector = 0;
-	rq->nr_sectors = 0;
-	rq->nr_segments = 0;
-	rq->current_nr_sectors = 0;
-	rq->sem = NULL;
-	rq->bh = NULL;
-	rq->bhtail = NULL;
-	rq->q = NULL;
 }
 
 /*
@@ -2304,24 +2236,24 @@
 	return val;
 }
 
-int ide_spin_wait_hwgroup (ide_drive_t *drive, unsigned long *flags)
+int ide_spin_wait_hwgroup (ide_drive_t *drive)
 {
 	ide_hwgroup_t *hwgroup = HWGROUP(drive);
 	unsigned long timeout = jiffies + (3 * HZ);
 
-	spin_lock_irqsave(&io_request_lock, *flags);
+	spin_lock_irq(&io_request_lock);
 	while (hwgroup->busy) {
-		unsigned long lflags;
-		spin_unlock_irqrestore(&io_request_lock, *flags);
-		__save_flags(lflags);	/* local CPU only */
+		unsigned long flags;
+		spin_unlock_irq(&io_request_lock);
+		__save_flags(flags);	/* local CPU only */
 		__sti();		/* local CPU only; needed for jiffies */
 		if (0 < (signed long)(jiffies - timeout)) {
-			__restore_flags(lflags);	/* local CPU only */
+			__restore_flags(flags);
 			printk("%s: channel busy\n", drive->name);
 			return -EBUSY;
 		}
-		__restore_flags(lflags);	/* local CPU only */
-		spin_lock_irqsave(&io_request_lock, *flags);
+		__restore_flags(flags);	/* local CPU only */
+		spin_lock_irq(&io_request_lock);
 	}
 	return 0;
 }
@@ -2333,7 +2265,6 @@
  */
 int ide_write_setting (ide_drive_t *drive, ide_settings_t *setting, int val)
 {
-	unsigned long flags;
 	int i;
 	u32 *p;
 
@@ -2345,7 +2276,7 @@
 		return -EINVAL;
 	if (setting->set)
 		return setting->set(drive, val);
-	if (ide_spin_wait_hwgroup(drive, &flags))
+	if (ide_spin_wait_hwgroup(drive))
 		return -EBUSY;
 	switch (setting->data_type) {
 		case TYPE_BYTE:
@@ -2363,7 +2294,7 @@
 				*p = val;
 			break;
 	}
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	spin_unlock_irq(&io_request_lock);
 	return 0;
 }
 
@@ -3512,36 +3443,8 @@
 EXPORT_SYMBOL(ide_intr);
 EXPORT_SYMBOL(ide_fops);
 EXPORT_SYMBOL(ide_get_queue);
-EXPORT_SYMBOL(do_ide0_request);
 EXPORT_SYMBOL(ide_add_generic_settings);
 EXPORT_SYMBOL(ide_devfs_handle);
-#if MAX_HWIFS > 1
-EXPORT_SYMBOL(do_ide1_request);
-#endif /* MAX_HWIFS > 1 */
-#if MAX_HWIFS > 2
-EXPORT_SYMBOL(do_ide2_request);
-#endif /* MAX_HWIFS > 2 */
-#if MAX_HWIFS > 3
-EXPORT_SYMBOL(do_ide3_request);
-#endif /* MAX_HWIFS > 3 */
-#if MAX_HWIFS > 4
-EXPORT_SYMBOL(do_ide4_request);
-#endif /* MAX_HWIFS > 4 */
-#if MAX_HWIFS > 5
-EXPORT_SYMBOL(do_ide5_request);
-#endif /* MAX_HWIFS > 5 */
-#if MAX_HWIFS > 6
-EXPORT_SYMBOL(do_ide6_request);
-#endif /* MAX_HWIFS > 6 */
-#if MAX_HWIFS > 7
-EXPORT_SYMBOL(do_ide7_request);
-#endif /* MAX_HWIFS > 7 */
-#if MAX_HWIFS > 8
-EXPORT_SYMBOL(do_ide8_request);
-#endif /* MAX_HWIFS > 8 */
-#if MAX_HWIFS > 9
-EXPORT_SYMBOL(do_ide9_request);
-#endif /* MAX_HWIFS > 9 */
 
 /*
  * Driver module
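
Condensing the two IDE diffs: the per-major do_ideN_request() bodies differed
only in which hwgroup they passed, so hwif_init() now stashes that pointer in
q->queuedata and a single handler recovers it. The whole mechanism is just:

	/* probe time (ide-probe.c), per drive queue: */
	q->queuedata = hwif->hwgroup;
	blk_init_queue(q, do_ide_request);

	/* request time (ide.c), shared by all hwifs: */
	void do_ide_request(request_queue_t *q)
	{
		ide_do_request(q->queuedata, 0);
	}
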
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/drivers/scsi/scsi.c linux/drivers/scsi/scsi.c
--- /opt/kernel/linux-2.3.99-pre7-6/drivers/scsi/scsi.c	Thu May  4 15:42:36 2000
+++ linux/drivers/scsi/scsi.c	Wed May  3 03:42:37 2000
@@ -2561,7 +2561,6 @@
 			}
 		}
 	}
-	printk("wait_for_request = %p\n", &wait_for_request);
 #endif	/* CONFIG_SCSI_LOGGING */ /* } */
 }
 #endif				/* CONFIG_PROC_FS */
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/drivers/scsi/scsi_lib.c linux/drivers/scsi/scsi_lib.c
--- /opt/kernel/linux-2.3.99-pre7-6/drivers/scsi/scsi_lib.c	Thu May  4 15:42:36 2000
+++ linux/drivers/scsi/scsi_lib.c	Wed May  3 03:42:37 2000
@@ -1019,8 +1019,7 @@
 			 * We have copied the data out of the request block - it is now in
 			 * a field in SCpnt.  Release the request block.
 			 */
-			req->rq_status = RQ_INACTIVE;
-			wake_up(&wait_for_request);
+			blkdev_release_request(req);
 		}
 		/*
 		 * Now it is finally safe to release the lock.  We are
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/drivers/scsi/scsi_scan.c linux/drivers/scsi/scsi_scan.c
--- /opt/kernel/linux-2.3.99-pre7-6/drivers/scsi/scsi_scan.c	Sat Mar 18 20:16:21 2000
+++ linux/drivers/scsi/scsi_scan.c	Sat May  6 18:08:30 2000
@@ -414,6 +414,7 @@
 		for (dqptr = shpnt->host_queue; dqptr != SDpnt; dqptr = dqptr->next)
 			continue;
 		if (dqptr) {
+			blk_cleanup_queue(&dqptr->request_queue);
 			prev = dqptr->prev;
 			next = dqptr->next;
 			if (prev)
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/include/linux/blk.h linux/include/linux/blk.h
--- /opt/kernel/linux-2.3.99-pre7-6/include/linux/blk.h	Thu May  4 15:42:39 2000
+++ linux/include/linux/blk.h	Sat May  6 16:54:45 2000
@@ -14,13 +14,6 @@
 extern spinlock_t io_request_lock;
 
 /*
- * NR_REQUEST is the number of entries in the request-queue.
- * NOTE that writes may use only the low 2/3 of these: reads
- * take precedence.
- */
-#define NR_REQUEST	256
-
-/*
  * Initialization functions.
  */
 extern int isp16_init(void);
@@ -94,12 +87,9 @@
 
 extern inline void blkdev_dequeue_request(struct request * req)
 {
-	if (req->q)
-	{
-		if (req->cmd == READ)
-			req->q->elevator.read_pendings--;
-		req->q->elevator.nr_segments -= req->nr_segments;
-		req->q = NULL;
+	if (req->e) {
+		req->e->dequeue_fn(req);
+		req->e = NULL;
 	}
 	list_del(&req->queue);
 }
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/include/linux/blkdev.h linux/include/linux/blkdev.h
--- /opt/kernel/linux-2.3.99-pre7-6/include/linux/blkdev.h	Wed Apr 12 02:31:49 2000
+++ linux/include/linux/blkdev.h	Sat May  6 16:54:45 2000
@@ -9,6 +9,8 @@
 
 struct request_queue;
 typedef struct request_queue request_queue_t;
+struct elevator_s;
+typedef struct elevator_s elevator_t;
 
 /*
  * Ok, this is an expanded form so that we can use the same
@@ -19,7 +21,11 @@
 struct request {
 	struct list_head queue;
 	int elevator_sequence;
+
+	/*
+	 * entry on the owning queue's request freelist
+	 */
+	struct list_head table;
 
 	volatile int rq_status;	/* should split this into a few status bits */
 #define RQ_INACTIVE		(-1)
 #define RQ_ACTIVE		1
@@ -41,7 +47,8 @@
 	struct semaphore * sem;
 	struct buffer_head * bh;
 	struct buffer_head * bhtail;
-	request_queue_t * q;
+	request_queue_t *q;
+	elevator_t *e;
 };
 
 #include <linux/elevator.h>
@@ -60,11 +67,25 @@
 typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
 typedef void (unplug_device_fn) (void *q);
 
+/*
+ * Free requests allocated per queue. Writes may hold at most
+ * QUEUE_WRITES_MAX of them at a time; the rest are kept for reads.
+ */
+#define QUEUE_NR_REQUESTS	512
+#define QUEUE_WRITES_MAX	((2 * QUEUE_NR_REQUESTS) / 3)
+
 struct request_queue
 {
-	struct list_head queue_head;
-	/* together with queue_head for cacheline sharing */
-	elevator_t elevator;
+	/*
+	 * the queue request freelist and the count of requests handed out
+	 */
+	struct list_head	request_freelist;
+	int			queue_requests;
+
+	/*
+	 * Together with queue_head for cacheline sharing
+	 */
+	struct list_head	queue_head;
+	elevator_t		elevator;
 
 	request_fn_proc		* request_fn;
 	merge_request_fn	* back_merge_fn;
@@ -76,22 +97,33 @@
 	 * The queue owner gets to use this for whatever they like.
 	 * ll_rw_blk doesn't touch it.
 	 */
-	void                    * queuedata;
+	void			* queuedata;
 
 	/*
 	 * This is used to remove the plug when tq_disk runs.
 	 */
-	struct tq_struct          plug_tq;
+	struct tq_struct	plug_tq;
 	/*
 	 * Boolean that indicates whether this queue is plugged or not.
 	 */
-	char			  plugged;
+	char			plugged;
 
 	/*
 	 * Boolean that indicates whether current_request is active or
 	 * not.
 	 */
-	char			  head_active;
+	char			head_active;
+
+	/*
+	 * Meant to protect the queue in the future, replacing the
+	 * global io_request_lock
+	 */
+	spinlock_t		request_lock;
+
+	/*
+	 * Tasks wait here for a free request
+	 */
+	wait_queue_head_t	wait_for_request;
 };
 
 struct blk_dev_struct {
@@ -118,13 +150,13 @@
 
 extern struct sec_size * blk_sec[MAX_BLKDEV];
 extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
-extern wait_queue_head_t wait_for_request;
 extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size);
 extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
 extern void generic_unplug_device(void * data);
 extern int generic_make_request(request_queue_t *q, int rw,
 						struct buffer_head * bh);
-extern request_queue_t * blk_get_queue(kdev_t dev);
+extern request_queue_t *blk_get_queue(kdev_t dev);
+extern void blkdev_release_request(struct request *);
 
 /*
  * Access functions for manipulating queue properties
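
Worked out with the defaults above: QUEUE_WRITES_MAX = (2 * 512) / 3 = 341,
so get_request() refuses writes once 342 of a queue's 512 requests are
outstanding, leaving at least 170 that only reads can claim; this is the
per-queue replacement for the old "writes may use only the low 2/3 of
NR_REQUEST" rule.
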
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/include/linux/elevator.h linux/include/linux/elevator.h
--- /opt/kernel/linux-2.3.99-pre7-6/include/linux/elevator.h	Mon Mar 13 04:32:58 2000
+++ linux/include/linux/elevator.h	Wed May  3 03:42:37 2000
@@ -3,13 +3,15 @@
 
 #define ELEVATOR_DEBUG
 
-struct elevator_s;
-typedef struct elevator_s elevator_t;
-
 typedef void (elevator_fn) (struct request *, elevator_t *,
 			    struct list_head *,
 			    struct list_head *, int);
 
+typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
+				 struct buffer_head *, int, int *, int *);
+
+typedef void (elevator_dequeue_fn) (struct request *);
+
 struct elevator_s
 {
 	int sequence;
@@ -21,29 +23,26 @@
 	unsigned int nr_segments;
 	int read_pendings;
 
+	char elevator_name[16];
+
 	elevator_fn * elevator_fn;
+	elevator_merge_fn *elevator_merge_fn;
+	elevator_dequeue_fn *dequeue_fn;
 };
 
-#define ELEVATOR_DEFAULTS				\
-((elevator_t) {						\
-	0,			/* sequence */		\
-							\
-	128,			/* read_latency */	\
-	8192,			/* write_latency */	\
-	4,			/* max_bomb_segments */	\
-							\
-	0,			/* nr_segments */	\
-	0,			/* read_pendings */	\
-							\
-	elevator_default,	/* elevator_fn */	\
-	})
-
+void elevator_default(struct request *, elevator_t *, struct list_head *, struct list_head *, int);
+int elevator_default_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *);
+void elevator_default_dequeue(struct request *);
+void elevator_noop(struct request *, elevator_t *, struct list_head *, struct list_head *, int);
+int elevator_noop_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *);
+void elevator_noop_dequeue(struct request *);
 
 typedef struct blkelv_ioctl_arg_s {
 	void * queue_ID;
 	int read_latency;
 	int write_latency;
 	int max_bomb_segments;
+	char elevator_name[16];
 } blkelv_ioctl_arg_t;
 
 #define BLKELVGET   _IO(0x12,106)
@@ -52,13 +51,12 @@
 extern int blkelvget_ioctl(elevator_t *, blkelv_ioctl_arg_t *);
 extern int blkelvset_ioctl(elevator_t *, const blkelv_ioctl_arg_t *);
 
-
-extern void elevator_init(elevator_t *);
+extern void elevator_init(elevator_t *, elevator_t);
 
 #ifdef ELEVATOR_DEBUG
-extern void elevator_debug(request_queue_t *, kdev_t);
+extern void elevator_default_debug(request_queue_t *, kdev_t);
 #else
-#define elevator_debug(a,b) do { } while(0)
+#define elevator_default_debug(a,b) do { } while(0)
 #endif
 
 #define elevator_sequence_after(a,b) ((int)((b)-(a)) < 0)
@@ -67,6 +65,13 @@
 #define elevator_sequence_before_eq(a,b) elevator_sequence_after_eq(b,a)
 
 /*
+ * Return values from elevator merger
+ */
+#define ELEVATOR_NO_MERGE	0
+#define ELEVATOR_FRONT_MERGE	1
+#define ELEVATOR_BACK_MERGE	2
+
+/*
  * This is used in the elevator algorithm.  We don't prioritise reads
  * over writes any more --- although reads are more time-critical than
  * writes, by treating them equally we increase filesystem throughput.
@@ -77,12 +82,12 @@
 	   (s1)->sector < (s2)->sector)) ||	\
 	 (s1)->rq_dev < (s2)->rq_dev)
 
-static inline void elevator_merge_requests(elevator_t * e, struct request * req, struct request * next)
+static inline void elevator_merge_requests(struct request * req, struct request * next)
 {
 	if (elevator_sequence_before(next->elevator_sequence, req->elevator_sequence))
 		req->elevator_sequence = next->elevator_sequence;
 	if (req->cmd == READ)
-		e->read_pendings--;
+		req->e->read_pendings--;
 
 }
 
@@ -91,23 +96,23 @@
 	return latency + e->sequence;
 }
 
-#define elevator_merge_before(q, req, lat)	__elevator_merge((q), (req), (lat), 0)
-#define elevator_merge_after(q, req, lat)	__elevator_merge((q), (req), (lat), 1)
-static inline void __elevator_merge(elevator_t * elevator, struct request * req, int latency, int after)
+#define elevator_merge_before(req, lat)	__elevator_merge((req), (lat), 0)
+#define elevator_merge_after(req, lat)	__elevator_merge((req), (lat), 1)
+static inline void __elevator_merge(struct request * req, int latency, int after)
 {
-	int sequence = elevator_sequence(elevator, latency);
+	int sequence = elevator_sequence(req->e, latency);
 	if (after)
 		sequence -= req->nr_segments;
 	if (elevator_sequence_before(sequence, req->elevator_sequence))
 		req->elevator_sequence = sequence;
 }
 
-static inline void elevator_account_request(elevator_t * elevator, struct request * req)
+static inline void elevator_account_request(struct request * req)
 {
-	elevator->sequence++;
+	req->e->sequence++;
 	if (req->cmd == READ)
-		elevator->read_pendings++;
-	elevator->nr_segments++;
+		req->e->read_pendings++;
+	req->e->nr_segments++;
 }
 
 static inline int elevator_request_latency(elevator_t * elevator, int rw)
@@ -120,5 +125,41 @@
 
 	return latency;
 }
+
+#define ELEVATOR_DEFAULT					\
+((elevator_t) {							\
+	0,				/* sequence */		\
+								\
+	100000,				/* read_latency */	\
+	100000,				/* write_latency */	\
+	128,				/* max_bomb_segments */	\
+								\
+	0,				/* nr_segments */	\
+	0,				/* read_pendings */	\
+								\
+	"default",			/* elevator_name */	\
+								\
+	elevator_default,		/* elevator_fn */	\
+	elevator_default_merge,		/* elevator_merge_fn */ \
+	elevator_default_dequeue,	/* dequeue_fn */	\
+	})
+
+#define ELEVATOR_NOOP						\
+((elevator_t) {							\
+	0,				/* sequence */		\
+								\
+	0,				/* read_latency */	\
+	0,				/* write_latency */	\
+	0,				/* max_bomb_segments */	\
+								\
+	0,				/* nr_segments */	\
+	0,				/* read_pendings */	\
+								\
+	"noop",				/* elevator_name */	\
+								\
+	elevator_noop,			/* elevator_fn */	\
+	elevator_noop_merge,		/* elevator_merge_fn */ \
+	elevator_noop_dequeue,		/* dequeue_fn */	\
+	})
 
 #endif
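
A third elevator only needs the three hooks plus an initializer in the
ELEVATOR_NOOP style above. A sketch of a strict-FIFO variant; myfifo_add and
ELEVATOR_MYFIFO are made-up names, while the typedefs and the two reused noop
hooks are the ones this header declares:

	static void myfifo_add(struct request *req, elevator_t *e,
			       struct list_head *real_head,
			       struct list_head *head, int orig_latency)
	{
		list_add_tail(&req->queue, real_head);	/* strict FIFO insert */
	}

	#define ELEVATOR_MYFIFO						\
	((elevator_t) {							\
		0,				/* sequence */		\
		0, 0, 0,			/* latencies/bomb */	\
		0, 0,				/* segments/pendings */	\
		"myfifo",			/* elevator_name */	\
		myfifo_add,			/* elevator_fn */	\
		elevator_noop_merge,		/* elevator_merge_fn */	\
		elevator_noop_dequeue,		/* dequeue_fn */	\
		})

A driver would then call elevator_init(&q->elevator, ELEVATOR_MYFIFO) after
blk_init_queue(), exactly as ll_rw_blk.c does with ELEVATOR_DEFAULT.
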
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/include/linux/ide.h linux/include/linux/ide.h
--- /opt/kernel/linux-2.3.99-pre7-6/include/linux/ide.h	Thu May  4 15:42:39 2000
+++ linux/include/linux/ide.h	Sat May  6 16:55:20 2000
@@ -769,37 +769,10 @@
  */
 int drive_is_flashcard (ide_drive_t *drive);
 
-int  ide_spin_wait_hwgroup(ide_drive_t *drive, unsigned long *flags);
+int ide_spin_wait_hwgroup(ide_drive_t *drive);
 void ide_timer_expiry (unsigned long data);
 void ide_intr (int irq, void *dev_id, struct pt_regs *regs);
-void do_ide0_request (request_queue_t * q);
-#if MAX_HWIFS > 1
-void do_ide1_request (request_queue_t * q);
-#endif
-#if MAX_HWIFS > 2
-void do_ide2_request (request_queue_t * q);
-#endif
-#if MAX_HWIFS > 3
-void do_ide3_request (request_queue_t * q);
-#endif
-#if MAX_HWIFS > 4
-void do_ide4_request (request_queue_t * q);
-#endif
-#if MAX_HWIFS > 5
-void do_ide5_request (request_queue_t * q);
-#endif
-#if MAX_HWIFS > 6
-void do_ide6_request (request_queue_t * q);
-#endif
-#if MAX_HWIFS > 7
-void do_ide7_request (request_queue_t * q);
-#endif
-#if MAX_HWIFS > 8
-void do_ide8_request (request_queue_t * q);
-#endif
-#if MAX_HWIFS > 9
-void do_ide9_request (request_queue_t * q);
-#endif
+void do_ide_request (request_queue_t * q);
 void ide_init_subdrivers (void);
 
 #ifndef _IDE_C
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-6/kernel/ksyms.c linux/kernel/ksyms.c
--- /opt/kernel/linux-2.3.99-pre7-6/kernel/ksyms.c	Sun May  7 02:33:47 2000
+++ linux/kernel/ksyms.c	Sat May  6 00:00:47 2000
@@ -270,7 +270,6 @@
 /* block device driver support */
 EXPORT_SYMBOL(block_read);
 EXPORT_SYMBOL(block_write);
-EXPORT_SYMBOL(wait_for_request);
 EXPORT_SYMBOL(blksize_size);
 EXPORT_SYMBOL(hardsect_size);
 EXPORT_SYMBOL(blk_size);
