From: Mikulas Patocka <mpatocka@redhat.com>

Whenever a read from a snapshot gets mapped through to the origin, track it
in a per-snapshot hash table indexed by chunk number, with each tracking
entry allocated from a new per-snapshot mempool.
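
To illustrate how the table is meant to be consumed, a later patch could
test whether a chunk has reads in flight with a lookup along these lines
(hypothetical helper, not part of this patch; it uses the read_track
structures introduced below):

	/*
	 * Hypothetical: return whether any read to 'chunk' is still in
	 * flight.  Walks one hash bucket under read_track_lock.
	 */
	static int chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
	{
		struct read_track *rt;
		struct hlist_node *hn;
		int found = 0;

		spin_lock_irq(&s->read_track_lock);

		hlist_for_each_entry(rt, hn,
		    &s->read_track_hash[READ_TRACK_HASH(chunk)], read_list)
			if (rt->chunk == chunk) {
				found = 1;
				break;
			}

		spin_unlock_irq(&s->read_track_lock);

		return found;
	}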

This is needed by a subsequent patch that fixes the read-vs-reallocation
race condition.
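
That fix can then make chunk reallocation wait for conflicting reads to
drain.  A minimal sketch, assuming the hypothetical chunk_is_tracked()
helper above (msleep() needs linux/delay.h):

	/*
	 * Hypothetical: block until no reads to 'chunk' remain in flight,
	 * so the chunk can be safely reallocated.
	 */
	static void wait_for_in_flight_reads(struct dm_snapshot *s, chunk_t chunk)
	{
		while (chunk_is_tracked(s, chunk))
			msleep(1);
	}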

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
 drivers/md/dm-snap.c |   87 +++++++++++++++++++++++++++++++++++++++++++--------
 drivers/md/dm-snap.h |    7 ++++
 2 files changed, 82 insertions(+), 12 deletions(-)

Index: linux/drivers/md/dm-snap.c
===================================================================
--- linux.orig/drivers/md/dm-snap.c	2008-05-06 11:44:50.000000000 +0100
+++ linux/drivers/md/dm-snap.c	2008-05-06 14:39:16.000000000 +0100
@@ -40,6 +40,11 @@
  */
 #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
 
+/*
+ * The size of the mempool used for read tracking
+ */
+#define MIN_IOS 256
+
 static struct workqueue_struct *ksnapd;
 static void flush_queued_bios(struct work_struct *work);
 
@@ -93,6 +98,13 @@ static struct kmem_cache *exception_cach
 static struct kmem_cache *pending_cache;
 static mempool_t *pending_pool;
 
+struct read_track {
+	struct hlist_node read_list;
+	chunk_t chunk;
+};
+
+static struct kmem_cache *read_track_cache;
+
 /*
  * One of these per registered origin, held in the snapshot_origins hash
  */
@@ -482,6 +494,7 @@ static int set_chunk_size(struct dm_snap
 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct dm_snapshot *s;
+	int i;
 	int r = -EINVAL;
 	char persistent;
 	char *origin_path;
@@ -564,11 +577,22 @@ static int snapshot_ctr(struct dm_target
 		goto bad5;
 	}
 
+	s->read_track_pool = mempool_create_slab_pool(MIN_IOS, read_track_cache);
+	if (!s->read_track_pool) {
+		ti->error = "Could not allocate mempool for read tracking";
+		goto bad6;
+	}
+
+	for (i = 0; i < READ_TRACK_HASH_SIZE; i++) {
+		INIT_HLIST_HEAD(&s->read_track_hash[i]);
+	}
+	spin_lock_init(&s->read_track_lock);
+
 	/* Metadata must only be loaded into one table at once */
 	r = s->store.read_metadata(&s->store);
 	if (r < 0) {
 		ti->error = "Failed to read snapshot metadata";
-		goto bad6;
+		goto bad7;
 	} else if (r > 0) {
 		s->valid = 0;
 		DMWARN("Snapshot is marked invalid.");
@@ -582,7 +606,7 @@ static int snapshot_ctr(struct dm_target
 	if (register_snapshot(s)) {
 		r = -EINVAL;
 		ti->error = "Cannot register snapshot origin";
-		goto bad6;
+		goto bad7;
 	}
 
 	ti->private = s;
@@ -590,6 +614,9 @@ static int snapshot_ctr(struct dm_target
 
 	return 0;
 
+ bad7:
+	mempool_destroy(s->read_track_pool);
+
  bad6:
 	dm_kcopyd_client_destroy(s->kcopyd_client);
 
@@ -624,6 +651,7 @@ static void __free_exceptions(struct dm_
 
 static void snapshot_dtr(struct dm_target *ti)
 {
+	int i;
 	struct dm_snapshot *s = ti->private;
 
 	flush_workqueue(ksnapd);
@@ -632,6 +660,12 @@ static void snapshot_dtr(struct dm_targe
 	/* After this returns there can be no new kcopyd jobs. */
 	unregister_snapshot(s);
 
+	for (i = 0; i < READ_TRACK_HASH_SIZE; i++) {
+		BUG_ON(!hlist_empty(&s->read_track_hash[i]));
+	}
+
+	mempool_destroy(s->read_track_pool);
+
 	__free_exceptions(s);
 
 	dm_put_device(ti, s->origin);
@@ -974,14 +1008,15 @@ static int snapshot_map(struct dm_target
 			start_copy(pe);
 			goto out;
 		}
-	} else
-		/*
-		 * FIXME: this read path scares me because we
-		 * always use the origin when we have a pending
-		 * exception.  However I can't think of a
-		 * situation where this is wrong - ejt.
-		 */
+	} else {
+		struct read_track *rt = mempool_alloc(s->read_track_pool, GFP_NOIO);
+		map_context->ptr = rt;
 		bio->bi_bdev = s->origin->bdev;
+		spin_lock_irq(&s->read_track_lock);
+		rt->chunk = chunk;
+		hlist_add_head(&rt->read_list, &s->read_track_hash[READ_TRACK_HASH(rt->chunk)]);
+		spin_unlock_irq(&s->read_track_lock);
+	}
 
  out_unlock:
 	up_write(&s->lock);
@@ -989,6 +1024,23 @@ static int snapshot_map(struct dm_target
 	return r;
 }
 
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
+			   int error, union map_info *map_context)
+{
+	struct dm_snapshot *s = ti->private;
+	struct read_track *rt = map_context->ptr;
+
+	if (rt) {
+		unsigned long flags;
+		spin_lock_irqsave(&s->read_track_lock, flags);
+		hlist_del(&rt->read_list);
+		spin_unlock_irqrestore(&s->read_track_lock, flags);
+		mempool_free(rt, s->read_track_pool);
+	}
+
+	return 0;
+}
+
 static void snapshot_resume(struct dm_target *ti)
 {
 	struct dm_snapshot *s = ti->private;
@@ -1266,6 +1318,7 @@ static struct target_type snapshot_targe
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
 	.map     = snapshot_map,
+	.end_io  = snapshot_end_io,
 	.resume  = snapshot_resume,
 	.status  = snapshot_status,
 };
@@ -1306,24 +1359,33 @@ static int __init dm_snapshot_init(void)
 		goto bad4;
 	}
 
+	read_track_cache = KMEM_CACHE(read_track, 0);
+	if (!read_track_cache) {
+		DMERR("Couldn't create read track cache.");
+		r = -ENOMEM;
+		goto bad5;
+	}
+
 	pending_pool = mempool_create_slab_pool(128, pending_cache);
 	if (!pending_pool) {
 		DMERR("Couldn't create pending pool.");
 		r = -ENOMEM;
-		goto bad5;
+		goto bad6;
 	}
 
 	ksnapd = create_singlethread_workqueue("ksnapd");
 	if (!ksnapd) {
 		DMERR("Failed to create ksnapd workqueue.");
 		r = -ENOMEM;
-		goto bad6;
+		goto bad7;
 	}
 
 	return 0;
 
-      bad6:
+      bad7:
 	mempool_destroy(pending_pool);
+      bad6:
+	kmem_cache_destroy(read_track_cache);
       bad5:
 	kmem_cache_destroy(pending_cache);
       bad4:
@@ -1355,6 +1417,7 @@ static void __exit dm_snapshot_exit(void
 	mempool_destroy(pending_pool);
 	kmem_cache_destroy(pending_cache);
 	kmem_cache_destroy(exception_cache);
+	kmem_cache_destroy(read_track_cache);
 }
 
 /* Module hooks */
Index: linux/drivers/md/dm-snap.h
===================================================================
--- linux.orig/drivers/md/dm-snap.h	2008-05-06 11:44:50.000000000 +0100
+++ linux/drivers/md/dm-snap.h	2008-05-06 14:39:16.000000000 +0100
@@ -130,6 +130,9 @@ struct exception_store {
 	void *context;
 };
 
+#define READ_TRACK_HASH_SIZE	16
+#define READ_TRACK_HASH(x)	((unsigned long)(x) & (READ_TRACK_HASH_SIZE - 1))
+
 struct dm_snapshot {
 	struct rw_semaphore lock;
 	struct dm_target *ti;
@@ -174,6 +177,10 @@ struct dm_snapshot {
 	/* Queue of snapshot writes for ksnapd to flush */
 	struct bio_list queued_bios;
 	struct work_struct queued_bios_work;
+
+	mempool_t *read_track_pool;
+	spinlock_t read_track_lock;
+	struct hlist_head read_track_hash[READ_TRACK_HASH_SIZE];
 };
 
 /*
