Use dm's map_context instead of the bi_bdev hack.

---
 drivers/md/dm-thin.c |   47 +++++++++++++++++------------------------------
 1 file changed, 17 insertions(+), 30 deletions(-)

Index: linux-3.0-rc7/drivers/md/dm-thin.c
===================================================================
--- linux-3.0-rc7.orig/drivers/md/dm-thin.c
+++ linux-3.0-rc7/drivers/md/dm-thin.c
@@ -614,28 +614,6 @@ static struct pool *pool_table_lookup(st
 /*----------------------------------------------------------------*/
 
 /*
- * We need to maintain an association between a bio and a thin target
- * when deferring a bio and handing it from the individual thin target to the
- * workqueue which is shared across all thin targets.
- *
- * To avoid another mempool allocation or lookups in an auxillary table,
- * we borrow the bi_bdev field for this purpose, as we know it is
- * not used while the bio is being processed and we know the value it holds.
- */
-// FIXME Can this use map_context instead?
-static void set_tc(struct bio *bio, struct thin_c *tc)
-{
-	bio->bi_bdev = (struct block_device *)tc;
-}
-
-static struct thin_c *get_tc(struct bio *bio)
-{
-	return (struct thin_c *)bio->bi_bdev;
-}
-
-/*----------------------------------------------------------------*/
-
-/*
  * This section of code contains the logic for processing a thin devices' IO.
  * Much of the code depends on pool object resources (lists, workqueues, etc)
  * but most is exclusively called from the thin target rather than the thin-pool
@@ -869,7 +847,7 @@ static void cell_remap_and_issue_except(
 
 static void retry_later(struct bio *bio)
 {
-	struct thin_c *tc = get_tc(bio);
+	struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
 	struct pool *pool = tc->pool;
 	unsigned long flags;
 
@@ -995,9 +973,14 @@ static void process_shared_bio(struct th
 	struct pool *pool = tc->pool;
 	struct endio_hook *endio_hook;
 
+	/*
+	 * If cell is already occupied, then sharing is already
+	 * in the process of being broken so we have nothing
+	 * further to do here.
+	 */
 	build_data_key(tc->td, lookup_result->block, &key);
 	if (bio_detain_if_occupied(pool->prison, &key, bio, &cell))
-		return; /* already underway */
+		return;
 
 	if (bio_data_dir(bio) == WRITE)
 		break_sharing(tc, bio, block, &key, lookup_result);
@@ -1085,7 +1068,7 @@ static void process_bios(struct pool *po
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct thin_c *tc = get_tc(bio);
+		struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
 
 		if (bio->bi_rw & REQ_DISCARD)
 			process_discard(tc, bio);
@@ -1160,8 +1143,6 @@ static void defer_bio(struct thin_c *tc,
 	unsigned long flags;
 	struct pool *pool = tc->pool;
 
-	set_tc(bio, tc);
-
 	spin_lock_irqsave(&pool->lock, flags);
 	bio_list_add(&pool->deferred_bios, bio);
 	spin_unlock_irqrestore(&pool->lock, flags);
@@ -1170,10 +1151,11 @@ static void defer_bio(struct thin_c *tc,
 }
 
 /*
- * Non-blocking function designed to be called from the targets map
+ * Non-blocking function designed to be called from the target's map
  * function.
  */
-static int bio_map(struct dm_target *ti, struct bio *bio)
+static int bio_map(struct dm_target *ti, struct bio *bio,
+		   union map_info *map_context)
 {
 	int r;
 	struct thin_c *tc = ti->private;
@@ -1194,6 +1176,11 @@ static int bio_map(struct dm_target *ti,
 		return DM_MAPIO_SUBMITTED;
 	}
 
+	/*
+	 * Save the thin context for easy access from the deferred bio later.
+	 */
+	map_context->ptr = tc;
+
 	if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
 		defer_bio(tc, bio);
 		return DM_MAPIO_SUBMITTED;
@@ -2067,7 +2054,7 @@ static int thin_map(struct dm_target *ti
 {
 	bio->bi_sector -= ti->begin;
 
-	return bio_map(ti, bio);
+	return bio_map(ti, bio, map_context);
 }
 
 static int thin_status(struct dm_target *ti, status_type_t type,
