[Fold into 3]
---
 drivers/md/dm-thin-metadata.c |  687 +++++++++++++++++++++---------------------
 drivers/md/dm-thin-metadata.h |   63 ++-
 drivers/md/dm-thin.c          |  325 +++++++++----------
 3 files changed, 536 insertions(+), 539 deletions(-)

Index: linux-3.0-rc7/drivers/md/dm-thin-metadata.c
===================================================================
--- linux-3.0-rc7.orig/drivers/md/dm-thin-metadata.c
+++ linux-3.0-rc7/drivers/md/dm-thin-metadata.c
@@ -14,7 +14,7 @@
 
 /*----------------------------------------------------------------*/
 
-#define DM_MSG_PREFIX   "thin metadata"
+#define DM_MSG_PREFIX   "thin-metadata"
 
 #define THIN_SUPERBLOCK_MAGIC 27022010
 #define THIN_SUPERBLOCK_LOCATION 0
@@ -100,15 +100,15 @@ struct dm_thin_metadata {
 	struct dm_block *sblock;
 	dm_block_t root;
 	dm_block_t details_root;
-	struct list_head ms_devices;
+	struct list_head thin_devices;
 	uint64_t trans_id;
 	unsigned long flags;
 	sector_t data_block_size;
 };
 
-struct dm_ms_device {
+struct dm_thin_device {
 	struct list_head list;
-	struct dm_thin_metadata *mmd;
+	struct dm_thin_metadata *tmd;
 	dm_thin_dev_t id;
 
 	int open_count;
@@ -283,13 +283,13 @@ static int superblock_all_zeroes(struct 
 	return dm_bm_unlock(b);
 }
 
-static struct dm_thin_metadata *alloc_mmd(struct dm_block_manager *bm,
+static struct dm_thin_metadata *alloc_tmd(struct dm_block_manager *bm,
 					  dm_block_t nr_blocks, int create)
 {
 	int r;
 	struct dm_space_map *sm, *data_sm;
 	struct dm_transaction_manager *tm;
-	struct dm_thin_metadata *mmd;
+	struct dm_thin_metadata *tmd;
 	struct dm_block *sblock;
 
 	if (create) {
@@ -345,70 +345,70 @@ static struct dm_thin_metadata *alloc_mm
 		dm_tm_unlock(tm, sblock);
 	}
 
-	mmd = kmalloc(sizeof(*mmd), GFP_KERNEL);
-	if (!mmd) {
+	tmd = kmalloc(sizeof(*tmd), GFP_KERNEL);
+	if (!tmd) {
 		DMERR("could not allocate metadata struct");
 		r = -ENOMEM;
 		goto bad;
 	}
 
-	mmd->bm = bm;
-	mmd->metadata_sm = sm;
-	mmd->data_sm = data_sm;
-	mmd->tm = tm;
-	mmd->nb_tm = dm_tm_create_non_blocking_clone(tm);
-	if (!mmd->nb_tm) {
+	tmd->bm = bm;
+	tmd->metadata_sm = sm;
+	tmd->data_sm = data_sm;
+	tmd->tm = tm;
+	tmd->nb_tm = dm_tm_create_non_blocking_clone(tm);
+	if (!tmd->nb_tm) {
 		DMERR("could not create clone tm");
 		r = -ENOMEM;
 		goto bad;
 	}
 
-	mmd->sblock = NULL;
+	tmd->sblock = NULL;
 
-	mmd->info.tm = tm;
-	mmd->info.levels = 2;
-	mmd->info.value_type.context = mmd->data_sm;
-	mmd->info.value_type.size = sizeof(__le64);
-	mmd->info.value_type.inc = data_block_inc;
-	mmd->info.value_type.dec = data_block_dec;
-	mmd->info.value_type.equal = data_block_equal;
-
-	memcpy(&mmd->nb_info, &mmd->info, sizeof(mmd->nb_info));
-	mmd->nb_info.tm = mmd->nb_tm;
-
-	mmd->tl_info.tm = tm;
-	mmd->tl_info.levels = 1;
-	mmd->tl_info.value_type.context = &mmd->info;
-	mmd->tl_info.value_type.size = sizeof(__le64);
-	mmd->tl_info.value_type.inc = subtree_inc;
-	mmd->tl_info.value_type.dec = subtree_dec;
-	mmd->tl_info.value_type.equal = subtree_equal;
-
-	mmd->bl_info.tm = tm;
-	mmd->bl_info.levels = 1;
-	mmd->bl_info.value_type.context = mmd->data_sm;
-	mmd->bl_info.value_type.size = sizeof(__le64);
-	mmd->bl_info.value_type.inc = data_block_inc;
-	mmd->bl_info.value_type.dec = data_block_dec;
-	mmd->bl_info.value_type.equal = data_block_equal;
-
-	mmd->details_info.tm = tm;
-	mmd->details_info.levels = 1;
-	mmd->details_info.value_type.context = NULL;
-	mmd->details_info.value_type.size = sizeof(struct device_details);
-	mmd->details_info.value_type.inc = NULL;
-	mmd->details_info.value_type.dec = NULL;
-	mmd->details_info.value_type.equal = NULL;
-
-	mmd->root = 0;
-
-	init_rwsem(&mmd->root_lock);
-	mmd->time = 0;
-	mmd->need_commit = 0;
-	mmd->details_root = 0;
-	INIT_LIST_HEAD(&mmd->ms_devices);
+	tmd->info.tm = tm;
+	tmd->info.levels = 2;
+	tmd->info.value_type.context = tmd->data_sm;
+	tmd->info.value_type.size = sizeof(__le64);
+	tmd->info.value_type.inc = data_block_inc;
+	tmd->info.value_type.dec = data_block_dec;
+	tmd->info.value_type.equal = data_block_equal;
+
+	memcpy(&tmd->nb_info, &tmd->info, sizeof(tmd->nb_info));
+	tmd->nb_info.tm = tmd->nb_tm;
+
+	tmd->tl_info.tm = tm;
+	tmd->tl_info.levels = 1;
+	tmd->tl_info.value_type.context = &tmd->info;
+	tmd->tl_info.value_type.size = sizeof(__le64);
+	tmd->tl_info.value_type.inc = subtree_inc;
+	tmd->tl_info.value_type.dec = subtree_dec;
+	tmd->tl_info.value_type.equal = subtree_equal;
+
+	tmd->bl_info.tm = tm;
+	tmd->bl_info.levels = 1;
+	tmd->bl_info.value_type.context = tmd->data_sm;
+	tmd->bl_info.value_type.size = sizeof(__le64);
+	tmd->bl_info.value_type.inc = data_block_inc;
+	tmd->bl_info.value_type.dec = data_block_dec;
+	tmd->bl_info.value_type.equal = data_block_equal;
+
+	tmd->details_info.tm = tm;
+	tmd->details_info.levels = 1;
+	tmd->details_info.value_type.context = NULL;
+	tmd->details_info.value_type.size = sizeof(struct device_details);
+	tmd->details_info.value_type.inc = NULL;
+	tmd->details_info.value_type.dec = NULL;
+	tmd->details_info.value_type.equal = NULL;
+
+	tmd->root = 0;
+
+	init_rwsem(&tmd->root_lock);
+	tmd->time = 0;
+	tmd->need_commit = 0;
+	tmd->details_root = 0;
+	INIT_LIST_HEAD(&tmd->thin_devices);
 
-	return mmd;
+	return tmd;
 
 bad:
 	dm_tm_destroy(tm);
@@ -418,28 +418,28 @@ bad:
 	return ERR_PTR(r);
 }
 
-static int begin_transaction(struct dm_thin_metadata *mmd)
+static int begin_transaction(struct dm_thin_metadata *tmd)
 {
 	int r;
 	u32 features;
 	struct thin_super_block *sb;
 
-	/* dm_thin_metadata_commit() resets mmd->sblock */
-	WARN_ON(mmd->sblock);
-	mmd->need_commit = 0;
+	/* dm_thin_metadata_commit() resets tmd->sblock */
+	WARN_ON(tmd->sblock);
+	tmd->need_commit = 0;
 	/* superblock is unlocked via dm_tm_commit() */
-	r = dm_bm_write_lock(mmd->bm, THIN_SUPERBLOCK_LOCATION,
-			     &sb_validator_, &mmd->sblock);
+	r = dm_bm_write_lock(tmd->bm, THIN_SUPERBLOCK_LOCATION,
+			     &sb_validator_, &tmd->sblock);
 	if (r)
 		return r;
 
-	sb = dm_block_data(mmd->sblock);
-	mmd->time = __le32_to_cpu(sb->time);
-	mmd->root = __le64_to_cpu(sb->data_mapping_root);
-	mmd->details_root = __le64_to_cpu(sb->device_details_root);
-	mmd->trans_id = __le64_to_cpu(sb->trans_id);
-	mmd->flags = __le32_to_cpu(sb->flags);
-	mmd->data_block_size = __le32_to_cpu(sb->data_block_size);
+	sb = dm_block_data(tmd->sblock);
+	tmd->time = __le32_to_cpu(sb->time);
+	tmd->root = __le64_to_cpu(sb->data_mapping_root);
+	tmd->details_root = __le64_to_cpu(sb->device_details_root);
+	tmd->trans_id = __le64_to_cpu(sb->trans_id);
+	tmd->flags = __le32_to_cpu(sb->flags);
+	tmd->data_block_size = __le32_to_cpu(sb->data_block_size);
 
 	features = __le32_to_cpu(sb->incompat_flags) &
 		~THIN_FEATURE_INCOMPAT_SUPP;
@@ -451,7 +451,7 @@ static int begin_transaction(struct dm_t
 	}
 
 	/* check for read-only metadata to skip the following RDWR checks */
-	if (get_disk_ro(mmd->bdev->bd_disk))
+	if (get_disk_ro(tmd->bdev->bd_disk))
 		return 0;
 
 	features = __le32_to_cpu(sb->compat_ro_flags) &
@@ -471,7 +471,7 @@ dm_thin_metadata_open(struct block_devic
 {
 	int r;
 	struct thin_super_block *sb;
-	struct dm_thin_metadata *mmd;
+	struct dm_thin_metadata *tmd;
 	sector_t bdev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
 	struct dm_block_manager *bm;
 	int create;
@@ -489,28 +489,28 @@ dm_thin_metadata_open(struct block_devic
 		return ERR_PTR(r);
 	}
 
-	mmd = alloc_mmd(bm, 0, create);
-	if (IS_ERR(mmd)) {
-		/* alloc_mmd() destroys the block manager on failure */
-		return mmd; /* already an ERR_PTR */
+	tmd = alloc_tmd(bm, 0, create);
+	if (IS_ERR(tmd)) {
+		/* alloc_tmd() destroys the block manager on failure */
+		return tmd; /* already an ERR_PTR */
 	}
-	mmd->bdev = bdev;
+	tmd->bdev = bdev;
 
 	if (!create) {
-		r = begin_transaction(mmd);
+		r = begin_transaction(tmd);
 		if (r < 0)
 			goto bad;
-		return mmd;
+		return tmd;
 	}
 
 	/* Create */
-	if (!mmd->sblock) {
-		r = begin_transaction(mmd);
+	if (!tmd->sblock) {
+		r = begin_transaction(tmd);
 		if (r < 0)
 			goto bad;
 	}
 
-	sb = dm_block_data(mmd->sblock);
+	sb = dm_block_data(tmd->sblock);
 	sb->magic = __cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
 	sb->version = __cpu_to_le32(THIN_VERSION);
 	sb->time = 0;
@@ -518,322 +518,329 @@ dm_thin_metadata_open(struct block_devic
 	sb->metadata_nr_blocks = __cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
 	sb->data_block_size = __cpu_to_le32(data_block_size);
 
-	r = dm_btree_empty(&mmd->info, &mmd->root);
+	r = dm_btree_empty(&tmd->info, &tmd->root);
 	if (r < 0)
 		goto bad;
 
-	r = dm_btree_empty(&mmd->details_info, &mmd->details_root);
+	r = dm_btree_empty(&tmd->details_info, &tmd->details_root);
 	if (r < 0) {
 		DMERR("couldn't create devices root");
 		goto bad;
 	}
 
-	mmd->flags = 0;
-	mmd->need_commit = 1;
-	r = dm_thin_metadata_commit(mmd);
+	tmd->flags = 0;
+	tmd->need_commit = 1;
+	r = dm_thin_metadata_commit(tmd);
 	if (r < 0) {
 		DMERR("%s: dm_thin_metadata_commit() failed, error = %d",
 		      __func__, r);
 		goto bad;
 	}
 
-	return mmd;
+	return tmd;
 bad:
-	if (dm_thin_metadata_close(mmd) < 0)
+	if (dm_thin_metadata_close(tmd) < 0)
 		DMWARN("%s: dm_thin_metadata_close() failed.", __func__);
 	return ERR_PTR(r);
 }
 
-int dm_thin_metadata_close(struct dm_thin_metadata *mmd)
+int dm_thin_metadata_close(struct dm_thin_metadata *tmd)
 {
 	int r;
 	unsigned open_devices = 0;
-	struct dm_ms_device *msd, *tmp;
+	struct dm_thin_device *td, *tmp;
 
-	down_read(&mmd->root_lock);
-	list_for_each_entry_safe(msd, tmp, &mmd->ms_devices, list) {
-		if (msd->open_count)
+	down_read(&tmd->root_lock);
+	list_for_each_entry_safe(td, tmp, &tmd->thin_devices, list) {
+		if (td->open_count)
 			open_devices++;
 		else {
-			list_del(&msd->list);
-			kfree(msd);
+			list_del(&td->list);
+			kfree(td);
 		}
 	}
-	up_read(&mmd->root_lock);
+	up_read(&tmd->root_lock);
 
 	if (open_devices) {
-		DMERR("attempt to close mmd when %u device(s) are still open",
+		DMERR("attempt to close tmd when %u device(s) are still open",
 		       open_devices);
 		return -EBUSY;
 	}
 
-	if (mmd->sblock) {
-		r = dm_thin_metadata_commit(mmd);
+	if (tmd->sblock) {
+		r = dm_thin_metadata_commit(tmd);
 		if (r)
 			DMWARN("%s: dm_thin_metadata_commit() failed, error = %d",
 			       __func__, r);
 	}
 
-	dm_tm_destroy(mmd->tm);
-	dm_tm_destroy(mmd->nb_tm);
-	dm_block_manager_destroy(mmd->bm);
-	dm_sm_destroy(mmd->metadata_sm);
-	dm_sm_destroy(mmd->data_sm);
-	kfree(mmd);
+	dm_tm_destroy(tmd->tm);
+	dm_tm_destroy(tmd->nb_tm);
+	dm_block_manager_destroy(tmd->bm);
+	dm_sm_destroy(tmd->metadata_sm);
+	dm_sm_destroy(tmd->data_sm);
+	kfree(tmd);
 
 	return 0;
 }
 
-static int __open_device(struct dm_thin_metadata *mmd,
+int
+dm_thin_metadata_rebind_block_device(struct dm_thin_metadata *tmd,
+				     struct block_device *bdev)
+{
+	return dm_bm_rebind_block_device(tmd->bm, bdev);
+}
+
+static int __open_device(struct dm_thin_metadata *tmd,
 			 dm_thin_dev_t dev, int create,
-			 struct dm_ms_device **msd)
+			 struct dm_thin_device **td)
 {
 	int r, changed = 0;
-	struct dm_ms_device *msd2;
+	struct dm_thin_device *td2;
 	uint64_t key = dev;
 	struct device_details details;
 
 	/* check the device isn't already open */
-	list_for_each_entry(msd2, &mmd->ms_devices, list)
-		if (msd2->id == dev) {
-			msd2->open_count++;
-			*msd = msd2;
+	list_for_each_entry(td2, &tmd->thin_devices, list)
+		if (td2->id == dev) {
+			td2->open_count++;
+			*td = td2;
 			return 0;
 		}
 
 	/* check the device exists */
-	r = dm_btree_lookup(&mmd->details_info, mmd->details_root,
+	r = dm_btree_lookup(&tmd->details_info, tmd->details_root,
 			    &key, &details);
 	if (r) {
 		if (r == -ENODATA && create) {
 			changed = 1;
 			details.mapped_blocks = 0;
-			details.transaction_id = __cpu_to_le64(mmd->trans_id);
-			details.creation_time = __cpu_to_le32(mmd->time);
-			details.snapshotted_time = __cpu_to_le32(mmd->time);
+			details.transaction_id = __cpu_to_le64(tmd->trans_id);
+			details.creation_time = __cpu_to_le32(tmd->time);
+			details.snapshotted_time = __cpu_to_le32(tmd->time);
 
 		} else
 			return r;
 	}
 
-	*msd = kmalloc(sizeof(**msd), GFP_NOIO);
-	if (!*msd)
+	*td = kmalloc(sizeof(**td), GFP_NOIO);
+	if (!*td)
 		return -ENOMEM;
 
-	(*msd)->mmd = mmd;
-	(*msd)->id = dev;
-	(*msd)->open_count = 1;
-	(*msd)->changed = changed;
-	(*msd)->mapped_blocks = __le64_to_cpu(details.mapped_blocks);
-	(*msd)->transaction_id = __le64_to_cpu(details.transaction_id);
-	(*msd)->creation_time = __le32_to_cpu(details.creation_time);
-	(*msd)->snapshotted_time = __le32_to_cpu(details.snapshotted_time);
+	(*td)->tmd = tmd;
+	(*td)->id = dev;
+	(*td)->open_count = 1;
+	(*td)->changed = changed;
+	(*td)->mapped_blocks = __le64_to_cpu(details.mapped_blocks);
+	(*td)->transaction_id = __le64_to_cpu(details.transaction_id);
+	(*td)->creation_time = __le32_to_cpu(details.creation_time);
+	(*td)->snapshotted_time = __le32_to_cpu(details.snapshotted_time);
 
-	list_add(&(*msd)->list, &mmd->ms_devices);
+	list_add(&(*td)->list, &tmd->thin_devices);
 
 	return 0;
 }
 
-static void __close_device(struct dm_ms_device *msd)
+static void __close_device(struct dm_thin_device *td)
 {
-	--msd->open_count;
+	--td->open_count;
 }
 
-static int __create_thin(struct dm_thin_metadata *mmd,
+static int __create_thin(struct dm_thin_metadata *tmd,
 			 dm_thin_dev_t dev)
 {
 	int r;
 	dm_block_t dev_root;
 	uint64_t key = dev;
 	struct device_details detail;
-	struct dm_ms_device *msd;
+	struct dm_thin_device *td;
 	__le64 value;
 
-	r = dm_btree_lookup(&mmd->details_info, mmd->details_root,
+	r = dm_btree_lookup(&tmd->details_info, tmd->details_root,
 			    &key, &detail);
 	if (!r)
 		return -EEXIST;
 
 	/* create an empty btree for the mappings */
-	r = dm_btree_empty(&mmd->bl_info, &dev_root);
+	r = dm_btree_empty(&tmd->bl_info, &dev_root);
 	if (r)
 		return r;
 
 	/* insert it into the main mapping tree */
 	value = __cpu_to_le64(dev_root);
-	r = dm_btree_insert(&mmd->tl_info, mmd->root, &key, &value, &mmd->root);
+	r = dm_btree_insert(&tmd->tl_info, tmd->root, &key, &value, &tmd->root);
 	if (r) {
-		dm_btree_del(&mmd->bl_info, dev_root);
+		dm_btree_del(&tmd->bl_info, dev_root);
 		return r;
 	}
 
-	r = __open_device(mmd, dev, 1, &msd);
+	r = __open_device(tmd, dev, 1, &td);
 	if (r) {
-		__close_device(msd);
-		dm_btree_remove(&mmd->tl_info, mmd->root, &key, &mmd->root);
-		dm_btree_del(&mmd->bl_info, dev_root);
+		__close_device(td);
+		dm_btree_remove(&tmd->tl_info, tmd->root, &key, &tmd->root);
+		dm_btree_del(&tmd->bl_info, dev_root);
 		return r;
 	}
-	msd->changed = 1;
-	__close_device(msd);
+	td->changed = 1;
+	__close_device(td);
 
 	return r;
 }
 
-int dm_thin_metadata_create_thin(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_create_thin(struct dm_thin_metadata *tmd,
 				 dm_thin_dev_t dev)
 {
 	int r;
 
-	down_write(&mmd->root_lock);
-	r = __create_thin(mmd, dev);
-	up_write(&mmd->root_lock);
+	down_write(&tmd->root_lock);
+	r = __create_thin(tmd, dev);
+	up_write(&tmd->root_lock);
 
 	return r;
 }
 
-static int __set_snapshot_details(struct dm_thin_metadata *mmd,
-				  struct dm_ms_device *snap,
+static int __set_snapshot_details(struct dm_thin_metadata *tmd,
+				  struct dm_thin_device *snap,
 				  dm_thin_dev_t origin, uint32_t time)
 {
 	int r;
-	struct dm_ms_device *msd;
+	struct dm_thin_device *td;
 
-	r = __open_device(mmd, origin, 0, &msd);
+	r = __open_device(tmd, origin, 0, &td);
 	if (r)
 		return r;
 
-	msd->changed = 1;
-	msd->snapshotted_time = time;
+	td->changed = 1;
+	td->snapshotted_time = time;
 
-	snap->mapped_blocks = msd->mapped_blocks;
+	snap->mapped_blocks = td->mapped_blocks;
 	snap->snapshotted_time = time;
-	__close_device(msd);
+	__close_device(td);
 
 	return 0;
 }
 
-static int __create_snap(struct dm_thin_metadata *mmd,
+static int __create_snap(struct dm_thin_metadata *tmd,
 			 dm_thin_dev_t dev, dm_thin_dev_t origin)
 {
 	int r;
 	dm_block_t origin_root, snap_root;
 	uint64_t key = origin, dev_key = dev;
-	struct dm_ms_device *msd;
+	struct dm_thin_device *td;
 	struct device_details detail;
 	__le64 value;
 
 	/* check this device is unused */
-	r = dm_btree_lookup(&mmd->details_info, mmd->details_root,
+	r = dm_btree_lookup(&tmd->details_info, tmd->details_root,
 			    &dev_key, &detail);
 	if (!r)
 		return -EEXIST;
 
 	/* find the mapping tree for the origin */
-	r = dm_btree_lookup(&mmd->tl_info, mmd->root, &key, &value);
+	r = dm_btree_lookup(&tmd->tl_info, tmd->root, &key, &value);
 	if (r)
 		return r;
 	origin_root = __le64_to_cpu(value);
 
 	/* clone the origin */
-	r = dm_btree_clone(&mmd->bl_info, origin_root, &snap_root);
+	r = dm_btree_clone(&tmd->bl_info, origin_root, &snap_root);
 	if (r)
 		return r;
 
 	/* insert into the main mapping tree */
 	value = __cpu_to_le64(snap_root);
 	key = dev;
-	r = dm_btree_insert(&mmd->tl_info, mmd->root, &key, &value, &mmd->root);
+	r = dm_btree_insert(&tmd->tl_info, tmd->root, &key, &value, &tmd->root);
 	if (r) {
-		dm_btree_del(&mmd->bl_info, snap_root);
+		dm_btree_del(&tmd->bl_info, snap_root);
 		return r;
 	}
 
-	mmd->time++;
+	tmd->time++;
 
-	r = __open_device(mmd, dev, 1, &msd);
+	r = __open_device(tmd, dev, 1, &td);
 	if (r)
 		goto bad;
 
-	r = __set_snapshot_details(mmd, msd, origin, mmd->time);
+	r = __set_snapshot_details(tmd, td, origin, tmd->time);
 	if (r)
 		goto bad;
 
-	__close_device(msd);
+	__close_device(td);
 	return 0;
 
 bad:
-	__close_device(msd);
-	dm_btree_remove(&mmd->tl_info, mmd->root, &key, &mmd->root);
-	dm_btree_remove(&mmd->details_info, mmd->details_root,
-			&key, &mmd->details_root);
+	__close_device(td);
+	dm_btree_remove(&tmd->tl_info, tmd->root, &key, &tmd->root);
+	dm_btree_remove(&tmd->details_info, tmd->details_root,
+			&key, &tmd->details_root);
 	return r;
 }
 
-int dm_thin_metadata_create_snap(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_create_snap(struct dm_thin_metadata *tmd,
 				 dm_thin_dev_t dev,
 				 dm_thin_dev_t origin)
 {
 	int r;
 
-	down_write(&mmd->root_lock);
-	r = __create_snap(mmd, dev, origin);
-	up_write(&mmd->root_lock);
+	down_write(&tmd->root_lock);
+	r = __create_snap(tmd, dev, origin);
+	up_write(&tmd->root_lock);
 
 	return r;
 }
 
-static int __delete_device(struct dm_thin_metadata *mmd,
+static int __delete_device(struct dm_thin_metadata *tmd,
 			   dm_thin_dev_t dev)
 {
 	int r;
 	uint64_t key = dev;
-	struct dm_ms_device *msd;
+	struct dm_thin_device *td;
 
 	/* TODO: failure should mark the transaction invalid */
-	r = __open_device(mmd, dev, 0, &msd);
+	r = __open_device(tmd, dev, 0, &td);
 	if (r)
 		return r;
 
-	if (msd->open_count > 1) {
-		__close_device(msd);
+	if (td->open_count > 1) {
+		__close_device(td);
 		return -EBUSY;
 	}
 
-	list_del(&msd->list);
-	kfree(msd);
-	r = dm_btree_remove(&mmd->details_info, mmd->details_root,
-			    &key, &mmd->details_root);
+	list_del(&td->list);
+	kfree(td);
+	r = dm_btree_remove(&tmd->details_info, tmd->details_root,
+			    &key, &tmd->details_root);
 	if (r)
 		return r;
 
-	r = dm_btree_remove(&mmd->tl_info, mmd->root, &key, &mmd->root);
+	r = dm_btree_remove(&tmd->tl_info, tmd->root, &key, &tmd->root);
 	if (r)
 		return r;
 
-	mmd->need_commit = 1;
+	tmd->need_commit = 1;
 	return 0;
 }
 
-int dm_thin_metadata_delete_device(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_delete_device(struct dm_thin_metadata *tmd,
 				   dm_thin_dev_t dev)
 {
 	int r;
 
-	down_write(&mmd->root_lock);
-	r = __delete_device(mmd, dev);
-	up_write(&mmd->root_lock);
+	down_write(&tmd->root_lock);
+	r = __delete_device(tmd, dev);
+	up_write(&tmd->root_lock);
 
 	return r;
 }
 
-static int __trim_thin_dev(struct dm_ms_device *msd, sector_t new_size)
+static int __trim_thin_dev(struct dm_thin_device *td, sector_t new_size)
 {
-	struct dm_thin_metadata *mmd = msd->mmd;
+	struct dm_thin_metadata *tmd = td->tmd;
 	/* FIXME: convert new size to blocks */
-	uint64_t key[2] = { msd->id, new_size - 1 };
+	uint64_t key[2] = { td->id, new_size - 1 };
 
-	msd->changed = 1;
+	td->changed = 1;
 
 	/*
 	 * We need to truncate all the extraneous mappings.
@@ -841,136 +848,136 @@ static int __trim_thin_dev(struct dm_ms_
 	 * FIXME: We have to be careful to do this atomically.
 	 * Perhaps clone the bottom layer first so we can revert?
 	 */
-	return dm_btree_del_gt(&mmd->info, mmd->root, key, &mmd->root);
+	return dm_btree_del_gt(&tmd->info, tmd->root, key, &tmd->root);
 }
 
-int dm_thin_metadata_trim_thin_dev(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_trim_thin_dev(struct dm_thin_metadata *tmd,
 				   dm_thin_dev_t dev,
 				   sector_t new_size)
 {
 	int r;
-	struct dm_ms_device *msd;
+	struct dm_thin_device *td;
 
-	down_write(&mmd->root_lock);
-	r = __open_device(mmd, dev, 1, &msd);
+	down_write(&tmd->root_lock);
+	r = __open_device(tmd, dev, 1, &td);
 	if (r)
 		DMERR("couldn't open virtual device");
 	else {
-		r = __trim_thin_dev(msd, new_size);
-		__close_device(msd);
+		r = __trim_thin_dev(td, new_size);
+		__close_device(td);
 	}
 
 	/* FIXME: update mapped_blocks */
 
-	up_write(&mmd->root_lock);
+	up_write(&tmd->root_lock);
 
 	return r;
 }
 
-int dm_thin_metadata_set_transaction_id(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_set_transaction_id(struct dm_thin_metadata *tmd,
 					uint64_t current_id,
 					uint64_t new_id)
 {
-	down_write(&mmd->root_lock);
-	if (mmd->trans_id != current_id) {
-		up_write(&mmd->root_lock);
+	down_write(&tmd->root_lock);
+	if (tmd->trans_id != current_id) {
+		up_write(&tmd->root_lock);
 		DMERR("mismatched transaction id");
 		return -EINVAL;
 	}
 
-	mmd->trans_id = new_id;
-	mmd->need_commit = 1;
-	up_write(&mmd->root_lock);
+	tmd->trans_id = new_id;
+	tmd->need_commit = 1;
+	up_write(&tmd->root_lock);
 
 	return 0;
 }
 
-int dm_thin_metadata_get_transaction_id(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_get_transaction_id(struct dm_thin_metadata *tmd,
 					uint64_t *result)
 {
-	down_read(&mmd->root_lock);
-	*result = mmd->trans_id;
-	up_read(&mmd->root_lock);
+	down_read(&tmd->root_lock);
+	*result = tmd->trans_id;
+	up_read(&tmd->root_lock);
 
 	return 0;
 }
 
-int dm_thin_metadata_hold_root(struct dm_thin_metadata *mmd)
+int dm_thin_metadata_hold_root(struct dm_thin_metadata *tmd)
 {
 	/* FIXME implement */
 
 	return 0;
 }
 
-int dm_thin_metadata_get_held_root(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_get_held_root(struct dm_thin_metadata *tmd,
 				   dm_block_t *result)
 {
 	struct thin_super_block *sb;
 
-	down_read(&mmd->root_lock);
-	sb = dm_block_data(mmd->sblock);
+	down_read(&tmd->root_lock);
+	sb = dm_block_data(tmd->sblock);
 	*result = __le64_to_cpu(sb->held_root);
-	up_read(&mmd->root_lock);
+	up_read(&tmd->root_lock);
 
 	return 0;
 }
 
-int dm_thin_metadata_open_device(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_open_device(struct dm_thin_metadata *tmd,
 				 dm_thin_dev_t dev,
-				 struct dm_ms_device **msd)
+				 struct dm_thin_device **td)
 {
 	int r;
 
-	down_write(&mmd->root_lock);
-	r = __open_device(mmd, dev, 0, msd);
-	up_write(&mmd->root_lock);
+	down_write(&tmd->root_lock);
+	r = __open_device(tmd, dev, 0, td);
+	up_write(&tmd->root_lock);
 
 	return r;
 }
 
-int dm_thin_metadata_close_device(struct dm_ms_device *msd)
+int dm_thin_metadata_close_device(struct dm_thin_device *td)
 {
-	down_write(&msd->mmd->root_lock);
-	__close_device(msd);
-	up_write(&msd->mmd->root_lock);
+	down_write(&td->tmd->root_lock);
+	__close_device(td);
+	up_write(&td->tmd->root_lock);
 
 	return 0;
 }
 
-dm_thin_dev_t dm_thin_device_dev(struct dm_ms_device *msd)
+dm_thin_dev_t dm_thin_device_dev(struct dm_thin_device *td)
 {
-	return msd->id;
+	return td->id;
 }
 
-static int __snapshotted_since(struct dm_ms_device *msd, uint32_t time)
+static int __snapshotted_since(struct dm_thin_device *td, uint32_t time)
 {
-	return msd->snapshotted_time > time;
+	return td->snapshotted_time > time;
 }
 
-int dm_thin_metadata_lookup(struct dm_ms_device *msd,
+int dm_thin_metadata_lookup(struct dm_thin_device *td,
 			    dm_block_t block, int can_block,
 			    struct dm_thin_lookup_result *result)
 {
 	int r;
 	uint64_t keys[2], dm_block_time = 0;
 	__le64 value;
-	struct dm_thin_metadata *mmd = msd->mmd;
+	struct dm_thin_metadata *tmd = td->tmd;
 
-	keys[0] = msd->id;
+	keys[0] = td->id;
 	keys[1] = block;
 
 	if (can_block) {
-		down_read(&mmd->root_lock);
-		r = dm_btree_lookup(&mmd->info, mmd->root, keys, &value);
+		down_read(&tmd->root_lock);
+		r = dm_btree_lookup(&tmd->info, tmd->root, keys, &value);
 		if (!r)
 			dm_block_time = __le64_to_cpu(value);
-		up_read(&mmd->root_lock);
+		up_read(&tmd->root_lock);
 
-	} else if (down_read_trylock(&mmd->root_lock)) {
-		r = dm_btree_lookup(&mmd->nb_info, mmd->root, keys, &value);
+	} else if (down_read_trylock(&tmd->root_lock)) {
+		r = dm_btree_lookup(&tmd->nb_info, tmd->root, keys, &value);
 		if (!r)
 			dm_block_time = __le64_to_cpu(value);
-		up_read(&mmd->root_lock);
+		up_read(&tmd->root_lock);
 
 	} else
 		return -EWOULDBLOCK;
@@ -981,125 +988,125 @@ int dm_thin_metadata_lookup(struct dm_ms
 		unpack_dm_block_time(dm_block_time, &exception_block,
 				     &exception_time);
 		result->block = exception_block;
-		result->shared = __snapshotted_since(msd, exception_time);
+		result->shared = __snapshotted_since(td, exception_time);
 	}
 
 	return r;
 }
 
-static int __insert(struct dm_ms_device *msd,
+static int __insert(struct dm_thin_device *td,
 		    dm_block_t block, dm_block_t data_block)
 {
 	int r, inserted;
 	dm_block_t keys[2];
 	__le64 value;
-	struct dm_thin_metadata *mmd = msd->mmd;
+	struct dm_thin_metadata *tmd = td->tmd;
 
-	keys[0] = msd->id;
+	keys[0] = td->id;
 	keys[1] = block;
 
-	mmd->need_commit = 1;
-	value = __cpu_to_le64(pack_dm_block_time(data_block, mmd->time));
+	tmd->need_commit = 1;
+	value = __cpu_to_le64(pack_dm_block_time(data_block, tmd->time));
 
-	r = dm_btree_insert_notify(&mmd->info, mmd->root, keys, &value,
-				   &mmd->root, &inserted);
+	r = dm_btree_insert_notify(&tmd->info, tmd->root, keys, &value,
+				   &tmd->root, &inserted);
 	if (r)
 		return r;
 
 	if (inserted) {
-		msd->mapped_blocks++;
-		msd->changed = 1;
+		td->mapped_blocks++;
+		td->changed = 1;
 	}
 
 	return 0;
 }
 
-int dm_thin_metadata_insert(struct dm_ms_device *msd,
+int dm_thin_metadata_insert(struct dm_thin_device *td,
 			    dm_block_t block, dm_block_t data_block)
 {
 	int r;
 
-	down_write(&msd->mmd->root_lock);
-	r = __insert(msd, block, data_block);
-	up_write(&msd->mmd->root_lock);
+	down_write(&td->tmd->root_lock);
+	r = __insert(td, block, data_block);
+	up_write(&td->tmd->root_lock);
 
 	return r;
 }
 
-static int __remove(struct dm_ms_device *msd, dm_block_t block)
+static int __remove(struct dm_thin_device *td, dm_block_t block)
 {
 	int r;
-	struct dm_thin_metadata *mmd = msd->mmd;
-	dm_block_t keys[2] = { msd->id, block };
+	struct dm_thin_metadata *tmd = td->tmd;
+	dm_block_t keys[2] = { td->id, block };
 
-	r = dm_btree_remove(&mmd->info, mmd->root, keys, &mmd->root);
+	r = dm_btree_remove(&tmd->info, tmd->root, keys, &tmd->root);
 	if (r)
 		return r;
 
-	mmd->need_commit = 1;
+	tmd->need_commit = 1;
 	return 0;
 }
 
-int dm_thin_metadata_remove(struct dm_ms_device *msd, dm_block_t block)
+int dm_thin_metadata_remove(struct dm_thin_device *td, dm_block_t block)
 {
 	int r;
 
-	down_write(&msd->mmd->root_lock);
-	r = __remove(msd, block);
-	up_write(&msd->mmd->root_lock);
+	down_write(&td->tmd->root_lock);
+	r = __remove(td, block);
+	up_write(&td->tmd->root_lock);
 
 	return r;
 }
 
-int dm_thin_metadata_alloc_data_block(struct dm_ms_device *msd,
+int dm_thin_metadata_alloc_data_block(struct dm_thin_device *td,
 				      dm_block_t *result)
 {
 	int r;
-	struct dm_thin_metadata *mmd = msd->mmd;
+	struct dm_thin_metadata *tmd = td->tmd;
 
-	down_write(&mmd->root_lock);
-	r = dm_sm_new_block(mmd->data_sm, result);
-	mmd->need_commit = 1;
-	up_write(&mmd->root_lock);
+	down_write(&tmd->root_lock);
+	r = dm_sm_new_block(tmd->data_sm, result);
+	tmd->need_commit = 1;
+	up_write(&tmd->root_lock);
 
 	return r;
 }
 
-static int __write_changed_details(struct dm_thin_metadata *mmd)
+static int __write_changed_details(struct dm_thin_metadata *tmd)
 {
 	int r;
-	struct dm_ms_device *msd, *tmp;
+	struct dm_thin_device *td, *tmp;
 
-	list_for_each_entry_safe(msd, tmp, &mmd->ms_devices, list) {
-		if (msd->changed) {
+	list_for_each_entry_safe(td, tmp, &tmd->thin_devices, list) {
+		if (td->changed) {
 			struct device_details dd;
-			uint64_t key = msd->id;
+			uint64_t key = td->id;
 
-			dd.mapped_blocks = __cpu_to_le64(msd->mapped_blocks);
-			dd.transaction_id = __cpu_to_le64(msd->transaction_id);
-			dd.creation_time = __cpu_to_le32(msd->creation_time);
-			dd.snapshotted_time = __cpu_to_le32(msd->snapshotted_time);
+			dd.mapped_blocks = __cpu_to_le64(td->mapped_blocks);
+			dd.transaction_id = __cpu_to_le64(td->transaction_id);
+			dd.creation_time = __cpu_to_le32(td->creation_time);
+			dd.snapshotted_time = __cpu_to_le32(td->snapshotted_time);
 
-			r = dm_btree_insert(&mmd->details_info, mmd->details_root,
-					    &key, &dd, &mmd->details_root);
+			r = dm_btree_insert(&tmd->details_info, tmd->details_root,
+					    &key, &dd, &tmd->details_root);
 			if (r)
 				return r;
 
-			if (msd->open_count)
-				msd->changed = 0;
+			if (td->open_count)
+				td->changed = 0;
 			else {
-				list_del(&msd->list);
-				kfree(msd);
+				list_del(&td->list);
+				kfree(td);
 			}
 
-			mmd->need_commit = 1;
+			tmd->need_commit = 1;
 		}
 	}
 
 	return 0;
 }
 
-int dm_thin_metadata_commit(struct dm_thin_metadata *mmd)
+int dm_thin_metadata_commit(struct dm_thin_metadata *tmd)
 {
 	/*
 	 * FIXME: associated pool should be made read-only on
@@ -1112,143 +1119,143 @@ int dm_thin_metadata_commit(struct dm_th
 	/* We want to know if/when the thin_super_block exceeds a 512b sector */
 	BUILD_BUG_ON(sizeof(struct thin_super_block) > 512);
 
-	down_write(&mmd->root_lock);
-	r = __write_changed_details(mmd);
+	down_write(&tmd->root_lock);
+	r = __write_changed_details(tmd);
 	if (r < 0)
 		goto out;
 
-	if (!mmd->need_commit)
+	if (!tmd->need_commit)
 		goto out;
 
-	r = dm_tm_pre_commit(mmd->tm);
+	r = dm_tm_pre_commit(tmd->tm);
 	if (r < 0)
 		goto out;
 
-	r = dm_sm_root_size(mmd->metadata_sm, &len);
+	r = dm_sm_root_size(tmd->metadata_sm, &len);
 	if (r < 0)
 		goto out;
 
-	sb = dm_block_data(mmd->sblock);
-	sb->time = __cpu_to_le32(mmd->time);
-	sb->data_mapping_root = __cpu_to_le64(mmd->root);
-	sb->device_details_root = __cpu_to_le64(mmd->details_root);
-	sb->trans_id = __cpu_to_le64(mmd->trans_id);
-	sb->flags = __cpu_to_le32(mmd->flags);
-	r = dm_sm_copy_root(mmd->metadata_sm, &sb->metadata_space_map_root, len);
+	sb = dm_block_data(tmd->sblock);
+	sb->time = __cpu_to_le32(tmd->time);
+	sb->data_mapping_root = __cpu_to_le64(tmd->root);
+	sb->device_details_root = __cpu_to_le64(tmd->details_root);
+	sb->trans_id = __cpu_to_le64(tmd->trans_id);
+	sb->flags = __cpu_to_le32(tmd->flags);
+	r = dm_sm_copy_root(tmd->metadata_sm, &sb->metadata_space_map_root, len);
 	if (r < 0)
 		goto out;
 
-	r = dm_sm_copy_root(mmd->data_sm, &sb->data_space_map_root, len);
+	r = dm_sm_copy_root(tmd->data_sm, &sb->data_space_map_root, len);
 	if (r < 0)
 		goto out;
 
-	r = dm_tm_commit(mmd->tm, mmd->sblock);
+	r = dm_tm_commit(tmd->tm, tmd->sblock);
 	if (r < 0)
 		goto out;
 
 	/* open the next transaction */
-	mmd->sblock = NULL;
+	tmd->sblock = NULL;
 
-	r = begin_transaction(mmd);
+	r = begin_transaction(tmd);
 out:
-	up_write(&mmd->root_lock);
+	up_write(&tmd->root_lock);
 	return r;
 }
 
-int dm_thin_metadata_get_free_blocks(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_get_free_blocks(struct dm_thin_metadata *tmd,
 				     dm_block_t *result)
 {
 	int r;
 
-	down_read(&mmd->root_lock);
-	r = dm_sm_get_nr_free(mmd->data_sm, result);
-	up_read(&mmd->root_lock);
+	down_read(&tmd->root_lock);
+	r = dm_sm_get_nr_free(tmd->data_sm, result);
+	up_read(&tmd->root_lock);
 
 	return r;
 }
 
 int
-dm_thin_metadata_get_free_blocks_metadata(struct dm_thin_metadata *mmd,
+dm_thin_metadata_get_free_blocks_metadata(struct dm_thin_metadata *tmd,
 					  dm_block_t *result)
 {
 	int r;
 
-	down_read(&mmd->root_lock);
-	r = dm_sm_get_nr_free(mmd->metadata_sm, result);
-	up_read(&mmd->root_lock);
+	down_read(&tmd->root_lock);
+	r = dm_sm_get_nr_free(tmd->metadata_sm, result);
+	up_read(&tmd->root_lock);
 
 	return r;
 }
 
-int dm_thin_metadata_get_data_block_size(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_get_data_block_size(struct dm_thin_metadata *tmd,
 					 sector_t *result)
 {
-	down_read(&mmd->root_lock);
-	*result = mmd->data_block_size;
-	up_read(&mmd->root_lock);
+	down_read(&tmd->root_lock);
+	*result = tmd->data_block_size;
+	up_read(&tmd->root_lock);
 
 	return 0;
 }
 
-int dm_thin_metadata_get_data_dev_size(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_get_data_dev_size(struct dm_thin_metadata *tmd,
 				       dm_block_t *result)
 {
 	int r;
 
-	down_read(&mmd->root_lock);
-	r = dm_sm_get_nr_blocks(mmd->data_sm, result);
-	up_read(&mmd->root_lock);
+	down_read(&tmd->root_lock);
+	r = dm_sm_get_nr_blocks(tmd->data_sm, result);
+	up_read(&tmd->root_lock);
 
 	return r;
 }
 
-int dm_thin_metadata_get_mapped_count(struct dm_ms_device *msd,
+int dm_thin_metadata_get_mapped_count(struct dm_thin_device *td,
 				      dm_block_t *result)
 {
-	struct dm_thin_metadata *mmd = msd->mmd;
+	struct dm_thin_metadata *tmd = td->tmd;
 
-	down_read(&mmd->root_lock);
-	*result = msd->mapped_blocks;
-	up_read(&mmd->root_lock);
+	down_read(&tmd->root_lock);
+	*result = td->mapped_blocks;
+	up_read(&tmd->root_lock);
 
 	return 0;
 }
 
-static int __highest_block(struct dm_ms_device *msd, dm_block_t *result)
+static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
 {
 	int r;
 	__le64 value;
 	dm_block_t thin_root;
-	struct dm_thin_metadata *mmd = msd->mmd;
+	struct dm_thin_metadata *tmd = td->tmd;
 
-	r = dm_btree_lookup(&mmd->tl_info, mmd->root, &msd->id, &value);
+	r = dm_btree_lookup(&tmd->tl_info, tmd->root, &td->id, &value);
 	if (r)
 		return r;
 
 	thin_root = __le64_to_cpu(value);
-	return dm_btree_find_highest_key(&mmd->bl_info, thin_root, result);
+	return dm_btree_find_highest_key(&tmd->bl_info, thin_root, result);
 }
 
-int dm_thin_metadata_get_highest_mapped_block(struct dm_ms_device *msd,
+int dm_thin_metadata_get_highest_mapped_block(struct dm_thin_device *td,
 					      dm_block_t *result)
 {
 	int r;
-	struct dm_thin_metadata *mmd = msd->mmd;
+	struct dm_thin_metadata *tmd = td->tmd;
 
-	down_read(&mmd->root_lock);
-	r = __highest_block(msd, result);
-	up_read(&mmd->root_lock);
+	down_read(&tmd->root_lock);
+	r = __highest_block(td, result);
+	up_read(&tmd->root_lock);
 
 	return r;
 }
 
-static int __resize_data_dev(struct dm_thin_metadata *mmd,
+static int __resize_data_dev(struct dm_thin_metadata *tmd,
 			     dm_block_t new_count)
 {
 	int r;
 	dm_block_t old_count;
 
-	r = dm_sm_get_nr_blocks(mmd->data_sm, &old_count);
+	r = dm_sm_get_nr_blocks(tmd->data_sm, &old_count);
 	if (r)
 		return r;
 
@@ -1258,22 +1265,22 @@ static int __resize_data_dev(struct dm_t
 	}
 
 	if (new_count > old_count) {
-		r = dm_sm_extend(mmd->data_sm, new_count - old_count);
+		r = dm_sm_extend(tmd->data_sm, new_count - old_count);
 		if (!r)
-			mmd->need_commit = 1;
+			tmd->need_commit = 1;
 		return r;
 	} else
 		return 0;
 }
 
-int dm_thin_metadata_resize_data_dev(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_resize_data_dev(struct dm_thin_metadata *tmd,
 				     dm_block_t new_count)
 {
 	int r;
 
-	down_write(&mmd->root_lock);
-	r = __resize_data_dev(mmd, new_count);
-	up_write(&mmd->root_lock);
+	down_write(&tmd->root_lock);
+	r = __resize_data_dev(tmd, new_count);
+	up_write(&tmd->root_lock);
 
 	return r;
 }
Index: linux-3.0-rc7/drivers/md/dm-thin-metadata.h
===================================================================
--- linux-3.0-rc7.orig/drivers/md/dm-thin-metadata.h
+++ linux-3.0-rc7/drivers/md/dm-thin-metadata.h
@@ -12,7 +12,7 @@
 /*----------------------------------------------------------------*/
 
 struct dm_thin_metadata;
-struct dm_ms_device;
+struct dm_thin_device;
 typedef uint64_t dm_thin_dev_t;
 
 /*
@@ -22,7 +22,14 @@ struct dm_thin_metadata *
 dm_thin_metadata_open(struct block_device *bdev,
 		      sector_t data_block_size);
 
-int dm_thin_metadata_close(struct dm_thin_metadata *mmd);
+int dm_thin_metadata_close(struct dm_thin_metadata *tmd);
+
+/*
+ * This does not currently resize the metadata device, but should eventually.
+ */
+int
+dm_thin_metadata_rebind_block_device(struct dm_thin_metadata *tmd,
+				     struct block_device *bdev);
 
 /*
  * Compat feature flags.  Any incompat flags beyond the ones
@@ -35,7 +42,7 @@ int dm_thin_metadata_close(struct dm_thi
 /*
  * Device creation/deletion.
  */
-int dm_thin_metadata_create_thin(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_create_thin(struct dm_thin_metadata *tmd,
 				 dm_thin_dev_t dev);
 
 /*
@@ -44,7 +51,7 @@ int dm_thin_metadata_create_thin(struct 
  * You can only snapshot a quiesced origin.  i.e. one that is either
  * suspended or not instanced at all.
  */
-int dm_thin_metadata_create_snap(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_create_snap(struct dm_thin_metadata *tmd,
 				 dm_thin_dev_t dev,
 				 dm_thin_dev_t origin);
 
@@ -53,7 +60,7 @@ int dm_thin_metadata_create_snap(struct 
  * when that device is open, operations on that device will just start
  * failing.  You still need to call close() on the device.
  */
-int dm_thin_metadata_delete_device(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_delete_device(struct dm_thin_metadata *tmd,
 				   dm_thin_dev_t dev);
 
 /*
@@ -61,7 +68,7 @@ int dm_thin_metadata_delete_device(struc
  * highest mapped block.  This trimming function allows the user to remove
  * mappings above a certain virtual block.
  */
-int dm_thin_metadata_trim_thin_dev(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_trim_thin_dev(struct dm_thin_metadata *tmd,
 				   dm_thin_dev_t dev,
 				   sector_t new_size);
 
@@ -69,24 +76,24 @@ int dm_thin_metadata_trim_thin_dev(struc
  * Commits _all_ metadata changes: device creation, deletion, mapping
  * updates.
  */
-int dm_thin_metadata_commit(struct dm_thin_metadata *mmd);
+int dm_thin_metadata_commit(struct dm_thin_metadata *tmd);
 
 /*
  * Set/get userspace transaction id
  */
-int dm_thin_metadata_set_transaction_id(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_set_transaction_id(struct dm_thin_metadata *tmd,
 					uint64_t current_id,
 					uint64_t new_id);
 
-int dm_thin_metadata_get_transaction_id(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_get_transaction_id(struct dm_thin_metadata *tmd,
 					uint64_t *result);
 
 /*
  * hold/get root for userspace transaction
  */
-int dm_thin_metadata_hold_root(struct dm_thin_metadata *mmd);
+int dm_thin_metadata_hold_root(struct dm_thin_metadata *tmd);
 
-int dm_thin_metadata_get_held_root(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_get_held_root(struct dm_thin_metadata *tmd,
 				   dm_block_t *result);
 
 /*
@@ -96,13 +103,13 @@ int dm_thin_metadata_get_held_root(struc
 /*
  * Opening the same device more than once will fail with -EBUSY.
  */
-int dm_thin_metadata_open_device(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_open_device(struct dm_thin_metadata *tmd,
 				 dm_thin_dev_t dev,
-				 struct dm_ms_device **msd);
+				 struct dm_thin_device **td);
 
-int dm_thin_metadata_close_device(struct dm_ms_device *msd);
+int dm_thin_metadata_close_device(struct dm_thin_device *td);
 
-dm_thin_dev_t dm_thin_device_dev(struct dm_ms_device *msd);
+dm_thin_dev_t dm_thin_device_dev(struct dm_thin_device *td);
 
 struct dm_thin_lookup_result {
 	dm_block_t block;
@@ -115,48 +122,48 @@ struct dm_thin_lookup_result {
  *   -ENODATA iff that mapping is not present.
  *   0 success
  */
-int dm_thin_metadata_lookup(struct dm_ms_device *msd,
+int dm_thin_metadata_lookup(struct dm_thin_device *td,
 			    dm_block_t block, int can_block,
 			    struct dm_thin_lookup_result *result);
 
 /* Inserts a new mapping */
-int dm_thin_metadata_insert(struct dm_ms_device *msd, dm_block_t block,
+int dm_thin_metadata_insert(struct dm_thin_device *td, dm_block_t block,
 			    dm_block_t data_block);
 
-int dm_thin_metadata_remove(struct dm_ms_device *msd,
+int dm_thin_metadata_remove(struct dm_thin_device *td,
 			    dm_block_t block);
 
-int dm_thin_metadata_thin_highest_mapped_block(struct dm_ms_device *msd,
+int dm_thin_metadata_thin_highest_mapped_block(struct dm_thin_device *td,
 					       dm_block_t *highest_mapped);
 
-/* FIXME: why are these passed an msd, rather than an mmd ? */
-int dm_thin_metadata_alloc_data_block(struct dm_ms_device *msd,
+/* FIXME: why are these passed a td, rather than a tmd ? */
+int dm_thin_metadata_alloc_data_block(struct dm_thin_device *td,
 				      dm_block_t *result);
 
-int dm_thin_metadata_get_free_blocks(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_get_free_blocks(struct dm_thin_metadata *tmd,
 				     dm_block_t *result);
 
 int
-dm_thin_metadata_get_free_blocks_metadata(struct dm_thin_metadata *mmd,
+dm_thin_metadata_get_free_blocks_metadata(struct dm_thin_metadata *tmd,
 					  dm_block_t *result);
 
-int dm_thin_metadata_get_data_block_size(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_get_data_block_size(struct dm_thin_metadata *tmd,
 					 sector_t *result);
 
-int dm_thin_metadata_get_data_dev_size(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_get_data_dev_size(struct dm_thin_metadata *tmd,
 				       dm_block_t *result);
 
-int dm_thin_metadata_get_mapped_count(struct dm_ms_device *msd,
+int dm_thin_metadata_get_mapped_count(struct dm_thin_device *td,
 				      dm_block_t *result);
 
-int dm_thin_metadata_get_highest_mapped_block(struct dm_ms_device *msd,
+int dm_thin_metadata_get_highest_mapped_block(struct dm_thin_device *td,
 					      dm_block_t *result);
 
 /*
  * Returns -ENOSPC if the new size is too small and already allocated
  * blocks would be lost.
  */
-int dm_thin_metadata_resize_data_dev(struct dm_thin_metadata *mmd,
+int dm_thin_metadata_resize_data_dev(struct dm_thin_metadata *tmd,
 				     dm_block_t new_size);
 
 /*----------------------------------------------------------------*/
Index: linux-3.0-rc7/drivers/md/dm-thin.c
===================================================================
--- linux-3.0-rc7.orig/drivers/md/dm-thin.c
+++ linux-3.0-rc7/drivers/md/dm-thin.c
@@ -408,19 +408,19 @@ static int ds_add_work(struct deferred_s
 /*
  * Key building.
  */
-static void build_data_key(struct dm_ms_device *msd,
+static void build_data_key(struct dm_thin_device *td,
 			   dm_block_t b, struct cell_key *key)
 {
 	key->virtual = 0;
-	key->dev = dm_thin_device_dev(msd);
+	key->dev = dm_thin_device_dev(td);
 	key->block = b;
 }
 
-static void build_virtual_key(struct dm_ms_device *msd, dm_block_t b,
+static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
 			      struct cell_key *key)
 {
 	key->virtual = 1;
-	key->dev = dm_thin_device_dev(msd);
+	key->dev = dm_thin_device_dev(td);
 	key->block = b;
 }
 
@@ -436,8 +436,7 @@ struct pool {
 	struct dm_target *ti;	/* only set if a pool target is bound */
 
 	struct block_device *pool_dev;
-	struct block_device *metadata_dev;
-	struct dm_thin_metadata *mmd;
+	struct dm_thin_metadata *tmd;
 
 	uint32_t sectors_per_block;
 	unsigned block_shift;
@@ -476,7 +475,7 @@ struct new_mapping {
 	int prepared;
 
 	struct pool *pool;
-	struct dm_ms_device *msd;
+	struct dm_thin_device *td;
 	dm_block_t virt_block;
 	dm_block_t data_block;
 	struct cell *cell;
@@ -500,6 +499,7 @@ struct pool_c {
 	struct dm_target *ti;
 	struct pool *pool;
 	struct dm_dev *data_dev;
+	struct dm_dev *metadata_dev;
 	struct dm_target_callbacks callbacks;
 
 	sector_t low_water_mark;
@@ -519,7 +519,7 @@ struct thin_c {
 	 * (where as the pool_dev above remains constant).
 	 */
 	struct pool *pool;
-	struct dm_ms_device *msd;
+	struct dm_thin_device *td;
 };
 
 struct endio_hook {
@@ -538,18 +538,18 @@ struct endio_hook {
 #define TABLE_PRIME 27 /* Largest prime smaller than table size. */
 #define	TABLE_SHIFT 5  /* Shift fitting prime. */
 
-static struct dm_thin_bdev_table {
+static struct dm_thin_pool_table {
 	spinlock_t lock;
 	struct hlist_head buckets[TABLE_SIZE];
-} dm_thin_bdev_table;
+} dm_thin_pool_table;
 
-static void bdev_table_init(void)
+static void pool_table_init(void)
 {
 	unsigned i;
 
-	spin_lock_init(&dm_thin_bdev_table.lock);
+	spin_lock_init(&dm_thin_pool_table.lock);
 	for (i = 0; i < TABLE_SIZE; i++)
-		INIT_HLIST_HEAD(dm_thin_bdev_table.buckets + i);
+		INIT_HLIST_HEAD(dm_thin_pool_table.buckets + i);
 }
 
 static unsigned hash_bdev(struct block_device *bdev)
@@ -560,29 +560,29 @@ static unsigned hash_bdev(struct block_d
 	return ((p * TABLE_PRIME) >> TABLE_SHIFT) & (TABLE_SIZE - 1);
 }
 
-static void bdev_table_insert(struct pool *pool)
+static void pool_table_insert(struct pool *pool)
 {
 	unsigned bucket = hash_bdev(pool->pool_dev);
 
-	spin_lock(&dm_thin_bdev_table.lock);
-	hlist_add_head(&pool->hlist, dm_thin_bdev_table.buckets + bucket);
-	spin_unlock(&dm_thin_bdev_table.lock);
+	spin_lock(&dm_thin_pool_table.lock);
+	hlist_add_head(&pool->hlist, dm_thin_pool_table.buckets + bucket);
+	spin_unlock(&dm_thin_pool_table.lock);
 }
 
-static void bdev_table_remove(struct pool *pool)
+static void pool_table_remove(struct pool *pool)
 {
-	spin_lock(&dm_thin_bdev_table.lock);
+	spin_lock(&dm_thin_pool_table.lock);
 	hlist_del(&pool->hlist);
-	spin_unlock(&dm_thin_bdev_table.lock);
+	spin_unlock(&dm_thin_pool_table.lock);
 }
 
-static struct pool *bdev_table_lookup(struct block_device *bdev)
+static struct pool *pool_table_lookup(struct block_device *bdev)
 {
 	unsigned bucket = hash_bdev(bdev);
 	struct hlist_node *n;
 	struct pool *pool;
 
-	hlist_for_each_entry(pool, n, dm_thin_bdev_table.buckets + bucket, hlist)
+	hlist_for_each_entry(pool, n, dm_thin_pool_table.buckets + bucket, hlist)
 		if (pool->pool_dev == bdev)
 			return pool;
 
@@ -602,11 +602,11 @@ static void set_ti(struct bio *bio, stru
 	bio->bi_bdev = (struct block_device *) ti;
 }
 
-static struct dm_ms_device *get_msd(struct bio *bio)
+static struct dm_thin_device *get_td(struct bio *bio)
 {
 	struct dm_target *ti = (struct dm_target *) bio->bi_bdev;
-	struct thin_c *mc = ti->private;
-	return mc->msd;
+	struct thin_c *tc = ti->private;
+	return tc->td;
 }
 
 static struct dm_target *get_ti(struct bio *bio)
@@ -616,6 +616,12 @@ static struct dm_target *get_ti(struct b
 
 /*----------------------------------------------------------------*/
 
+/*
+ * This section of code contains the logic for processing a thin
+ * device's I/O.  Even though a 'pool' object is being passed in, these
+ * functions are almost exclusively called from the thin target rather
+ * than the thin-pool target.
+ */
 static dm_block_t get_bio_block(struct pool *pool, struct bio *bio)
 {
 	return bio->bi_sector >> pool->block_shift;
@@ -632,7 +638,7 @@ static void remap_and_issue(struct pool 
 			    dm_block_t block)
 {
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
-		int r = dm_thin_metadata_commit(pool->mmd);
+		int r = dm_thin_metadata_commit(pool->tmd);
 		if (r) {
 			DMERR("%s: dm_thin_metadata_commit() failed, error = %d",
 			      __func__, r);
@@ -715,7 +721,7 @@ static int io_covers_block(struct pool *
 		(bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
 }
 
-static void schedule_copy(struct pool *pool, struct dm_ms_device *msd,
+static void schedule_copy(struct pool *pool, struct dm_thin_device *td,
 			  dm_block_t virt_block, dm_block_t data_origin,
 			  dm_block_t data_dest, struct cell *cell,
 			  struct bio *bio)
@@ -726,7 +732,7 @@ static void schedule_copy(struct pool *p
 	INIT_LIST_HEAD(&m->list);
 	m->prepared = 0;
 	m->pool = pool;
-	m->msd = msd;
+	m->td = td;
 	m->virt_block = virt_block;
 	m->data_block = data_dest;
 	m->cell = cell;
@@ -766,7 +772,7 @@ static void schedule_copy(struct pool *p
 	}
 }
 
-static void schedule_zero(struct pool *pool, struct dm_ms_device *msd,
+static void schedule_zero(struct pool *pool, struct dm_thin_device *td,
 			  dm_block_t virt_block, dm_block_t data_block,
 			  struct cell *cell, struct bio *bio)
 {
@@ -775,7 +781,7 @@ static void schedule_zero(struct pool *p
 	INIT_LIST_HEAD(&m->list);
 	m->prepared = 0;
 	m->pool = pool;
-	m->msd = msd;
+	m->td = td;
 	m->virt_block = virt_block;
 	m->data_block = data_block;
 	m->cell = cell;
@@ -837,8 +843,8 @@ static void cell_remap_and_issue_except(
 static void retry_later(struct bio *bio)
 {
 	struct dm_target *ti = get_ti(bio);
-	struct thin_c *mc = ti->private;
-	struct pool *pool = mc->pool;
+	struct thin_c *tc = ti->private;
+	struct pool *pool = tc->pool;
 	unsigned long flags;
 
 	/* push it onto the retry list */
@@ -847,14 +853,14 @@ static void retry_later(struct bio *bio)
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static int alloc_data_block(struct pool *pool, struct dm_ms_device *msd,
+static int alloc_data_block(struct pool *pool, struct dm_thin_device *td,
 			    dm_block_t *result)
 {
 	int r;
 	dm_block_t free_blocks;
 	unsigned long flags;
 
-	r = dm_thin_metadata_get_free_blocks(pool->mmd, &free_blocks);
+	r = dm_thin_metadata_get_free_blocks(pool->tmd, &free_blocks);
 	if (r)
 		return r;
 
@@ -865,21 +871,21 @@ static int alloc_data_block(struct pool 
 		dm_table_event(pool->ti->table);
 	}
 
-	r = dm_thin_metadata_alloc_data_block(msd, result);
+	r = dm_thin_metadata_alloc_data_block(td, result);
 	if (r)
 		return r;
 
 	return 0;
 }
 
-static void process_discard(struct pool *pool, struct dm_ms_device *msd,
+static void process_discard(struct pool *pool, struct dm_thin_device *td,
 			    struct bio *bio)
 {
 	int r;
 	dm_block_t block = get_bio_block(pool, bio);
 	struct dm_thin_lookup_result lookup_result;
 
-	r = dm_thin_metadata_lookup(msd, block, 1, &lookup_result);
+	r = dm_thin_metadata_lookup(td, block, 1, &lookup_result);
 	switch (r) {
 	case 0:
 		if (lookup_result.shared)
@@ -891,7 +897,7 @@ static void process_discard(struct pool 
 			bio_endio(bio, 0);
 
 		else {
-			r = dm_thin_metadata_remove(msd, block);
+			r = dm_thin_metadata_remove(td, block);
 			if (r) {
 				DMERR("dm_thin_metadata_remove() failed");
 				bio_io_error(bio);
@@ -927,7 +933,7 @@ static void no_space(struct cell *cell)
 		retry_later(bio);
 }
 
-static void break_sharing(struct pool *pool, struct dm_ms_device *msd,
+static void break_sharing(struct pool *pool, struct dm_thin_device *td,
 			  struct bio *bio, dm_block_t block, struct cell_key *key,
 			  struct dm_thin_lookup_result *lookup_result)
 {
@@ -937,10 +943,10 @@ static void break_sharing(struct pool *p
 
 	bio_detain(pool->prison, key, bio, &cell);
 
-	r = alloc_data_block(pool, msd, &data_block);
+	r = alloc_data_block(pool, td, &data_block);
 	switch (r) {
 	case 0:
-		schedule_copy(pool, msd, block, lookup_result->block,
+		schedule_copy(pool, td, block, lookup_result->block,
 			      data_block, cell, bio);
 		break;
 
@@ -955,19 +961,19 @@ static void break_sharing(struct pool *p
 	}
 }
 
-static void process_shared_bio(struct pool *pool, struct dm_ms_device *msd,
+static void process_shared_bio(struct pool *pool, struct dm_thin_device *td,
 			       struct bio *bio, dm_block_t block,
 			       struct dm_thin_lookup_result *lookup_result)
 {
 	struct cell *cell;
 	struct cell_key key;
 
-	build_data_key(msd, lookup_result->block, &key);
+	build_data_key(td, lookup_result->block, &key);
 	if (bio_detain_if_occupied(pool->prison, &key, bio, &cell))
 		return; /* already underway */
 
 	if (bio_data_dir(bio) == WRITE)
-		break_sharing(pool, msd, bio, block, &key, lookup_result);
+		break_sharing(pool, td, bio, block, &key, lookup_result);
 	else {
 		struct endio_hook *h = mempool_alloc(pool->endio_hook_pool,
 						     GFP_NOIO);
@@ -984,7 +990,7 @@ static void process_shared_bio(struct po
 	}
 }
 
-static void provision_block(struct pool *pool, struct dm_ms_device *msd,
+static void provision_block(struct pool *pool, struct dm_thin_device *td,
 			    struct bio *bio, dm_block_t block)
 {
 	int r;
@@ -992,14 +998,14 @@ static void provision_block(struct pool 
 	struct cell *cell;
 	struct cell_key key;
 
-	build_virtual_key(msd, block, &key);
+	build_virtual_key(td, block, &key);
 	if (bio_detain(pool->prison, &key, bio, &cell))
 		return; /* already underway */
 
-	r = alloc_data_block(pool, msd, &data_block);
+	r = alloc_data_block(pool, td, &data_block);
 	switch (r) {
 	case 0:
-		schedule_zero(pool, msd, block, data_block, cell, bio);
+		schedule_zero(pool, td, block, data_block, cell, bio);
 		break;
 
 	case -ENOSPC:
@@ -1013,18 +1019,18 @@ static void provision_block(struct pool 
 	}
 }
 
-static void process_bio(struct pool *pool, struct dm_ms_device *msd,
+static void process_bio(struct pool *pool, struct dm_thin_device *td,
 			struct bio *bio)
 {
 	int r;
 	dm_block_t block = get_bio_block(pool, bio);
 	struct dm_thin_lookup_result lookup_result;
 
-	r = dm_thin_metadata_lookup(msd, block, 1, &lookup_result);
+	r = dm_thin_metadata_lookup(td, block, 1, &lookup_result);
 	switch (r) {
 	case 0:
 		if (lookup_result.shared)
-			process_shared_bio(pool, msd, bio, block, &lookup_result);
+			process_shared_bio(pool, td, bio, block, &lookup_result);
 		else
 			remap_and_issue(pool, bio, lookup_result.block);
 		break;
@@ -1035,7 +1041,7 @@ static void process_bio(struct pool *poo
 			zero_fill_bio(bio);
 			bio_endio(bio, 0);
 		} else
-			provision_block(pool, msd, bio, block);
+			provision_block(pool, td, bio, block);
 		break;
 
 	default:
@@ -1058,12 +1064,12 @@ static void process_bios(struct pool *po
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	while ((bio = bio_list_pop(&bios))) {
-		struct dm_ms_device *msd = get_msd(bio);
+		struct dm_thin_device *td = get_td(bio);
 
 		if (bio->bi_rw & REQ_DISCARD)
-			process_discard(pool, msd, bio);
+			process_discard(pool, td, bio);
 		else
-			process_bio(pool, msd, bio);
+			process_bio(pool, td, bio);
 	}
 }
 
@@ -1092,7 +1098,7 @@ static void process_prepared_mappings(st
 			bio->bi_private = m->bi_private;
 		}
 
-		r = dm_thin_metadata_insert(m->msd, m->virt_block,
+		r = dm_thin_metadata_insert(m->td, m->virt_block,
 					    m->data_block);
 		if (r) {
 			DMERR("dm_thin_metadata_insert() failed");
@@ -1144,8 +1150,8 @@ static int bio_map(struct pool *pool, st
 {
 	int r;
 	dm_block_t block = get_bio_block(pool, bio);
-	struct thin_c *mc = ti->private;
-	struct dm_ms_device *msd = mc->msd;
+	struct thin_c *tc = ti->private;
+	struct dm_thin_device *td = tc->td;
 	struct dm_thin_lookup_result result;
 
 	/*
@@ -1165,7 +1171,7 @@ static int bio_map(struct pool *pool, st
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	r = dm_thin_metadata_lookup(msd, block, 0, &result);
+	r = dm_thin_metadata_lookup(td, block, 0, &result);
 	switch (r) {
 	case 0:
 		if (unlikely(result.shared)) {
@@ -1255,6 +1261,7 @@ static int bind_control_target(struct po
 	pool->low_water_mark = dm_div_up(pt->low_water_mark,
 					 pool->sectors_per_block);
 	pool->zero_new_blocks = pt->zero_new_blocks;
+	dm_thin_metadata_rebind_block_device(pool->tmd, pt->metadata_dev->bdev);
 	return 0;
 }
 
@@ -1269,9 +1276,8 @@ static void unbind_control_target(struct
  *--------------------------------------------------------------*/
 static void pool_destroy(struct pool *pool)
 {
-	if (dm_thin_metadata_close(pool->mmd) < 0)
+	if (dm_thin_metadata_close(pool->tmd) < 0)
 		DMWARN("%s: dm_thin_metadata_close() failed.", __func__);
-	blkdev_put(pool->metadata_dev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 
 	prison_destroy(pool->prison);
 	dm_kcopyd_client_destroy(pool->copier);
@@ -1287,55 +1293,19 @@ static void pool_destroy(struct pool *po
 	kfree(pool);
 }
 
-/*
- * The lifetime of the pool object is potentially longer than that of the
- * pool target.  thin_get_device() is very similar to
- * dm_get_device() except it doesn't associate the device with the target,
- * which would prevent the target to be destroyed.
- */
-static struct block_device *thin_get_device(const char *metadata_path, fmode_t mode)
-{
-	dev_t uninitialized_var(dev);
-	unsigned int major, minor;
-	struct block_device *bdev;
-
-	if (sscanf(metadata_path, "%u:%u", &major, &minor) == 2) {
-		/* Extract the major/minor numbers */
-		dev = MKDEV(major, minor);
-		if (MAJOR(dev) != major || MINOR(dev) != minor)
-			return ERR_PTR(-EOVERFLOW);
-		bdev = blkdev_get_by_dev(dev, mode, &thin_get_device);
-	} else
-		bdev = blkdev_get_by_path(metadata_path, mode, &thin_get_device);
-
-	if (!bdev)
-		return ERR_PTR(-EINVAL);
-
-	return bdev;
-}
-
-static struct pool *pool_create(const char *metadata_path,
+static struct pool *pool_create(struct block_device *metadata_dev,
 				unsigned long block_size, char **error)
 {
 	int r;
 	void *err_p;
 	struct pool *pool;
-	struct dm_thin_metadata *mmd;
-	struct block_device *metadata_dev;
-	fmode_t metadata_dev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
-
-	metadata_dev = thin_get_device(metadata_path, metadata_dev_mode);
-	if (IS_ERR(metadata_dev)) {
-		r = PTR_ERR(metadata_dev);
-		*error = "Error opening metadata block device";
-		return ERR_PTR(r);
-	}
+	struct dm_thin_metadata *tmd;
 
-	mmd = dm_thin_metadata_open(metadata_dev, block_size);
-	if (IS_ERR(mmd)) {
+	tmd = dm_thin_metadata_open(metadata_dev, block_size);
+	if (IS_ERR(tmd)) {
 		*error = "Error creating metadata object";
-		err_p = mmd; /* already an ERR_PTR */
-		goto bad_mmd_open;
+		err_p = tmd; /* already an ERR_PTR */
+		return err_p;
 	}
 
 	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
@@ -1345,8 +1315,7 @@ static struct pool *pool_create(const ch
 		goto bad_pool;
 	}
 
-	pool->metadata_dev = metadata_dev;
-	pool->mmd = mmd;
+	pool->tmd = tmd;
 	pool->sectors_per_block = block_size;
 	pool->block_shift = ffs(block_size) - 1;
 	pool->offset_mask = block_size - 1;
@@ -1427,10 +1396,8 @@ bad_kcopyd_client:
 bad_prison:
 	kfree(pool);
 bad_pool:
-	if (dm_thin_metadata_close(mmd))
+	if (dm_thin_metadata_close(tmd))
 		DMWARN("%s: dm_thin_metadata_close() failed.", __func__);
-bad_mmd_open:
-	blkdev_put(metadata_dev, metadata_dev_mode);
 
 	return err_p;
 }
@@ -1447,18 +1414,17 @@ static void pool_dec(struct pool *pool)
 }
 
 static struct pool *pool_find(struct block_device *pool_bdev,
-			      const char *metadata_path,
+			      struct block_device *metadata_dev,
 			      unsigned long block_size,
 			      char **error)
 {
 	struct pool *pool;
 
-	pool = bdev_table_lookup(pool_bdev);
+	pool = pool_table_lookup(pool_bdev);
 	if (pool)
-// AGK FIXME metadata_path not validated here
 		pool_inc(pool);
 	else
-		pool = pool_create(metadata_path, block_size, error);
+		pool = pool_create(metadata_dev, block_size, error);
 
 	return pool;
 }
@@ -1470,6 +1436,7 @@ static void pool_dtr(struct dm_target *t
 {
 	struct pool_c *pt = ti->private;
 
+	dm_put_device(ti, pt->metadata_dev);
 	dm_put_device(ti, pt->data_dev);
 	unbind_control_target(pt->pool, ti);
 	pool_dec(pt->pool);
@@ -1517,12 +1484,12 @@ static int parse_pool_features(struct dm
 
 /*
  * thin-pool <metadata dev> <data dev>
- *	     <data block size (sectors)>
- *	     <low water mark (sectors)>
- *	     [<#feature args> [<arg>]*]
+ *           <data block size (sectors)>
+ *           <low water mark (sectors)>
+ *           [<#feature args> [<arg>]*]
  *
  * Optional feature arguments are:
- *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks
+ *           skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
  */
 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
@@ -1535,6 +1502,7 @@ static int pool_ctr(struct dm_target *ti
 	unsigned long block_size;
 	dm_block_t low_water;
 	const char *metadata_path;
+	struct dm_dev *metadata_dev;
 	char *end;
 
 	if (argc < 4) {
@@ -1545,11 +1513,16 @@ static int pool_ctr(struct dm_target *ti
 	as.argv = argv;
 
 	metadata_path = argv[0];
+	r = dm_get_device(ti, metadata_path, FMODE_READ | FMODE_WRITE, &metadata_dev);
+	if (r) {
+		ti->error = "Error opening metadata block device";
+		return r;
+	}
 
 	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
 	if (r) {
 		ti->error = "Error getting data device";
-		return r;
+		goto out_md;
 	}
 
 	block_size = simple_strtoul(argv[2], &end, 10);
@@ -1577,7 +1550,7 @@ static int pool_ctr(struct dm_target *ti
 	if (r)
 		goto out;
 
-	pool = pool_find(get_target_bdev(ti), metadata_path,
+	pool = pool_find(get_target_bdev(ti), metadata_dev->bdev,
 			 block_size, &ti->error);
 	if (IS_ERR(pool)) {
 		r = PTR_ERR(pool);
@@ -1592,6 +1565,7 @@ static int pool_ctr(struct dm_target *ti
 	}
 	pt->pool = pool;
 	pt->ti = ti;
+	pt->metadata_dev = metadata_dev;
 	pt->data_dev = data_dev;
 	pt->low_water_mark = low_water;
 	pt->zero_new_blocks = pf.zero_new_blocks;
@@ -1603,6 +1577,10 @@ static int pool_ctr(struct dm_target *ti
 	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
 
 	return 0;
+
+out_md:
+	dm_put_device(ti, metadata_dev);
+
 out:
 	dm_put_device(ti, data_dev);
 	return r;
@@ -1649,7 +1627,7 @@ static int pool_preresume(struct dm_targ
 		return r;
 
 	data_size = ti->len >> pool->block_shift;
-	r = dm_thin_metadata_get_data_dev_size(pool->mmd, &sb_data_size);
+	r = dm_thin_metadata_get_data_dev_size(pool->tmd, &sb_data_size);
 	if (r) {
 		DMERR("failed to retrieve data device size");
 		return r;
@@ -1661,13 +1639,13 @@ static int pool_preresume(struct dm_targ
 		return -EINVAL;
 
 	} else if (data_size > sb_data_size) {
-		r = dm_thin_metadata_resize_data_dev(pool->mmd, data_size);
+		r = dm_thin_metadata_resize_data_dev(pool->tmd, data_size);
 		if (r) {
 			DMERR("failed to resize data device");
 			return r;
 		}
 
-		r = dm_thin_metadata_commit(pool->mmd);
+		r = dm_thin_metadata_commit(pool->tmd);
 		if (r) {
 			DMERR("%s: dm_thin_metadata_commit() failed, error = %d",
 			      __func__, r);
@@ -1684,7 +1662,7 @@ static int pool_preresume(struct dm_targ
 
 	/* The pool object is only present if the pool is active */
 	pool->pool_dev = get_target_bdev(ti);
-	bdev_table_insert(pool);
+	pool_table_insert(pool);
 
 	return 0;
 }
@@ -1695,7 +1673,7 @@ static void pool_presuspend(struct dm_ta
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
 
-	r = dm_thin_metadata_commit(pool->mmd);
+	r = dm_thin_metadata_commit(pool->tmd);
 	if (r < 0) {
 		DMERR("%s: dm_thin_metadata_commit() failed, error = %d",
 		      __func__, r);
@@ -1708,7 +1686,7 @@ static void pool_postsuspend(struct dm_t
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
 
-	bdev_table_remove(pool);
+	pool_table_remove(pool);
 	pool->pool_dev = NULL;
 }
 
@@ -1743,7 +1721,7 @@ static int pool_message(struct dm_target
 			return -EINVAL;
 		}
 
-		r = dm_thin_metadata_create_thin(pool->mmd, dev_id);
+		r = dm_thin_metadata_create_thin(pool->tmd, dev_id);
 		if (r) {
 			ti->error = "Creation of thin provisioned device failed";
 			return r;
@@ -1769,7 +1747,7 @@ static int pool_message(struct dm_target
 			return -EINVAL;
 		}
 
-		r = dm_thin_metadata_create_snap(pool->mmd, dev_id, origin_id);
+		r = dm_thin_metadata_create_snap(pool->tmd, dev_id, origin_id);
 		if (r) {
 			ti->error = "Creation of snapshot failed";
 			return r;
@@ -1787,7 +1765,7 @@ static int pool_message(struct dm_target
 			return -EINVAL;
 		}
 
-		r = dm_thin_metadata_delete_device(pool->mmd, dev_id);
+		r = dm_thin_metadata_delete_device(pool->tmd, dev_id);
 
 	} else if (!strcmp(argv[0], "trim")) {
 		sector_t new_size;
@@ -1810,7 +1788,7 @@ static int pool_message(struct dm_target
 		}
 
 		r = dm_thin_metadata_trim_thin_dev(
-			pool->mmd, dev_id,
+			pool->tmd, dev_id,
 			dm_div_up(new_size, pool->sectors_per_block));
 		if (r) {
 			ti->error = "Couldn't trim thin device";
@@ -1837,7 +1815,7 @@ static int pool_message(struct dm_target
 			return -EINVAL;
 		}
 
-		r = dm_thin_metadata_set_transaction_id(pool->mmd,
+		r = dm_thin_metadata_set_transaction_id(pool->tmd,
 							old_id, new_id);
 		if (r) {
 			ti->error = "Setting userspace transaction id failed";
@@ -1847,7 +1825,7 @@ static int pool_message(struct dm_target
 		return -EINVAL;
 
 	if (!r) {
-		r = dm_thin_metadata_commit(pool->mmd);
+		r = dm_thin_metadata_commit(pool->tmd);
 		if (r)
 			DMERR("%s: dm_thin_metadata_commit() failed, error = %d",
 			      __func__, r);
@@ -1872,22 +1850,22 @@ static int pool_status(struct dm_target 
 
 	switch (type) {
 	case STATUSTYPE_INFO:
-		r = dm_thin_metadata_get_transaction_id(pool->mmd,
+		r = dm_thin_metadata_get_transaction_id(pool->tmd,
 							&transaction_id);
 		if (r)
 			return r;
 
-		r = dm_thin_metadata_get_free_blocks(pool->mmd,
+		r = dm_thin_metadata_get_free_blocks(pool->tmd,
 						     &nr_free_blocks_data);
 		if (r)
 			return r;
 
-		r = dm_thin_metadata_get_free_blocks_metadata(pool->mmd,
+		r = dm_thin_metadata_get_free_blocks_metadata(pool->tmd,
 							      &nr_free_blocks_metadata);
 		if (r)
 			return r;
 
-		r = dm_thin_metadata_get_held_root(pool->mmd, &held_root);
+		r = dm_thin_metadata_get_held_root(pool->tmd, &held_root);
 		if (r)
 			return r;
 
@@ -1904,7 +1882,7 @@ static int pool_status(struct dm_target 
 
 	case STATUSTYPE_TABLE:
 		DMEMIT("%s %s %lu %lu ",
-		       format_dev_t(buf, pool->metadata_dev->bd_dev),
+		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
 		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
 		       (unsigned long) pool->sectors_per_block,
 		       (unsigned long) pt->low_water_mark);
@@ -1969,12 +1947,12 @@ static struct target_type pool_target = 
 
 static void thin_dtr(struct dm_target *ti)
 {
-	struct thin_c *mc = ti->private;
+	struct thin_c *tc = ti->private;
 
-	pool_dec(mc->pool);
-	dm_thin_metadata_close_device(mc->msd);
-	dm_put_device(ti, mc->pool_dev);
-	kfree(mc);
+	pool_dec(tc->pool);
+	dm_thin_metadata_close_device(tc->td);
+	dm_put_device(ti, tc->pool_dev);
+	kfree(tc);
 }
 
 /*
@@ -1988,7 +1966,7 @@ static void thin_dtr(struct dm_target *t
 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
 	int r;
-	struct thin_c *mc;
+	struct thin_c *tc;
 	struct dm_dev *pool_dev;
 	char *end;
 
@@ -1997,8 +1975,8 @@ static int thin_ctr(struct dm_target *ti
 		return -EINVAL;
 	}
 
-	mc = ti->private = kzalloc(sizeof(*mc), GFP_KERNEL);
-	if (!mc) {
+	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
+	if (!tc) {
 		ti->error = "Out of memory";
 		return -ENOMEM;
 	}
@@ -2008,42 +1986,47 @@ static int thin_ctr(struct dm_target *ti
 		ti->error = "Error opening pool device";
 		goto bad_pool_dev;
 	}
-	mc->pool_dev = pool_dev;
+	tc->pool_dev = pool_dev;
 
-	mc->dev_id = simple_strtoull(argv[1], &end, 10);
+	tc->dev_id = simple_strtoull(argv[1], &end, 10);
 	if (*end) {
 		ti->error = "Invalid device id";
 		r = -EINVAL;
 		goto bad_dev_id;
 	}
 
-	mc->pool = bdev_table_lookup(mc->pool_dev->bdev);
-	if (!mc->pool) {
+	tc->pool = pool_table_lookup(tc->pool_dev->bdev);
+	if (!tc->pool) {
 		ti->error = "Couldn't find pool object";
 		r = -EINVAL;
 		goto bad_pool_lookup;
 	}
-	pool_inc(mc->pool);
+	pool_inc(tc->pool);
 
-	r = dm_thin_metadata_open_device(mc->pool->mmd, mc->dev_id, &mc->msd);
+	r = dm_thin_metadata_open_device(tc->pool->tmd, tc->dev_id, &tc->td);
 	if (r) {
 		ti->error = "Couldn't open thin internal device";
 		goto bad_thin_open;
 	}
 
-	ti->split_io = mc->pool->sectors_per_block;
+	ti->split_io = tc->pool->sectors_per_block;
 	ti->num_flush_requests = 1;
 	ti->num_discard_requests = 1;
+	/*
+	 * Allow discards to be issued to the thin device even
+	 * if the pool's data device doesn't support them.
+	 */
+	ti->discards_supported = 1;
 
 	return 0;
 
 bad_thin_open:
-	pool_dec(mc->pool);
+	pool_dec(tc->pool);
 bad_pool_lookup:
 bad_dev_id:
-	dm_put_device(ti, mc->pool_dev);
+	dm_put_device(ti, tc->pool_dev);
 bad_pool_dev:
-	kfree(mc);
+	kfree(tc);
 
 	return r;
 }
@@ -2051,10 +2034,10 @@ bad_pool_dev:
 static int thin_map(struct dm_target *ti, struct bio *bio,
 		    union map_info *map_context)
 {
-	struct thin_c *mc = ti->private;
+	struct thin_c *tc = ti->private;
 
 	bio->bi_sector -= ti->begin;
-	return bio_map(mc->pool, ti, bio);
+	return bio_map(tc->pool, ti, bio);
 }
 
 static int thin_status(struct dm_target *ti, status_type_t type,
@@ -2064,33 +2047,33 @@ static int thin_status(struct dm_target 
 	ssize_t sz = 0;
 	dm_block_t mapped, highest;
 	char buf[BDEVNAME_SIZE];
-	struct thin_c *mc = ti->private;
+	struct thin_c *tc = ti->private;
 
-	if (mc->msd) {
+	if (tc->td) {
 		switch (type) {
 		case STATUSTYPE_INFO:
-			r = dm_thin_metadata_get_mapped_count(mc->msd,
+			r = dm_thin_metadata_get_mapped_count(tc->td,
 							      &mapped);
 			if (r)
 				return r;
 
-			r = dm_thin_metadata_get_highest_mapped_block(mc->msd,
+			r = dm_thin_metadata_get_highest_mapped_block(tc->td,
 								      &highest);
 			if (r < 0)
 				return r;
 
-			DMEMIT("%llu ", mapped * mc->pool->sectors_per_block);
+			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
 			if (r)
 				DMEMIT("%llu", ((highest + 1) *
-						mc->pool->sectors_per_block) - 1);
+						tc->pool->sectors_per_block) - 1);
 			else
 				DMEMIT("-");
 			break;
 
 		case STATUSTYPE_TABLE:
 			DMEMIT("%s %lu",
-			       format_dev_t(buf, mc->pool_dev->bdev->bd_dev),
-			       (unsigned long) mc->dev_id);
+			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
+			       (unsigned long) tc->dev_id);
 			break;
 		}
 	} else {
@@ -2103,8 +2086,8 @@ static int thin_status(struct dm_target 
 static int thin_bvec_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
 			   struct bio_vec *biovec, int max_size)
 {
-	struct thin_c *mc = ti->private;
-	struct pool *pool = mc->pool;
+	struct thin_c *tc = ti->private;
+	struct pool *pool = tc->pool;
 
 	/*
 	 * We fib here, because the space may not have been provisioned yet
@@ -2118,24 +2101,24 @@ static int thin_bvec_merge(struct dm_tar
 static int thin_iterate_devices(struct dm_target *ti,
 				iterate_devices_callout_fn fn, void *data)
 {
-	struct thin_c *mc = ti->private;
+	struct thin_c *tc = ti->private;
 	struct pool *pool;
 
-	pool = bdev_table_lookup(mc->pool_dev->bdev);
+	pool = pool_table_lookup(tc->pool_dev->bdev);
 	if (!pool) {
 		DMERR("%s: Couldn't find pool object", __func__);
 		return -EINVAL;
 	}
 
-	return fn(ti, mc->pool_dev, 0, pool->sectors_per_block, data);
+	return fn(ti, tc->pool_dev, 0, pool->sectors_per_block, data);
 }
 
 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
-	struct thin_c *mc = ti->private;
+	struct thin_c *tc = ti->private;
 	struct pool *pool;
 
-	pool = bdev_table_lookup(mc->pool_dev->bdev);
+	pool = pool_table_lookup(tc->pool_dev->bdev);
 	if (!pool) {
 		DMERR("%s: Couldn't find pool object", __func__);
 		return;
@@ -2177,7 +2160,7 @@ static int __init dm_thin_init(void)
 	if (r)
 		dm_unregister_target(&thin_target);
 
-	bdev_table_init();
+	pool_table_init();
 	return r;
 }
 
