dm.c: protect md->map with a rw spin lock (md->map_lock, renamed from
maplock) rather than the md->lock semaphore.  Also ensure that everyone
accesses md->map through dm_get_table() rather than directly;
dm_table_put() is made safe to call with a NULL table so callers can
drop their reference unconditionally.
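The usage pattern this establishes is sketched below for reference
(illustrative only: do_work_with_table() is a hypothetical placeholder,
not a function added by this patch).  Readers no longer dereference
md->map themselves; they take a counted reference under the new rwlock
via dm_get_table() and drop it with dm_table_put(), which the
dm-table.c hunk makes NULL-safe.  Writers (__bind/__unbind) swap the
pointer under write_lock, so an old table is only destroyed once the
last holder drops its reference.

	/* Reader side, e.g. dm_any_congested() below: */
	struct dm_table *map = dm_get_table(md);  /* read_lock + dm_table_get() */

	if (map)
		do_work_with_table(map);          /* hypothetical read-only use */

	dm_table_put(map);                        /* NULL-safe; may drop the last
						     reference to an unbound table */

	/* Writer side, e.g. __bind()/__unbind(): publish or clear the
	 * pointer under the write lock; concurrent readers keep their
	 * own reference to the old table.
	 */
	write_lock(&md->map_lock);
	md->map = t;                              /* NULL in __unbind() */
	write_unlock(&md->map_lock);
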
--- diff/drivers/md/dm-table.c	2004-03-09 11:51:50.000000000 +0000
+++ source/drivers/md/dm-table.c	2004-03-09 11:52:55.000000000 +0000
@@ -279,6 +279,9 @@ void dm_table_get(struct dm_table *t)
 
 void dm_table_put(struct dm_table *t)
 {
+	if (!t)
+		return;
+
 	if (atomic_dec_and_test(&t->holders))
 		table_destroy(t);
 }
--- diff/drivers/md/dm.c	2004-03-09 11:51:50.000000000 +0000
+++ source/drivers/md/dm.c	2004-03-09 11:52:58.000000000 +0000
@@ -49,7 +49,7 @@ struct target_io {
 
 struct mapped_device {
 	struct rw_semaphore lock;
-	rwlock_t maplock;
+	rwlock_t map_lock;
 	atomic_t holders;
 
 	unsigned long flags;
@@ -238,6 +238,24 @@ static int queue_io(struct mapped_device
 	return 0;		/* deferred successfully */
 }
 
+/*
+ * Everyone (including functions in this file) should use this
+ * function to access the md->map field, and make sure they call
+ * dm_table_put() when finished.
+ */
+struct dm_table *dm_get_table(struct mapped_device *md)
+{
+	struct dm_table *t;
+
+	read_lock(&md->map_lock);
+	t = md->map;
+	if (t)
+		dm_table_get(t);
+	read_unlock(&md->map_lock);
+
+	return t;
+}
+
 /*-----------------------------------------------------------------
  * CRUD START:
  *   A more elegant soln is in the works that uses the queue
@@ -346,6 +364,7 @@ static void __map_bio(struct dm_target *
 
 struct clone_info {
 	struct mapped_device *md;
+	struct dm_table *map;
 	struct bio *bio;
 	struct dm_io *io;
 	sector_t sector;
@@ -399,7 +418,7 @@ static struct bio *clone_bio(struct bio 
 static void __clone_and_map(struct clone_info *ci)
 {
 	struct bio *clone, *bio = ci->bio;
-	struct dm_target *ti = dm_table_find_target(ci->md->map, ci->sector);
+	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
 	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
 	struct target_io *tio;
 
@@ -460,7 +479,7 @@ static void __clone_and_map(struct clone
 
 		ci->sector += max;
 		ci->sector_count -= max;
-		ti = dm_table_find_target(ci->md->map, ci->sector);
+		ti = dm_table_find_target(ci->map, ci->sector);
 
 		len = to_sector(bv->bv_len) - max;
 		clone = split_bvec(bio, ci->sector, ci->idx,
@@ -484,6 +503,12 @@ static void __split_bio(struct mapped_de
 {
 	struct clone_info ci;
 
+	ci.map = dm_get_table(md);
+	if (!ci.map) {
+		bio_io_error(bio, bio->bi_size);
+		return;
+	}
+
 	ci.md = md;
 	ci.bio = bio;
 	ci.io = alloc_io(md);
@@ -501,22 +526,12 @@ static void __split_bio(struct mapped_de
 
 	/* drop the extra reference count */
 	dec_pending(ci.io, 0);
+	dm_table_put(ci.map);
 }
 /*-----------------------------------------------------------------
  * CRUD END
  *---------------------------------------------------------------*/
 
-
-static inline void __dm_request(struct mapped_device *md, struct bio *bio)
-{
-	if (!md->map) {
-		bio_io_error(bio, bio->bi_size);
-		return;
-	}
-
-	__split_bio(md, bio);
-}
-
 /*
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
@@ -555,7 +570,7 @@ static int dm_request(request_queue_t *q
 		down_read(&md->lock);
 	}
 
-	__dm_request(md, bio);
+	__split_bio(md, bio);
 	up_read(&md->lock);
 	return 0;
 }
@@ -563,15 +578,15 @@ static int dm_request(request_queue_t *q
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
 	int r;
-	struct mapped_device *md = congested_data;
+	struct mapped_device *md = (struct mapped_device *) congested_data;
+	struct dm_table *map = dm_get_table(md);
 
-	read_lock(&md->maplock);
-	if (md->map == NULL || test_bit(DMF_BLOCK_IO, &md->flags))
+	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
 		r = bdi_bits;
 	else
-		r = dm_table_any_congested(md->map, bdi_bits);
-	read_unlock(&md->maplock);
+		r = dm_table_any_congested(map, bdi_bits);
 
+	dm_table_put(map);
 	return r;
 }
 
@@ -646,7 +661,7 @@ static struct mapped_device *alloc_dev(u
 
 	memset(md, 0, sizeof(*md));
 	init_rwsem(&md->lock);
-	rwlock_init(&md->maplock);
+	rwlock_init(&md->map_lock);
 	atomic_set(&md->holders, 1);
 
 	md->queue = blk_alloc_queue(GFP_KERNEL);
@@ -746,12 +761,12 @@ static int __bind(struct mapped_device *
 	if (size == 0)
 		return 0;
 
-	write_lock(&md->maplock);
+	write_lock(&md->map_lock);
 	md->map = t;
-	write_unlock(&md->maplock);
-	dm_table_event_callback(md->map, event_callback, md);
+	write_unlock(&md->map_lock);
 
 	dm_table_get(t);
+	dm_table_event_callback(md->map, event_callback, md);
 	dm_table_set_restrictions(t, q);
 	return 0;
 }
@@ -764,9 +779,9 @@ static void __unbind(struct mapped_devic
 		return;
 
 	dm_table_event_callback(map, NULL, NULL);
-	write_lock(&md->maplock);
+	write_lock(&md->map_lock);
 	md->map = NULL;
-	write_unlock(&md->maplock);
+	write_unlock(&md->map_lock);
 	dm_table_put(map);
 }
 
@@ -803,12 +818,16 @@ void dm_get(struct mapped_device *md)
 
 void dm_put(struct mapped_device *md)
 {
+	struct dm_table *map = dm_get_table(md);
+
 	if (atomic_dec_and_test(&md->holders)) {
-		if (!test_bit(DMF_SUSPENDED, &md->flags) && md->map)
-			dm_table_suspend_targets(md->map);
+		if (!test_bit(DMF_SUSPENDED, &md->flags) && map)
+			dm_table_suspend_targets(map);
 		__unbind(md);
 		free_dev(md);
 	}
+
+	dm_table_put(map);
 }
 
 /*
@@ -821,7 +840,7 @@ static void __flush_deferred_io(struct m
 	while (c) {
 		n = c->bi_next;
 		c->bi_next = NULL;
-		__dm_request(md, c);
+		__split_bio(md, c);
 		c = n;
 	}
 }
@@ -859,6 +878,7 @@ int dm_swap_table(struct mapped_device *
  */
 int dm_suspend(struct mapped_device *md)
 {
+	struct dm_table *map;
 	DECLARE_WAITQUEUE(wait, current);
 
 	down_write(&md->lock);
@@ -894,8 +914,11 @@ int dm_suspend(struct mapped_device *md)
 	down_write(&md->lock);
 	remove_wait_queue(&md->wait, &wait);
 	set_bit(DMF_SUSPENDED, &md->flags);
-	if (md->map)
-		dm_table_suspend_targets(md->map);
+
+	map = dm_get_table(md);
+	if (map)
+		dm_table_suspend_targets(map);
+	dm_table_put(map);
 	up_write(&md->lock);
 
 	return 0;
@@ -904,22 +927,25 @@ int dm_suspend(struct mapped_device *md)
 int dm_resume(struct mapped_device *md)
 {
 	struct bio *def;
+	struct dm_table *map = dm_get_table(md);
 
 	down_write(&md->lock);
-	if (!md->map ||
+	if (!map ||
 	    !test_bit(DMF_SUSPENDED, &md->flags) ||
-	    !dm_table_get_size(md->map)) {
+	    !dm_table_get_size(map)) {
 		up_write(&md->lock);
+		dm_table_put(map);
 		return -EINVAL;
 	}
 
-	dm_table_resume_targets(md->map);
+	dm_table_resume_targets(map);
 	clear_bit(DMF_SUSPENDED, &md->flags);
 	clear_bit(DMF_BLOCK_IO, &md->flags);
 
 	def = bio_list_get(&md->deferred);
 	__flush_deferred_io(md, def);
 	up_write(&md->lock);
+	dm_table_put(map);
 
 	blk_run_queues();
 
@@ -971,19 +997,6 @@ struct gendisk *dm_disk(struct mapped_de
 	return md->disk;
 }
 
-struct dm_table *dm_get_table(struct mapped_device *md)
-{
-	struct dm_table *t;
-
-	down_read(&md->lock);
-	t = md->map;
-	if (t)
-		dm_table_get(t);
-	up_read(&md->lock);
-
-	return t;
-}
-
 int dm_suspended(struct mapped_device *md)
 {
 	return test_bit(DMF_SUSPENDED, &md->flags);