Skip to content

Commit beed763

Browse files
Mikulas Patocka authored and gregkh (Greg Kroah-Hartman) committed
dm writecache: fix data corruption when reloading the target
commit 31b2212 upstream.

The dm-writecache reads metadata in the target constructor. However, when we reload the target, there could be another active instance running on the same device.

This is the sequence of operations when doing a reload:
1. construct new target
2. suspend old target
3. resume new target
4. destroy old target

Metadata that were written by the old target between steps 1 and 2 would not be visible by the new target.

Fix the data corruption by loading the metadata in the resume handler. Also, validate block_size is at least as large as both the devices' logical block size and only read 1 block from the metadata during target constructor -- no need to read entirety of metadata now that it is done during resume.

Fixes: 48debaf ("dm: add writecache target")
Cc: [email protected] # v4.18+
Signed-off-by: Mikulas Patocka <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 969b9cb commit beed763

File tree

1 file changed

+37
-15
lines changed

1 file changed

+37
-15
lines changed

drivers/md/dm-writecache.c

Lines changed: 37 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -878,6 +878,24 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
878878
return 0;
879879
}
880880

881+
/*
 * Synchronously read @n_sectors of on-disk cache metadata from the start of
 * the SSD device into the in-core metadata area (wc->memory_map).
 *
 * Returns 0 on success or a negative error code from dm_io().
 */
static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
{
	struct dm_io_region region = {
		.bdev = wc->ssd_dev->bdev,
		.sector = wc->start_sector,
		.count = n_sectors,
	};
	struct dm_io_request req = {
		.bi_op = REQ_OP_READ,
		.bi_op_flags = REQ_SYNC,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = (char *)wc->memory_map,
		.client = wc->dm_io,
		.notify.fn = NULL,	/* no callback: dm_io() runs synchronously */
	};

	return dm_io(&req, 1, &region, NULL);
}
898+
881899
static void writecache_resume(struct dm_target *ti)
882900
{
883901
struct dm_writecache *wc = ti->private;
@@ -888,8 +906,18 @@ static void writecache_resume(struct dm_target *ti)
888906

889907
wc_lock(wc);
890908

891-
if (WC_MODE_PMEM(wc))
909+
if (WC_MODE_PMEM(wc)) {
892910
persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
911+
} else {
912+
r = writecache_read_metadata(wc, wc->metadata_sectors);
913+
if (r) {
914+
size_t sb_entries_offset;
915+
writecache_error(wc, r, "unable to read metadata: %d", r);
916+
sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
917+
memset((char *)wc->memory_map + sb_entries_offset, -1,
918+
(wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
919+
}
920+
}
893921

894922
wc->tree = RB_ROOT;
895923
INIT_LIST_HEAD(&wc->lru);
@@ -1984,6 +2012,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
19842012
ti->error = "Invalid block size";
19852013
goto bad;
19862014
}
2015+
if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
2016+
wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
2017+
r = -EINVAL;
2018+
ti->error = "Block size is smaller than device logical block size";
2019+
goto bad;
2020+
}
19872021
wc->block_size_bits = __ffs(wc->block_size);
19882022

19892023
wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
@@ -2072,8 +2106,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
20722106
goto bad;
20732107
}
20742108
} else {
2075-
struct dm_io_region region;
2076-
struct dm_io_request req;
20772109
size_t n_blocks, n_metadata_blocks;
20782110
uint64_t n_bitmap_bits;
20792111

@@ -2130,19 +2162,9 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
21302162
goto bad;
21312163
}
21322164

2133-
region.bdev = wc->ssd_dev->bdev;
2134-
region.sector = wc->start_sector;
2135-
region.count = wc->metadata_sectors;
2136-
req.bi_op = REQ_OP_READ;
2137-
req.bi_op_flags = REQ_SYNC;
2138-
req.mem.type = DM_IO_VMA;
2139-
req.mem.ptr.vma = (char *)wc->memory_map;
2140-
req.client = wc->dm_io;
2141-
req.notify.fn = NULL;
2142-
2143-
r = dm_io(&req, 1, &region, NULL);
2165+
r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
21442166
if (r) {
2145-
ti->error = "Unable to read metadata";
2167+
ti->error = "Unable to read first block of metadata";
21462168
goto bad;
21472169
}
21482170
}

0 commit comments

Comments
 (0)