Skip to content

Commit 7889936

Browse files
Mikulas Patocka authored and gregkh (Greg Kroah-Hartman) committed
dm writecache: fix data corruption when reloading the target
commit 31b2212 upstream.

The dm-writecache reads metadata in the target constructor. However, when we reload the target, there could be another active instance running on the same device. This is the sequence of operations when doing a reload:

1. construct new target
2. suspend old target
3. resume new target
4. destroy old target

Metadata that were written by the old target between steps 1 and 2 would not be visible by the new target.

Fix the data corruption by loading the metadata in the resume handler.

Also, validate block_size is at least as large as both the devices' logical block size and only read 1 block from the metadata during target constructor -- no need to read entirety of metadata now that it is done during resume.

Fixes: 48debaf ("dm: add writecache target")
Cc: [email protected] # v4.18+
Signed-off-by: Mikulas Patocka <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent d4440a7 commit 7889936

File tree

1 file changed

+37
-15
lines changed

1 file changed

+37
-15
lines changed

drivers/md/dm-writecache.c

Lines changed: 37 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -884,6 +884,24 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
884884
return 0;
885885
}
886886

887+
static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
888+
{
889+
struct dm_io_region region;
890+
struct dm_io_request req;
891+
892+
region.bdev = wc->ssd_dev->bdev;
893+
region.sector = wc->start_sector;
894+
region.count = n_sectors;
895+
req.bi_op = REQ_OP_READ;
896+
req.bi_op_flags = REQ_SYNC;
897+
req.mem.type = DM_IO_VMA;
898+
req.mem.ptr.vma = (char *)wc->memory_map;
899+
req.client = wc->dm_io;
900+
req.notify.fn = NULL;
901+
902+
return dm_io(&req, 1, &region, NULL);
903+
}
904+
887905
static void writecache_resume(struct dm_target *ti)
888906
{
889907
struct dm_writecache *wc = ti->private;
@@ -894,8 +912,18 @@ static void writecache_resume(struct dm_target *ti)
894912

895913
wc_lock(wc);
896914

897-
if (WC_MODE_PMEM(wc))
915+
if (WC_MODE_PMEM(wc)) {
898916
persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
917+
} else {
918+
r = writecache_read_metadata(wc, wc->metadata_sectors);
919+
if (r) {
920+
size_t sb_entries_offset;
921+
writecache_error(wc, r, "unable to read metadata: %d", r);
922+
sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
923+
memset((char *)wc->memory_map + sb_entries_offset, -1,
924+
(wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
925+
}
926+
}
899927

900928
wc->tree = RB_ROOT;
901929
INIT_LIST_HEAD(&wc->lru);
@@ -1978,6 +2006,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
19782006
ti->error = "Invalid block size";
19792007
goto bad;
19802008
}
2009+
if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
2010+
wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
2011+
r = -EINVAL;
2012+
ti->error = "Block size is smaller than device logical block size";
2013+
goto bad;
2014+
}
19812015
wc->block_size_bits = __ffs(wc->block_size);
19822016

19832017
wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
@@ -2066,8 +2100,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
20662100
goto bad;
20672101
}
20682102
} else {
2069-
struct dm_io_region region;
2070-
struct dm_io_request req;
20712103
size_t n_blocks, n_metadata_blocks;
20722104
uint64_t n_bitmap_bits;
20732105

@@ -2124,19 +2156,9 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
21242156
goto bad;
21252157
}
21262158

2127-
region.bdev = wc->ssd_dev->bdev;
2128-
region.sector = wc->start_sector;
2129-
region.count = wc->metadata_sectors;
2130-
req.bi_op = REQ_OP_READ;
2131-
req.bi_op_flags = REQ_SYNC;
2132-
req.mem.type = DM_IO_VMA;
2133-
req.mem.ptr.vma = (char *)wc->memory_map;
2134-
req.client = wc->dm_io;
2135-
req.notify.fn = NULL;
2136-
2137-
r = dm_io(&req, 1, &region, NULL);
2159+
r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
21382160
if (r) {
2139-
ti->error = "Unable to read metadata";
2161+
ti->error = "Unable to read first block of metadata";
21402162
goto bad;
21412163
}
21422164
}

0 commit comments

Comments (0)