dm cache: optimize dirty bit checking with find_next_bit when resizing
commit f484697e619a83ecc370443a34746379ad99d204 upstream.

When shrinking the fast device, dm-cache iteratively searches for a
dirty bit among the cache blocks to be dropped, which is less efficient.
Use find_next_bit instead, as it is twice as fast as the iterative
approach with test_bit.

Signed-off-by: Ming-Hung Tsai <mtsai@redhat.com>
Fixes: f494a9c6b1b6 ("dm cache: cache shrinking support")
Cc: stable@vger.kernel.org
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Acked-by: Joe Thornber <thornber@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 6ebcfe4aa8
parent 5b93a0a383
1 changed file with 8 additions and 8 deletions
drivers/md/dm-cache-target.c

@@ -2965,14 +2965,14 @@ static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 	/*
 	 * We can't drop a dirty block when shrinking the cache.
 	 */
-	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
-		if (is_dirty(cache, new_size)) {
-			DMERR("%s: unable to shrink cache; cache block %llu is dirty",
-			      cache_device_name(cache),
-			      (unsigned long long) from_cblock(new_size));
-			return false;
-		}
-		new_size = to_cblock(from_cblock(new_size) + 1);
+	new_size = to_cblock(find_next_bit(cache->dirty_bitset,
+					   from_cblock(cache->cache_size),
+					   from_cblock(new_size)));
+	if (new_size != cache->cache_size) {
+		DMERR("%s: unable to shrink cache; cache block %llu is dirty",
+		      cache_device_name(cache),
+		      (unsigned long long) from_cblock(new_size));
+		return false;
 	}
 
 	return true;
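The property the new code relies on, for readers who do not work with the kernel bitmap helpers: find_next_bit(bitset, size, start) returns the index of the first set bit in [start, size), or size if no bit is set, so comparing the result against cache->cache_size is enough to detect a dirty block in the range being dropped. The speedup comes from scanning the bitset a word at a time instead of calling test_bit() once per block. Below is a minimal userspace sketch of the same pattern; it is not kernel code, and test_bit_slow()/find_next_bit_sim() are simplified stand-ins for the kernel's test_bit()/find_next_bit().

/*
 * Minimal userspace sketch of the pattern applied by this commit; not
 * kernel code.  test_bit_slow() and find_next_bit_sim() are simplified
 * stand-ins for the kernel's test_bit() and find_next_bit().
 */
#include <stdbool.h>
#include <stdio.h>

#define NBITS 128
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static bool test_bit_slow(const unsigned long *bitset, unsigned long bit)
{
	return bitset[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG));
}

/* First set bit in [start, size), or size if none; scans a word at a time. */
static unsigned long find_next_bit_sim(const unsigned long *bitset,
				       unsigned long size, unsigned long start)
{
	unsigned long word = start / BITS_PER_LONG;
	unsigned long val;

	if (start >= size)
		return size;

	/* Mask off the bits below 'start' in the first word. */
	val = bitset[word] & (~0UL << (start % BITS_PER_LONG));
	for (;;) {
		if (val) {
			unsigned long pos = word * BITS_PER_LONG + __builtin_ctzl(val);
			return pos < size ? pos : size;
		}
		if (++word * BITS_PER_LONG >= size)
			return size;
		val = bitset[word];
	}
}

int main(void)
{
	unsigned long dirty[NBITS / BITS_PER_LONG] = { 0 };
	unsigned long cache_size = NBITS, new_size = 16, i;

	/* Pretend cache block 70, inside the range to be dropped, is dirty. */
	dirty[70 / BITS_PER_LONG] |= 1UL << (70 % BITS_PER_LONG);

	/* Old approach: test every block to be dropped, one bit at a time. */
	for (i = new_size; i < cache_size; i++)
		if (test_bit_slow(dirty, i))
			break;
	printf("loop + test_bit: first dirty block >= %lu is %lu\n", new_size, i);

	/* New approach: one scan; a result equal to cache_size means "none dirty". */
	i = find_next_bit_sim(dirty, cache_size, new_size);
	if (i != cache_size)
		printf("find_next_bit scan: block %lu is dirty, cannot shrink\n", i);

	return 0;
}

The kernel's find_next_bit() performs the same word-at-a-time scan (with further optimizations), which is where the roughly 2x speedup over per-block test_bit() reported in the commit message comes from.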