mm: add support for verifying page sanitization

Signed-off-by: Daniel Micay <danielmicay@gmail.com>
Signed-off-by: anupritaisno1 <www.anuprita804@gmail.com>

Author: Daniel Micay <danielmicay@gmail.com>, 2017-05-03 21:54:56 -04:00 (committed by Kreciorek)
Commit: 7617e31b3c (parent: 4e3b773c8c)
3 changed files with 20 additions and 0 deletions


@@ -282,6 +282,13 @@ static inline void tag_clear_highpage(struct page *page)
 #endif
+static inline void verify_zero_highpage(struct page *page)
+{
+        void *kaddr = kmap_atomic(page);
+        BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE));
+        kunmap_atomic(kaddr);
+}
 static inline void zero_user_segments(struct page *page,
                 unsigned start1, unsigned end1,
                 unsigned start2, unsigned end2)
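
The helper added above (presumably in include/linux/highmem.h; the file name is not shown in this extract) maps the page with kmap_atomic() so its contents are addressable even on highmem configurations, then uses memchr_inv(), which returns a pointer to the first byte that does not match the given value or NULL when the whole range matches, so BUG_ON() fires on the first stray byte. For illustration only, the userspace sketch below performs the same kind of scan; FAKE_PAGE_SIZE and first_nonzero() are invented names, not part of the patch.

/*
 * Userspace analogue of the verify_zero_highpage() scan above.
 * The kernel's memchr_inv() returns the first byte that is NOT the
 * given value; a plain loop stands in for it here.
 * All names in this sketch are illustrative, not from the patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE 4096        /* stand-in for PAGE_SIZE */

/* Return a pointer to the first non-zero byte, or NULL if all zero. */
static const unsigned char *first_nonzero(const void *buf, size_t len)
{
        const unsigned char *p = buf;
        size_t i;

        for (i = 0; i < len; i++)
                if (p[i] != 0)
                        return p + i;
        return NULL;
}

int main(void)
{
        unsigned char *page = calloc(1, FAKE_PAGE_SIZE);

        if (!page)
                return 1;

        page[123] = 0xAA;          /* simulate a stray write into a "sanitized" page */

        const unsigned char *bad = first_nonzero(page, FAKE_PAGE_SIZE);
        if (bad)
                printf("non-zero byte at offset %td\n", bad - page);
        else
                printf("page is fully zeroed\n");

        free(page);
        return 0;
}

In the kernel the equivalent failure is not a printed message but a BUG_ON(), halting the task that received the corrupted page.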


@@ -2391,6 +2391,12 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
 {
         post_alloc_hook(page, order, gfp_flags);
+        if (IS_ENABLED(CONFIG_PAGE_SANITIZE_VERIFY) && want_init_on_free()) {
+                int i;
+                for (i = 0; i < (1 << order); i++)
+                        verify_zero_highpage(page + i);
+        }
         if (order && (gfp_flags & __GFP_COMP))
                 prep_compound_page(page, order);
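
This hunk hooks the check into prep_new_page() in the page allocator (presumably mm/page_alloc.c). A buddy allocation of order N consists of 2^N contiguous pages, hence the (1 << order) loop, and the scan only runs when want_init_on_free() is true, i.e. when pages are expected to have been zeroed at free time. The sketch below only illustrates that loop shape in userspace; the 4 KiB fake page size and all names are assumptions, not from the patch.

/*
 * Illustration of the (1 << order) loop above: an order-N block is
 * 2^N contiguous pages, and each constituent page is verified on its
 * own.  Userspace sketch; all names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE 4096

static int page_is_zero(const unsigned char *page)
{
        size_t i;

        for (i = 0; i < FAKE_PAGE_SIZE; i++)
                if (page[i] != 0)
                        return 0;
        return 1;
}

int main(void)
{
        unsigned int order = 3;                    /* 2^3 = 8 "pages" */
        size_t nr_pages = 1u << order;
        unsigned char *block = calloc(nr_pages, FAKE_PAGE_SIZE);
        size_t i;

        if (!block)
                return 1;

        block[5 * FAKE_PAGE_SIZE + 7] = 0x42;      /* dirty page 5 of the block */

        for (i = 0; i < nr_pages; i++)
                if (!page_is_zero(block + i * FAKE_PAGE_SIZE))
                        printf("page %zu of %zu is not zeroed\n", i, nr_pages);

        free(block);
        return 0;
}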


@@ -238,6 +238,13 @@ config INIT_ON_FREE_DEFAULT_ON
           touching "cold" memory areas. Most cases see 3-5% impact. Some
           synthetic workloads have measured as high as 8%.
+config PAGE_SANITIZE_VERIFY
+        bool "Verify sanitized pages"
+        default y
+        help
+          When init_on_free is enabled, verify that newly allocated pages
+          are zeroed to detect write-after-free bugs.
endmenu
endmenu
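
The new option (presumably in security/Kconfig.hardening) defaults to on and only has an effect together with init_on_free. The bug class it targets is a write through a stale pointer after the page has already been freed and sanitized: the stray bytes survive until the page is handed out again, at which point the verification in prep_new_page() catches them. The sketch below is a loose userspace analogy of that detection window, not kernel code; every name in it is invented for illustration.

/*
 * Userspace analogy (not kernel code) of the write-after-free window
 * this option is meant to catch: the "allocator" zeroes memory on
 * free, a stale pointer scribbles on it, and the check at the next
 * allocation notices.  All names here are illustrative.
 */
#include <assert.h>
#include <string.h>

#define FAKE_PAGE_SIZE 4096

static unsigned char pool[FAKE_PAGE_SIZE];   /* one-slot "page allocator" */

static void fake_free(unsigned char *p)
{
        memset(p, 0, FAKE_PAGE_SIZE);        /* init_on_free: sanitize on free */
}

static unsigned char *fake_alloc(void)
{
        size_t i;

        /* PAGE_SANITIZE_VERIFY analogue: a freed page must still be zero. */
        for (i = 0; i < FAKE_PAGE_SIZE; i++)
                assert(pool[i] == 0 && "write-after-free detected");
        return pool;
}

int main(void)
{
        unsigned char *p = fake_alloc();

        p[0] = 1;
        fake_free(p);

        p[100] = 0x5a;          /* buggy code writes through the stale pointer */

        fake_alloc();           /* assertion fires here, like BUG_ON() above */
        return 0;
}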