Skip to content

Commit

Permalink
page-alloc: make scrub_one_page() static
Browse files Browse the repository at this point in the history
Before starting to alter its properties, restrict the function's
visibility. The only external user is mem-paging, which we can
accommodate by different means.

Also move the function up in its source file, so we won't need to
forward-declare it. Constify its parameter at the same time.

Signed-off-by: Jan Beulich <[email protected]>
Acked-by: Julien Grall <[email protected]>
  • Loading branch information
jbeulich committed Nov 26, 2024
1 parent a24f1c0 commit c8e3e39
Show file tree
Hide file tree
Showing 5 changed files with 27 additions and 30 deletions.
6 changes: 0 additions & 6 deletions xen/arch/x86/include/asm/mem_paging.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,6 @@

int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg);

#ifdef CONFIG_MEM_PAGING
# define mem_paging_enabled(d) vm_event_check_ring((d)->vm_event_paging)
#else
# define mem_paging_enabled(d) false
#endif

#endif /*__ASM_X86_MEM_PAGING_H__ */

/*
Expand Down
3 changes: 0 additions & 3 deletions xen/arch/x86/mm/mem_paging.c
Original file line number Diff line number Diff line change
Expand Up @@ -304,9 +304,6 @@ static int evict(struct domain *d, gfn_t gfn)
ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,
p2m_ram_paged, a);

/* Clear content before returning the page to Xen */
scrub_one_page(page);

/* Track number of paged gfns */
atomic_inc(&d->paged_pages);

Expand Down
40 changes: 21 additions & 19 deletions xen/common/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,7 @@
#include <xen/sections.h>
#include <xen/softirq.h>
#include <xen/spinlock.h>
#include <xen/vm_event.h>

#include <asm/flushtlb.h>
#include <asm/page.h>
Expand Down Expand Up @@ -774,6 +775,21 @@ static void page_list_add_scrub(struct page_info *pg, unsigned int node,
#endif
#define SCRUB_BYTE_PATTERN (SCRUB_PATTERN & 0xff)

/*
 * Overwrite a page's contents so data from its previous owner cannot leak
 * when the page is reused.  Debug builds fill with SCRUB_BYTE_PATTERN so
 * callers cannot come to rely on scrubbed pages being zeroed; release
 * builds use clear_domain_page(), i.e. zero-fill.  Pages marked broken
 * are left untouched.
 */
static void scrub_one_page(const struct page_info *pg)
{
if ( unlikely(pg->count_info & PGC_broken) )
return;

#ifndef NDEBUG
/* Avoid callers relying on allocations returning zeroed pages. */
unmap_domain_page(memset(__map_domain_page(pg),
SCRUB_BYTE_PATTERN, PAGE_SIZE));
#else
/* For a production build, clear_page() is the fastest way to scrub. */
clear_domain_page(_mfn(page_to_mfn(pg)));
#endif
}

static void poison_one_page(struct page_info *pg)
{
#ifdef CONFIG_SCRUB_DEBUG
Expand Down Expand Up @@ -2548,10 +2564,12 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
/*
* Normally we expect a domain to clear pages before freeing them,
* if it cares about the secrecy of their contents. However, after
* a domain has died we assume responsibility for erasure. We do
* scrub regardless if option scrub_domheap is set.
* a domain has died or if it has mem-paging enabled we assume
* responsibility for erasure. We do scrub regardless if option
* scrub_domheap is set.
*/
scrub = d->is_dying || scrub_debug || opt_scrub_domheap;
scrub = d->is_dying || mem_paging_enabled(d) ||
scrub_debug || opt_scrub_domheap;
}
else
{
Expand Down Expand Up @@ -2635,22 +2653,6 @@ static __init int cf_check pagealloc_keyhandler_init(void)
}
__initcall(pagealloc_keyhandler_init);


/*
 * Overwrite a page's contents so data from its previous owner cannot leak
 * when the page is reused.  Debug builds fill with SCRUB_BYTE_PATTERN so
 * callers cannot come to rely on scrubbed pages being zeroed; release
 * builds use clear_domain_page(), i.e. zero-fill.  Pages marked broken
 * are left untouched.
 *
 * NOTE(review): this is the pre-change, externally visible variant
 * (prototype in xen/include/xen/mm.h); the commit removes it in favour of
 * a static, const-qualified copy placed earlier in this file.
 */
void scrub_one_page(struct page_info *pg)
{
if ( unlikely(pg->count_info & PGC_broken) )
return;

#ifndef NDEBUG
/* Avoid callers relying on allocations returning zeroed pages. */
unmap_domain_page(memset(__map_domain_page(pg),
SCRUB_BYTE_PATTERN, PAGE_SIZE));
#else
/* For a production build, clear_page() is the fastest way to scrub. */
clear_domain_page(_mfn(page_to_mfn(pg)));
#endif
}

static void cf_check dump_heap(unsigned char key)
{
s_time_t now = NOW();
Expand Down
2 changes: 0 additions & 2 deletions xen/include/xen/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -532,8 +532,6 @@ static inline unsigned int get_order_from_pages(unsigned long nr_pages)
return order;
}

void scrub_one_page(struct page_info *pg);

#ifndef arch_free_heap_page
#define arch_free_heap_page(d, pg) \
page_list_del(pg, page_to_list(d, pg))
Expand Down
6 changes: 6 additions & 0 deletions xen/include/xen/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -1203,6 +1203,12 @@ static always_inline bool is_iommu_enabled(const struct domain *d)
return evaluate_nospec(d->options & XEN_DOMCTL_CDF_iommu);
}

#ifdef CONFIG_MEM_PAGING
# define mem_paging_enabled(d) vm_event_check_ring((d)->vm_event_paging)
#else
# define mem_paging_enabled(d) false
#endif

extern bool sched_smt_power_savings;
extern bool sched_disable_smt_switching;

Expand Down

0 comments on commit c8e3e39

Please sign in to comment.