Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:
 "The post-linux-next material.

  7 patches.

  Subsystems affected by this patch series (all mm): debug,
  slab-generic, migration, memcg, and kasan"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  kasan: add kasan mode messages when kasan init
  mm: unexport {,un}lock_page_memcg
  mm: unexport folio_memcg_{,un}lock
  mm/migrate.c: remove MIGRATE_PFN_LOCKED
  mm: migrate: simplify the file-backed pages validation when migrating its mapping
  mm: allow only SLUB on PREEMPT_RT
  mm/page_owner.c: modify the type of argument "order" in some functions
Linus Torvalds 2021-11-11 14:31:47 -08:00
commit dbf4989618
14 changed files with 63 additions and 152 deletions

View file

@@ -360,7 +360,7 @@ between device driver specific code and shared common code:
 system memory page, locks the page with ``lock_page()``, and fills in the
 ``dst`` array entry with::
-     dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+     dst[i] = migrate_pfn(page_to_pfn(dpage));
 Now that the driver knows that this page is being migrated, it can
 invalidate device private MMU mappings and copy device private memory
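To make the documented flow concrete, here is a hedged sketch of the driver-side loop the HMM documentation above describes, written against the migrate_vma API as it stands after this series. Only migrate_pfn(), lock_page() and the src/dst arrays come from the text above; alloc_destination_page() is a hypothetical stand-in for driver-specific allocation, not a real kernel helper.

    #include <linux/migrate.h>
    #include <linux/mm.h>

    /* Sketch only: fill the dst array for every source entry that can migrate. */
    static void fill_dst_array(struct migrate_vma *args)
    {
        unsigned long i;

        for (i = 0; i < args->npages; i++) {
            struct page *dpage;

            if (!(args->src[i] & MIGRATE_PFN_MIGRATE))
                continue;

            dpage = alloc_destination_page();   /* hypothetical helper */
            if (!dpage)
                continue;

            /* Destination pages must still be locked by the driver. */
            lock_page(dpage);

            /* MIGRATE_PFN_LOCKED is gone; only the pfn plus the VALID bit is encoded. */
            args->dst[i] = migrate_pfn(page_to_pfn(dpage));
        }
    }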

View file

@@ -310,7 +310,7 @@ void __init kasan_init(void)
     kasan_init_depth();
 #if defined(CONFIG_KASAN_GENERIC)
     /* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
-    pr_info("KernelAddressSanitizer initialized\n");
+    pr_info("KernelAddressSanitizer initialized (generic)\n");
 #endif
 }

View file

@@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
                 gpa, 0, page_shift);
     if (ret == U_SUCCESS)
-        *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+        *mig.dst = migrate_pfn(pfn);
     else {
         unlock_page(dpage);
         __free_page(dpage);
@@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
         }
     }
-    *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+    *mig.dst = migrate_pfn(page_to_pfn(dpage));
     migrate_vma_pages(&mig);
 out_finalize:
     migrate_vma_finalize(&mig);

View file

@@ -317,7 +317,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
             migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
             svm_migrate_get_vram_page(prange, migrate->dst[i]);
             migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-            migrate->dst[i] |= MIGRATE_PFN_LOCKED;
             src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
                           DMA_TO_DEVICE);
             r = dma_mapping_error(dev, src[i]);
@@ -610,7 +609,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
                   dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
         migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
-        migrate->dst[i] |= MIGRATE_PFN_LOCKED;
         j++;
     }

View file

@@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
         goto error_dma_unmap;
     mutex_unlock(&svmm->mutex);
-    args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+    args->dst[0] = migrate_pfn(page_to_pfn(dpage));
     return 0;
 error_dma_unmap:
@@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
             ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
     if (src & MIGRATE_PFN_WRITE)
         *pfn |= NVIF_VMM_PFNMAP_V0_W;
-    return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+    return migrate_pfn(page_to_pfn(dpage));
 out_dma_unmap:
     dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

View file

@@ -110,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page,
  */
 #define MIGRATE_PFN_VALID   (1UL << 0)
 #define MIGRATE_PFN_MIGRATE (1UL << 1)
-#define MIGRATE_PFN_LOCKED  (1UL << 2)
 #define MIGRATE_PFN_WRITE   (1UL << 3)
 #define MIGRATE_PFN_SHIFT   6
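As background for this hunk: a migrate PFN entry packs the page frame number above MIGRATE_PFN_SHIFT and keeps the low bits for the flags defined here, which is why an entire flag bit can simply be dropped without changing the encoding of the pfn itself. The pair of helpers below sketches the encode/decode that lives next to these defines in migrate.h; treat the exact bodies as illustrative rather than a verbatim copy.

    /* Encode: pfn in the high bits, MIGRATE_PFN_* flags in the low bits. */
    static inline unsigned long migrate_pfn(unsigned long pfn)
    {
        return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
    }

    /* Decode: recover the struct page, or NULL if the entry is not valid. */
    static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
    {
        if (!(mpfn & MIGRATE_PFN_VALID))
            return NULL;
        return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
    }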

View file

@@ -8,9 +8,9 @@
 extern struct static_key_false page_owner_inited;
 extern struct page_ext_operations page_owner_ops;
-extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __reset_page_owner(struct page *page, unsigned short order);
 extern void __set_page_owner(struct page *page,
-            unsigned int order, gfp_t gfp_mask);
+            unsigned short order, gfp_t gfp_mask);
 extern void __split_page_owner(struct page *page, unsigned int nr);
 extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
@@ -18,14 +18,14 @@ extern void __dump_page_owner(const struct page *page);
 extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                     pg_data_t *pgdat, struct zone *zone);
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
 {
     if (static_branch_unlikely(&page_owner_inited))
         __reset_page_owner(page, order);
 }
 static inline void set_page_owner(struct page *page,
-            unsigned int order, gfp_t gfp_mask)
+            unsigned short order, gfp_t gfp_mask)
 {
     if (static_branch_unlikely(&page_owner_inited))
         __set_page_owner(page, order, gfp_mask);
@@ -52,7 +52,7 @@ static inline void dump_page_owner(const struct page *page)
         __dump_page_owner(page);
 }
 #else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 }
 static inline void set_page_owner(struct page *page,
@@ -60,7 +60,7 @@ static inline void set_page_owner(struct page *page,
 {
 }
 static inline void split_page_owner(struct page *page,
-            unsigned int order)
+            unsigned short order)
 {
 }
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)

View file

@@ -1896,6 +1896,7 @@ choice
 config SLAB
     bool "SLAB"
+    depends on !PREEMPT_RT
     select HAVE_HARDENED_USERCOPY_ALLOCATOR
     help
       The regular slab allocator that is established and known to work
@@ -1916,6 +1917,7 @@ config SLUB
 config SLOB
     depends on EXPERT
     bool "SLOB (Simple Allocator)"
+    depends on !PREEMPT_RT
     help
       SLOB replaces the stock allocator with a drastically simpler
       allocator. SLOB is generally more space efficient but

View file

@@ -613,8 +613,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
          */
         rpage->zone_device_data = dmirror;
-        *dst = migrate_pfn(page_to_pfn(dpage)) |
-            MIGRATE_PFN_LOCKED;
+        *dst = migrate_pfn(page_to_pfn(dpage));
         if ((*src & MIGRATE_PFN_WRITE) ||
             (!spage && args->vma->vm_flags & VM_WRITE))
             *dst |= MIGRATE_PFN_WRITE;
@@ -1137,7 +1136,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
         lock_page(dpage);
         xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
         copy_highpage(dpage, spage);
-        *dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+        *dst = migrate_pfn(page_to_pfn(dpage));
         if (*src & MIGRATE_PFN_WRITE)
             *dst |= MIGRATE_PFN_WRITE;
     }

View file

@@ -106,6 +106,16 @@ static int __init early_kasan_flag_stacktrace(char *arg)
 }
 early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
+static inline const char *kasan_mode_info(void)
+{
+    if (kasan_mode == KASAN_MODE_ASYNC)
+        return "async";
+    else if (kasan_mode == KASAN_MODE_ASYMM)
+        return "asymm";
+    else
+        return "sync";
+}
 /* kasan_init_hw_tags_cpu() is called for each CPU. */
 void kasan_init_hw_tags_cpu(void)
 {
@@ -177,7 +187,9 @@ void __init kasan_init_hw_tags(void)
         break;
     }
-    pr_info("KernelAddressSanitizer initialized\n");
+    pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, stacktrace=%s)\n",
+        kasan_mode_info(),
+        kasan_stack_collection_enabled() ? "on" : "off");
 }
 void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)

View file

@@ -42,7 +42,7 @@ void __init kasan_init_sw_tags(void)
     for_each_possible_cpu(cpu)
         per_cpu(prng_state, cpu) = (u32)get_cycles();
-    pr_info("KernelAddressSanitizer initialized\n");
+    pr_info("KernelAddressSanitizer initialized (sw-tags)\n");
 }
 /*

View file

@@ -2058,13 +2058,11 @@ again:
     memcg->move_lock_task = current;
     memcg->move_lock_flags = flags;
 }
-EXPORT_SYMBOL(folio_memcg_lock);
 void lock_page_memcg(struct page *page)
 {
     folio_memcg_lock(page_folio(page));
 }
-EXPORT_SYMBOL(lock_page_memcg);
 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
 {
@@ -2092,13 +2090,11 @@ void folio_memcg_unlock(struct folio *folio)
 {
     __folio_memcg_unlock(folio_memcg(folio));
 }
-EXPORT_SYMBOL(folio_memcg_unlock);
 void unlock_page_memcg(struct page *page)
 {
     folio_memcg_unlock(page_folio(page));
 }
-EXPORT_SYMBOL(unlock_page_memcg);
 struct obj_stock {
 #ifdef CONFIG_MEMCG_KMEM

View file

@@ -404,12 +404,6 @@ int folio_migrate_mapping(struct address_space *mapping,
     newzone = folio_zone(newfolio);
     xas_lock_irq(&xas);
-    if (folio_ref_count(folio) != expected_count ||
-        xas_load(&xas) != folio) {
-        xas_unlock_irq(&xas);
-        return -EAGAIN;
-    }
     if (!folio_ref_freeze(folio, expected_count)) {
         xas_unlock_irq(&xas);
         return -EAGAIN;
@@ -2368,7 +2362,6 @@ again:
          * can't be dropped from it).
          */
         get_page(page);
-        migrate->cpages++;
         /*
          * Optimize for the common case where page is only mapped once
@@ -2378,7 +2371,7 @@ again:
         if (trylock_page(page)) {
             pte_t swp_pte;
-            mpfn |= MIGRATE_PFN_LOCKED;
+            migrate->cpages++;
             ptep_get_and_clear(mm, addr, ptep);
             /* Setup special migration page table entry */
@@ -2412,6 +2405,9 @@ again:
             if (pte_present(pte))
                 unmapped++;
+        } else {
+            put_page(page);
+            mpfn = 0;
         }
 next:
@@ -2516,15 +2512,17 @@ static bool migrate_vma_check_page(struct page *page)
 }
 /*
- * migrate_vma_prepare() - lock pages and isolate them from the lru
+ * migrate_vma_unmap() - replace page mapping with special migration pte entry
  * @migrate: migrate struct containing all migration information
  *
- * This locks pages that have been collected by migrate_vma_collect(). Once each
- * page is locked it is isolated from the lru (for non-device pages). Finally,
- * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
- * migrated by concurrent kernel threads.
+ * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
+ * special migration pte entry and check if it has been pinned. Pinned pages are
+ * restored because we cannot migrate them.
+ *
+ * This is the last step before we call the device driver callback to allocate
+ * destination memory and copy contents of original page over to new page.
  */
-static void migrate_vma_prepare(struct migrate_vma *migrate)
+static void migrate_vma_unmap(struct migrate_vma *migrate)
 {
     const unsigned long npages = migrate->npages;
     const unsigned long start = migrate->start;
@@ -2533,32 +2531,12 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
     lru_add_drain();
-    for (i = 0; (i < npages) && migrate->cpages; i++) {
+    for (i = 0; i < npages; i++) {
         struct page *page = migrate_pfn_to_page(migrate->src[i]);
-        bool remap = true;
         if (!page)
             continue;
-        if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
-            /*
-             * Because we are migrating several pages there can be
-             * a deadlock between 2 concurrent migration where each
-             * are waiting on each other page lock.
-             *
-             * Make migrate_vma() a best effort thing and backoff
-             * for any page we can not lock right away.
-             */
-            if (!trylock_page(page)) {
-                migrate->src[i] = 0;
-                migrate->cpages--;
-                put_page(page);
-                continue;
-            }
-            remap = false;
-            migrate->src[i] |= MIGRATE_PFN_LOCKED;
-        }
         /* ZONE_DEVICE pages are not on LRU */
         if (!is_zone_device_page(page)) {
             if (!PageLRU(page) && allow_drain) {
@@ -2568,16 +2546,9 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
         }
         if (isolate_lru_page(page)) {
-            if (remap) {
-                migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-                migrate->cpages--;
-                restore++;
-            } else {
-                migrate->src[i] = 0;
-                unlock_page(page);
-                migrate->cpages--;
-                put_page(page);
-            }
+            migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+            migrate->cpages--;
+            restore++;
             continue;
         }
@@ -2585,80 +2556,20 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
             put_page(page);
         }
-        if (!migrate_vma_check_page(page)) {
-            if (remap) {
-                migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-                migrate->cpages--;
-                restore++;
-                if (!is_zone_device_page(page)) {
-                    get_page(page);
-                    putback_lru_page(page);
-                }
-            } else {
-                migrate->src[i] = 0;
-                unlock_page(page);
-                migrate->cpages--;
-                if (!is_zone_device_page(page))
-                    putback_lru_page(page);
-                else
-                    put_page(page);
-            }
-        }
-    }
-    for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
-        struct page *page = migrate_pfn_to_page(migrate->src[i]);
-        if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
-            continue;
-        remove_migration_pte(page, migrate->vma, addr, page);
-        migrate->src[i] = 0;
-        unlock_page(page);
-        put_page(page);
-        restore--;
-    }
-}
-/*
- * migrate_vma_unmap() - replace page mapping with special migration pte entry
- * @migrate: migrate struct containing all migration information
- *
- * Replace page mapping (CPU page table pte) with a special migration pte entry
- * and check again if it has been pinned. Pinned pages are restored because we
- * cannot migrate them.
- *
- * This is the last step before we call the device driver callback to allocate
- * destination memory and copy contents of original page over to new page.
- */
-static void migrate_vma_unmap(struct migrate_vma *migrate)
-{
-    const unsigned long npages = migrate->npages;
-    const unsigned long start = migrate->start;
-    unsigned long addr, i, restore = 0;
-    for (i = 0; i < npages; i++) {
-        struct page *page = migrate_pfn_to_page(migrate->src[i]);
-        if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
-            continue;
-        if (page_mapped(page)) {
+        if (page_mapped(page))
             try_to_migrate(page, 0);
-            if (page_mapped(page))
-                goto restore;
-        }
-        if (migrate_vma_check_page(page))
+        if (page_mapped(page) || !migrate_vma_check_page(page)) {
+            if (!is_zone_device_page(page)) {
+                get_page(page);
+                putback_lru_page(page);
+            }
+            migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+            migrate->cpages--;
+            restore++;
             continue;
-restore:
-        migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-        migrate->cpages--;
-        restore++;
+        }
     }
     for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
@@ -2671,12 +2582,8 @@ restore:
         migrate->src[i] = 0;
         unlock_page(page);
+        put_page(page);
         restore--;
-        if (is_zone_device_page(page))
-            put_page(page);
-        else
-            putback_lru_page(page);
     }
 }
@@ -2699,8 +2606,8 @@ restore:
  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
  * flag set). Once these are allocated and copied, the caller must update each
  * corresponding entry in the dst array with the pfn value of the destination
- * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
- * (destination pages must have their struct pages locked, via lock_page()).
+ * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
+ * lock_page().
  *
  * Note that the caller does not have to migrate all the pages that are marked
  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
@@ -2769,8 +2676,6 @@ int migrate_vma_setup(struct migrate_vma *args)
     migrate_vma_collect(args);
-    if (args->cpages)
-        migrate_vma_prepare(args);
     if (args->cpages)
         migrate_vma_unmap(args);
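With migrate_vma_prepare() folded into migrate_vma_unmap(), the caller-visible sequence described in the comment above stays the same: setup, driver allocation/copy, pages, finalize. The sketch below is illustrative only; driver_alloc_and_copy() is a hypothetical placeholder for the driver-specific step (lib/test_hmm.c and the nouveau driver are the in-tree examples), and the struct field values are assumptions, not taken from this diff.

    /* Hedged sketch of a migrate_vma caller after this series. */
    static int demo_migrate_range(struct vm_area_struct *vma,
                                  unsigned long start, unsigned long end,
                                  unsigned long *src, unsigned long *dst)
    {
        struct migrate_vma args = {
            .vma   = vma,
            .start = start,
            .end   = end,
            .src   = src,
            .dst   = dst,
            .flags = MIGRATE_VMA_SELECT_SYSTEM,
        };
        int ret;

        /* Collects source pages and unmaps them; no separate lock/prepare step anymore. */
        ret = migrate_vma_setup(&args);
        if (ret)
            return ret;

        if (args.cpages)
            driver_alloc_and_copy(&args);  /* hypothetical: allocate dst pages, lock them, fill args.dst[] */

        migrate_vma_pages(&args);     /* switch mappings over to the dst pages */
        migrate_vma_finalize(&args);  /* unlock and release src/dst pages */
        return 0;
    }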

View file

@@ -125,7 +125,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
     return handle;
 }
-void __reset_page_owner(struct page *page, unsigned int order)
+void __reset_page_owner(struct page *page, unsigned short order)
 {
     int i;
     struct page_ext *page_ext;
@@ -149,7 +149,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 static inline void __set_page_owner_handle(struct page_ext *page_ext,
                     depot_stack_handle_t handle,
-                    unsigned int order, gfp_t gfp_mask)
+                    unsigned short order, gfp_t gfp_mask)
 {
     struct page_owner *page_owner;
     int i;
@@ -169,7 +169,7 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext,
     }
 }
-noinline void __set_page_owner(struct page *page, unsigned int order,
+noinline void __set_page_owner(struct page *page, unsigned short order,
                 gfp_t gfp_mask)
 {
     struct page_ext *page_ext = lookup_page_ext(page);