Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-06-28 17:41:50 +00:00
Merge branch 'akpm' (fixes from Andrew)
Merge patches from Andrew Morton: "13 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: place page->pmd_huge_pte to right union
  MAINTAINERS: add keyboard driver to Hyper-V file list
  x86, mm: do not leak page->ptl for pmd page tables
  ipc,shm: correct error return value in shmctl (SHM_UNLOCK)
  mm, mempolicy: silence gcc warning
  block/partitions/efi.c: fix bound check
  ARM: drivers/rtc/rtc-at91rm9200.c: disable interrupts at shutdown
  mm: hugetlbfs: fix hugetlbfs optimization
  kernel: remove CONFIG_USE_GENERIC_SMP_HELPERS cleanly
  ipc,shm: fix shm_file deletion races
  mm: thp: give transparent hugepage code a separate copy_page
  checkpatch: fix "Use of uninitialized value" warnings
  configfs: fix race between dentry put and lookup
This commit is contained in: commit a5d6e63323
16 changed files with 227 additions and 124 deletions
Documentation, split page table lock:

@@ -63,9 +63,9 @@ levels.
 PMD split lock enabling requires pgtable_pmd_page_ctor() call on PMD table
 allocation and pgtable_pmd_page_dtor() on freeing.
 
-Allocation usually happens in pmd_alloc_one(), freeing in pmd_free(), but
-make sure you cover all PMD table allocation / freeing paths: i.e X86_PAE
-preallocate few PMDs on pgd_alloc().
+Allocation usually happens in pmd_alloc_one(), freeing in pmd_free() and
+pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing
+paths: i.e X86_PAE preallocate few PMDs on pgd_alloc().
 
 With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.
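The documentation hunk above spells out a ctor/dtor contract. As an illustrative sketch only (not part of this commit, written against a hypothetical architecture), a pmd_alloc_one()/pmd_free() pair honoring that contract might look like the following; the same dtor call must also run on the pmd_free_tlb() path, which is exactly what the x86 hunk further down fixes:

/* Sketch, not from this commit: the pairing the documentation asks for. */
static pmd_t *sketch_pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);

	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {	/* sets up the split ptl */
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}

static void sketch_pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);

	pgtable_pmd_page_dtor(page);		/* must pair with the ctor */
	__free_pages(page, 0);
}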
MAINTAINERS:

@@ -4065,6 +4065,7 @@ F:	arch/x86/include/uapi/asm/hyperv.h
 F:	arch/x86/kernel/cpu/mshyperv.c
 F:	drivers/hid/hid-hyperv.c
 F:	drivers/hv/
+F:	drivers/input/serio/hyperv-keyboard.c
 F:	drivers/net/hyperv/
 F:	drivers/scsi/storvsc_drv.c
 F:	drivers/video/hyperv_fb.c
x86 pgtable (do not leak page->ptl for pmd page tables):

@@ -61,6 +61,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 #if PAGETABLE_LEVELS > 2
 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
+	struct page *page = virt_to_page(pmd);
 	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
 	/*
 	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
@@ -69,7 +70,8 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 #ifdef CONFIG_X86_PAE
 	tlb->need_flush_all = 1;
 #endif
-	tlb_remove_page(tlb, virt_to_page(pmd));
+	pgtable_pmd_page_dtor(page);
+	tlb_remove_page(tlb, page);
 }
 
 #if PAGETABLE_LEVELS > 3
block/partitions/efi.c:

@@ -96,6 +96,7 @@
  * - Code works, detects all the partitions.
  *
  ************************************************************/
+#include <linux/kernel.h>
 #include <linux/crc32.h>
 #include <linux/ctype.h>
 #include <linux/math64.h>
@@ -715,8 +716,8 @@ int efi_partition(struct parsed_partitions *state)
 		efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid);
 
 		/* Naively convert UTF16-LE to 7 bits. */
-		label_max = min(sizeof(info->volname) - 1,
-				sizeof(ptes[i].partition_name));
+		label_max = min(ARRAY_SIZE(info->volname) - 1,
+				ARRAY_SIZE(ptes[i].partition_name));
 		info->volname[label_max] = 0;
 		while (label_count < label_max) {
 			u8 c = ptes[i].partition_name[label_count] & 0xff;
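The bound-check fix matters because the GPT partition name is an array of 16-bit UTF-16LE code units: sizeof() counts bytes, ARRAY_SIZE() counts elements, so the old bound was twice the element count. A minimal userspace illustration of the difference (hypothetical names, assuming the usual 36-code-unit GPT name, not kernel code):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	unsigned short partition_name[36];	/* UTF-16LE, as in a GPT entry */

	/* sizeof() is in bytes: 72 here, twice the number of elements. */
	printf("sizeof=%zu ARRAY_SIZE=%zu\n",
	       sizeof(partition_name), ARRAY_SIZE(partition_name));

	/*
	 * Using sizeof() as an element-wise loop bound would walk 72
	 * "elements" across a 36-element array, overrunning it and
	 * potentially overflowing the destination buffer.
	 */
	return 0;
}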
null_blk driver (CONFIG_USE_GENERIC_SMP_HELPERS removal):

@@ -223,7 +223,7 @@ static void null_softirq_done_fn(struct request *rq)
 	blk_end_request_all(rq, 0);
 }
 
-#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
 
 static void null_ipi_cmd_end_io(void *data)
 {
@@ -260,7 +260,7 @@ static void null_cmd_end_ipi(struct nullb_cmd *cmd)
 	put_cpu();
 }
 
-#endif /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+#endif /* CONFIG_SMP */
 
 static inline void null_handle_cmd(struct nullb_cmd *cmd)
 {
@@ -270,7 +270,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
 		end_cmd(cmd);
 		break;
 	case NULL_IRQ_SOFTIRQ:
-#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
 		null_cmd_end_ipi(cmd);
 #else
 		end_cmd(cmd);
@@ -571,7 +571,7 @@ static int __init null_init(void)
 {
 	unsigned int i;
 
-#if !defined(CONFIG_SMP) || !defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#if !defined(CONFIG_SMP)
 	if (irqmode == NULL_IRQ_SOFTIRQ) {
 		pr_warn("null_blk: softirq completions not available.\n");
 		pr_warn("null_blk: using direct completions.\n");
drivers/rtc/rtc-at91rm9200.c:

@@ -428,6 +428,14 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static void at91_rtc_shutdown(struct platform_device *pdev)
+{
+	/* Disable all interrupts */
+	at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+					AT91_RTC_SECEV | AT91_RTC_TIMEV |
+					AT91_RTC_CALEV);
+}
+
 #ifdef CONFIG_PM_SLEEP
 
 /* AT91RM9200 RTC Power management control */
@@ -466,6 +474,7 @@ static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
 
 static struct platform_driver at91_rtc_driver = {
 	.remove		= __exit_p(at91_rtc_remove),
+	.shutdown	= at91_rtc_shutdown,
 	.driver		= {
 		.name	= "at91_rtc",
 		.owner	= THIS_MODULE,
configfs (race between dentry put and lookup):

@@ -56,10 +56,19 @@ static void configfs_d_iput(struct dentry * dentry,
 	struct configfs_dirent *sd = dentry->d_fsdata;
 
 	if (sd) {
-		BUG_ON(sd->s_dentry != dentry);
 		/* Coordinate with configfs_readdir */
 		spin_lock(&configfs_dirent_lock);
-		sd->s_dentry = NULL;
+		/* Coordinate with configfs_attach_attr where will increase
+		 * sd->s_count and update sd->s_dentry to new allocated one.
+		 * Only set sd->dentry to null when this dentry is the only
+		 * sd owner.
+		 * If not do so, configfs_d_iput may run just after
+		 * configfs_attach_attr and set sd->s_dentry to null
+		 * even it's still in use.
+		 */
+		if (atomic_read(&sd->s_count) <= 2)
+			sd->s_dentry = NULL;
+
 		spin_unlock(&configfs_dirent_lock);
 		configfs_put(sd);
 	}
@@ -416,8 +425,11 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
 	struct configfs_attribute * attr = sd->s_element;
 	int error;
 
+	spin_lock(&configfs_dirent_lock);
 	dentry->d_fsdata = configfs_get(sd);
 	sd->s_dentry = dentry;
+	spin_unlock(&configfs_dirent_lock);
 
 	error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
 				configfs_init_file);
 	if (error) {
hugetlb header:

@@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
 void hugepage_put_subpool(struct hugepage_subpool *spool);
 
 int PageHuge(struct page *page);
+int PageHeadHuge(struct page *page_head);
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
 int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -69,7 +70,6 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 bool is_hugepage_active(struct page *page);
-void copy_huge_page(struct page *dst, struct page *src);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -104,6 +104,11 @@ static inline int PageHuge(struct page *page)
 	return 0;
 }
 
+static inline int PageHeadHuge(struct page *page_head)
+{
+	return 0;
+}
+
 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 }
@@ -140,9 +145,6 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
 #define isolate_huge_page(p, l) false
 #define putback_active_hugepage(p)	do {} while (0)
 #define is_hugepage_active(x)	false
-static inline void copy_huge_page(struct page *dst, struct page *src)
-{
-}
 
 static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			unsigned long address, unsigned long end, pgprot_t newprot)
struct page (pmd_huge_pte moved to the right union):

@@ -65,9 +65,6 @@ struct page {
 						 * this page is only used to
 						 * free other pages.
 						 */
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
-		pgtable_t pmd_huge_pte; /* protected by page->ptl */
-#endif
 		};
 
 		union {
@@ -135,6 +132,9 @@ struct page {
 
 		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
+		pgtable_t pmd_huge_pte; /* protected by page->ptl */
+#endif
 	};
 
 	/* Remainder is not double word aligned */
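Why the move matters: members of one union share storage, so a field parked in the wrong union aliases fields that can be live at the same time. An illustrative userspace sketch of the failure mode, with hypothetical field names rather than the real struct page layout:

#include <stdio.h>

/* Hypothetical miniature of struct page: two anonymous unions. */
struct page_like {
	union {				/* word also holding counters/ptl */
		unsigned long counters;
		unsigned long ptl;
	};
	union {				/* word for type-specific payloads */
		void *slab_page;
		void *pmd_huge_pte;
	};
};

int main(void)
{
	struct page_like pg = { .counters = 0x1234 };

	/*
	 * Had pmd_huge_pte been declared in the first union, this store
	 * would silently clobber counters/ptl; in its own union, both
	 * fields can be live at once.
	 */
	pg.pmd_huge_pte = &pg;
	printf("counters=%#lx (intact)\n", pg.counters);
	return 0;
}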
ipc/shm.c:

@@ -208,15 +208,18 @@ static void shm_open(struct vm_area_struct *vma)
  */
 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 {
+	struct file *shm_file;
+
+	shm_file = shp->shm_file;
+	shp->shm_file = NULL;
 	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	shm_rmid(ns, shp);
 	shm_unlock(shp);
-	if (!is_file_hugepages(shp->shm_file))
-		shmem_lock(shp->shm_file, 0, shp->mlock_user);
+	if (!is_file_hugepages(shm_file))
+		shmem_lock(shm_file, 0, shp->mlock_user);
 	else if (shp->mlock_user)
-		user_shm_unlock(file_inode(shp->shm_file)->i_size,
-						shp->mlock_user);
-	fput (shp->shm_file);
+		user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
+	fput(shm_file);
 	ipc_rcu_putref(shp, shm_rcu_free);
 }
 
@@ -974,15 +977,25 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 		ipc_lock_object(&shp->shm_perm);
 		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
 			kuid_t euid = current_euid();
-			err = -EPERM;
 			if (!uid_eq(euid, shp->shm_perm.uid) &&
-			    !uid_eq(euid, shp->shm_perm.cuid))
+			    !uid_eq(euid, shp->shm_perm.cuid)) {
+				err = -EPERM;
 				goto out_unlock0;
-			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
+			}
+			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
+				err = -EPERM;
 				goto out_unlock0;
+			}
 		}
 
 		shm_file = shp->shm_file;
+
+		/* check if shm_destroy() is tearing down shp */
+		if (shm_file == NULL) {
+			err = -EIDRM;
+			goto out_unlock0;
+		}
+
 		if (is_file_hugepages(shm_file))
 			goto out_unlock0;
@@ -1101,6 +1114,14 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
 		goto out_unlock;
 
 	ipc_lock_object(&shp->shm_perm);
+
+	/* check if shm_destroy() is tearing down shp */
+	if (shp->shm_file == NULL) {
+		ipc_unlock_object(&shp->shm_perm);
+		err = -EIDRM;
+		goto out_unlock;
+	}
+
 	path = shp->shm_file->f_path;
 	path_get(&path);
 	shp->shm_nattch++;
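The shm_file fix follows a common teardown idiom: the destroyer clears the handle under the object lock, and every other locked path re-checks the handle before dereferencing it. A minimal illustrative sketch of the idiom, with hypothetical names and a pthread mutex standing in for the ipc object lock (not the kernel code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct segment {
	pthread_mutex_t lock;
	FILE *file;		/* cleared once teardown has started */
};

static void segment_destroy(struct segment *s)
{
	FILE *f;

	pthread_mutex_lock(&s->lock);
	f = s->file;
	s->file = NULL;		/* mark the segment as dying */
	pthread_mutex_unlock(&s->lock);

	if (f)
		fclose(f);	/* heavy teardown runs outside the lock */
}

static int segment_use(struct segment *s)
{
	int ret = 0;

	pthread_mutex_lock(&s->lock);
	if (s->file == NULL)	/* destroy won the race: bail out */
		ret = -EIDRM;
	else
		fprintf(s->file, "still attached\n");
	pthread_mutex_unlock(&s->lock);
	return ret;
}

int main(void)
{
	struct segment s = { PTHREAD_MUTEX_INITIALIZER, tmpfile() };

	printf("use before destroy: %d\n", segment_use(&s));
	segment_destroy(&s);
	printf("use after destroy: %d\n", segment_use(&s));	/* -EIDRM */
	return 0;
}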
mm/hugetlb.c:

@@ -476,40 +476,6 @@ static int vma_has_reserves(struct vm_area_struct *vma, long chg)
 	return 0;
 }
 
-static void copy_gigantic_page(struct page *dst, struct page *src)
-{
-	int i;
-	struct hstate *h = page_hstate(src);
-	struct page *dst_base = dst;
-	struct page *src_base = src;
-
-	for (i = 0; i < pages_per_huge_page(h); ) {
-		cond_resched();
-		copy_highpage(dst, src);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
-	}
-}
-
-void copy_huge_page(struct page *dst, struct page *src)
-{
-	int i;
-	struct hstate *h = page_hstate(src);
-
-	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		copy_gigantic_page(dst, src);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < pages_per_huge_page(h); i++) {
-		cond_resched();
-		copy_highpage(dst + i, src + i);
-	}
-}
-
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
 	int nid = page_to_nid(page);
@@ -736,6 +702,23 @@ int PageHuge(struct page *page)
 }
 EXPORT_SYMBOL_GPL(PageHuge);
 
+/*
+ * PageHeadHuge() only returns true for hugetlbfs head page, but not for
+ * normal or transparent huge pages.
+ */
+int PageHeadHuge(struct page *page_head)
+{
+	compound_page_dtor *dtor;
+
+	if (!PageHead(page_head))
+		return 0;
+
+	dtor = get_compound_page_dtor(page_head);
+
+	return dtor == free_huge_page;
+}
+EXPORT_SYMBOL_GPL(PageHeadHuge);
+
 pgoff_t __basepage_index(struct page *page)
 {
 	struct page *page_head = compound_head(page);
mempolicy (mpol_to_str):

@@ -2950,7 +2950,7 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 		return;
 	}
 
-	p += snprintf(p, maxlen, policy_modes[mode]);
+	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
 
 	if (flags & MPOL_MODE_FLAGS) {
 		p += snprintf(p, buffer + maxlen - p, "=");
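The mempolicy change is the classic format-string fix: a non-literal string passed as the format argument is interpreted, not copied, which is what gcc's -Wformat-security warns about. A minimal userspace sketch of the hazard (hypothetical strings, not the kernel's mode table):

#include <stdio.h>

int main(void)
{
	char buf[64];
	/* Imagine a name that happens to contain a conversion spec. */
	const char *mode = "inter%sleave";

	/* Wrong: the string is treated as a format; "%s" would read a
	 * nonexistent variadic argument (undefined behaviour). */
	/* snprintf(buf, sizeof(buf), mode); */

	/* Right: a literal "%s" format copies the string verbatim. */
	snprintf(buf, sizeof(buf), "%s", mode);
	printf("%s\n", buf);
	return 0;
}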
mm/migrate.c:

@@ -441,6 +441,54 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 	return MIGRATEPAGE_SUCCESS;
 }
 
+/*
+ * Gigantic pages are so large that we do not guarantee that page++ pointer
+ * arithmetic will work across the entire page.  We need something more
+ * specialized.
+ */
+static void __copy_gigantic_page(struct page *dst, struct page *src,
+				int nr_pages)
+{
+	int i;
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+
+	for (i = 0; i < nr_pages; ) {
+		cond_resched();
+		copy_highpage(dst, src);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
+
+static void copy_huge_page(struct page *dst, struct page *src)
+{
+	int i;
+	int nr_pages;
+
+	if (PageHuge(src)) {
+		/* hugetlbfs page */
+		struct hstate *h = page_hstate(src);
+		nr_pages = pages_per_huge_page(h);
+
+		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
+			__copy_gigantic_page(dst, src, nr_pages);
+			return;
+		}
+	} else {
+		/* thp page */
+		BUG_ON(!PageTransHuge(src));
+		nr_pages = hpage_nr_pages(src);
+	}
+
+	for (i = 0; i < nr_pages; i++) {
+		cond_resched();
+		copy_highpage(dst + i, src + i);
+	}
+}
+
 /*
  * Copy the page to its new location
  */
mm/swap.c:

@@ -82,19 +82,6 @@ static void __put_compound_page(struct page *page)
 
 static void put_compound_page(struct page *page)
 {
-	/*
-	 * hugetlbfs pages cannot be split from under us. If this is a
-	 * hugetlbfs page, check refcount on head page and release the page if
-	 * the refcount becomes zero.
-	 */
-	if (PageHuge(page)) {
-		page = compound_head(page);
-		if (put_page_testzero(page))
-			__put_compound_page(page);
-
-		return;
-	}
-
 	if (unlikely(PageTail(page))) {
 		/* __split_huge_page_refcount can run under us */
 		struct page *page_head = compound_trans_head(page);
@@ -111,14 +98,31 @@ static void put_compound_page(struct page *page)
 			 * still hot on arches that do not support
 			 * this_cpu_cmpxchg_double().
 			 */
-			if (PageSlab(page_head)) {
-				if (PageTail(page)) {
+			if (PageSlab(page_head) || PageHeadHuge(page_head)) {
+				if (likely(PageTail(page))) {
+					/*
+					 * __split_huge_page_refcount
+					 * cannot race here.
+					 */
+					VM_BUG_ON(!PageHead(page_head));
+					atomic_dec(&page->_mapcount);
 					if (put_page_testzero(page_head))
 						VM_BUG_ON(1);
-					atomic_dec(&page->_mapcount);
-					goto skip_lock_tail;
+					if (put_page_testzero(page_head))
+						__put_compound_page(page_head);
+					return;
 				} else
+					/*
+					 * __split_huge_page_refcount
+					 * run before us, "page" was a
+					 * THP tail. The split
+					 * page_head has been freed
+					 * and reallocated as slab or
+					 * hugetlbfs page of smaller
+					 * order (only possible if
+					 * reallocated as slab on
+					 * x86).
+					 */
 					goto skip_lock;
 			}
 			/*
@@ -132,8 +136,27 @@ static void put_compound_page(struct page *page)
 			/* __split_huge_page_refcount run before us */
 			compound_unlock_irqrestore(page_head, flags);
 skip_lock:
-			if (put_page_testzero(page_head))
+			if (put_page_testzero(page_head)) {
+				/*
+				 * The head page may have been
+				 * freed and reallocated as a
+				 * compound page of smaller
+				 * order and then freed again.
+				 * All we know is that it
+				 * cannot have become: a THP
+				 * page, a compound page of
+				 * higher order, a tail page.
+				 * That is because we still
+				 * hold the refcount of the
+				 * split THP tail and
+				 * page_head was the THP head
+				 * before the split.
+				 */
+				if (PageHead(page_head))
+					__put_compound_page(page_head);
+				else
 					__put_single_page(page_head);
+			}
 out_put_single:
 			if (put_page_testzero(page))
 				__put_single_page(page);
@@ -155,7 +178,6 @@ out_put_single:
 		VM_BUG_ON(atomic_read(&page->_count) != 0);
 		compound_unlock_irqrestore(page_head, flags);
 
-skip_lock_tail:
 		if (put_page_testzero(page_head)) {
 			if (PageHead(page_head))
 				__put_compound_page(page_head);
@@ -198,30 +220,32 @@ bool __get_page_tail(struct page *page)
 	 * proper PT lock that already serializes against
 	 * split_huge_page().
 	 */
-	bool got = false;
-	struct page *page_head;
-
-	/*
-	 * If this is a hugetlbfs page it cannot be split under us. Simply
-	 * increment refcount for the head page.
-	 */
-	if (PageHuge(page)) {
-		page_head = compound_head(page);
-		atomic_inc(&page_head->_count);
-		got = true;
-	} else {
-		unsigned long flags;
+	unsigned long flags;
+	bool got = false;
+	struct page *page_head = compound_trans_head(page);
 
-		page_head = compound_trans_head(page);
-		if (likely(page != page_head &&
-			   get_page_unless_zero(page_head))) {
+	if (likely(page != page_head && get_page_unless_zero(page_head))) {
 		/* Ref to put_compound_page() comment. */
-		if (PageSlab(page_head)) {
+		if (PageSlab(page_head) || PageHeadHuge(page_head)) {
 			if (likely(PageTail(page))) {
+				/*
+				 * This is a hugetlbfs page or a slab
+				 * page. __split_huge_page_refcount
+				 * cannot race here.
+				 */
+				VM_BUG_ON(!PageHead(page_head));
 				__get_page_tail_foll(page, false);
 				return true;
 			} else {
+				/*
+				 * __split_huge_page_refcount run
+				 * before us, "page" was a THP
+				 * tail. The split page_head has been
+				 * freed and reallocated as slab or
+				 * hugetlbfs page of smaller order
+				 * (only possible if reallocated as
+				 * slab on x86).
+				 */
 				put_page(page_head);
 				return false;
 			}
@@ -243,7 +267,6 @@ bool __get_page_tail(struct page *page)
 		if (unlikely(!got))
 			put_page(page_head);
 	}
-	}
 	return got;
 }
 EXPORT_SYMBOL(__get_page_tail);
net/Kconfig:

@@ -224,7 +224,7 @@ source "net/hsr/Kconfig"
 
 config RPS
 	boolean
-	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
+	depends on SMP && SYSFS
 	default y
 
 config RFS_ACCEL
@@ -235,7 +235,7 @@ config RFS_ACCEL
 
 config XPS
 	boolean
-	depends on SMP && USE_GENERIC_SMP_HELPERS
+	depends on SMP
 	default y
 
 config NETPRIO_CGROUP
checkpatch:

@@ -3289,6 +3289,7 @@ sub process {
 			}
 		}
 		if (!defined $suppress_whiletrailers{$linenr} &&
+		    defined($stat) && defined($cond) &&
 		    $line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) {
 			my ($s, $c) = ($stat, $cond);
 