mprotect: prevent alteration of the PAT bits
There is a defect in mprotect, which lets the user change the page cache
type bits by-passing the kernel reserve_memtype and free_memtype wrappers.
Fix the problem by not letting mprotect change the PAT bits.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 44c81433e8
commit 1c12c4cf94
2 changed files with 23 additions and 4 deletions
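The fix boils down to one rule: when mprotect() rebuilds vm_page_prot, the cache-attribute (PAT-related) bits of the existing protection must survive, and only the remaining bits may come from the caller's request. Below is a minimal stand-alone sketch of that bit-preservation logic; the DEMO_* names and bit values are simplified stand-ins chosen for illustration, not the kernel's real definitions.

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_PWT 0x008ULL  /* stand-in for the kernel's _PAGE_PWT */
#define DEMO_PAGE_PCD 0x010ULL  /* stand-in for the kernel's _PAGE_PCD */
#define DEMO_CHG_MASK (DEMO_PAGE_PCD | DEMO_PAGE_PWT)

/* Same shape as the pgprot_modify() added by the patch: cache-attribute
 * bits are taken from the old protection, everything else from the new. */
static uint64_t demo_pgprot_modify(uint64_t oldprot, uint64_t newprot)
{
	uint64_t preservebits = oldprot & DEMO_CHG_MASK; /* PAT bits survive */
	uint64_t addbits = newprot;                      /* caller-requested bits */
	return preservebits | addbits;
}

int main(void)
{
	uint64_t oldprot = DEMO_PAGE_PCD | 0x1; /* present, cache-disabled mapping */
	uint64_t newprot = 0x3;                 /* mprotect asks for present + rw */
	printf("%#llx\n", (unsigned long long)demo_pgprot_modify(oldprot, newprot));
	/* prints 0x13: PCD is preserved, only the RW change is applied */
	return 0;
}

The #ifndef pgprot_modify fallback added to mm/mprotect.c keeps other architectures unaffected: where no architecture-specific pgprot_modify is defined, the new protection is used verbatim, exactly as before the patch.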
include/asm-x86/pgtable.h
@@ -57,7 +57,8 @@
 #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
 			 _PAGE_DIRTY)
 
-#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_PCD | _PAGE_PWT |		\
+			 _PAGE_ACCESSED | _PAGE_DIRTY)
 
 #define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
 #define _PAGE_CACHE_WB		(0)
@@ -288,12 +289,21 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	 * Chop off the NX bit (if present), and add the NX portion of
 	 * the newprot (if present):
 	 */
-	val &= _PAGE_CHG_MASK & ~_PAGE_NX;
-	val |= pgprot_val(newprot) & __supported_pte_mask;
+	val &= _PAGE_CHG_MASK;
+	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;
 
 	return __pte(val);
 }
 
+/* mprotect needs to preserve PAT bits when updating vm_page_prot */
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
+	pgprotval_t addbits = pgprot_val(newprot);
+	return __pgprot(preservebits | addbits);
+}
+
 #define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX))
 
 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
mm/mprotect.c
@@ -26,6 +26,13 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#ifndef pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+	return newprot;
+}
+#endif
+
 static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable)
@@ -192,7 +199,9 @@ success:
 	 * held in write mode.
 	 */
 	vma->vm_flags = newflags;
-	vma->vm_page_prot = vm_get_page_prot(newflags);
+	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
+					  vm_get_page_prot(newflags));
+
 	if (vma_wants_writenotify(vma)) {
 		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
 		dirty_accountable = 1;