x86: Use new cache mode type in memtype related functions

Instead of directly using the cache mode bits in the pte, switch to
using the cache mode type.

Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-14-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit e00c8cc93c (parent b14097bd91)
Author:    Juergen Gross, 2014-11-03 14:01:59 +01:00
Committer: Thomas Gleixner

7 changed files with 96 additions and 90 deletions
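
The shape of the conversion, condensed for orientation (a sketch distilled
from the hunks below, not itself a hunk from this patch; reserve_memtype()
and the cachemode2protval()/pgprot2cachemode() helpers come from earlier
patches in this series):

    /* Before: raw PTE cache bits travel through the memtype API. */
    ret = reserve_memtype(start, end, _PAGE_CACHE_WC, &new_prot_val);
    new_pcm = pgprot2cachemode(__pgprot(new_prot_val));

    /* After: the abstract cache mode type travels end to end ... */
    ret = reserve_memtype(start, end, _PAGE_CACHE_MODE_WC, &new_pcm);

    /* ... and PTE bits are produced only at the page-table boundary. */
    prot = __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) |
                    cachemode2protval(new_pcm));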

--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h

@@ -9,10 +9,10 @@
 /*
  * X86 PAT uses page flags WC and Uncached together to keep track of
  * memory type of pages that have backing page struct. X86 PAT supports 3
- * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
- * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
+ * different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and
+ * _PAGE_CACHE_MODE_UC_MINUS and fourth state where page's memory type has not
  * been changed from its default (value of -1 used to denote this).
- * Note we do not support _PAGE_CACHE_UC here.
+ * Note we do not support _PAGE_CACHE_MODE_UC here.
  */

 #define _PGMT_DEFAULT		0
@@ -22,36 +22,40 @@
 #define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
 #define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

-static inline unsigned long get_page_memtype(struct page *pg)
+static inline enum page_cache_mode get_page_memtype(struct page *pg)
 {
 	unsigned long pg_flags = pg->flags & _PGMT_MASK;

 	if (pg_flags == _PGMT_DEFAULT)
 		return -1;
 	else if (pg_flags == _PGMT_WC)
-		return _PAGE_CACHE_WC;
+		return _PAGE_CACHE_MODE_WC;
 	else if (pg_flags == _PGMT_UC_MINUS)
-		return _PAGE_CACHE_UC_MINUS;
+		return _PAGE_CACHE_MODE_UC_MINUS;
 	else
-		return _PAGE_CACHE_WB;
+		return _PAGE_CACHE_MODE_WB;
 }

-static inline void set_page_memtype(struct page *pg, unsigned long memtype)
+static inline void set_page_memtype(struct page *pg,
+				    enum page_cache_mode memtype)
 {
-	unsigned long memtype_flags = _PGMT_DEFAULT;
+	unsigned long memtype_flags;
 	unsigned long old_flags;
 	unsigned long new_flags;

 	switch (memtype) {
-	case _PAGE_CACHE_WC:
+	case _PAGE_CACHE_MODE_WC:
 		memtype_flags = _PGMT_WC;
 		break;
-	case _PAGE_CACHE_UC_MINUS:
+	case _PAGE_CACHE_MODE_UC_MINUS:
 		memtype_flags = _PGMT_UC_MINUS;
 		break;
-	case _PAGE_CACHE_WB:
+	case _PAGE_CACHE_MODE_WB:
 		memtype_flags = _PGMT_WB;
 		break;
+	default:
+		memtype_flags = _PGMT_DEFAULT;
+		break;
 	}

 	do {
@@ -60,8 +64,14 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
 	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
 }
 #else
-static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
-static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
+static inline enum page_cache_mode get_page_memtype(struct page *pg)
+{
+	return -1;
+}
+static inline void set_page_memtype(struct page *pg,
+				    enum page_cache_mode memtype)
+{
+}
 #endif

 /*
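
For context, get_page_memtype()/set_page_memtype() pack the four trackable
states into the two page flags named by _PGMT_MASK. Per the _PGMT_*
definitions in cacheflush.h (not all of which are visible in this hunk),
the encoding is:

    /*
     * PG_arch_1  PG_uncached   tracked state
     *     0          0         _PGMT_DEFAULT   (never set; reads back as -1)
     *     1          0         _PGMT_WC        -> _PAGE_CACHE_MODE_WC
     *     0          1         _PGMT_UC_MINUS  -> _PAGE_CACHE_MODE_UC_MINUS
     *     1          1         _PGMT_WB        -> _PAGE_CACHE_MODE_WB
     */

The cmpxchg() loop in set_page_memtype() rewrites just these two bits
atomically, so concurrent updates to unrelated page flags are not lost.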

--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h

@@ -13,7 +13,7 @@ static const int pat_enabled;
 extern void pat_init(void);

 extern int reserve_memtype(u64 start, u64 end,
-		unsigned long req_type, unsigned long *ret_type);
+		enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
 extern int free_memtype(u64 start, u64 end);
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,

--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c

@@ -83,7 +83,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	const unsigned long unaligned_size = size;
 	struct vm_struct *area;
 	enum page_cache_mode new_pcm;
-	unsigned long new_prot_val;
 	pgprot_t prot;
 	int retval;
 	void __iomem *ret_addr;
@@ -135,14 +134,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;

 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
-						cachemode2protval(pcm), &new_prot_val);
+						pcm, &new_pcm);
 	if (retval) {
 		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
 		return NULL;
 	}

-	new_pcm = pgprot2cachemode(__pgprot(new_prot_val));
-
 	if (pcm != new_pcm) {
 		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
 			printk(KERN_ERR

--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c

@@ -1451,7 +1451,7 @@ int set_memory_uc(unsigned long addr, int numpages)
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
 	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-			      _PAGE_CACHE_UC_MINUS, NULL);
+			      _PAGE_CACHE_MODE_UC_MINUS, NULL);
 	if (ret)
 		goto out_err;

@@ -1479,7 +1479,7 @@ static int _set_memory_array(unsigned long *addr, int addrinarray,
 	 */
 	for (i = 0; i < addrinarray; i++) {
 		ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
-					cachemode2protval(new_type), NULL);
+					new_type, NULL);
 		if (ret)
 			goto out_free;
 	}
@@ -1544,7 +1544,7 @@ int set_memory_wc(unsigned long addr, int numpages)
 		return set_memory_uc(addr, numpages);

 	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-		_PAGE_CACHE_WC, NULL);
+		_PAGE_CACHE_MODE_WC, NULL);
 	if (ret)
 		goto out_err;

@@ -1662,8 +1662,7 @@ static int _set_pages_array(struct page **pages, int addrinarray,
 			continue;
 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
-		if (reserve_memtype(start, end, cachemode2protval(new_type),
-				    NULL))
+		if (reserve_memtype(start, end, new_type, NULL))
 			goto err_out;
 	}
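
For reference, the reserve_memtype() calls above sit behind the
set_memory_*() interface that drivers use to change the caching attributes
of kernel mappings; this patch changes only the internal bookkeeping, not
that interface. A typical caller would look roughly like this (hypothetical
driver fragment; buf and nrpages are assumed to describe a page-aligned
allocation):

    unsigned long vaddr = (unsigned long)buf;

    /* Make the buffer write-combining, e.g. for streaming to a device. */
    if (set_memory_wc(vaddr, nrpages))
            return -EIO;

    /* ... use the buffer ... */

    /* Restore normal write-back caching before freeing the pages. */
    set_memory_wb(vaddr, nrpages);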

--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c

@@ -139,20 +139,21 @@ static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
+static unsigned long pat_x_mtrr_type(u64 start, u64 end,
+				     enum page_cache_mode req_type)
 {
 	/*
 	 * Look for MTRR hint to get the effective type in case where PAT
 	 * request is for WB.
 	 */
-	if (req_type == _PAGE_CACHE_WB) {
+	if (req_type == _PAGE_CACHE_MODE_WB) {
 		u8 mtrr_type;

 		mtrr_type = mtrr_type_lookup(start, end);
 		if (mtrr_type != MTRR_TYPE_WRBACK)
-			return _PAGE_CACHE_UC_MINUS;
+			return _PAGE_CACHE_MODE_UC_MINUS;

-		return _PAGE_CACHE_WB;
+		return _PAGE_CACHE_MODE_WB;
 	}

 	return req_type;
@@ -207,25 +208,26 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
  * - Find the memtype of all the pages in the range, look for any conflicts
  * - In case of no conflicts, set the new memtype for pages in the range
  */
-static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
-				  unsigned long *new_type)
+static int reserve_ram_pages_type(u64 start, u64 end,
+				  enum page_cache_mode req_type,
+				  enum page_cache_mode *new_type)
 {
 	struct page *page;
 	u64 pfn;

-	if (req_type == _PAGE_CACHE_UC) {
+	if (req_type == _PAGE_CACHE_MODE_UC) {
 		/* We do not support strong UC */
 		WARN_ON_ONCE(1);
-		req_type = _PAGE_CACHE_UC_MINUS;
+		req_type = _PAGE_CACHE_MODE_UC_MINUS;
 	}

 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-		unsigned long type;
+		enum page_cache_mode type;

 		page = pfn_to_page(pfn);
 		type = get_page_memtype(page);
 		if (type != -1) {
-			printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+			pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
 				start, end - 1, type, req_type);
 			if (new_type)
 				*new_type = type;
@@ -258,21 +260,21 @@ static int free_ram_pages_type(u64 start, u64 end)

 /*
  * req_type typically has one of the:
- * - _PAGE_CACHE_WB
- * - _PAGE_CACHE_WC
- * - _PAGE_CACHE_UC_MINUS
- * - _PAGE_CACHE_UC
+ * - _PAGE_CACHE_MODE_WB
+ * - _PAGE_CACHE_MODE_WC
+ * - _PAGE_CACHE_MODE_UC_MINUS
+ * - _PAGE_CACHE_MODE_UC
  *
  * If new_type is NULL, function will return an error if it cannot reserve the
  * region with req_type. If new_type is non-NULL, function will return
  * available type in new_type in case of no error. In case of any error
  * it will return a negative return value.
  */
-int reserve_memtype(u64 start, u64 end, unsigned long req_type,
-		    unsigned long *new_type)
+int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+		    enum page_cache_mode *new_type)
 {
 	struct memtype *new;
-	unsigned long actual_type;
+	enum page_cache_mode actual_type;
 	int is_range_ram;
 	int err = 0;
@@ -281,10 +283,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (!pat_enabled) {
 		/* This is identical to page table setting without PAT */
 		if (new_type) {
-			if (req_type == _PAGE_CACHE_WC)
-				*new_type = _PAGE_CACHE_UC_MINUS;
+			if (req_type == _PAGE_CACHE_MODE_WC)
+				*new_type = _PAGE_CACHE_MODE_UC_MINUS;
 			else
-				*new_type = req_type & _PAGE_CACHE_MASK;
+				*new_type = req_type;
 		}
 		return 0;
 	}
@@ -292,7 +294,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	/* Low ISA region is always mapped WB in page table. No need to track */
 	if (x86_platform.is_untracked_pat_range(start, end)) {
 		if (new_type)
-			*new_type = _PAGE_CACHE_WB;
+			*new_type = _PAGE_CACHE_MODE_WB;
 		return 0;
 	}
@@ -302,7 +304,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	 * tools and ACPI tools). Use WB request for WB memory and use
 	 * UC_MINUS otherwise.
 	 */
-	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
+	actual_type = pat_x_mtrr_type(start, end, req_type);

 	if (new_type)
 		*new_type = actual_type;
@@ -408,7 +410,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
 	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
 		struct page *page;
 		page = pfn_to_page(paddr >> PAGE_SHIFT);
-		rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
+		rettype = get_page_memtype(page);
 		/*
 		 * -1 from get_page_memtype() implies RAM page is in its
 		 * default state and not reserved, and hence of type WB
@@ -423,7 +425,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)

 	entry = rbt_memtype_lookup(paddr);
 	if (entry != NULL)
-		rettype = pgprot2cachemode(__pgprot(entry->type));
+		rettype = entry->type;
 	else
 		rettype = _PAGE_CACHE_MODE_UC_MINUS;

@@ -447,18 +449,14 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
 	resource_size_t size = end - start;
 	enum page_cache_mode req_type = *type;
 	enum page_cache_mode new_type;
-	unsigned long new_prot;
 	int ret;

 	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

-	ret = reserve_memtype(start, end, cachemode2protval(req_type),
-				&new_prot);
+	ret = reserve_memtype(start, end, req_type, &new_type);
 	if (ret)
 		goto out_err;

-	new_type = pgprot2cachemode(__pgprot(new_prot));
-
 	if (!is_new_memtype_allowed(start, size, req_type, new_type))
 		goto out_free;
@@ -524,13 +522,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
-	unsigned long flags = _PAGE_CACHE_WB;
+	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

 	if (!range_is_allowed(pfn, size))
 		return 0;

 	if (file->f_flags & O_DSYNC)
-		flags = _PAGE_CACHE_UC_MINUS;
+		pcm = _PAGE_CACHE_MODE_UC_MINUS;

 #ifdef CONFIG_X86_32
 	/*
@@ -547,12 +545,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
 	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
 	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-		flags = _PAGE_CACHE_UC;
+		pcm = _PAGE_CACHE_MODE_UC;
 	}
 #endif

 	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
-			     flags);
+			     cachemode2protval(pcm));
 	return 1;
 }
@@ -583,7 +581,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
 		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
 			"for [mem %#010Lx-%#010Lx]\n",
 			current->comm, current->pid,
-			cattr_name(cachemode2protval(pcm)),
+			cattr_name(pcm),
 			base, (unsigned long long)(base + size-1));
 		return -EINVAL;
 	}
@@ -600,8 +598,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 {
 	int is_ram = 0;
 	int ret;
-	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
-	unsigned long flags = want_flags;
+	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
+	enum page_cache_mode pcm = want_pcm;

 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
@@ -614,38 +612,36 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		if (!pat_enabled)
 			return 0;

-		flags = cachemode2protval(lookup_memtype(paddr));
-		if (want_flags != flags) {
+		pcm = lookup_memtype(paddr);
+		if (want_pcm != pcm) {
 			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
-				cattr_name(want_flags),
+				cattr_name(want_pcm),
 				(unsigned long long)paddr,
 				(unsigned long long)(paddr + size - 1),
-				cattr_name(flags));
+				cattr_name(pcm));
 			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
 					     (~_PAGE_CACHE_MASK)) |
-					     flags);
+					     cachemode2protval(pcm));
 		}
 		return 0;
 	}

-	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
+	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
 	if (ret)
 		return ret;

-	if (flags != want_flags) {
+	if (pcm != want_pcm) {
 		if (strict_prot ||
-		    !is_new_memtype_allowed(paddr, size,
-				pgprot2cachemode(__pgprot(want_flags)),
-				pgprot2cachemode(__pgprot(flags)))) {
+		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
 			free_memtype(paddr, paddr + size);
 			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
 				" for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
-				cattr_name(want_flags),
+				cattr_name(want_pcm),
 				(unsigned long long)paddr,
 				(unsigned long long)(paddr + size - 1),
-				cattr_name(flags));
+				cattr_name(pcm));
 			return -EINVAL;
 		}
 		/*
@@ -654,11 +650,10 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		 */
 		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
 				      (~_PAGE_CACHE_MASK)) |
-				     flags);
+				     cachemode2protval(pcm));
 	}

-	if (kernel_map_sync_memtype(paddr, size,
-				pgprot2cachemode(__pgprot(flags))) < 0) {
+	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
 		free_memtype(paddr, paddr + size);
 		return -EINVAL;
 	}
@@ -799,7 +794,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
 	if (pat_enabled)
-		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
+		return __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_WC));
 	else
 		return pgprot_noncached(prot);
 }
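
Of the pat.c changes above, pgprot_writecombine() is the one most visible
outside this file: driver mmap handlers pass their VMA protections through
it, and it now builds the WC bits via cachemode2protval() instead of OR-ing
the raw _PAGE_CACHE_WC value. A typical call site, unchanged by this patch
(hypothetical mmap handler fragment; pfn is assumed to be the physical page
to map):

    vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    return remap_pfn_range(vma, vma->vm_start, pfn,
                           vma->vm_end - vma->vm_start, vma->vm_page_prot);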

--- a/arch/x86/mm/pat_internal.h
+++ b/arch/x86/mm/pat_internal.h

@@ -10,30 +10,32 @@ struct memtype {
 	u64			start;
 	u64			end;
 	u64			subtree_max_end;
-	unsigned long		type;
+	enum page_cache_mode	type;
 	struct rb_node		rb;
 };

-static inline char *cattr_name(unsigned long flags)
+static inline char *cattr_name(enum page_cache_mode pcm)
 {
-	switch (flags & _PAGE_CACHE_MASK) {
-	case _PAGE_CACHE_UC:		return "uncached";
-	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
-	case _PAGE_CACHE_WB:		return "write-back";
-	case _PAGE_CACHE_WC:		return "write-combining";
-	default:			return "broken";
+	switch (pcm) {
+	case _PAGE_CACHE_MODE_UC:		return "uncached";
+	case _PAGE_CACHE_MODE_UC_MINUS:		return "uncached-minus";
+	case _PAGE_CACHE_MODE_WB:		return "write-back";
+	case _PAGE_CACHE_MODE_WC:		return "write-combining";
+	case _PAGE_CACHE_MODE_WT:		return "write-through";
+	case _PAGE_CACHE_MODE_WP:		return "write-protected";
+	default:				return "broken";
 	}
 }

 #ifdef CONFIG_X86_PAT
 extern int rbt_memtype_check_insert(struct memtype *new,
-					unsigned long *new_type);
+					enum page_cache_mode *new_type);
 extern struct memtype *rbt_memtype_erase(u64 start, u64 end);
 extern struct memtype *rbt_memtype_lookup(u64 addr);
 extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);
 #else
 static inline int rbt_memtype_check_insert(struct memtype *new,
-					unsigned long *new_type)
+					enum page_cache_mode *new_type)
 { return 0; }
 static inline struct memtype *rbt_memtype_erase(u64 start, u64 end)
 { return NULL; }
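
A side effect of switching cattr_name() to the enum is visible in this
hunk: the old _PAGE_CACHE_MASK encoding has only two PTE bits and thus four
expressible attributes, while enum page_cache_mode also carries WT and WP,
so those can now be printed. A sketch of a debug call site over the struct
memtype above (hypothetical, not part of the patch):

    pr_debug("PAT: [mem %#010Lx-%#010Lx] tracked as %s\n",
             entry->start, entry->end - 1, cattr_name(entry->type));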

--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c

@@ -122,11 +122,12 @@ static struct memtype *memtype_rb_exact_match(struct rb_root *root,

 static int memtype_rb_check_conflict(struct rb_root *root,
 				u64 start, u64 end,
-				unsigned long reqtype, unsigned long *newtype)
+				enum page_cache_mode reqtype,
+				enum page_cache_mode *newtype)
 {
 	struct rb_node *node;
 	struct memtype *match;
-	int found_type = reqtype;
+	enum page_cache_mode found_type = reqtype;

 	match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
 	if (match == NULL)
@@ -187,7 +188,8 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
 	rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb);
 }

-int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
+int rbt_memtype_check_insert(struct memtype *new,
+			     enum page_cache_mode *ret_type)
 {
 	int err = 0;