xen: MFN/GFN/BFN terminology changes for 4.3-rc0
- Use the correct GFN/BFN terms more consistently.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJV8VRMAAoJEFxbo/MsZsTRiGQH/i/jrAJUJfrFC2PINaA2gDwe
O0dlrkCiSgAYChGmxxxXZQSPM5Po5+EbT/dLjZ/uvSooeorM9RYY/mFo7ut/qLep
4pyQUuwGtebWGBZTrj9sygUVXVhgJnyoZxskNUbhj9zvP7hb9++IiI78mzne6cpj
lCh/7Z2dgpfRcKlNRu+qpzP79Uc7OqIfDK+IZLrQKlXa7IQDJTQYoRjbKpfCtmMV
BEG3kN9ESx5tLzYiAfxvaxVXl9WQFEoktqe9V8IgOQlVRLgJ2DQWS6vmraGrokWM
3HDOCHtRCXlPhu1Vnrp0R9OgqWbz8FJnmVAndXT8r3Nsjjmd0aLwhJx7YAReO/4=
=JDia
-----END PGP SIGNATURE-----

Merge tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen terminology fixes from David Vrabel:
 "Use the correct GFN/BFN terms more consistently"

* tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/xenbus: Rename the variable xen_store_mfn to xen_store_gfn
  xen/privcmd: Further s/MFN/GFN/ clean-up
  hvc/xen: Further s/MFN/GFN clean-up
  video/xen-fbfront: Further s/MFN/GFN clean-up
  xen/tmem: Use xen_page_to_gfn rather than pfn_to_gfn
  xen: Use correctly the Xen memory terminologies
  arm/xen: implement correctly pfn_to_mfn
  xen: Make clear that swiotlb and biomerge are dealing with DMA address
commit 06ab838c20
29 changed files with 198 additions and 158 deletions
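The series pivots on three frame-number spaces: an MFN is a machine (host) frame, a GFN is the frame a guest hands to hypercall interfaces, and a BFN is the frame a device addresses over the bus for DMA. Below is a minimal sketch of how the renamed helpers relate, assuming the definitions added by "xen: Use correctly the Xen memory terminologies"; the real bodies live in include/xen/page.h and the per-architecture headers and vary by architecture.

	#include <linux/mm.h>		/* page_to_pfn() */
	#include <xen/features.h>	/* xen_feature() */
	#include <asm/xen/page.h>	/* pfn_to_mfn(), virt_to_pfn() */

	/* Sketch of pfn_to_gfn(): on auto-translated guests (all ARM
	 * guests, x86 HVM/PVH) the gfn is the pfn itself; on x86 PV it
	 * is the machine frame. */
	static inline unsigned long pfn_to_gfn(unsigned long pfn)
	{
		if (xen_feature(XENFEAT_auto_translated_physmap))
			return pfn;
		return pfn_to_mfn(pfn);
	}

	/* gfn of a struct page, as used by the tmem and gntalloc hunks. */
	static inline unsigned long xen_page_to_gfn(struct page *page)
	{
		return pfn_to_gfn(page_to_pfn(page));
	}

	/* virt_to_gfn() collapses the open-coded
	 * pfn_to_mfn(virt_to_phys(v) >> PAGE_SHIFT) pattern seen in xenbus. */
	#define virt_to_gfn(v)	(pfn_to_gfn(virt_to_pfn(v)))

	/* pfn_to_bfn() names the frame a device sees on the bus; the
	 * generic fallback is the gfn, and architectures whose DMA view
	 * can differ (e.g. ARM with foreign grant mappings) override it. */
	#ifndef pfn_to_bfn
	#define pfn_to_bfn(pfn)	pfn_to_gfn(pfn)
	#endif

On x86 PV the gfn and mfn coincide, so most of the renames below change no behaviour there; the point of the series is that auto-translated guests must not hand Xen, or a device, a raw machine frame.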
@@ -441,7 +441,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	/* Update direct mapping, invalidate P2M, and add to balloon. */
 	for (i = 0; i < nr_pages; i++) {
 		pfn = frame_list[i];
-		frame_list[i] = pfn_to_mfn(pfn);
+		frame_list[i] = pfn_to_gfn(pfn);
 		page = pfn_to_page(pfn);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
@@ -6,10 +6,10 @@
 bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 			       const struct bio_vec *vec2)
 {
-	unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
-	unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));
+	unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
+	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
 
 	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
-		((mfn1 == mfn2) || ((mfn1+1) == mfn2));
+		((bfn1 == bfn2) || ((bfn1+1) == bfn2));
 }
 EXPORT_SYMBOL(xen_biovec_phys_mergeable);
@@ -1688,7 +1688,7 @@ void __init xen_init_IRQ(void)
 		struct physdev_pirq_eoi_gmfn eoi_gmfn;
 
 		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-		eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
+		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
 		/* TODO: No PVH support for PIRQ EOI */
 		if (rc != 0) {
@@ -111,7 +111,7 @@ static int init_control_block(int cpu,
 	for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
 		q->head[i] = 0;
 
-	init_control.control_gfn = virt_to_mfn(control_block);
+	init_control.control_gfn = virt_to_gfn(control_block);
 	init_control.offset = 0;
 	init_control.vcpu = cpu;
 
@@ -167,7 +167,7 @@ static int evtchn_fifo_setup(struct irq_info *info)
 	/* Mask all events in this page before adding it. */
 	init_array_page(array_page);
 
-	expand_array.array_gfn = virt_to_mfn(array_page);
+	expand_array.array_gfn = virt_to_gfn(array_page);
 
 	ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
 	if (ret < 0)
@@ -142,7 +142,8 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
 
 		/* Grant foreign access to the page. */
 		rc = gnttab_grant_foreign_access(op->domid,
-				pfn_to_mfn(page_to_pfn(gref->page)), readonly);
+						 xen_page_to_gfn(gref->page),
+						 readonly);
 		if (rc < 0)
 			goto undo;
 		gref_ids[i] = gref->gref_id = rc;
@@ -80,7 +80,7 @@ static int xen_suspend(void *data)
 	 * is resuming in a new domain.
 	 */
 	si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
-                                           ? virt_to_mfn(xen_start_info)
+                                           ? virt_to_gfn(xen_start_info)
                                            : 0);
 
 	xen_arch_post_suspend(si->cancelled);
@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size,
 	return ret;
 }
 
-struct mmap_mfn_state {
+struct mmap_gfn_state {
 	unsigned long va;
 	struct vm_area_struct *vma;
 	domid_t domain;
 };
 
-static int mmap_mfn_range(void *data, void *state)
+static int mmap_gfn_range(void *data, void *state)
 {
 	struct privcmd_mmap_entry *msg = data;
-	struct mmap_mfn_state *st = state;
+	struct mmap_gfn_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	int rc;
 
@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state)
 	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
 		return -EINVAL;
 
-	rc = xen_remap_domain_mfn_range(vma,
+	rc = xen_remap_domain_gfn_range(vma,
 					msg->va & PAGE_MASK,
 					msg->mfn, msg->npages,
 					vma->vm_page_prot,
@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 	struct vm_area_struct *vma;
 	int rc;
 	LIST_HEAD(pagelist);
-	struct mmap_mfn_state state;
+	struct mmap_gfn_state state;
 
 	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
 	if (xen_feature(XENFEAT_auto_translated_physmap))
@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 
 	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
 			    &pagelist,
-			    mmap_mfn_range, &state);
+			    mmap_gfn_range, &state);
 
 
 out_up:
@@ -299,18 +299,18 @@ struct mmap_batch_state {
 	int global_error;
 	int version;
 
-	/* User-space mfn array to store errors in the second pass for V1. */
-	xen_pfn_t __user *user_mfn;
+	/* User-space gfn array to store errors in the second pass for V1. */
+	xen_pfn_t __user *user_gfn;
 	/* User-space int array to store errors in the second pass for V2. */
 	int __user *user_err;
 };
 
-/* auto translated dom0 note: if domU being created is PV, then mfn is
- * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+/* auto translated dom0 note: if domU being created is PV, then gfn is
+ * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
  */
 static int mmap_batch_fn(void *data, int nr, void *state)
 {
-	xen_pfn_t *mfnp = data;
+	xen_pfn_t *gfnp = data;
 	struct mmap_batch_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	struct page **pages = vma->vm_private_data;
@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
 		cur_pages = &pages[st->index];
 
 	BUG_ON(nr < 0);
-	ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
-					 (int *)mfnp, st->vma->vm_page_prot,
+	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
+					 (int *)gfnp, st->vma->vm_page_prot,
 					 st->domain, cur_pages);
 
 	/* Adjust the global_error? */
@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st)
 
 	if (st->version == 1) {
 		if (err) {
-			xen_pfn_t mfn;
+			xen_pfn_t gfn;
 
-			ret = get_user(mfn, st->user_mfn);
+			ret = get_user(gfn, st->user_gfn);
 			if (ret < 0)
 				return ret;
 			/*
 			 * V1 encodes the error codes in the 32bit top
-			 * nibble of the mfn (with its known
+			 * nibble of the gfn (with its known
 			 * limitations vis-a-vis 64 bit callers).
 			 */
-			mfn |= (err == -ENOENT) ?
+			gfn |= (err == -ENOENT) ?
 				PRIVCMD_MMAPBATCH_PAGED_ERROR :
 				PRIVCMD_MMAPBATCH_MFN_ERROR;
-			return __put_user(mfn, st->user_mfn++);
+			return __put_user(gfn, st->user_gfn++);
 		} else
-			st->user_mfn++;
+			st->user_gfn++;
 	} else { /* st->version == 2 */
 		if (err)
 			return __put_user(err, st->user_err++);
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
|
||||
/* Allocate pfns that are then mapped with gfns from foreign domid. Update
|
||||
* the vma with the page info to use later.
|
||||
* Returns: 0 if success, otherwise -errno
|
||||
*/
|
||||
|
@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
 	if (state.global_error) {
 		/* Write back errors in second pass. */
-		state.user_mfn = (xen_pfn_t *)m.arr;
+		state.user_gfn = (xen_pfn_t *)m.arr;
 		state.user_err = m.err;
 		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
 					   &pagelist, mmap_return_errors, &state);
@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma)
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+	rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
 	if (rc == 0)
 		free_xenballooned_pages(numpgs, pages);
 	else
@@ -82,8 +82,8 @@ static u64 start_dma_addr;
 */
 static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
-	unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
-	dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;
+	unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr));
+	dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT;
 
 	dma |= paddr & ~PAGE_MASK;
 
@@ -92,7 +92,7 @@ static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 
 static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
-	unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
+	unsigned long pfn = bfn_to_pfn(PFN_DOWN(baddr));
 	dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
 	phys_addr_t paddr = dma;
 
@@ -110,15 +110,15 @@ static int check_pages_physically_contiguous(unsigned long pfn,
 					     unsigned int offset,
 					     size_t length)
 {
-	unsigned long next_mfn;
+	unsigned long next_bfn;
 	int i;
 	int nr_pages;
 
-	next_mfn = pfn_to_mfn(pfn);
+	next_bfn = pfn_to_bfn(pfn);
 	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
 
 	for (i = 1; i < nr_pages; i++) {
-		if (pfn_to_mfn(++pfn) != ++next_mfn)
+		if (pfn_to_bfn(++pfn) != ++next_bfn)
 			return 0;
 	}
 	return 1;
@@ -138,8 +138,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 
 static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
 {
-	unsigned long mfn = PFN_DOWN(dma_addr);
-	unsigned long pfn = mfn_to_local_pfn(mfn);
+	unsigned long bfn = PFN_DOWN(dma_addr);
+	unsigned long pfn = bfn_to_local_pfn(bfn);
 	phys_addr_t paddr;
 
 	/* If the address is outside our domain, it CAN
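The swiotlb hunks above rename mfn to bfn because swiotlb deals in bus (DMA) addresses, as the merge summary says. The practical consequence, sketched below with a hypothetical helper (pfn_to_bfn() is the helper this series introduces; the function name here is invented for illustration): a buffer that is contiguous in guest-physical space is only safe to hand to a device without bouncing if its frames are also consecutive in bus-address space, which is exactly what check_pages_physically_contiguous() verifies per page.

	#include <linux/pfn.h>		/* PFN_DOWN() */
	#include <linux/types.h>
	#include <xen/page.h>		/* pfn_to_bfn() from this series */

	/* Illustration only: guest-physical contiguity does not imply
	 * bus-address contiguity under Xen. */
	static bool buffer_dma_contiguous(phys_addr_t paddr, size_t len)
	{
		unsigned long pfn = PFN_DOWN(paddr);
		unsigned long last_pfn = PFN_DOWN(paddr + len - 1);
		unsigned long bfn = pfn_to_bfn(pfn);

		while (pfn < last_pfn) {
			if (pfn_to_bfn(++pfn) != ++bfn)
				return false;	/* discontiguous on the bus: bounce */
		}
		return true;
	}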
@@ -129,21 +129,17 @@ static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
 /* xen generic tmem ops */
 
 static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
-			     u32 index, unsigned long pfn)
+			     u32 index, struct page *page)
 {
-	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
 	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
-			   gmfn, 0, 0, 0);
+			   xen_page_to_gfn(page), 0, 0, 0);
 }
 
 static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
-			     u32 index, unsigned long pfn)
+			     u32 index, struct page *page)
 {
-	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
 	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
-			   gmfn, 0, 0, 0);
+			   xen_page_to_gfn(page), 0, 0, 0);
 }
 
 static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
@@ -173,14 +169,13 @@ static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
 {
 	u32 ind = (u32) index;
 	struct tmem_oid oid = *(struct tmem_oid *)&key;
-	unsigned long pfn = page_to_pfn(page);
 
 	if (pool < 0)
 		return;
 	if (ind != index)
 		return;
 	mb(); /* ensure page is quiescent; tmem may address it with an alias */
-	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
+	(void)xen_tmem_put_page((u32)pool, oid, ind, page);
 }
 
 static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
@@ -188,7 +183,6 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
 {
 	u32 ind = (u32) index;
 	struct tmem_oid oid = *(struct tmem_oid *)&key;
-	unsigned long pfn = page_to_pfn(page);
 	int ret;
 
 	/* translate return values to linux semantics */
@@ -196,7 +190,7 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
 		return -1;
 	if (ind != index)
 		return -1;
-	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
+	ret = xen_tmem_get_page((u32)pool, oid, ind, page);
 	if (ret == 1)
 		return 0;
 	else
@@ -287,7 +281,6 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
 {
 	u64 ind64 = (u64)offset;
 	u32 ind = (u32)offset;
-	unsigned long pfn = page_to_pfn(page);
 	int pool = tmem_frontswap_poolid;
 	int ret;
 
@@ -296,7 +289,7 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
 	if (ind64 != ind)
 		return -1;
 	mb(); /* ensure page is quiescent; tmem may address it with an alias */
-	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
 	/* translate Xen tmem return values to linux semantics */
 	if (ret == 1)
 		return 0;
@@ -313,7 +306,6 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
 {
 	u64 ind64 = (u64)offset;
 	u32 ind = (u32)offset;
-	unsigned long pfn = page_to_pfn(page);
 	int pool = tmem_frontswap_poolid;
 	int ret;
 
@@ -321,7 +313,7 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
 		return -1;
 	if (ind64 != ind)
 		return -1;
-	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
 	/* translate Xen tmem return values to linux semantics */
 	if (ret == 1)
 		return 0;
@@ -380,7 +380,7 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
 
 	for (i = 0; i < nr_pages; i++) {
 		err = gnttab_grant_foreign_access(dev->otherend_id,
-						  virt_to_mfn(vaddr), 0);
+						  virt_to_gfn(vaddr), 0);
 		if (err < 0) {
 			xenbus_dev_fatal(dev, err,
 					 "granting access to ring page");
@@ -49,7 +49,7 @@ static long xenbus_alloc(domid_t domid)
 		goto out_err;
 
 	gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
-			virt_to_mfn(xen_store_interface), 0 /* writable */);
+			virt_to_gfn(xen_store_interface), 0 /* writable */);
 
 	arg.dom = DOMID_SELF;
 	arg.remote_dom = domid;
@@ -75,7 +75,7 @@ EXPORT_SYMBOL_GPL(xen_store_interface);
 enum xenstore_init xen_store_domain_type;
 EXPORT_SYMBOL_GPL(xen_store_domain_type);
 
-static unsigned long xen_store_mfn;
+static unsigned long xen_store_gfn;
 
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
 
@@ -711,9 +711,7 @@ static int __init xenstored_local_init(void)
 	if (!page)
 		goto out_err;
 
-	xen_store_mfn = xen_start_info->store_mfn =
-		pfn_to_mfn(virt_to_phys((void *)page) >>
-			   PAGE_SHIFT);
+	xen_store_gfn = xen_start_info->store_mfn = virt_to_gfn((void *)page);
 
 	/* Next allocate a local port which xenstored can bind to */
 	alloc_unbound.dom = DOMID_SELF;
@@ -787,12 +785,12 @@ static int __init xenbus_init(void)
 		err = xenstored_local_init();
 		if (err)
 			goto out_error;
-		xen_store_interface = mfn_to_virt(xen_store_mfn);
+		xen_store_interface = gfn_to_virt(xen_store_gfn);
 		break;
 	case XS_PV:
 		xen_store_evtchn = xen_start_info->store_evtchn;
-		xen_store_mfn = xen_start_info->store_mfn;
-		xen_store_interface = mfn_to_virt(xen_store_mfn);
+		xen_store_gfn = xen_start_info->store_mfn;
+		xen_store_interface = gfn_to_virt(xen_store_gfn);
 		break;
 	case XS_HVM:
 		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
|
|||
err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
|
||||
if (err)
|
||||
goto out_error;
|
||||
xen_store_mfn = (unsigned long)v;
|
||||
xen_store_gfn = (unsigned long)v;
|
||||
xen_store_interface =
|
||||
xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
|
||||
xen_remap(xen_store_gfn << PAGE_SHIFT, PAGE_SIZE);
|
||||
break;
|
||||
default:
|
||||
pr_warn("Xenstore state unknown\n");
|
||||
|
|
|
@@ -38,8 +38,8 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
 
-/* map fgmfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
+/* map fgfn of domid to lpfn in the current domain */
+static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
 			    unsigned int domid)
 {
 	int rc;
@@ -49,7 +49,7 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
 		.size = 1,
 		.space = XENMAPSPACE_gmfn_foreign,
 	};
-	xen_ulong_t idx = fgmfn;
+	xen_ulong_t idx = fgfn;
 	xen_pfn_t gpfn = lpfn;
 	int err = 0;
 
@@ -62,13 +62,13 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
 }
 
 struct remap_data {
-	xen_pfn_t *fgmfn; /* foreign domain's gmfn */
+	xen_pfn_t *fgfn; /* foreign domain's gfn */
 	pgprot_t prot;
 	domid_t domid;
 	struct vm_area_struct *vma;
 	int index;
 	struct page **pages;
-	struct xen_remap_mfn_info *info;
+	struct xen_remap_gfn_info *info;
 	int *err_ptr;
 	int mapped;
 };
@@ -82,20 +82,20 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 	int rc;
 
-	rc = map_foreign_page(pfn, *info->fgmfn, info->domid);
+	rc = map_foreign_page(pfn, *info->fgfn, info->domid);
 	*info->err_ptr++ = rc;
 	if (!rc) {
 		set_pte_at(info->vma->vm_mm, addr, ptep, pte);
 		info->mapped++;
 	}
-	info->fgmfn++;
+	info->fgfn++;
 
 	return 0;
 }
 
 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
 			      unsigned long addr,
-			      xen_pfn_t *mfn, int nr,
+			      xen_pfn_t *gfn, int nr,
 			      int *err_ptr, pgprot_t prot,
 			      unsigned domid,
 			      struct page **pages)
|
|||
x86 PVOPS */
|
||||
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
|
||||
|
||||
data.fgmfn = mfn;
|
||||
data.fgfn = gfn;
|
||||
data.prot = prot;
|
||||
data.domid = domid;
|
||||
data.vma = vma;
|
||||
|