commit f8d613e2a6
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djm/tmem

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djm/tmem:
  xen: cleancache shim to Xen Transcendent Memory
  ocfs2: add cleancache support
  ext4: add cleancache support
  btrfs: add cleancache support
  ext3: add cleancache support
  mm/fs: add hooks to support cleancache
  mm: cleancache core ops functions and config
  fs: add field to superblock to support cleancache
  mm/fs: cleancache documentation

Fix up trivial conflict in fs/btrfs/extent_io.c due to includes

21 changed files with 1027 additions and 0 deletions

Documentation/ABI/testing/sysfs-kernel-mm-cleancache (new file, 11 lines)
@@ -0,0 +1,11 @@
What:		/sys/kernel/mm/cleancache/
Date:		April 2011
Contact:	Dan Magenheimer <dan.magenheimer@oracle.com>
Description:
		/sys/kernel/mm/cleancache/ contains a number of files which
		record a count of various cleancache operations
		(sum across all filesystems):
			succ_gets
			failed_gets
			puts
			flushes

Documentation/vm/cleancache.txt (new file, 278 lines)
@@ -0,0 +1,278 @@
MOTIVATION

Cleancache is a new optional feature provided by the VFS layer that
potentially dramatically increases page cache effectiveness for
many workloads in many environments at a negligible cost.

Cleancache can be thought of as a page-granularity victim cache for clean
pages that the kernel's pageframe replacement algorithm (PFRA) would like
to keep around, but can't since there isn't enough memory.  So when the
PFRA "evicts" a page, it first attempts to use cleancache code to
put the data contained in that page into "transcendent memory", memory
that is not directly accessible or addressable by the kernel and is
of unknown and possibly time-varying size.

Later, when a cleancache-enabled filesystem wishes to access a page
in a file on disk, it first checks cleancache to see if it already
contains it; if it does, the page of data is copied into the kernel
and a disk access is avoided.

Transcendent memory "drivers" for cleancache are currently implemented
in Xen (using hypervisor memory) and zcache (using in-kernel compressed
memory) and other implementations are in development.

FAQs are included below.

IMPLEMENTATION OVERVIEW

A cleancache "backend" that provides transcendent memory registers itself
to the kernel's cleancache "frontend" by calling cleancache_register_ops,
passing a pointer to a cleancache_ops structure with funcs set appropriately.
Note that cleancache_register_ops returns the previous settings so that
chaining can be performed if desired.  The functions provided must conform
to certain semantics as follows:

Most important, cleancache is "ephemeral".  Pages which are copied into
cleancache have an indefinite lifetime which is completely unknowable
by the kernel and so may or may not still be in cleancache at any later time.
Thus, as its name implies, cleancache is not suitable for dirty pages.
Cleancache has complete discretion over what pages to preserve and what
pages to discard and when.

A cleancache-enabled filesystem calls "init_fs" at mount time to obtain a
pool id which, if positive, must be saved in the filesystem's superblock;
a negative return value indicates failure.  A "put_page" will copy a
(presumably about-to-be-evicted) page into cleancache and associate it with
the pool id, a file key, and a page index into the file.  (The combination
of a pool id, a file key, and an index is sometimes called a "handle".)
A "get_page" will copy the page, if found, from cleancache into kernel memory.
A "flush_page" will ensure the page no longer is present in cleancache;
a "flush_inode" will flush all pages associated with the specified file;
and, when a filesystem is unmounted, a "flush_fs" will flush all pages in
all files specified by the given pool id and also surrender the pool id.

An "init_shared_fs", like init_fs, obtains a pool id but tells cleancache
to treat the pool as shared using a 128-bit UUID as a key.  On systems
that may run multiple kernels (such as hard partitioned or virtualized
systems) that may share a clustered filesystem, and where cleancache
may be shared among those kernels, calls to init_shared_fs that specify the
same UUID will receive the same pool id, thus allowing the pages to
be shared.  Note that any security requirements must be imposed outside
of the kernel (e.g. by "tools" that control cleancache).  Or a
cleancache implementation can simply disable shared_init by always
returning a negative value.
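
As an illustrative sketch (not part of this commit), a minimal backend
that registers itself but declines every pool could look like the
following; every name except cleancache_register_ops and the ops
structure (both defined in include/linux/cleancache.h later in this
commit) is hypothetical:

	#include <linux/module.h>
	#include <linux/cleancache.h>

	/* Hypothetical no-op backend: registers, but refuses every pool,
	 * so the frontend never sends it any pages. */
	static int noop_init_fs(size_t pagesize)
	{
		return -1;		/* negative return: refuse the pool */
	}
	static int noop_init_shared_fs(char *uuid, size_t pagesize)
	{
		return -1;		/* shared pools refused as well */
	}
	static int noop_get_page(int pool, struct cleancache_filekey key,
				 pgoff_t index, struct page *page)
	{
		return -1;		/* never found */
	}
	static void noop_put_page(int pool, struct cleancache_filekey key,
				  pgoff_t index, struct page *page)
	{
	}
	static void noop_flush_page(int pool, struct cleancache_filekey key,
				    pgoff_t index)
	{
	}
	static void noop_flush_inode(int pool, struct cleancache_filekey key)
	{
	}
	static void noop_flush_fs(int pool)
	{
	}

	static struct cleancache_ops noop_cleancache_ops = {
		.init_fs	= noop_init_fs,
		.init_shared_fs	= noop_init_shared_fs,
		.get_page	= noop_get_page,
		.put_page	= noop_put_page,
		.flush_page	= noop_flush_page,
		.flush_inode	= noop_flush_inode,
		.flush_fs	= noop_flush_fs,
	};

	static int __init noop_backend_init(void)
	{
		/* previous ops are returned, so a backend may chain to them */
		struct cleancache_ops old =
			cleancache_register_ops(&noop_cleancache_ops);

		(void)old;
		return 0;
	}
	module_init(noop_backend_init);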

If a get_page is successful on a non-shared pool, the page is flushed (thus
making cleancache an "exclusive" cache).  On a shared pool, the page
is NOT flushed on a successful get_page so that it remains accessible to
other sharers.  The kernel is responsible for ensuring coherency between
cleancache (shared or not), the page cache, and the filesystem, using
cleancache flush operations as required.

Note that cleancache must enforce put-put-get coherency and get-get
coherency.  For the former, if two puts are made to the same handle but
with different data, say AAA by the first put and BBB by the second, a
subsequent get can never return the stale data (AAA).  For get-get coherency,
if a get for a given handle fails, subsequent gets for that handle will
never succeed unless preceded by a successful put with that handle.

Last, cleancache provides no SMP serialization guarantees; if two
different Linux threads are simultaneously putting and flushing a page
with the same handle, the results are indeterminate.  Callers must
lock the page to ensure serial behavior.
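
For example, the frontend added by this commit asserts PageLocked() before
calling the backend; a caller-side sketch (hypothetical caller, real
page-lock primitives) is:

	/* A minimal sketch: hold the page lock across the cleancache call
	 * so a concurrent put/flush on the same handle cannot interleave. */
	lock_page(page);
	if (PageUptodate(page))
		cleancache_put_page(page);	/* data is copied before return */
	unlock_page(page);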

CLEANCACHE PERFORMANCE METRICS

Cleancache monitoring is done by sysfs files in the
/sys/kernel/mm/cleancache directory.  The effectiveness of cleancache
can be measured (across all filesystems) with:

succ_gets	- number of gets that were successful
failed_gets	- number of gets that failed
puts		- number of puts attempted (all "succeed")
flushes		- number of flushes attempted

A backend implementation may provide additional metrics.
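
A useful derived figure is the hit ratio, succ_gets / (succ_gets +
failed_gets).  A small userspace sketch (file names taken from the sysfs
ABI document above; error handling mostly elided) that computes it:

	#include <stdio.h>

	static unsigned long read_counter(const char *name)
	{
		char path[128];
		unsigned long v = 0;
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/kernel/mm/cleancache/%s", name);
		f = fopen(path, "r");
		if (f) {
			if (fscanf(f, "%lu", &v) != 1)
				v = 0;
			fclose(f);
		}
		return v;
	}

	int main(void)
	{
		unsigned long hits = read_counter("succ_gets");
		unsigned long misses = read_counter("failed_gets");

		if (hits + misses)
			printf("cleancache hit ratio: %.1f%%\n",
			       100.0 * hits / (hits + misses));
		return 0;
	}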

FAQ

1) Where's the value? (Andrew Morton)

Cleancache provides a significant performance benefit to many workloads
in many environments with negligible overhead by improving the
effectiveness of the pagecache.  Clean pagecache pages are
saved in transcendent memory (RAM that is otherwise not directly
addressable to the kernel); fetching those pages later avoids "refaults"
and thus disk reads.

Cleancache (and its sister code "frontswap") provide interfaces for
this transcendent memory (aka "tmem"), which conceptually lies between
fast kernel-directly-addressable RAM and slower DMA/asynchronous devices.
Disallowing direct kernel or userland reads/writes to tmem
is ideal when data is transformed to a different form and size (such
as with compression) or secretly moved (as might be useful for write-
balancing for some RAM-like devices).  Evicted page-cache pages (and
swap pages) are a great use for this kind of slower-than-RAM-but-much-
faster-than-disk transcendent memory, and the cleancache (and frontswap)
"page-object-oriented" specification provides a nice way to read and
write -- and indirectly "name" -- the pages.

In the virtual case, the whole point of virtualization is to statistically
multiplex physical resources across the varying demands of multiple
virtual machines.  This is really hard to do with RAM and efforts to
do it well with no kernel change have essentially failed (except in some
well-publicized special-case workloads).  Cleancache -- and frontswap --
with a fairly small impact on the kernel, provide a huge amount
of flexibility for more dynamic, flexible RAM multiplexing.
Specifically, the Xen Transcendent Memory backend allows otherwise
"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
virtual machines, but the pages can be compressed and deduplicated to
optimize RAM utilization.  And when guest OS's are induced to surrender
underutilized RAM (e.g. with "self-ballooning"), page cache pages
are the first to go, and cleancache allows those pages to be
saved and reclaimed if overall host system memory conditions allow.

And the identical interface used for cleancache can be used in
physical systems as well.  The zcache driver acts as a memory-hungry
device that stores pages of data in a compressed state.  And
the proposed "RAMster" driver shares RAM across multiple physical
systems.

2) Why does cleancache have its sticky fingers so deep inside the
   filesystems and VFS? (Andrew Morton and Christoph Hellwig)

The core hooks for cleancache in VFS are in most cases a single line
and the minimum set are placed precisely where needed to maintain
coherency (via cleancache_flush operations) between cleancache,
the page cache, and disk.  All hooks compile into nothingness if
cleancache is config'ed off and turn into a function-pointer-
compare-to-NULL if config'ed on but no backend claims the ops
functions, or to a compare-struct-element-to-negative if a
backend claims the ops functions but a filesystem doesn't enable
cleancache.
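
A condensed sketch of that hook pattern (the full version appears in
include/linux/cleancache.h later in this commit):

	static inline int cleancache_get_page(struct page *page)
	{
		int ret = -1;

		/* compiles away entirely when CONFIG_CLEANCACHE is off;
		 * otherwise one global check plus, on the hot path, one
		 * superblock pool-id comparison */
		if (cleancache_enabled && cleancache_fs_enabled(page))
			ret = __cleancache_get_page(page);
		return ret;
	}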

Some filesystems are built entirely on top of VFS and the hooks
in VFS are sufficient, so don't require an "init_fs" hook; the
initial implementation of cleancache didn't provide this hook.
But for some filesystems (such as btrfs), the VFS hooks are
incomplete and one or more hooks in fs-specific code are required.
And for some other filesystems, such as tmpfs, cleancache may
be counterproductive.  So it seemed prudent to require a filesystem
to "opt in" to use cleancache, which requires adding a hook in
each filesystem.  Not all filesystems are supported by cleancache
only because they haven't been tested.  The existing set should
be sufficient to validate the concept, the opt-in approach means
that untested filesystems are not affected, and the hooks in the
existing filesystems should make it very easy to add more
filesystems in the future.

The total impact of the hooks to existing fs and mm files is only
about 40 lines added (not counting comments and blank lines).

3) Why not make cleancache asynchronous and batched so it can
   more easily interface with real devices with DMA instead
   of copying each individual page? (Minchan Kim)

The one-page-at-a-time copy semantics simplifies the implementation
on both the frontend and backend and also allows the backend to
do fancy things on-the-fly like page compression and
page deduplication.  And since the data is "gone" (copied into/out
of the pageframe) before the cleancache get/put call returns,
a great deal of race conditions and potential coherency issues
are avoided.  While the interface seems odd for a "real device"
or for real kernel-addressable RAM, it makes perfect sense for
transcendent memory.

4) Why is non-shared cleancache "exclusive"?  And where is the
   page "flushed" after a "get"? (Minchan Kim)

The main reason is to free up space in transcendent memory and
to avoid unnecessary cleancache_flush calls.  If you want inclusive,
the page can be "put" immediately following the "get".  If
put-after-get for inclusive becomes common, the interface could
be easily extended to add a "get_no_flush" call.

The flush is done by the cleancache backend implementation.
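
A caller-side sketch of that put-after-get idiom (hypothetical caller;
the hook functions are the ones this commit adds, and the page must be
locked as described above):

	/* Emulate inclusive behavior on a non-shared pool: a successful
	 * get removed the copy from cleancache, so put it straight back. */
	if (cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		cleancache_put_page(page);	/* restore the cached copy */
	}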

5) What's the performance impact?

Performance analysis has been presented at OLS'09 and LCA'10.
Briefly, performance gains can be significant on most workloads,
especially when memory pressure is high (e.g. when RAM is
overcommitted in a virtual workload); and because the hooks are
invoked primarily in place of or in addition to a disk read/write,
overhead is negligible even in worst case workloads.  Basically
cleancache replaces I/O with memory-copy-CPU-overhead; on older
single-core systems with slow memory-copy speeds, cleancache
has little value, but in newer multicore machines, especially
consolidated/virtualized machines, it has great value.

6) How do I add cleancache support for filesystem X? (Boaz Harrash)

Filesystems that are well-behaved and conform to certain
restrictions can utilize cleancache simply by making a call to
cleancache_init_fs at mount time.  Unusual, misbehaving, or
poorly layered filesystems must either add additional hooks
and/or undergo extensive additional testing... or should just
not enable the optional cleancache.

Some points for a filesystem to consider (a mount-time sketch
follows this list):

- The FS should be block-device-based (e.g. a ram-based FS such
  as tmpfs should not enable cleancache)
- To ensure coherency/correctness, the FS must ensure that all
  file removal or truncation operations either go through VFS or
  add hooks to do the equivalent cleancache "flush" operations
- To ensure coherency/correctness, either inode numbers must
  be unique across the lifetime of the on-disk file OR the
  FS must provide an "encode_fh" function.
- The FS must call the VFS superblock alloc and deactivate routines
  or add hooks to do the equivalent cleancache calls done there.
- To maximize performance, all pages fetched from the FS should
  go through the do_mpage_readpage routine or the FS should add
  hooks to do the equivalent (cf. btrfs)
- Currently, the FS blocksize must be the same as PAGESIZE.  This
  is not an architectural restriction, but no backends currently
  support anything different.
- A clustered FS should invoke the "shared_init_fs" cleancache
  hook to get best performance for some backends.
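
For a typical single-machine filesystem the opt-in is one line in the
mount path, as the ext3/ext4/btrfs hunks below show; a hypothetical
fill_super might do:

	static int example_fill_super(struct super_block *sb, void *data,
				      int silent)
	{
		/* ... normal superblock setup ... */
		cleancache_init_fs(sb);	/* opt in; the pool id is saved
					   in sb->cleancache_poolid */
		return 0;
	}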

7) Why not use the KVA of the inode as the key? (Christoph Hellwig)

If cleancache would use the inode virtual address instead of
inode/filehandle, the pool id could be eliminated.  But, this
won't work because cleancache retains pagecache data pages
persistently even when the inode has been pruned from the
inode unused list, and only flushes the data page if the file
gets removed/truncated.  So if cleancache used the inode kva,
there would be potential coherency issues if/when the inode
kva is reused for a different file.  Alternately, if cleancache
flushed the pages when the inode kva was freed, much of the value
of cleancache would be lost because the cache of pages in cleancache
is potentially much larger than the kernel pagecache and is most
useful if the pages survive inode cache removal.

8) Why is a global variable required?

The cleancache_enabled flag is checked in all of the frequently-used
cleancache hooks.  The alternative is a function call to check a static
variable.  Since cleancache is enabled dynamically at runtime, systems
that don't enable cleancache would suffer thousands (possibly
tens-of-thousands) of unnecessary function calls per second.  So the
global variable allows cleancache to be enabled by default at compile
time, but have insignificant performance impact when cleancache remains
disabled at runtime.

9) Does cleancache work with KVM?

The memory model of KVM is sufficiently different that a cleancache
backend may have less value for KVM.  This remains to be tested,
especially in an overcommitted system.

10) Does cleancache work in userspace?  It sounds useful for
    memory hungry caches like web browsers. (Jamie Lokier)

No plans yet, though we agree it sounds useful, at least for
apps that bypass the page cache (e.g. O_DIRECT).

Last updated: Dan Magenheimer, April 13 2011

arch/x86/include/asm/xen/hypercall.h
@@ -447,6 +447,13 @@ HYPERVISOR_hvm_op(int op, void *arg)
 	return _hypercall2(unsigned long, hvm_op, op, arg);
 }
 
+static inline int
+HYPERVISOR_tmem_op(
+	struct tmem_op *op)
+{
+	return _hypercall1(int, tmem_op, op);
+}
+
 static inline void
 MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
 {

drivers/xen/Makefile
@@ -1,5 +1,6 @@
 obj-y	+= grant-table.o features.o events.o manage.o balloon.o
 obj-y	+= xenbus/
+obj-y	+= tmem.o
 
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_features.o			:= $(nostackp)

drivers/xen/tmem.c (new file, 264 lines)
@@ -0,0 +1,264 @@
/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2010 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>

#define TMEM_CONTROL               0
#define TMEM_NEW_POOL              1
#define TMEM_DESTROY_POOL          2
#define TMEM_NEW_PAGE              3
#define TMEM_PUT_PAGE              4
#define TMEM_GET_PAGE              5
#define TMEM_FLUSH_PAGE            6
#define TMEM_FLUSH_OBJECT          7
#define TMEM_READ                  8
#define TMEM_WRITE                 9
#define TMEM_XCHG                 10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24


struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* flags for tmem_ops.new_pool */
#define TMEM_POOL_PERSIST		1
#define TMEM_POOL_SHARED		2

/* xen tmem foundation ops/hypercalls */

static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
				u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

/* xen generic tmem ops */

static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
		0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

int tmem_enabled;

static int __init enable_tmem(char *s)
{
	tmem_enabled = 1;
	return 1;
}

__setup("tmem", enable_tmem);

/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
				       pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static int use_cleancache = 1;

static int __init no_cleancache(char *s)
{
	use_cleancache = 0;
	return 1;
}

__setup("nocleancache", no_cleancache);

static struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.flush_page = tmem_cleancache_flush_page,
	.flush_inode = tmem_cleancache_flush_inode,
	.flush_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};

static int __init xen_tmem_init(void)
{
	struct cleancache_ops old_ops;

	if (!xen_domain())
		return 0;
#ifdef CONFIG_CLEANCACHE
	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && use_cleancache) {
		char *s = "";
		old_ops = cleancache_register_ops(&tmem_cleancache_ops);
		if (old_ops.init_fs != NULL)
			s = " (WARNING: cleancache_ops overridden)";
		printk(KERN_INFO "cleancache enabled, RAM provided by "
				 "Xen Transcendent Memory%s\n", s);
	}
#endif
	return 0;
}

module_init(xen_tmem_init)

fs/btrfs/extent_io.c
@@ -11,6 +11,7 @@
 #include <linux/writeback.h>
 #include <linux/pagevec.h>
 #include <linux/prefetch.h>
+#include <linux/cleancache.h>
 #include "extent_io.h"
 #include "extent_map.h"
 #include "compat.h"
@@ -2016,6 +2017,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
 	set_page_extent_mapped(page);
 
+	if (!PageUptodate(page)) {
+		if (cleancache_get_page(page) == 0) {
+			BUG_ON(blocksize != PAGE_SIZE);
+			goto out;
+		}
+	}
+
 	end = page_end;
 	while (1) {
 		lock_extent(tree, start, end, GFP_NOFS);
@@ -2149,6 +2157,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		cur = cur + iosize;
 		page_offset += iosize;
 	}
+out:
 	if (!nr) {
 		if (!PageError(page))
 			SetPageUptodate(page);

fs/btrfs/super.c
@@ -39,6 +39,7 @@
 #include <linux/miscdevice.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
+#include <linux/cleancache.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -624,6 +625,7 @@ static int btrfs_fill_super(struct super_block *sb,
 	sb->s_root = root_dentry;
 
 	save_mount_options(sb, data);
+	cleancache_init_fs(sb);
 	return 0;
 
 fail_close:

fs/buffer.c
@@ -41,6 +41,7 @@
 #include <linux/bitops.h>
 #include <linux/mpage.h>
 #include <linux/bit_spinlock.h>
+#include <linux/cleancache.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 
@@ -269,6 +270,10 @@ void invalidate_bdev(struct block_device *bdev)
 	invalidate_bh_lrus();
 	lru_add_drain_all();	/* make sure all lru add caches are flushed */
 	invalidate_mapping_pages(mapping, 0, -1);
+	/* 99% of the time, we don't need to flush the cleancache on the bdev.
+	 * But, for the strange corners, lets be cautious
+	 */
+	cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(invalidate_bdev);

fs/ext3/super.c
@@ -36,6 +36,7 @@
 #include <linux/quotaops.h>
 #include <linux/seq_file.h>
 #include <linux/log2.h>
+#include <linux/cleancache.h>
 
 #include <asm/uaccess.h>
 
@@ -1367,6 +1368,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
 	} else {
 		ext3_msg(sb, KERN_INFO, "using internal journal");
 	}
+	cleancache_init_fs(sb);
 	return res;
 }

fs/ext4/super.c
@@ -38,6 +38,7 @@
 #include <linux/ctype.h>
 #include <linux/log2.h>
 #include <linux/crc16.h>
+#include <linux/cleancache.h>
 #include <asm/uaccess.h>
 
 #include <linux/kthread.h>
@@ -1948,6 +1949,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
 			EXT4_INODES_PER_GROUP(sb),
 			sbi->s_mount_opt, sbi->s_mount_opt2);
 
+	cleancache_init_fs(sb);
 	return res;
 }

fs/mpage.c
@@ -27,6 +27,7 @@
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
+#include <linux/cleancache.h>
 
 /*
  * I/O completion handler for multipage BIOs.
@@ -271,6 +272,12 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		SetPageMappedToDisk(page);
 	}
 
+	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
+	    cleancache_get_page(page) == 0) {
+		SetPageUptodate(page);
+		goto confused;
+	}
+
 	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */

fs/ocfs2/super.c
@@ -41,6 +41,7 @@
 #include <linux/mount.h>
 #include <linux/seq_file.h>
 #include <linux/quotaops.h>
+#include <linux/cleancache.h>
 
 #define CREATE_TRACE_POINTS
 #include "ocfs2_trace.h"
@@ -2352,6 +2353,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
 		mlog_errno(status);
 		goto bail;
 	}
+	cleancache_init_shared_fs((char *)&uuid_net_key, sb);
 
 bail:
 	return status;

fs/super.c
@@ -31,6 +31,7 @@
 #include <linux/mutex.h>
 #include <linux/backing-dev.h>
 #include <linux/rculist_bl.h>
+#include <linux/cleancache.h>
 #include "internal.h"
 
 
@@ -112,6 +113,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
 		s->s_maxbytes = MAX_NON_LFS;
 		s->s_op = &default_op;
 		s->s_time_gran = 1000000000;
+		s->cleancache_poolid = -1;
 	}
 out:
 	return s;
@@ -177,6 +179,7 @@ void deactivate_locked_super(struct super_block *s)
 {
 	struct file_system_type *fs = s->s_type;
 	if (atomic_dec_and_test(&s->s_active)) {
+		cleancache_flush_fs(s);
 		fs->kill_sb(s);
 		/*
 		 * We need to call rcu_barrier so all the delayed rcu free

include/linux/cleancache.h (new file, 122 lines)
@@ -0,0 +1,122 @@
#ifndef _LINUX_CLEANCACHE_H
#define _LINUX_CLEANCACHE_H

#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>

#define CLEANCACHE_KEY_MAX 6

/*
 * cleancache requires every file with a page in cleancache to have a
 * unique key unless/until the file is removed/truncated.  For some
 * filesystems, the inode number is unique, but for "modern" filesystems
 * an exportable filehandle is required (see exportfs.h)
 */
struct cleancache_filekey {
	union {
		ino_t ino;
		__u32 fh[CLEANCACHE_KEY_MAX];
		u32 key[CLEANCACHE_KEY_MAX];
	} u;
};

struct cleancache_ops {
	int (*init_fs)(size_t);
	int (*init_shared_fs)(char *uuid, size_t);
	int (*get_page)(int, struct cleancache_filekey,
			pgoff_t, struct page *);
	void (*put_page)(int, struct cleancache_filekey,
			pgoff_t, struct page *);
	void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
	void (*flush_inode)(int, struct cleancache_filekey);
	void (*flush_fs)(int);
};

extern struct cleancache_ops
	cleancache_register_ops(struct cleancache_ops *ops);
extern void __cleancache_init_fs(struct super_block *);
extern void __cleancache_init_shared_fs(char *, struct super_block *);
extern int  __cleancache_get_page(struct page *);
extern void __cleancache_put_page(struct page *);
extern void __cleancache_flush_page(struct address_space *, struct page *);
extern void __cleancache_flush_inode(struct address_space *);
extern void __cleancache_flush_fs(struct super_block *);
extern int cleancache_enabled;

#ifdef CONFIG_CLEANCACHE
static inline bool cleancache_fs_enabled(struct page *page)
{
	return page->mapping->host->i_sb->cleancache_poolid >= 0;
}
static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
{
	return mapping->host->i_sb->cleancache_poolid >= 0;
}
#else
#define cleancache_enabled (0)
#define cleancache_fs_enabled(_page) (0)
#define cleancache_fs_enabled_mapping(_page) (0)
#endif

/*
 * The shim layer provided by these inline functions allows the compiler
 * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
 * is disabled, to a single global variable check if CONFIG_CLEANCACHE
 * is enabled but no cleancache "backend" has dynamically enabled it,
 * and, for the most frequent cleancache ops, to a single global variable
 * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
 * and a cleancache backend has dynamically enabled cleancache, but the
 * filesystem referenced by that cleancache op has not enabled cleancache.
 * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
 * no measurable performance impact.
 */

static inline void cleancache_init_fs(struct super_block *sb)
{
	if (cleancache_enabled)
		__cleancache_init_fs(sb);
}

static inline void cleancache_init_shared_fs(char *uuid, struct super_block *sb)
{
	if (cleancache_enabled)
		__cleancache_init_shared_fs(uuid, sb);
}

static inline int cleancache_get_page(struct page *page)
{
	int ret = -1;

	if (cleancache_enabled && cleancache_fs_enabled(page))
		ret = __cleancache_get_page(page);
	return ret;
}

static inline void cleancache_put_page(struct page *page)
{
	if (cleancache_enabled && cleancache_fs_enabled(page))
		__cleancache_put_page(page);
}

static inline void cleancache_flush_page(struct address_space *mapping,
					struct page *page)
{
	/* careful... page->mapping is NULL sometimes when this is called */
	if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
		__cleancache_flush_page(mapping, page);
}

static inline void cleancache_flush_inode(struct address_space *mapping)
{
	if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
		__cleancache_flush_inode(mapping);
}

static inline void cleancache_flush_fs(struct super_block *sb)
{
	if (cleancache_enabled)
		__cleancache_flush_fs(sb);
}

#endif /* _LINUX_CLEANCACHE_H */

include/linux/fs.h
@@ -1428,6 +1428,11 @@ struct super_block {
 	 */
 	char __rcu *s_options;
 	const struct dentry_operations *s_d_op; /* default d_op for dentries */
+
+	/*
+	 * Saved pool identifier for cleancache (-1 means none)
+	 */
+	int cleancache_poolid;
 };
 
 extern struct timespec current_fs_time(struct super_block *sb);

include/xen/interface/xen.h
@@ -58,6 +58,7 @@
 #define __HYPERVISOR_event_channel_op     32
 #define __HYPERVISOR_physdev_op           33
 #define __HYPERVISOR_hvm_op               34
+#define __HYPERVISOR_tmem_op              38
 
 /* Architecture-specific hypercall definitions. */
 #define __HYPERVISOR_arch_0               48
@@ -461,6 +462,27 @@ typedef uint8_t xen_domain_handle_t[16];
 #define __mk_unsigned_long(x) x ## UL
 #define mk_unsigned_long(x) __mk_unsigned_long(x)
 
+#define TMEM_SPEC_VERSION 1
+
+struct tmem_op {
+	uint32_t cmd;
+	int32_t pool_id;
+	union {
+		struct {  /* for cmd == TMEM_NEW_POOL */
+			uint64_t uuid[2];
+			uint32_t flags;
+		} new;
+		struct {
+			uint64_t oid[3];
+			uint32_t index;
+			uint32_t tmem_offset;
+			uint32_t pfn_offset;
+			uint32_t len;
+			GUEST_HANDLE(void) gmfn; /* guest machine page frame */
+		} gen;
+	} u;
+};
+
 #else /* __ASSEMBLY__ */
 
 /* In assembly code we cannot use C numeric constant suffixes. */

mm/Kconfig
@@ -347,3 +347,26 @@ config NEED_PER_CPU_KM
 	depends on !SMP
 	bool
 	default y
+
+config CLEANCACHE
+	bool "Enable cleancache driver to cache clean pages if tmem is present"
+	default n
+	help
+	  Cleancache can be thought of as a page-granularity victim cache
+	  for clean pages that the kernel's pageframe replacement algorithm
+	  (PFRA) would like to keep around, but can't since there isn't enough
+	  memory.  So when the PFRA "evicts" a page, it first attempts to use
+	  cleancache code to put the data contained in that page into
+	  "transcendent memory", memory that is not directly accessible or
+	  addressable by the kernel and is of unknown and possibly
+	  time-varying size.  And when a cleancache-enabled
+	  filesystem wishes to access a page in a file on disk, it first
+	  checks cleancache to see if it already contains it; if it does,
+	  the page is copied into the kernel and a disk access is avoided.
+	  When a transcendent memory driver is available (such as zcache or
+	  Xen transcendent memory), a significant I/O reduction
+	  may be achieved.  When none is available, all cleancache calls
+	  are reduced to a single pointer-compare-against-NULL resulting
+	  in a negligible performance hit.
+
+	  If unsure, say Y to enable cleancache

mm/Makefile
@@ -49,3 +49,4 @@ obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
 obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
 obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
 obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
+obj-$(CONFIG_CLEANCACHE) += cleancache.o

mm/cleancache.c (new file, 244 lines)
@@ -0,0 +1,244 @@
/*
 * Cleancache frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of cleancache.  See
 * Documentation/vm/cleancache.txt for more information.
 *
 * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
#include <linux/cleancache.h>

/*
 * This global enablement flag may be read thousands of times per second
 * by cleancache_get/put/flush even on systems where cleancache_ops
 * is not claimed (e.g. cleancache is config'ed on but remains
 * disabled), so is preferred to the slower alternative: a function
 * call that checks a non-global.
 */
int cleancache_enabled;
EXPORT_SYMBOL(cleancache_enabled);

/*
 * cleancache_ops is set by cleancache_register_ops to contain the pointers
 * to the cleancache "backend" implementation functions.
 */
static struct cleancache_ops cleancache_ops;

/* useful stats available in /sys/kernel/mm/cleancache */
static unsigned long cleancache_succ_gets;
static unsigned long cleancache_failed_gets;
static unsigned long cleancache_puts;
static unsigned long cleancache_flushes;

/*
 * register operations for cleancache, returning previous thus allowing
 * detection of multiple backends and possible nesting
 */
struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
{
	struct cleancache_ops old = cleancache_ops;

	cleancache_ops = *ops;
	cleancache_enabled = 1;
	return old;
}
EXPORT_SYMBOL(cleancache_register_ops);

/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
	sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE);
}
EXPORT_SYMBOL(__cleancache_init_fs);

/* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
{
	sb->cleancache_poolid =
		(*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE);
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);

/*
 * If the filesystem uses exportable filehandles, use the filehandle as
 * the key, else use the inode number.
 */
static int cleancache_get_key(struct inode *inode,
			      struct cleancache_filekey *key)
{
	int (*fhfn)(struct dentry *, __u32 *fh, int *, int);
	int len = 0, maxlen = CLEANCACHE_KEY_MAX;
	struct super_block *sb = inode->i_sb;

	key->u.ino = inode->i_ino;
	if (sb->s_export_op != NULL) {
		fhfn = sb->s_export_op->encode_fh;
		if (fhfn) {
			struct dentry d;
			d.d_inode = inode;
			len = (*fhfn)(&d, &key->u.fh[0], &maxlen, 0);
			if (len <= 0 || len == 255)
				return -1;
			if (maxlen > CLEANCACHE_KEY_MAX)
				return -1;
		}
	}
	return 0;
}

/*
 * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
 * successful, use it to fill the specified page with data and return 0.
 * The pageframe is unchanged and returns -1 if the get fails.
 * Page must be locked by caller.
 */
int __cleancache_get_page(struct page *page)
{
	int ret = -1;
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	VM_BUG_ON(!PageLocked(page));
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id < 0)
		goto out;

	if (cleancache_get_key(page->mapping->host, &key) < 0)
		goto out;

	ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
	if (ret == 0)
		cleancache_succ_gets++;
	else
		cleancache_failed_gets++;
out:
	return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);

/*
 * "Put" data from a page to cleancache and associate it with the
 * (previously-obtained per-filesystem) poolid and the page's
 * inode and page index.  Page must be locked.  Note that a put_page
 * always "succeeds", though a subsequent get_page may succeed or fail.
 */
void __cleancache_put_page(struct page *page)
{
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	VM_BUG_ON(!PageLocked(page));
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id >= 0 &&
	      cleancache_get_key(page->mapping->host, &key) >= 0) {
		(*cleancache_ops.put_page)(pool_id, key, page->index, page);
		cleancache_puts++;
	}
}
EXPORT_SYMBOL(__cleancache_put_page);

/*
 * Flush any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 */
void __cleancache_flush_page(struct address_space *mapping, struct page *page)
{
	/* careful... page->mapping is NULL sometimes when this is called */
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (pool_id >= 0) {
		VM_BUG_ON(!PageLocked(page));
		if (cleancache_get_key(mapping->host, &key) >= 0) {
			(*cleancache_ops.flush_page)(pool_id, key, page->index);
			cleancache_flushes++;
		}
	}
}
EXPORT_SYMBOL(__cleancache_flush_page);

/*
 * Flush all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 */
void __cleancache_flush_inode(struct address_space *mapping)
{
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
		(*cleancache_ops.flush_inode)(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_flush_inode);

/*
 * Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
 * cleancache_init_fs or cleancache_init_shared_fs
 */
void __cleancache_flush_fs(struct super_block *sb)
{
	if (sb->cleancache_poolid >= 0) {
		int old_poolid = sb->cleancache_poolid;
		sb->cleancache_poolid = -1;
		(*cleancache_ops.flush_fs)(old_poolid);
	}
}
EXPORT_SYMBOL(__cleancache_flush_fs);

#ifdef CONFIG_SYSFS

/* see Documentation/ABI/testing/sysfs-kernel-mm-cleancache */

#define CLEANCACHE_SYSFS_RO(_name) \
	static ssize_t cleancache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
		return sprintf(buf, "%lu\n", cleancache_##_name); \
	} \
	static struct kobj_attribute cleancache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = cleancache_##_name##_show, \
	}

CLEANCACHE_SYSFS_RO(succ_gets);
CLEANCACHE_SYSFS_RO(failed_gets);
CLEANCACHE_SYSFS_RO(puts);
CLEANCACHE_SYSFS_RO(flushes);

static struct attribute *cleancache_attrs[] = {
	&cleancache_succ_gets_attr.attr,
	&cleancache_failed_gets_attr.attr,
	&cleancache_puts_attr.attr,
	&cleancache_flushes_attr.attr,
	NULL,
};

static struct attribute_group cleancache_attr_group = {
	.attrs = cleancache_attrs,
	.name = "cleancache",
};

#endif /* CONFIG_SYSFS */

static int __init init_cleancache(void)
{
#ifdef CONFIG_SYSFS
	int err;

	err = sysfs_create_group(mm_kobj, &cleancache_attr_group);
#endif /* CONFIG_SYSFS */
	return 0;
}
module_init(init_cleancache)

mm/filemap.c
@@ -34,6 +34,7 @@
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
 #include <linux/mm_inline.h> /* for page_is_file_cache() */
+#include <linux/cleancache.h>
 #include "internal.h"
 
 /*
@@ -118,6 +119,16 @@ void __delete_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 
+	/*
+	 * if we're uptodate, flush out into the cleancache, otherwise
+	 * invalidate any existing cleancache entries.  We can't leave
+	 * stale data around in the cleancache once our page is gone
+	 */
+	if (PageUptodate(page) && PageMappedToDisk(page))
+		cleancache_put_page(page);
+	else
+		cleancache_flush_page(mapping, page);
+
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
 	mapping->nrpages--;

mm/truncate.c
@@ -19,6 +19,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
+#include <linux/cleancache.h>
 #include "internal.h"
 
 
@@ -51,6 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
 	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+	cleancache_flush_page(page->mapping, page);
 	if (page_has_private(page))
 		do_invalidatepage(page, partial);
 }
@@ -214,6 +216,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	pgoff_t next;
 	int i;
 
+	cleancache_flush_inode(mapping);
 	if (mapping->nrpages == 0)
 		return;
 
@@ -291,6 +294,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 	}
+	cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
 
@@ -440,6 +444,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	int did_range_unmap = 0;
 	int wrapped = 0;
 
+	cleancache_flush_inode(mapping);
 	pagevec_init(&pvec, 0);
 	next = start;
 	while (next <= end && !wrapped &&
@@ -498,6 +503,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		mem_cgroup_uncharge_end();
 		cond_resched();
 	}
+	cleancache_flush_inode(mapping);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);