pipe: kill ->map() and ->unmap()

all pipe_buffer_operations have the same instances of those...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Al Viro 2014-02-02 21:09:54 -05:00
parent 58bda1da4b
commit fbb32750a6
7 changed files with 29 additions and 100 deletions
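In code terms, the rationale is roughly the following (an illustrative sketch, not part of the commit): every pipe_buf_operations instance pointed ->map/->unmap at generic_pipe_buf_map()/generic_pipe_buf_unmap(), which only wrapped kmap()/kmap_atomic() on buf->page, so each call site can open-code the mapping directly and the indirection can go. The helper names pipe_buf_kmap/pipe_buf_kunmap below are hypothetical, used only to show the pattern the diff open-codes.

/*
 * Illustrative sketch only, not from the commit: the mapping pattern
 * that replaces ops->map()/ops->unmap() at the call sites in the diff.
 */
#include <linux/highmem.h>
#include <linux/pipe_fs_i.h>

static void *pipe_buf_kmap(struct pipe_buffer *buf, int atomic)
{
	/* atomic mappings must not fault before the matching unmap */
	return atomic ? kmap_atomic(buf->page) : kmap(buf->page);
}

static void pipe_buf_kunmap(struct pipe_buffer *buf, void *addr, int atomic)
{
	if (atomic)
		kunmap_atomic(addr);	/* undo kmap_atomic() */
	else
		kunmap(buf->page);	/* undo kmap() */
}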

fs/pipe.c

@@ -225,52 +225,6 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
 		page_cache_release(page);
 }
 
-/**
- * generic_pipe_buf_map - virtually map a pipe buffer
- * @pipe:	the pipe that the buffer belongs to
- * @buf:	the buffer that should be mapped
- * @atomic:	whether to use an atomic map
- *
- * Description:
- *	This function returns a kernel virtual address mapping for the
- *	pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
- *	and the caller has to be careful not to fault before calling
- *	the unmap function.
- *
- *	Note that this function calls kmap_atomic() if @atomic != 0.
- */
-void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
-			   struct pipe_buffer *buf, int atomic)
-{
-	if (atomic) {
-		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
-		return kmap_atomic(buf->page);
-	}
-
-	return kmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_map);
-
-/**
- * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
- * @pipe:	the pipe that the buffer belongs to
- * @buf:	the buffer that should be unmapped
- * @map_data:	the data that the mapping function returned
- *
- * Description:
- *	This function undoes the mapping that ->map() provided.
- */
-void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
-			    struct pipe_buffer *buf, void *map_data)
-{
-	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
-		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
-		kunmap_atomic(map_data);
-	} else
-		kunmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_unmap);
-
 /**
  * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
  * @pipe:	the pipe that the buffer belongs to
@@ -351,8 +305,6 @@ EXPORT_SYMBOL(generic_pipe_buf_release);
 
 static const struct pipe_buf_operations anon_pipe_buf_ops = {
 	.can_merge = 1,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
 	.release = anon_pipe_buf_release,
 	.steal = generic_pipe_buf_steal,
@@ -361,8 +313,6 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
 
 static const struct pipe_buf_operations packet_pipe_buf_ops = {
 	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
 	.release = anon_pipe_buf_release,
 	.steal = generic_pipe_buf_steal,
@@ -410,9 +360,15 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
 
 			atomic = !iov_fault_in_pages_write(iov, chars);
 redo:
-			addr = ops->map(pipe, buf, atomic);
+			if (atomic)
+				addr = kmap_atomic(buf->page);
+			else
+				addr = kmap(buf->page);
 			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
-			ops->unmap(pipe, buf, addr);
+			if (atomic)
+				kunmap_atomic(addr);
+			else
+				kunmap(buf->page);
 			if (unlikely(error)) {
 				/*
 				 * Just retry with the slow path if we failed.
@@ -538,10 +494,16 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 
 			iov_fault_in_pages_read(iov, chars);
 redo1:
-			addr = ops->map(pipe, buf, atomic);
+			if (atomic)
+				addr = kmap_atomic(buf->page);
+			else
+				addr = kmap(buf->page);
 			error = pipe_iov_copy_from_user(offset + addr, iov,
 							chars, atomic);
-			ops->unmap(pipe, buf, addr);
+			if (atomic)
+				kunmap_atomic(addr);
+			else
+				kunmap(buf->page);
 			ret = error;
 			do_wakeup = 1;
 			if (error) {