Merge branch 'splice-net-switch-over-users-of-sendpage-and-remove-it'
David Howells says:

====================
splice, net: Switch over users of sendpage() and remove it

Here's the final set of patches towards the removal of sendpage. All the
drivers that use sendpage() get switched over to using sendmsg() with
MSG_SPLICE_PAGES.

The following changes are made:

 (1) Make the protocol drivers behave according to MSG_MORE, not
     MSG_SENDPAGE_NOTLAST. The latter is restricted to turning on MSG_MORE
     in the sendpage() wrappers.

 (2) Fix ocfs2 to allocate its global protocol buffers with folio_alloc()
     rather than kzalloc() so as not to invoke the !sendpage_ok warning in
     skb_splice_from_iter().

 (3) Make ceph/rds, skb_send_sock, dlm, nvme, smc, ocfs2, drbd and iscsi
     use sendmsg(), not sendpage, and make them specify MSG_MORE instead of
     MSG_SENDPAGE_NOTLAST.

 (4) Kill off sendpage and clean up MSG_SENDPAGE_NOTLAST.

Link: https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git/commit/?id=51c78a4d532efe9543a4df019ff405f05c6157f6 # part 1
Link: https://lore.kernel.org/r/20230616161301.622169-1-dhowells@redhat.com/ # v1
Link: https://lore.kernel.org/r/20230617121146.716077-1-dhowells@redhat.com/ # v2
Link: https://lore.kernel.org/r/20230620145338.1300897-1-dhowells@redhat.com/ # v3
====================

Link: https://lore.kernel.org/r/20230623225513.2732256-1-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 9ae440b8fd
88 changed files with 236 additions and 754 deletions
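
Every per-subsystem hunk below applies the same mechanical conversion: wrap
the page in a bio_vec, point the message iterator at it, and send with
sendmsg() instead of the removed sendpage() hook. A minimal sketch of that
pattern, assuming a connected kernel-side socket; splice_one_page() is a
made-up name, not a kernel API, and the sendpage_ok() fallback mirrors what
the nvme and ceph call sites below do:

	/*
	 * Sketch of the sendpage() -> sendmsg(MSG_SPLICE_PAGES) conversion.
	 * Replaces calls like:
	 *	sock->ops->sendpage(sock, page, offset, size,
	 *			    MSG_SENDPAGE_NOTLAST);
	 */
	static ssize_t splice_one_page(struct socket *sock, struct page *page,
				       int offset, size_t size, bool more)
	{
		struct bio_vec bvec;
		struct msghdr msg = {
			.msg_flags = MSG_SPLICE_PAGES,	/* splice, don't copy */
		};

		if (more)		/* what MSG_SENDPAGE_NOTLAST used to signal */
			msg.msg_flags |= MSG_MORE;

		/* Pages the allocator may reuse under us (slab pages,
		 * page_count() == 0) must not be spliced; clearing
		 * MSG_SPLICE_PAGES makes sendmsg() copy the data instead.
		 */
		if (!sendpage_ok(page))
			msg.msg_flags &= ~MSG_SPLICE_PAGES;

		bvec_set_page(&bvec, page, size, offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
		return sock_sendmsg(sock, &msg);
	}

MSG_MORE thus takes over the signalling role of the internal
MSG_SENDPAGE_NOTLAST flag, which the series deletes.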
@@ -240,11 +240,11 @@ offsets into ``msg``, respectively.
 If a program of type ``BPF_PROG_TYPE_SK_MSG`` is run on a ``msg`` it can only
 parse data that the (``data``, ``data_end``) pointers have already consumed.
 For ``sendmsg()`` hooks this is likely the first scatterlist element. But for
-calls relying on the ``sendpage`` handler (e.g., ``sendfile()``) this will be
-the range (**0**, **0**) because the data is shared with user space and by
-default the objective is to avoid allowing user space to modify data while (or
-after) BPF verdict is being decided. This helper can be used to pull in data
-and to set the start and end pointers to given values. Data will be copied if
+calls relying on MSG_SPLICE_PAGES (e.g., ``sendfile()``) this will be the
+range (**0**, **0**) because the data is shared with user space and by default
+the objective is to avoid allowing user space to modify data while (or after)
+BPF verdict is being decided. This helper can be used to pull in data and to
+set the start and end pointers to given values. Data will be copied if
 necessary (i.e., if data was not linear and if start and end pointers do not
 point to the same chunk).
@@ -521,8 +521,6 @@ prototypes::
	int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
	int (*fasync) (int, struct file *, int);
	int (*lock) (struct file *, int, struct file_lock *);
-	ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
-			loff_t *, int);
	unsigned long (*get_unmapped_area)(struct file *, unsigned long,
			unsigned long, unsigned long, unsigned long);
	int (*check_flags)(int);
@@ -1086,7 +1086,6 @@ This describes how the VFS can manipulate an open file. As of kernel
	int (*fsync) (struct file *, loff_t, loff_t, int datasync);
	int (*fasync) (int, struct file *, int);
	int (*lock) (struct file *, int, struct file_lock *);
-	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
	int (*check_flags)(int);
	int (*flock) (struct file *, int, struct file_lock *);
@@ -269,8 +269,8 @@ a single application thread handles flows with many different flow hashes.
 rps_sock_flow_table is a global flow table that contains the *desired* CPU
 for flows: the CPU that is currently processing the flow in userspace.
 Each table value is a CPU index that is updated during calls to recvmsg
-and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
-and tcp_splice_read()).
+and sendmsg (specifically, inet_recvmsg(), inet_sendmsg() and
+tcp_splice_read()).

 When the scheduler moves a thread to a new CPU while it has outstanding
 receive packets on the old CPU, packets may arrive out of order. To
@@ -482,7 +482,6 @@ static const struct proto_ops alg_proto_ops = {
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
	.sendmsg = sock_no_sendmsg,
	.recvmsg = sock_no_recvmsg,

@@ -1106,33 +1105,6 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(af_alg_sendmsg);

-/**
- * af_alg_sendpage - sendpage system call handler
- * @sock: socket of connection to user space to write to
- * @page: data to send
- * @offset: offset into page to begin sending
- * @size: length of data
- * @flags: message send/receive flags
- *
- * This is a generic implementation of sendpage to fill ctx->tsgl_list.
- */
-ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
-			int offset, size_t size, int flags)
-{
-	struct bio_vec bvec;
-	struct msghdr msg = {
-		.msg_flags = flags | MSG_SPLICE_PAGES,
-	};
-
-	if (flags & MSG_SENDPAGE_NOTLAST)
-		msg.msg_flags |= MSG_MORE;
-
-	bvec_set_page(&bvec, page, size, offset);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-	return sock_sendmsg(sock, &msg);
-}
-EXPORT_SYMBOL_GPL(af_alg_sendpage);
-
 /**
  * af_alg_free_resources - release resources required for crypto request
  * @areq: Request holding the TX and RX SGL
@@ -9,10 +9,10 @@
 * The following concept of the memory management is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
-* filled by user space with the data submitted via sendpage. Filling up
-* the TX SGL does not cause a crypto operation -- the data will only be
-* tracked by the kernel. Upon receipt of one recvmsg call, the caller must
-* provide a buffer which is tracked with the RX SGL.
+* filled by user space with the data submitted via sendmsg (maybe with
+* MSG_SPLICE_PAGES). Filling up the TX SGL does not cause a crypto operation
+* -- the data will only be tracked by the kernel. Upon receipt of one recvmsg
+* call, the caller must provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed

@@ -370,7 +370,6 @@ static struct proto_ops algif_aead_ops = {

	.release = af_alg_release,
	.sendmsg = aead_sendmsg,
-	.sendpage = af_alg_sendpage,
	.recvmsg = aead_recvmsg,
	.poll = af_alg_poll,
 };

@@ -422,18 +421,6 @@ static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
	return aead_sendmsg(sock, msg, size);
 }

-static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
-				   int offset, size_t size, int flags)
-{
-	int err;
-
-	err = aead_check_key(sock);
-	if (err)
-		return err;
-
-	return af_alg_sendpage(sock, page, offset, size, flags);
-}
-
 static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t ignored, int flags)
 {

@@ -461,7 +448,6 @@ static struct proto_ops algif_aead_ops_nokey = {

	.release = af_alg_release,
	.sendmsg = aead_sendmsg_nokey,
-	.sendpage = aead_sendpage_nokey,
	.recvmsg = aead_recvmsg_nokey,
	.poll = af_alg_poll,
 };
@@ -174,7 +174,6 @@ static struct proto_ops algif_rng_ops = {
	.bind = sock_no_bind,
	.accept = sock_no_accept,
	.sendmsg = sock_no_sendmsg,
-	.sendpage = sock_no_sendpage,

	.release = af_alg_release,
	.recvmsg = rng_recvmsg,

@@ -192,7 +191,6 @@ static struct proto_ops __maybe_unused algif_rng_test_ops = {
	.mmap = sock_no_mmap,
	.bind = sock_no_bind,
	.accept = sock_no_accept,
-	.sendpage = sock_no_sendpage,

	.release = af_alg_release,
	.recvmsg = rng_test_recvmsg,
@@ -194,7 +194,6 @@ static struct proto_ops algif_skcipher_ops = {

	.release = af_alg_release,
	.sendmsg = skcipher_sendmsg,
-	.sendpage = af_alg_sendpage,
	.recvmsg = skcipher_recvmsg,
	.poll = af_alg_poll,
 };

@@ -246,18 +245,6 @@ static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
	return skcipher_sendmsg(sock, msg, size);
 }

-static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
-				       int offset, size_t size, int flags)
-{
-	int err;
-
-	err = skcipher_check_key(sock);
-	if (err)
-		return err;
-
-	return af_alg_sendpage(sock, page, offset, size, flags);
-}
-
 static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
 {

@@ -285,7 +272,6 @@ static struct proto_ops algif_skcipher_ops_nokey = {

	.release = af_alg_release,
	.sendmsg = skcipher_sendmsg_nokey,
-	.sendpage = skcipher_sendpage_nokey,
	.recvmsg = skcipher_recvmsg_nokey,
	.poll = af_alg_poll,
 };
@@ -1540,6 +1540,8 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
			    int offset, size_t size, unsigned msg_flags)
 {
	struct socket *socket = peer_device->connection->data.socket;
+	struct msghdr msg = { .msg_flags = msg_flags, };
+	struct bio_vec bvec;
	int len = size;
	int err = -EIO;

@@ -1549,15 +1551,17 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
-	if (drbd_disable_sendpage || !sendpage_ok(page))
-		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
+	if (!drbd_disable_sendpage && sendpage_ok(page))
+		msg.msg_flags |= MSG_NOSIGNAL | MSG_SPLICE_PAGES;

-	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(peer_device->connection);
	do {
		int sent;

-		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
+		bvec_set_page(&bvec, page, offset, len);
+		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
+
+		sent = sock_sendmsg(socket, &msg);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(peer_device->connection, socket))
@@ -325,8 +325,7 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
 {
	struct bio_vec bvec;
	struct msghdr msg = {
-		.msg_flags = (MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST |
-			      MSG_SPLICE_PAGES),
+		.msg_flags = (MSG_MORE | MSG_DONTWAIT | MSG_SPLICE_PAGES),
	};
	struct sock *sk = s->sk;
	int i = 0, rv = 0, sent = 0;

@@ -335,7 +334,7 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
		size_t bytes = min_t(size_t, PAGE_SIZE - offset, size);

		if (size + offset <= PAGE_SIZE)
-			msg.msg_flags &= ~MSG_SENDPAGE_NOTLAST;
+			msg.msg_flags &= ~MSG_MORE;

		tcp_rate_check_app_limited(sk);
		bvec_set_page(&bvec, page[i], bytes, offset);
@@ -569,8 +569,6 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
		  size_t len, int flags, int *addr_len);
 void chtls_splice_eof(struct socket *sock);
-int chtls_sendpage(struct sock *sk, struct page *page,
-		   int offset, size_t size, int flags);
 int send_tx_flowc_wr(struct sock *sk, int compl,
		     u32 snd_nxt, u32 rcv_nxt);
 void chtls_tcp_push(struct sock *sk, int flags);
@@ -1246,20 +1246,6 @@ void chtls_splice_eof(struct socket *sock)
	release_sock(sk);
 }

-int chtls_sendpage(struct sock *sk, struct page *page,
-		   int offset, size_t size, int flags)
-{
-	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
-	struct bio_vec bvec;
-
-	if (flags & MSG_SENDPAGE_NOTLAST)
-		msg.msg_flags |= MSG_MORE;
-
-	bvec_set_page(&bvec, page, size, offset);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-	return chtls_sendmsg(sk, &msg, size);
-}
-
 static void chtls_select_window(struct sock *sk)
 {
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
@@ -607,7 +607,6 @@ static void __init chtls_init_ulp_ops(void)
	chtls_cpl_prot.shutdown = chtls_shutdown;
	chtls_cpl_prot.sendmsg = chtls_sendmsg;
	chtls_cpl_prot.splice_eof = chtls_splice_eof;
-	chtls_cpl_prot.sendpage = chtls_sendpage;
	chtls_cpl_prot.recvmsg = chtls_recvmsg;
	chtls_cpl_prot.setsockopt = chtls_setsockopt;
	chtls_cpl_prot.getsockopt = chtls_getsockopt;
@@ -997,25 +997,28 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
	u32 h2cdata_left = req->h2cdata_left;

	while (true) {
+		struct bio_vec bvec;
+		struct msghdr msg = {
+			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
+		};
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int req_data_sent = req->data_sent;
-		int ret, flags = MSG_DONTWAIT;
+		int ret;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
-			flags |= MSG_EOR;
+			msg.msg_flags |= MSG_EOR;
		else
-			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+			msg.msg_flags |= MSG_MORE;

-		if (sendpage_ok(page)) {
-			ret = kernel_sendpage(queue->sock, page, offset, len,
-					flags);
-		} else {
-			ret = sock_no_sendpage(queue->sock, page, offset, len,
-					flags);
-		}
+		if (!sendpage_ok(page))
+			msg.msg_flags &= ~MSG_SPLICE_PAGES,
+
+		bvec_set_page(&bvec, page, len, offset);
+		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
+		ret = sock_sendmsg(queue->sock, &msg);
		if (ret <= 0)
			return ret;

@@ -1054,22 +1057,24 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 {
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
+	struct bio_vec bvec;
+	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
-	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
-		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+		msg.msg_flags |= MSG_MORE;
	else
-		flags |= MSG_EOR;
+		msg.msg_flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

-	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
-			offset_in_page(pdu) + req->offset, len, flags);
+	bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
+	ret = sock_sendmsg(queue->sock, &msg);
	if (unlikely(ret <= 0))
		return ret;

@@ -1093,6 +1098,8 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 {
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
+	struct bio_vec bvec;
+	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE, };
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

@@ -1101,13 +1108,11 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	if (!req->h2cdata_left)
-		ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
-			offset_in_page(pdu) + req->offset, len,
-			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
-	else
-		ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
-			offset_in_page(pdu) + req->offset, len,
-			MSG_DONTWAIT | MSG_MORE);
+		msg.msg_flags |= MSG_SPLICE_PAGES;
+
+	bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
+	ret = sock_sendmsg(queue->sock, &msg);
	if (unlikely(ret <= 0))
		return ret;
@@ -576,13 +576,17 @@ static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)

 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
 {
+	struct msghdr msg = {
+		.msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
+	};
+	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

-	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
-			offset_in_page(cmd->data_pdu) + cmd->offset,
-			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+	bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;

@@ -603,17 +607,21 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
	int ret;

	while (cmd->cur_sg) {
+		struct msghdr msg = {
+			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
+		};
		struct page *page = sg_page(cmd->cur_sg);
+		struct bio_vec bvec;
		u32 left = cmd->cur_sg->length - cmd->offset;
-		int flags = MSG_DONTWAIT;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
-			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+			msg.msg_flags |= MSG_MORE;

-		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
-					left, flags);
+		bvec_set_page(&bvec, page, left, cmd->offset);
+		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+		ret = sock_sendmsg(cmd->queue->sock, &msg);
		if (ret <= 0)
			return ret;

@@ -649,18 +657,20 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
 {
+	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
+	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
-	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
-		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+		msg.msg_flags |= MSG_MORE;
	else
-		flags |= MSG_EOR;
+		msg.msg_flags |= MSG_EOR;

-	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
-		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
+	bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;

@@ -677,18 +687,20 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,

 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
+	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
+	struct bio_vec bvec;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
-	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
-		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+		msg.msg_flags |= MSG_MORE;
	else
-		flags |= MSG_EOR;
+		msg.msg_flags |= MSG_EOR;

-	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
-		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
+	bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+	ret = sock_sendmsg(cmd->queue->sock, &msg);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
@@ -301,35 +301,32 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,

	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
		struct scatterlist *sg;
+		struct msghdr msg = {};
+		struct bio_vec bv;
		unsigned int offset, copy;
-		int flags = 0;

		r = 0;
		offset = segment->copied;
		copy = segment->size - offset;

		if (segment->total_copied + segment->size < segment->total_size)
-			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+			msg.msg_flags |= MSG_MORE;

		if (tcp_sw_conn->queue_recv)
-			flags |= MSG_DONTWAIT;
+			msg.msg_flags |= MSG_DONTWAIT;

-		/* Use sendpage if we can; else fall back to sendmsg */
		if (!segment->data) {
+			if (!tcp_conn->iscsi_conn->datadgst_en)
+				msg.msg_flags |= MSG_SPLICE_PAGES;
			sg = segment->sg;
			offset += segment->sg_offset + sg->offset;
-			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
-						  copy, flags);
+			bvec_set_page(&bv, sg_page(sg), copy, offset);
		} else {
-			struct msghdr msg = { .msg_flags = flags };
-			struct kvec iov = {
-				.iov_base = segment->data + offset,
-				.iov_len = copy
-			};
-
-			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
+			bvec_set_virt(&bv, segment->data + offset, copy);
		}
+		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, copy);

+		r = sock_sendmsg(sk, &msg);
		if (r < 0) {
			iscsi_tcp_segment_unmap(segment);
			return r;

@@ -746,7 +743,6 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
	sock_no_linger(sk);

	iscsi_sw_tcp_conn_set_callbacks(conn);
-	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */

@@ -777,8 +773,6 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
			return -ENOTCONN;
		}
		iscsi_set_param(cls_conn, param, buf, buflen);
-		tcp_sw_conn->sendpage = conn->datadgst_en ?
-			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
		mutex_unlock(&tcp_sw_conn->sock_lock);
		break;
	case ISCSI_PARAM_MAX_R2T:
@@ -47,8 +47,6 @@ struct iscsi_sw_tcp_conn {
	/* MIB custom statistics */
	uint32_t sendpage_failures_cnt;
	uint32_t discontiguous_hdr_cnt;
-
-	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
 };

 struct iscsi_sw_tcp_host {
@@ -1129,6 +1129,8 @@ int iscsit_fe_sendpage_sg(
	struct iscsit_conn *conn)
 {
	struct scatterlist *sg = cmd->first_data_sg;
+	struct bio_vec bvec;
+	struct msghdr msghdr = { .msg_flags = MSG_SPLICE_PAGES, };
	struct kvec iov;
	u32 tx_hdr_size, data_len;
	u32 offset = cmd->first_data_sg_off;

@@ -1172,17 +1174,18 @@ send_hdr:
		u32 space = (sg->length - offset);
		u32 sub_len = min_t(u32, data_len, space);
 send_pg:
-		tx_sent = conn->sock->ops->sendpage(conn->sock,
-				sg_page(sg), sg->offset + offset, sub_len, 0);
+		bvec_set_page(&bvec, sg_page(sg), sub_len, sg->offset + offset);
+		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, sub_len);
+
+		tx_sent = conn->sock->ops->sendmsg(conn->sock, &msghdr,
						   sub_len);
		if (tx_sent != sub_len) {
			if (tx_sent == -EAGAIN) {
-				pr_err("tcp_sendpage() returned"
-						" -EAGAIN\n");
+				pr_err("sendmsg/splice returned -EAGAIN\n");
				goto send_pg;
			}

-			pr_err("tcp_sendpage() failure: %d\n",
-					tx_sent);
+			pr_err("sendmsg/splice failure: %d\n", tx_sent);
			return -1;
		}
@@ -1395,8 +1395,11 @@ int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
 /* Send a message */
 static int send_to_sock(struct connection *con)
 {
-	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
+	struct bio_vec bvec;
+	struct msghdr msg = {
+		.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL,
+	};
	int len, offset, ret;

	spin_lock_bh(&con->writequeue_lock);

@@ -1412,8 +1415,9 @@ static int send_to_sock(struct connection *con)
	WARN_ON_ONCE(len == 0 && e->users == 0);
	spin_unlock_bh(&con->writequeue_lock);

-	ret = kernel_sendpage(con->sock, e->page, offset, len,
-			      msg_flags);
+	bvec_set_page(&bvec, e->page, len, offset);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
+	ret = sock_sendmsg(con->sock, &msg);
	trace_dlm_send(con->nodeid, ret);
	if (ret == -EAGAIN || ret == 0) {
		lock_sock(con->sock->sk);
@@ -936,7 +936,7 @@ nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,

 /*
  * Grab and keep cached pages associated with a file in the svc_rqst
- * so that they can be passed to the network sendmsg/sendpage routines
+ * so that they can be passed to the network sendmsg routines
  * directly. They will be released after the sending has completed.
  *
  * Return values: Number of bytes consumed, or -EIO if there are no
@@ -930,19 +930,22 @@ out:
 }

 static void o2net_sendpage(struct o2net_sock_container *sc,
-			   void *kmalloced_virt,
-			   size_t size)
+			   void *virt, size_t size)
 {
	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
+	struct msghdr msg = {};
+	struct bio_vec bv;
	ssize_t ret;

+	bvec_set_virt(&bv, virt, size);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, size);
+
	while (1) {
+		msg.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES;
		mutex_lock(&sc->sc_send_lock);
-		ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
-						 virt_to_page(kmalloced_virt),
-						 offset_in_page(kmalloced_virt),
-						 size, MSG_DONTWAIT);
+		ret = sock_sendmsg(sc->sc_sock, &msg);
		mutex_unlock(&sc->sc_send_lock);

		if (ret == size)
			break;
		if (ret == (ssize_t)-EAGAIN) {

@@ -2087,18 +2090,24 @@ void o2net_stop_listening(struct o2nm_node *node)

 int o2net_init(void)
 {
+	struct folio *folio;
+	void *p;
	unsigned long i;

	o2quo_init();

	o2net_debugfs_init();

-	o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL);
-	o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
-	o2net_keep_resp = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
-	if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp)
+	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 0);
+	if (!folio)
		goto out;

+	p = folio_address(folio);
+	o2net_hand = p;
+	p += sizeof(struct o2net_handshake);
+	o2net_keep_req = p;
+	p += sizeof(struct o2net_msg);
+	o2net_keep_resp = p;
+
	o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION);
	o2net_hand->connector_id = cpu_to_be64(1);

@@ -2124,9 +2133,6 @@ int o2net_init(void)
	return 0;

 out:
-	kfree(o2net_hand);
-	kfree(o2net_keep_req);
-	kfree(o2net_keep_resp);
	o2net_debugfs_exit();
	o2quo_exit();
	return -ENOMEM;

@@ -2135,8 +2141,6 @@ out:
 void o2net_exit(void)
 {
	o2quo_exit();
-	kfree(o2net_hand);
-	kfree(o2net_keep_req);
-	kfree(o2net_keep_resp);
	o2net_debugfs_exit();
+	folio_put(virt_to_folio(o2net_hand));
 }
@@ -229,8 +229,6 @@ void af_alg_wmem_wakeup(struct sock *sk);
 int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
 int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
		   unsigned int ivsize);
-ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
-			int offset, size_t size, int flags);
 void af_alg_free_resources(struct af_alg_async_req *areq);
 void af_alg_async_cb(void *data, int err);
 __poll_t af_alg_poll(struct file *file, struct socket *sock,
@@ -207,8 +207,6 @@ struct proto_ops {
				      size_t total_len, int flags);
	int (*mmap) (struct file *file, struct socket *sock,
		     struct vm_area_struct * vma);
-	ssize_t (*sendpage) (struct socket *sock, struct page *page,
-			     int offset, size_t size, int flags);
	ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len, unsigned int flags);
	void (*splice_eof)(struct socket *sock);

@@ -222,8 +220,6 @@ struct proto_ops {
			 sk_read_actor_t recv_actor);
	/* This is different from read_sock(), it reads an entire skb at a time. */
	int (*read_skb)(struct sock *sk, skb_read_actor_t recv_actor);
-	int (*sendpage_locked)(struct sock *sk, struct page *page,
-			       int offset, size_t size, int flags);
	int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
			      size_t size);
	int (*set_rcvlowat)(struct sock *sk, int val);

@@ -341,10 +337,6 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
		   int flags);
 int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
 int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
-int kernel_sendpage(struct socket *sock, struct page *page, int offset,
-		    size_t size, int flags);
-int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
-			   size_t size, int flags);
 int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);

 /* Routine returns the IP overhead imposed by a (caller-protected) socket. */
@@ -319,7 +319,6 @@ struct ucred {
 #define MSG_MORE	0x8000	/* Sender will send more */
 #define MSG_WAITFORONE	0x10000	/* recvmmsg(): block until 1+ packets avail */
 #define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */
-#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
 #define MSG_BATCH	0x40000 /* sendmmsg(): more messages coming */
 #define MSG_EOF         MSG_FIN
 #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */

@@ -341,8 +340,7 @@ struct ucred {

 /* Flags to be cleared on entry by sendmsg and sendmmsg syscalls */
 #define MSG_INTERNAL_SENDMSG_FLAGS \
-	(MSG_SPLICE_PAGES | MSG_SENDPAGE_NOPOLICY | MSG_SENDPAGE_NOTLAST | \
-	 MSG_SENDPAGE_DECRYPTED)
+	(MSG_SPLICE_PAGES | MSG_SENDPAGE_NOPOLICY | MSG_SENDPAGE_DECRYPTED)

 /* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */
 #define SOL_IP	0
@@ -36,8 +36,6 @@ void __inet_accept(struct socket *sock, struct socket *newsock,
 int inet_send_prepare(struct sock *sk);
 int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
 void inet_splice_eof(struct socket *sock);
-ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
-		      size_t size, int flags);
 int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		 int flags);
 int inet_shutdown(struct socket *sock, int how);
@@ -1277,8 +1277,6 @@ struct proto {
					   size_t len);
	int (*recvmsg)(struct sock *sk, struct msghdr *msg,
		       size_t len, int flags, int *addr_len);
-	int (*sendpage)(struct sock *sk, struct page *page,
-			int offset, size_t size, int flags);
	void (*splice_eof)(struct socket *sock);
	int (*bind)(struct sock *sk,
		    struct sockaddr *addr, int addr_len);

@@ -1919,10 +1917,6 @@ int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
 int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
 int sock_no_mmap(struct file *file, struct socket *sock,
		 struct vm_area_struct *vma);
-ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
-			 size_t size, int flags);
-ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
-				int offset, size_t size, int flags);

 /*
  * Functions to fill in entries in struct proto_ops when a protocol
@@ -329,10 +329,6 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
 int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg);
 void tcp_splice_eof(struct socket *sock);
-int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
-		 int flags);
-int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
-			size_t size, int flags);
 int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
 int tcp_wmem_schedule(struct sock *sk, int copy);
 void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
@@ -1929,7 +1929,6 @@ static const struct proto_ops atalk_dgram_ops = {
	.sendmsg = atalk_sendmsg,
	.recvmsg = atalk_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

 static struct notifier_block ddp_notifier = {
@@ -126,7 +126,6 @@ static const struct proto_ops pvc_proto_ops = {
	.sendmsg = vcc_sendmsg,
	.recvmsg = vcc_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

@@ -654,7 +654,6 @@ static const struct proto_ops svc_proto_ops = {
	.sendmsg = vcc_sendmsg,
	.recvmsg = vcc_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
@@ -2022,7 +2022,6 @@ static const struct proto_ops ax25_proto_ops = {
	.sendmsg = ax25_sendmsg,
	.recvmsg = ax25_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

 /*
@@ -976,7 +976,6 @@ static const struct proto_ops caif_seqpacket_ops = {
	.sendmsg = caif_seqpkt_sendmsg,
	.recvmsg = caif_seqpkt_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

 static const struct proto_ops caif_stream_ops = {

@@ -996,7 +995,6 @@ static const struct proto_ops caif_stream_ops = {
	.sendmsg = caif_stream_sendmsg,
	.recvmsg = caif_stream_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

 /* This function is called when a socket is finally destroyed. */
@@ -1703,7 +1703,6 @@ static const struct proto_ops bcm_ops = {
	.sendmsg = bcm_sendmsg,
	.recvmsg = bcm_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

 static struct proto bcm_proto __read_mostly = {

@@ -1699,7 +1699,6 @@ static const struct proto_ops isotp_ops = {
	.sendmsg = isotp_sendmsg,
	.recvmsg = isotp_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

 static struct proto isotp_proto __read_mostly = {

@@ -1306,7 +1306,6 @@ static const struct proto_ops j1939_ops = {
	.sendmsg = j1939_sk_sendmsg,
	.recvmsg = j1939_sk_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

 static struct proto j1939_proto __read_mostly = {

@@ -962,7 +962,6 @@ static const struct proto_ops raw_ops = {
	.sendmsg = raw_sendmsg,
	.recvmsg = raw_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

 static struct proto raw_proto __read_mostly = {
@@ -74,37 +74,6 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
	return r;
 }

-/*
- * @more: either or both of MSG_MORE and MSG_SENDPAGE_NOTLAST
- */
-static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
-			     int offset, size_t size, int more)
-{
-	ssize_t (*sendpage)(struct socket *sock, struct page *page,
-			    int offset, size_t size, int flags);
-	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
-	int ret;
-
-	/*
-	 * sendpage cannot properly handle pages with page_count == 0,
-	 * we need to fall back to sendmsg if that's the case.
-	 *
-	 * Same goes for slab pages: skb_can_coalesce() allows
-	 * coalescing neighboring slab objects into a single frag which
-	 * triggers one of hardened usercopy checks.
-	 */
-	if (sendpage_ok(page))
-		sendpage = sock->ops->sendpage;
-	else
-		sendpage = sock_no_sendpage;
-
-	ret = sendpage(sock, page, offset, size, flags);
-	if (ret == -EAGAIN)
-		ret = 0;
-
-	return ret;
-}
-
 static void con_out_kvec_reset(struct ceph_connection *con)
 {
	BUG_ON(con->v1.out_skip);

@@ -464,7 +433,6 @@ static int write_partial_message_data(struct ceph_connection *con)
	struct ceph_msg *msg = con->out_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
-	int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
	u32 crc;

	dout("%s %p msg %p\n", __func__, con, msg);

@@ -482,6 +450,10 @@ static int write_partial_message_data(struct ceph_connection *con)
	 */
	crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
	while (cursor->total_resid) {
+		struct bio_vec bvec;
+		struct msghdr msghdr = {
+			.msg_flags = MSG_SPLICE_PAGES,
+		};
		struct page *page;
		size_t page_offset;
		size_t length;

@@ -493,10 +465,13 @@ static int write_partial_message_data(struct ceph_connection *con)
		}

		page = ceph_msg_data_next(cursor, &page_offset, &length);
-		if (length == cursor->total_resid)
-			more = MSG_MORE;
-		ret = ceph_tcp_sendpage(con->sock, page, page_offset, length,
-					more);
+		if (length != cursor->total_resid)
+			msghdr.msg_flags |= MSG_MORE;
+
+		bvec_set_page(&bvec, page, length, page_offset);
+		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, length);
+
+		ret = sock_sendmsg(con->sock, &msghdr);
		if (ret <= 0) {
			if (do_datacrc)
				msg->footer.data_crc = cpu_to_le32(crc);

@@ -526,7 +501,10 @@ static int write_partial_message_data(struct ceph_connection *con)
  */
 static int write_partial_skip(struct ceph_connection *con)
 {
-	int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
+	struct bio_vec bvec;
+	struct msghdr msghdr = {
+		.msg_flags = MSG_SPLICE_PAGES | MSG_MORE,
+	};
	int ret;

	dout("%s %p %d left\n", __func__, con, con->v1.out_skip);

@@ -534,9 +512,11 @@ static int write_partial_skip(struct ceph_connection *con)
		size_t size = min(con->v1.out_skip, (int)PAGE_SIZE);

		if (size == con->v1.out_skip)
-			more = MSG_MORE;
-		ret = ceph_tcp_sendpage(con->sock, ceph_zero_page, 0, size,
-					more);
+			msghdr.msg_flags &= ~MSG_MORE;
+		bvec_set_page(&bvec, ZERO_PAGE(0), size, 0);
+		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
+
+		ret = sock_sendmsg(con->sock, &msghdr);
		if (ret <= 0)
			goto out;
		con->v1.out_skip -= ret;
@@ -117,91 +117,38 @@ static int ceph_tcp_recv(struct ceph_connection *con)
	return ret;
 }

-static int do_sendmsg(struct socket *sock, struct iov_iter *it)
-{
-	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
-	int ret;
-
-	msg.msg_iter = *it;
-	while (iov_iter_count(it)) {
-		ret = sock_sendmsg(sock, &msg);
-		if (ret <= 0) {
-			if (ret == -EAGAIN)
-				ret = 0;
-			return ret;
-		}
-
-		iov_iter_advance(it, ret);
-	}
-
-	WARN_ON(msg_data_left(&msg));
-	return 1;
-}
-
-static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
-{
-	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
-	struct bio_vec bv;
-	int ret;
-
-	if (WARN_ON(!iov_iter_is_bvec(it)))
-		return -EINVAL;
-
-	while (iov_iter_count(it)) {
-		/* iov_iter_iovec() for ITER_BVEC */
-		bvec_set_page(&bv, it->bvec->bv_page,
-			      min(iov_iter_count(it),
-				  it->bvec->bv_len - it->iov_offset),
-			      it->bvec->bv_offset + it->iov_offset);
-
-		/*
-		 * sendpage cannot properly handle pages with
-		 * page_count == 0, we need to fall back to sendmsg if
-		 * that's the case.
-		 *
-		 * Same goes for slab pages: skb_can_coalesce() allows
-		 * coalescing neighboring slab objects into a single frag
-		 * which triggers one of hardened usercopy checks.
-		 */
-		if (sendpage_ok(bv.bv_page)) {
-			ret = sock->ops->sendpage(sock, bv.bv_page,
-						  bv.bv_offset, bv.bv_len,
-						  CEPH_MSG_FLAGS);
-		} else {
-			iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len);
-			ret = sock_sendmsg(sock, &msg);
-		}
-		if (ret <= 0) {
-			if (ret == -EAGAIN)
-				ret = 0;
-			return ret;
-		}
-
-		iov_iter_advance(it, ret);
-	}
-
-	return 1;
-}
-
 /*
  * Write as much as possible. The socket is expected to be corked,
- * so we don't bother with MSG_MORE/MSG_SENDPAGE_NOTLAST here.
+ * so we don't bother with MSG_MORE here.
  *
  * Return:
- *  1 - done, nothing (else) to write
+ * >0 - done, nothing (else) to write
  *  0 - socket is full, need to wait
  * <0 - error
  */
 static int ceph_tcp_send(struct ceph_connection *con)
 {
+	struct msghdr msg = {
+		.msg_iter = con->v2.out_iter,
+		.msg_flags = CEPH_MSG_FLAGS,
+	};
	int ret;

+	if (WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter)))
+		return -EINVAL;
+
+	if (con->v2.out_iter_sendpage)
+		msg.msg_flags |= MSG_SPLICE_PAGES;
+
	dout("%s con %p have %zu try_sendpage %d\n", __func__, con,
	     iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage);
-	if (con->v2.out_iter_sendpage)
-		ret = do_try_sendpage(con->sock, &con->v2.out_iter);
-	else
-		ret = do_sendmsg(con->sock, &con->v2.out_iter);
+
+	ret = sock_sendmsg(con->sock, &msg);
+	if (ret > 0)
+		iov_iter_advance(&con->v2.out_iter, ret);
+	else if (ret == -EAGAIN)
+		ret = 0;
+
	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
	     iov_iter_count(&con->v2.out_iter));
	return ret;
@@ -2989,32 +2989,32 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
 }
 EXPORT_SYMBOL_GPL(skb_splice_bits);

-static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg,
-			    struct kvec *vec, size_t num, size_t size)
+static int sendmsg_locked(struct sock *sk, struct msghdr *msg)
+{
+	struct socket *sock = sk->sk_socket;
+	size_t size = msg_data_left(msg);
+
+	if (!sock)
+		return -EINVAL;
+
+	if (!sock->ops->sendmsg_locked)
+		return sock_no_sendmsg_locked(sk, msg, size);
+
+	return sock->ops->sendmsg_locked(sk, msg, size);
+}
+
+static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg)
 {
	struct socket *sock = sk->sk_socket;

	if (!sock)
		return -EINVAL;
-	return kernel_sendmsg(sock, msg, vec, num, size);
+	return sock_sendmsg(sock, msg);
 }

-static int sendpage_unlocked(struct sock *sk, struct page *page, int offset,
-			     size_t size, int flags)
-{
-	struct socket *sock = sk->sk_socket;
-
-	if (!sock)
-		return -EINVAL;
-	return kernel_sendpage(sock, page, offset, size, flags);
-}
-
-typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg,
-			    struct kvec *vec, size_t num, size_t size);
-typedef int (*sendpage_func)(struct sock *sk, struct page *page, int offset,
-			     size_t size, int flags);
-
+typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
-			   int len, sendmsg_func sendmsg, sendpage_func sendpage)
+			   int len, sendmsg_func sendmsg)
 {
	unsigned int orig_len = len;
	struct sk_buff *head = skb;

@@ -3034,8 +3034,9 @@ do_frag_list:
		memset(&msg, 0, sizeof(msg));
		msg.msg_flags = MSG_DONTWAIT;

-		ret = INDIRECT_CALL_2(sendmsg, kernel_sendmsg_locked,
-				      sendmsg_unlocked, sk, &msg, &kv, 1, slen);
+		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
+		ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
+				      sendmsg_unlocked, sk, &msg);
		if (ret <= 0)
			goto error;

@@ -3066,11 +3067,18 @@ do_frag_list:
		slen = min_t(size_t, len, skb_frag_size(frag) - offset);

		while (slen) {
-			ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked,
-					      sendpage_unlocked, sk,
-					      skb_frag_page(frag),
-					      skb_frag_off(frag) + offset,
-					      slen, MSG_DONTWAIT);
+			struct bio_vec bvec;
+			struct msghdr msg = {
+				.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT,
+			};
+
+			bvec_set_page(&bvec, skb_frag_page(frag), slen,
+				      skb_frag_off(frag) + offset);
+			iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1,
+				      slen);
+
+			ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
+					      sendmsg_unlocked, sk, &msg);
			if (ret <= 0)
				goto error;

@@ -3107,16 +3115,14 @@ error:
 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len)
 {
-	return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked,
-			       kernel_sendpage_locked);
+	return __skb_send_sock(sk, skb, offset, len, sendmsg_locked);
 }
 EXPORT_SYMBOL_GPL(skb_send_sock_locked);

 /* Send skb data on a socket. Socket must be unlocked. */
 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
 {
-	return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked,
-			       sendpage_unlocked);
+	return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked);
 }

 /**
@@ -3261,36 +3261,6 @@ void __receive_sock(struct file *file)
	}
 }

-ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
-{
-	ssize_t res;
-	struct msghdr msg = {.msg_flags = flags};
-	struct kvec iov;
-	char *kaddr = kmap(page);
-	iov.iov_base = kaddr + offset;
-	iov.iov_len = size;
-	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
-	kunmap(page);
-	return res;
-}
-EXPORT_SYMBOL(sock_no_sendpage);
-
-ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
-				int offset, size_t size, int flags)
-{
-	ssize_t res;
-	struct msghdr msg = {.msg_flags = flags};
-	struct kvec iov;
-	char *kaddr = kmap(page);
-
-	iov.iov_base = kaddr + offset;
-	iov.iov_len = size;
-	res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
-	kunmap(page);
-	return res;
-}
-EXPORT_SYMBOL(sock_no_sendpage_locked);
-
 /*
  * Default Socket Callbacks
  */

@@ -4046,7 +4016,7 @@ static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
 {

	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
-			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
+			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),

@@ -4067,7 +4037,6 @@ static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
-		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),

@@ -4088,7 +4057,7 @@ static int proto_seq_show(struct seq_file *seq, void *v)
			   "maxhdr",
			   "slab",
			   "module",
-			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
+			   "cl co di ac io in de sh ss gs se re bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
@@ -1010,7 +1010,6 @@ static const struct proto_ops inet_dccp_ops = {
	.sendmsg = inet_sendmsg,
	.recvmsg = sock_common_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

 static struct inet_protosw dccp_v4_protosw = {

@@ -1087,7 +1087,6 @@ static const struct proto_ops inet6_dccp_ops = {
	.sendmsg = inet_sendmsg,
	.recvmsg = sock_common_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 #ifdef CONFIG_COMPAT
	.compat_ioctl = inet6_compat_ioctl,
 #endif
@@ -426,7 +426,6 @@ static const struct proto_ops ieee802154_raw_ops = {
	.sendmsg = ieee802154_sock_sendmsg,
	.recvmsg = sock_common_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

 /* DGRAM Sockets (802.15.4 dataframes) */

@@ -989,7 +988,6 @@ static const struct proto_ops ieee802154_dgram_ops = {
	.sendmsg = ieee802154_sock_sendmsg,
	.recvmsg = sock_common_recvmsg,
	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };

 static void ieee802154_sock_destruct(struct sock *sk)
@@ -847,23 +847,6 @@ void inet_splice_eof(struct socket *sock)
 }
 EXPORT_SYMBOL_GPL(inet_splice_eof);

-ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
-		      size_t size, int flags)
-{
-	struct sock *sk = sock->sk;
-	const struct proto *prot;
-
-	if (unlikely(inet_send_prepare(sk)))
-		return -EAGAIN;
-
-	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
-	prot = READ_ONCE(sk->sk_prot);
-	if (prot->sendpage)
-		return prot->sendpage(sk, page, offset, size, flags);
-	return sock_no_sendpage(sock, page, offset, size, flags);
-}
-EXPORT_SYMBOL(inet_sendpage);
-
 INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *,
					  size_t, int, int *));
 int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,

@@ -1067,12 +1050,10 @@ const struct proto_ops inet_stream_ops = {
	.mmap = tcp_mmap,
 #endif
	.splice_eof = inet_splice_eof,
-	.sendpage = inet_sendpage,
	.splice_read = tcp_splice_read,
	.read_sock = tcp_read_sock,
	.read_skb = tcp_read_skb,
	.sendmsg_locked = tcp_sendmsg_locked,
-	.sendpage_locked = tcp_sendpage_locked,
	.peek_len = tcp_peek_len,
 #ifdef CONFIG_COMPAT
	.compat_ioctl = inet_compat_ioctl,

@@ -1102,7 +1083,6 @@ const struct proto_ops inet_dgram_ops = {
	.recvmsg = inet_recvmsg,
	.mmap = sock_no_mmap,
	.splice_eof = inet_splice_eof,
-	.sendpage = inet_sendpage,
	.set_peek_off = sk_set_peek_off,
 #ifdef CONFIG_COMPAT
	.compat_ioctl = inet_compat_ioctl,

@@ -1134,7 +1114,6 @@ static const struct proto_ops inet_sockraw_ops = {
	.recvmsg = inet_recvmsg,
	.mmap = sock_no_mmap,
	.splice_eof = inet_splice_eof,
-	.sendpage = inet_sendpage,
 #ifdef CONFIG_COMPAT
	.compat_ioctl = inet_compat_ioctl,
 #endif
@@ -923,11 +923,10 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
	return mss_now;
 }

-/* In some cases, both sendpage() and sendmsg() could have added
- * an skb to the write queue, but failed adding payload on it.
- * We need to remove it to consume less memory, but more
- * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
- * users.
+/* In some cases, both sendmsg() could have added an skb to the write queue,
+ * but failed adding payload on it. We need to remove it to consume less
+ * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
+ * epoll() users.
 */
 void tcp_remove_empty_skb(struct sock *sk)
 {

@@ -975,40 +974,6 @@ int tcp_wmem_schedule(struct sock *sk, int copy)
	return min(copy, sk->sk_forward_alloc);
 }

-int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
-			size_t size, int flags)
-{
-	struct bio_vec bvec;
-	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
-
-	if (!(sk->sk_route_caps & NETIF_F_SG))
-		return sock_no_sendpage_locked(sk, page, offset, size, flags);
-
-	tcp_rate_check_app_limited(sk);	/* is sending application-limited? */
-
-	bvec_set_page(&bvec, page, size, offset);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-
-	if (flags & MSG_SENDPAGE_NOTLAST)
-		msg.msg_flags |= MSG_MORE;
-
-	return tcp_sendmsg_locked(sk, &msg, size);
-}
-EXPORT_SYMBOL_GPL(tcp_sendpage_locked);
-
-int tcp_sendpage(struct sock *sk, struct page *page, int offset,
-		 size_t size, int flags)
-{
-	int ret;
-
-	lock_sock(sk);
-	ret = tcp_sendpage_locked(sk, page, offset, size, flags);
-	release_sock(sk);
-
-	return ret;
-}
-EXPORT_SYMBOL(tcp_sendpage);
-
 void tcp_free_fastopen_req(struct tcp_sock *tp)
 {
	if (tp->fastopen_req) {
@@ -88,9 +88,9 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
 static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
 			int flags, bool uncharge)
 {
+	struct msghdr msghdr = {};
 	bool apply = apply_bytes;
 	struct scatterlist *sge;
-	struct msghdr msghdr = { .msg_flags = flags | MSG_SPLICE_PAGES, };
 	struct page *page;
 	int size, ret = 0;
 	u32 off;
@@ -107,11 +107,12 @@ static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
 
 	tcp_rate_check_app_limited(sk);
 retry:
+	msghdr.msg_flags = flags | MSG_SPLICE_PAGES;
 	has_tx_ulp = tls_sw_has_ctx_tx(sk);
 	if (has_tx_ulp)
 		msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY;
 
-	if (flags & MSG_SENDPAGE_NOTLAST)
+	if (size < sge->length && msg->sg.start != msg->sg.end)
 		msghdr.msg_flags |= MSG_MORE;
 
 	bvec_set_page(&bvec, page, size, off);
@@ -485,7 +486,7 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	long timeo;
 	int flags;
 
-	/* Don't let internal sendpage flags through */
+	/* Don't let internal flags through */
 	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
 	flags |= MSG_NO_SHARED_FRAGS;
 
@@ -565,23 +566,6 @@ out_err:
 	return copied ? copied : err;
 }
 
-static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
-			    size_t size, int flags)
-{
-	struct bio_vec bvec;
-	struct msghdr msg = {
-		.msg_flags = flags | MSG_SPLICE_PAGES,
-	};
-
-	bvec_set_page(&bvec, page, size, offset);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-
-	if (flags & MSG_SENDPAGE_NOTLAST)
-		msg.msg_flags |= MSG_MORE;
-
-	return tcp_bpf_sendmsg(sk, &msg, size);
-}
-
 enum {
 	TCP_BPF_IPV4,
 	TCP_BPF_IPV6,
@@ -611,7 +595,6 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
 
 	prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
 	prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
-	prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage;
 
 	prot[TCP_BPF_RX] = prot[TCP_BPF_BASE];
 	prot[TCP_BPF_RX].recvmsg = tcp_bpf_recvmsg_parser;
@@ -646,8 +629,7 @@ static int tcp_bpf_assert_proto_ops(struct proto *ops)
 	 * indeed valid assumptions.
 	 */
 	return ops->recvmsg == tcp_recvmsg &&
-	       ops->sendmsg == tcp_sendmsg &&
-	       ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
+	       ops->sendmsg == tcp_sendmsg ? 0 : -ENOTSUPP;
 }
 
 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)

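Worth noting in the tcp_bpf hunks: MSG_MORE is no longer taken from a caller-supplied MSG_SENDPAGE_NOTLAST but derived from the sk_msg itself. The rule, distilled into a standalone predicate (a sketch only; the series open-codes this test in tcp_bpf_push() as shown above):

/* More data is known to follow when this send leaves part of the
 * current scatterlist element unsent and further elements remain
 * queued in the ring (kernel context, linux/skmsg.h).
 */
static bool tcp_bpf_more_to_send(u32 size, const struct scatterlist *sge,
				 const struct sk_msg *msg)
{
	return size < sge->length && msg->sg.start != msg->sg.end;
}
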
@@ -3117,7 +3117,6 @@ struct proto tcp_prot = {
 	.recvmsg = tcp_recvmsg,
 	.sendmsg = tcp_sendmsg,
 	.splice_eof = tcp_splice_eof,
-	.sendpage = tcp_sendpage,
 	.backlog_rcv = tcp_v4_do_rcv,
 	.release_cb = tcp_release_cb,
 	.hash = inet_hash,

@@ -1340,20 +1340,6 @@ void udp_splice_eof(struct socket *sock)
 }
 EXPORT_SYMBOL_GPL(udp_splice_eof);
 
-int udp_sendpage(struct sock *sk, struct page *page, int offset,
-		 size_t size, int flags)
-{
-	struct bio_vec bvec;
-	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES };
-
-	if (flags & MSG_SENDPAGE_NOTLAST)
-		msg.msg_flags |= MSG_MORE;
-
-	bvec_set_page(&bvec, page, size, offset);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-	return udp_sendmsg(sk, &msg, size);
-}
-
 #define UDP_SKB_IS_STATELESS 0x80000000
 
 /* all head states (dst, sk, nf conntrack) except skb extensions are
@@ -2933,7 +2919,6 @@ struct proto udp_prot = {
 	.sendmsg = udp_sendmsg,
 	.recvmsg = udp_recvmsg,
 	.splice_eof = udp_splice_eof,
-	.sendpage = udp_sendpage,
 	.release_cb = ip4_datagram_release_cb,
 	.hash = udp_lib_hash,
 	.unhash = udp_lib_unhash,

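Nothing changes for userspace here: udp_sendpage() was only reachable through sendfile() and splice(), and the datagram-building behaviour it relied on is plain MSG_MORE corking, available directly via send(). A sketch (hypothetical fd and helper name, assuming a connected UDP socket):

#include <sys/types.h>
#include <sys/socket.h>

/* Hypothetical helper: build one datagram from two buffers. MSG_MORE
 * corks the first part; the final send() completes and transmits it.
 */
static ssize_t send_udp_datagram(int fd, const void *hdr, size_t hlen,
				 const void *body, size_t blen)
{
	if (send(fd, hdr, hlen, MSG_MORE) < 0)
		return -1;
	return send(fd, body, blen, 0);
}
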
@@ -19,8 +19,6 @@ int udp_getsockopt(struct sock *sk, int level, int optname,
 
 int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
 		int *addr_len);
-int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
-		 int flags);
 void udp_destroy_sock(struct sock *sk);
 
 #ifdef CONFIG_PROC_FS

@@ -56,7 +56,6 @@ struct proto udplite_prot = {
 	.getsockopt = udp_getsockopt,
 	.sendmsg = udp_sendmsg,
 	.recvmsg = udp_recvmsg,
-	.sendpage = udp_sendpage,
 	.hash = udp_lib_hash,
 	.unhash = udp_lib_unhash,
 	.rehash = udp_v4_rehash,

@@ -696,9 +696,7 @@ const struct proto_ops inet6_stream_ops = {
 	.mmap = tcp_mmap,
 #endif
 	.splice_eof = inet_splice_eof,
-	.sendpage = inet_sendpage,
 	.sendmsg_locked = tcp_sendmsg_locked,
-	.sendpage_locked = tcp_sendpage_locked,
 	.splice_read = tcp_splice_read,
 	.read_sock = tcp_read_sock,
 	.read_skb = tcp_read_skb,
@@ -729,7 +727,6 @@ const struct proto_ops inet6_dgram_ops = {
 	.recvmsg = inet6_recvmsg, /* retpoline's sake */
 	.read_skb = udp_read_skb,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 	.set_peek_off = sk_set_peek_off,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = inet6_compat_ioctl,

@@ -1296,7 +1296,6 @@ const struct proto_ops inet6_sockraw_ops = {
 	.sendmsg = inet_sendmsg, /* ok */
 	.recvmsg = sock_common_recvmsg, /* ok */
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = inet6_compat_ioctl,
 #endif

@@ -2151,7 +2151,6 @@ struct proto tcpv6_prot = {
 	.recvmsg = tcp_recvmsg,
 	.sendmsg = tcp_sendmsg,
 	.splice_eof = tcp_splice_eof,
-	.sendpage = tcp_sendpage,
 	.backlog_rcv = tcp_v6_do_rcv,
 	.release_cb = tcp_release_cb,
 	.hash = inet6_hash,

@@ -963,24 +963,6 @@ static void kcm_splice_eof(struct socket *sock)
 	release_sock(sk);
 }
 
-static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
-			    int offset, size_t size, int flags)
-
-{
-	struct bio_vec bvec;
-	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
-
-	if (flags & MSG_SENDPAGE_NOTLAST)
-		msg.msg_flags |= MSG_MORE;
-
-	if (flags & MSG_OOB)
-		return -EOPNOTSUPP;
-
-	bvec_set_page(&bvec, page, size, offset);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-	return kcm_sendmsg(sock, &msg, size);
-}
-
 static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
 		       size_t len, int flags)
 {
@@ -1769,7 +1751,6 @@ static const struct proto_ops kcm_dgram_ops = {
 	.recvmsg = kcm_recvmsg,
 	.mmap = sock_no_mmap,
 	.splice_eof = kcm_splice_eof,
-	.sendpage = kcm_sendpage,
 };
 
 static const struct proto_ops kcm_seqpacket_ops = {
@@ -1791,7 +1772,6 @@ static const struct proto_ops kcm_seqpacket_ops = {
 	.recvmsg = kcm_recvmsg,
 	.mmap = sock_no_mmap,
 	.splice_eof = kcm_splice_eof,
-	.sendpage = kcm_sendpage,
 	.splice_read = kcm_splice_read,
 };

@@ -3761,7 +3761,6 @@ static const struct proto_ops pfkey_ops = {
 	.listen = sock_no_listen,
 	.shutdown = sock_no_shutdown,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 
 	/* Now the operations that really occur. */
 	.release = pfkey_release,

@@ -624,7 +624,6 @@ static const struct proto_ops l2tp_ip_ops = {
 	.sendmsg = inet_sendmsg,
 	.recvmsg = sock_common_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static struct inet_protosw l2tp_ip_protosw = {

@@ -751,7 +751,6 @@ static const struct proto_ops l2tp_ip6_ops = {
 	.sendmsg = inet_sendmsg,
 	.recvmsg = sock_common_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = inet6_compat_ioctl,
 #endif

@@ -1232,7 +1232,6 @@ static const struct proto_ops llc_ui_ops = {
 	.sendmsg = llc_ui_sendmsg,
 	.recvmsg = llc_ui_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static const char llc_proc_err_msg[] __initconst =

@@ -485,7 +485,6 @@ static const struct proto_ops mctp_dgram_ops = {
 	.sendmsg = mctp_sendmsg,
 	.recvmsg = mctp_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = mctp_compat_ioctl,
 #endif

@@ -3866,7 +3866,6 @@ static const struct proto_ops mptcp_stream_ops = {
 	.sendmsg = inet_sendmsg,
 	.recvmsg = inet_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = inet_sendpage,
 };
 
 static struct inet_protosw mptcp_protosw = {
@@ -3961,7 +3960,6 @@ static const struct proto_ops mptcp_v6_stream_ops = {
 	.sendmsg = inet6_sendmsg,
 	.recvmsg = inet6_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = inet_sendpage,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = inet6_compat_ioctl,
 #endif

@@ -2815,7 +2815,6 @@ static const struct proto_ops netlink_ops = {
 	.sendmsg = netlink_sendmsg,
 	.recvmsg = netlink_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static const struct net_proto_family netlink_family_ops = {

@@ -1364,7 +1364,6 @@ static const struct proto_ops nr_proto_ops = {
 	.sendmsg = nr_sendmsg,
 	.recvmsg = nr_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static struct notifier_block nr_dev_notifier = {

@@ -4621,7 +4621,6 @@ static const struct proto_ops packet_ops_spkt = {
 	.sendmsg = packet_sendmsg_spkt,
 	.recvmsg = packet_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static const struct proto_ops packet_ops = {
@@ -4643,7 +4642,6 @@ static const struct proto_ops packet_ops = {
 	.sendmsg = packet_sendmsg,
 	.recvmsg = packet_recvmsg,
 	.mmap = packet_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static const struct net_proto_family packet_family_ops = {

@@ -441,7 +441,6 @@ const struct proto_ops phonet_dgram_ops = {
 	.sendmsg = pn_socket_sendmsg,
 	.recvmsg = sock_common_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 const struct proto_ops phonet_stream_ops = {
@@ -462,7 +461,6 @@ const struct proto_ops phonet_stream_ops = {
 	.sendmsg = pn_socket_sendmsg,
 	.recvmsg = sock_common_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 EXPORT_SYMBOL(phonet_stream_ops);

@@ -1244,7 +1244,6 @@ static const struct proto_ops qrtr_proto_ops = {
 	.shutdown = sock_no_shutdown,
 	.release = qrtr_release,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static struct proto qrtr_proto = {

@@ -653,7 +653,6 @@ static const struct proto_ops rds_proto_ops = {
 	.sendmsg = rds_sendmsg,
 	.recvmsg = rds_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static void rds_sock_destruct(struct sock *sk)

@@ -72,9 +72,10 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 {
 	struct rds_conn_path *cp = rm->m_inc.i_conn_path;
 	struct rds_tcp_connection *tc = cp->cp_transport_data;
+	struct msghdr msg = {};
+	struct bio_vec bvec;
 	int done = 0;
 	int ret = 0;
-	int more;
 
 	if (hdr_off == 0) {
 		/*
@@ -111,15 +112,17 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 		goto out;
 	}
 
-	more = rm->data.op_nents > 1 ? (MSG_MORE | MSG_SENDPAGE_NOTLAST) : 0;
 	while (sg < rm->data.op_nents) {
-		int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
+		msg.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL;
+		if (sg + 1 < rm->data.op_nents)
+			msg.msg_flags |= MSG_MORE;
 
-		ret = tc->t_sock->ops->sendpage(tc->t_sock,
-						sg_page(&rm->data.op_sg[sg]),
-						rm->data.op_sg[sg].offset + off,
-						rm->data.op_sg[sg].length - off,
-						flags);
+		bvec_set_page(&bvec, sg_page(&rm->data.op_sg[sg]),
+			      rm->data.op_sg[sg].length - off,
+			      rm->data.op_sg[sg].offset + off);
+		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1,
+			      rm->data.op_sg[sg].length - off);
+		ret = sock_sendmsg(tc->t_sock, &msg);
 		rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
 			 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
 			 ret);
@@ -132,8 +135,6 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 			off = 0;
 			sg++;
 		}
-		if (sg == rm->data.op_nents - 1)
-			more = 0;
 	}
 
 out:

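One gotcha visible in the RDS loop above: bvec_set_page() takes its length argument before its offset, the reverse of the removed ops->sendpage(sock, page, offset, size, flags) ordering, so the two operands swap places during conversion. For reference, the helper is declared roughly as follows (include/linux/bvec.h):

/* Length precedes offset, unlike the old sendpage() calling convention. */
void bvec_set_page(struct bio_vec *bv, struct page *page,
		   unsigned int len, unsigned int offset);
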
@@ -1496,7 +1496,6 @@ static const struct proto_ops rose_proto_ops = {
 	.sendmsg = rose_sendmsg,
 	.recvmsg = rose_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static struct notifier_block rose_dev_notifier = {

@@ -954,7 +954,6 @@ static const struct proto_ops rxrpc_rpc_ops = {
 	.sendmsg = rxrpc_sendmsg,
 	.recvmsg = rxrpc_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static struct proto rxrpc_proto = {

@@ -1133,7 +1133,6 @@ static const struct proto_ops inet_seqpacket_ops = {
 	.sendmsg = inet_sendmsg,
 	.recvmsg = inet_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 /* Registration with AF_INET family. */

@@ -3133,34 +3133,6 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
 	return put_user(answ, (int __user *)arg);
 }
 
-static ssize_t smc_sendpage(struct socket *sock, struct page *page,
-			    int offset, size_t size, int flags)
-{
-	struct sock *sk = sock->sk;
-	struct smc_sock *smc;
-	int rc = -EPIPE;
-
-	smc = smc_sk(sk);
-	lock_sock(sk);
-	if (sk->sk_state != SMC_ACTIVE) {
-		release_sock(sk);
-		goto out;
-	}
-	release_sock(sk);
-	if (smc->use_fallback) {
-		rc = kernel_sendpage(smc->clcsock, page, offset,
-				     size, flags);
-	} else {
-		lock_sock(sk);
-		rc = smc_tx_sendpage(smc, page, offset, size, flags);
-		release_sock(sk);
-		SMC_STAT_INC(smc, sendpage_cnt);
-	}
-
-out:
-	return rc;
-}
-
 /* Map the affected portions of the rmbe into an spd, note the number of bytes
  * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
  * updates till whenever a respective page has been fully processed.
@@ -3232,7 +3204,6 @@ static const struct proto_ops smc_sock_ops = {
 	.sendmsg = smc_sendmsg,
 	.recvmsg = smc_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = smc_sendpage,
 	.splice_read = smc_splice_read,
 };

@@ -227,7 +227,7 @@ static int smc_nl_fill_stats_tech_data(struct sk_buff *skb,
 			      SMC_NLA_STATS_PAD))
 		goto errattr;
 	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT,
-			      smc_tech->sendpage_cnt,
+			      0,
 			      SMC_NLA_STATS_PAD))
 		goto errattr;
 	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT,

@@ -71,7 +71,6 @@ struct smc_stats_tech {
 	u64 clnt_v2_succ_cnt;
 	u64 srv_v1_succ_cnt;
 	u64 srv_v2_succ_cnt;
-	u64 sendpage_cnt;
 	u64 urg_data_cnt;
 	u64 splice_cnt;
 	u64 cork_cnt;

@@ -168,8 +168,7 @@ static bool smc_tx_should_cork(struct smc_sock *smc, struct msghdr *msg)
 	 * should known how/when to uncork it.
 	 */
 	if ((msg->msg_flags & MSG_MORE ||
-	     smc_tx_is_corked(smc) ||
-	     msg->msg_flags & MSG_SENDPAGE_NOTLAST) &&
+	     smc_tx_is_corked(smc)) &&
 	    atomic_read(&conn->sndbuf_space))
 		return true;
 
@@ -298,22 +297,6 @@ out_err:
 	return rc;
 }
 
-int smc_tx_sendpage(struct smc_sock *smc, struct page *page, int offset,
-		    size_t size, int flags)
-{
-	struct msghdr msg = {.msg_flags = flags};
-	char *kaddr = kmap(page);
-	struct kvec iov;
-	int rc;
-
-	iov.iov_base = kaddr + offset;
-	iov.iov_len = size;
-	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size);
-	rc = smc_tx_sendmsg(smc, &msg, size);
-	kunmap(page);
-	return rc;
-}
-
 /***************************** sndbuf consumer *******************************/
 
 /* sndbuf consumer: actual data transfer of one target chunk with ISM write */

@@ -31,8 +31,6 @@ void smc_tx_pending(struct smc_connection *conn);
 void smc_tx_work(struct work_struct *work);
 void smc_tx_init(struct smc_sock *smc);
 int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len);
-int smc_tx_sendpage(struct smc_sock *smc, struct page *page, int offset,
-		    size_t size, int flags);
 int smc_tx_sndbuf_nonempty(struct smc_connection *conn);
 void smc_tx_sndbuf_nonfull(struct smc_sock *smc);
 void smc_tx_consumer_update(struct smc_connection *conn, bool force);

net/socket.c
@@ -3552,54 +3552,6 @@ int kernel_getpeername(struct socket *sock, struct sockaddr *addr)
 }
 EXPORT_SYMBOL(kernel_getpeername);
 
-/**
- * kernel_sendpage - send a &page through a socket (kernel space)
- * @sock: socket
- * @page: page
- * @offset: page offset
- * @size: total size in bytes
- * @flags: flags (MSG_DONTWAIT, ...)
- *
- * Returns the total amount sent in bytes or an error.
- */
-
-int kernel_sendpage(struct socket *sock, struct page *page, int offset,
-		    size_t size, int flags)
-{
-	if (sock->ops->sendpage) {
-		/* Warn in case the improper page to zero-copy send */
-		WARN_ONCE(!sendpage_ok(page), "improper page for zero-copy send");
-		return sock->ops->sendpage(sock, page, offset, size, flags);
-	}
-	return sock_no_sendpage(sock, page, offset, size, flags);
-}
-EXPORT_SYMBOL(kernel_sendpage);
-
-/**
- * kernel_sendpage_locked - send a &page through the locked sock (kernel space)
- * @sk: sock
- * @page: page
- * @offset: page offset
- * @size: total size in bytes
- * @flags: flags (MSG_DONTWAIT, ...)
- *
- * Returns the total amount sent in bytes or an error.
- * Caller must hold @sk.
- */
-
-int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
-			   size_t size, int flags)
-{
-	struct socket *sock = sk->sk_socket;
-
-	if (sock->ops->sendpage_locked)
-		return sock->ops->sendpage_locked(sk, page, offset, size,
-						  flags);
-
-	return sock_no_sendpage_locked(sk, page, offset, size, flags);
-}
-EXPORT_SYMBOL(kernel_sendpage_locked);
-
 /**
  * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space)
  * @sock: socket

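With kernel_sendpage() and kernel_sendpage_locked() deleted, in-kernel callers send pages through sock_sendmsg(); the sendpage_ok() sanity check that kernel_sendpage() used to perform now sits in skb_splice_from_iter(), which warns when an unsuitable page is spliced. A caller that cannot guarantee its pages are safe to reference can simply omit MSG_SPLICE_PAGES and let sendmsg() copy the data; a hedged sketch (helper name hypothetical):

/* Splice the page when it is safe to take a reference on it (see
 * sendpage_ok(): not a slab page, non-zero refcount); otherwise send
 * without MSG_SPLICE_PAGES so the data is copied rather than shared.
 */
static int send_page_maybe_spliced(struct socket *sock, struct page *page,
				   int offset, size_t size)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = 0, };

	if (sendpage_ok(page))
		msg.msg_flags |= MSG_SPLICE_PAGES;
	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
	return sock_sendmsg(sock, &msg);
}
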
@@ -3375,7 +3375,6 @@ static const struct proto_ops msg_ops = {
 	.sendmsg = tipc_sendmsg,
 	.recvmsg = tipc_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage
 };
 
 static const struct proto_ops packet_ops = {
@@ -3396,7 +3395,6 @@ static const struct proto_ops packet_ops = {
 	.sendmsg = tipc_send_packet,
 	.recvmsg = tipc_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage
 };
 
 static const struct proto_ops stream_ops = {
@@ -3417,7 +3415,6 @@ static const struct proto_ops stream_ops = {
 	.sendmsg = tipc_sendstream,
 	.recvmsg = tipc_recvstream,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage
 };
 
 static const struct net_proto_family tipc_family_ops = {

@@ -98,10 +98,6 @@ void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
 void tls_sw_strparser_done(struct tls_context *tls_ctx);
 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 void tls_sw_splice_eof(struct socket *sock);
-int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
-			   int offset, size_t size, int flags);
-int tls_sw_sendpage(struct sock *sk, struct page *page,
-		    int offset, size_t size, int flags);
 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
 void tls_sw_release_resources_tx(struct sock *sk);
 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
@@ -117,8 +113,6 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
 
 int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 void tls_device_splice_eof(struct socket *sock);
-int tls_device_sendpage(struct sock *sk, struct page *page,
-			int offset, size_t size, int flags);
 int tls_tx_records(struct sock *sk, int flags);
 
 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);

@@ -441,15 +441,14 @@ static int tls_push_data(struct sock *sk,
 	long timeo;
 
 	if (flags &
-	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST |
-	      MSG_SPLICE_PAGES))
+	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SPLICE_PAGES))
 		return -EOPNOTSUPP;
 
 	if (unlikely(sk->sk_err))
 		return -sk->sk_err;
 
 	flags |= MSG_SENDPAGE_DECRYPTED;
-	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
+	tls_push_record_flags = flags | MSG_MORE;
 
 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 	if (tls_is_partially_sent_record(tls_ctx)) {
@@ -532,7 +531,7 @@ handle_error:
 	if (!size) {
 last_record:
 		tls_push_record_flags = flags;
-		if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
+		if (flags & MSG_MORE) {
 			more = true;
 			break;
 		}
@@ -621,23 +620,6 @@ void tls_device_splice_eof(struct socket *sock)
 	mutex_unlock(&tls_ctx->tx_lock);
 }
 
-int tls_device_sendpage(struct sock *sk, struct page *page,
-			int offset, size_t size, int flags)
-{
-	struct bio_vec bvec;
-	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
-
-	if (flags & MSG_SENDPAGE_NOTLAST)
-		msg.msg_flags |= MSG_MORE;
-
-	if (flags & MSG_OOB)
-		return -EOPNOTSUPP;
-
-	bvec_set_page(&bvec, page, size, offset);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-	return tls_device_sendmsg(sk, &msg, size);
-}
-
 struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
 				       u32 seq, u64 *p_record_sn)
 {

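For the TLS offload path, MSG_MORE is now the only "keep the current record open" signal; MSG_SENDPAGE_NOTLAST previously had the same effect internally. On a kTLS socket this matches what userspace already gets from sendmsg(): chunks sent with MSG_MORE may be merged into one TLS record, and the first send without it closes and transmits the record. A sketch, assuming tls_fd is an established kTLS socket (hypothetical fd and helper name):

#include <sys/types.h>
#include <sys/socket.h>

/* Hypothetical helper: coalesce two buffers into (at most) one TLS
 * record on a kTLS socket; the final uncorked send closes the record.
 */
static ssize_t ktls_send_coalesced(int tls_fd, const void *p1, size_t l1,
				   const void *p2, size_t l2)
{
	if (send(tls_fd, p1, l1, MSG_MORE) < 0)
		return -1;
	return send(tls_fd, p2, l2, 0);
}
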
@@ -127,7 +127,7 @@ int tls_push_sg(struct sock *sk,
 {
 	struct bio_vec bvec;
 	struct msghdr msg = {
-		.msg_flags = MSG_SENDPAGE_NOTLAST | MSG_SPLICE_PAGES | flags,
+		.msg_flags = MSG_SPLICE_PAGES | flags,
 	};
 	int ret = 0;
 	struct page *p;
@@ -958,7 +958,6 @@ static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
 
 	ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
 	ops[TLS_SW ][TLS_BASE].splice_eof = tls_sw_splice_eof;
-	ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
 
 	ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE];
 	ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read;
@@ -970,17 +969,14 @@ static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
 
 #ifdef CONFIG_TLS_DEVICE
 	ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
-	ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL;
 
 	ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ];
-	ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL;
 
 	ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ];
 
 	ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ];
 
 	ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ];
-	ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL;
 #endif
 #ifdef CONFIG_TLS_TOE
 	ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
@@ -1029,7 +1025,6 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
 	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
 	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
 	prot[TLS_SW][TLS_BASE].splice_eof = tls_sw_splice_eof;
-	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;
 
 	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
 	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
@@ -1045,12 +1040,10 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
 	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
 	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
 	prot[TLS_HW][TLS_BASE].splice_eof = tls_device_splice_eof;
-	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;
 
 	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
 	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
 	prot[TLS_HW][TLS_SW].splice_eof = tls_device_splice_eof;
-	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;
 
 	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

@@ -1194,7 +1194,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
 			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES |
-			       MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
+			       MSG_SENDPAGE_NOPOLICY))
 		return -EOPNOTSUPP;
 
 	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
@@ -1281,41 +1281,6 @@ unlock:
 	mutex_unlock(&tls_ctx->tx_lock);
 }
 
-int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
-			   int offset, size_t size, int flags)
-{
-	struct bio_vec bvec;
-	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
-
-	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
-		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
-		      MSG_NO_SHARED_FRAGS))
-		return -EOPNOTSUPP;
-	if (flags & MSG_SENDPAGE_NOTLAST)
-		msg.msg_flags |= MSG_MORE;
-
-	bvec_set_page(&bvec, page, size, offset);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-	return tls_sw_sendmsg_locked(sk, &msg, size);
-}
-
-int tls_sw_sendpage(struct sock *sk, struct page *page,
-		    int offset, size_t size, int flags)
-{
-	struct bio_vec bvec;
-	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
-
-	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
-		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
-		return -EOPNOTSUPP;
-	if (flags & MSG_SENDPAGE_NOTLAST)
-		msg.msg_flags |= MSG_MORE;
-
-	bvec_set_page(&bvec, page, size, offset);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-	return tls_sw_sendmsg(sk, &msg, size);
-}
-
 static int
 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
 		bool released)

@@ -758,8 +758,6 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 static int unix_shutdown(struct socket *, int);
 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
-static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
-				    size_t size, int flags);
 static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
 				       struct pipe_inode_info *, size_t size,
 				       unsigned int flags);
@@ -852,7 +850,6 @@ static const struct proto_ops unix_stream_ops = {
 	.recvmsg = unix_stream_recvmsg,
 	.read_skb = unix_stream_read_skb,
 	.mmap = sock_no_mmap,
-	.sendpage = unix_stream_sendpage,
 	.splice_read = unix_stream_splice_read,
 	.set_peek_off = unix_set_peek_off,
 	.show_fdinfo = unix_show_fdinfo,
@@ -878,7 +875,6 @@ static const struct proto_ops unix_dgram_ops = {
 	.read_skb = unix_read_skb,
 	.recvmsg = unix_dgram_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 	.set_peek_off = unix_set_peek_off,
 	.show_fdinfo = unix_show_fdinfo,
 };
@@ -902,7 +898,6 @@ static const struct proto_ops unix_seqpacket_ops = {
 	.sendmsg = unix_seqpacket_sendmsg,
 	.recvmsg = unix_seqpacket_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 	.set_peek_off = unix_set_peek_off,
 	.show_fdinfo = unix_show_fdinfo,
 };
@@ -2294,20 +2289,6 @@ out_err:
 	return sent ? : err;
 }
 
-static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
-				    int offset, size_t size, int flags)
-{
-	struct bio_vec bvec;
-	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES };
-
-	if (flags & MSG_SENDPAGE_NOTLAST)
-		msg.msg_flags |= MSG_MORE;
-
-	bvec_set_page(&bvec, page, size, offset);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-	return unix_stream_sendmsg(socket, &msg, size);
-}
-
 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
 				  size_t len)
 {

@@ -1306,7 +1306,6 @@ static const struct proto_ops vsock_dgram_ops = {
 	.sendmsg = vsock_dgram_sendmsg,
 	.recvmsg = vsock_dgram_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 	.read_skb = vsock_read_skb,
 };
 
@@ -2234,7 +2233,6 @@ static const struct proto_ops vsock_stream_ops = {
 	.sendmsg = vsock_connectible_sendmsg,
 	.recvmsg = vsock_connectible_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 	.set_rcvlowat = vsock_set_rcvlowat,
 	.read_skb = vsock_read_skb,
 };
@@ -2257,7 +2255,6 @@ static const struct proto_ops vsock_seqpacket_ops = {
 	.sendmsg = vsock_connectible_sendmsg,
 	.recvmsg = vsock_connectible_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 	.read_skb = vsock_read_skb,
 };

@@ -1757,7 +1757,6 @@ static const struct proto_ops x25_proto_ops = {
 	.sendmsg = x25_sendmsg,
 	.recvmsg = x25_recvmsg,
 	.mmap = sock_no_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static struct packet_type x25_packet_type __read_mostly = {

@@ -1389,7 +1389,6 @@ static const struct proto_ops xsk_proto_ops = {
 	.sendmsg = xsk_sendmsg,
 	.recvmsg = xsk_recvmsg,
 	.mmap = xsk_mmap,
-	.sendpage = sock_no_sendpage,
 };
 
 static void xsk_destruct(struct sock *sk)

@@ -205,13 +205,15 @@ static int espintcp_sendskb_locked(struct sock *sk, struct espintcp_msg *emsg,
 static int espintcp_sendskmsg_locked(struct sock *sk,
 				     struct espintcp_msg *emsg, int flags)
 {
-	struct msghdr msghdr = { .msg_flags = flags | MSG_SPLICE_PAGES, };
+	struct msghdr msghdr = {
+		.msg_flags = flags | MSG_SPLICE_PAGES | MSG_MORE,
+	};
 	struct sk_msg *skmsg = &emsg->skmsg;
+	bool more = flags & MSG_MORE;
 	struct scatterlist *sg;
 	int done = 0;
 	int ret;
 
-	msghdr.msg_flags |= MSG_SENDPAGE_NOTLAST;
 	sg = &skmsg->sg.data[skmsg->sg.start];
 	do {
 		struct bio_vec bvec;
@@ -221,8 +223,8 @@ static int espintcp_sendskmsg_locked(struct sock *sk,
 
 		emsg->offset = 0;
 
-		if (sg_is_last(sg))
-			msghdr.msg_flags &= ~MSG_SENDPAGE_NOTLAST;
+		if (sg_is_last(sg) && !more)
+			msghdr.msg_flags &= ~MSG_MORE;
 
 		p = sg_page(sg);
 retry:

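A subtlety in the espintcp conversion: the old code dropped the continuation hint unconditionally on the last fragment, whereas the caller's own MSG_MORE must now survive it, since the caller may have further messages to append after this one. The guard, distilled (a sketch; msghdr and more as in the hunk above):

/* Clear MSG_MORE only on the final fragment, and only when the caller
 * did not itself request further coalescing via MSG_MORE.
 */
if (sg_is_last(sg) && !more)
	msghdr.msg_flags &= ~MSG_MORE;
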
@@ -318,7 +318,6 @@ struct ucred {
 #define MSG_MORE 0x8000 /* Sender will send more */
 #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
 #define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */
-#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
 #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
 #define MSG_EOF MSG_FIN
 #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */

@@ -8,9 +8,6 @@
 #ifndef MSG_WAITFORONE
 #define MSG_WAITFORONE 0x10000
 #endif
-#ifndef MSG_SENDPAGE_NOTLAST
-#define MSG_SENDPAGE_NOTLAST 0x20000
-#endif
 #ifndef MSG_FASTOPEN
 #define MSG_FASTOPEN 0x20000000
 #endif
@@ -50,7 +47,7 @@ static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
 	P_MSG_FLAG(NOSIGNAL);
 	P_MSG_FLAG(MORE);
 	P_MSG_FLAG(WAITFORONE);
-	P_MSG_FLAG(SENDPAGE_NOTLAST);
+	P_MSG_FLAG(SPLICE_PAGES);
 	P_MSG_FLAG(FASTOPEN);
 	P_MSG_FLAG(CMSG_CLOEXEC);
 #undef P_MSG_FLAG