tls: rx: async: adjust record geometry immediately
Async crypto TLS Rx currently waits for crypto to be done in order to strip the TLS header and trailer. Simplify the code by moving the pointers immediately; since only TLS 1.2 is supported here, there is no message padding. This simplifies decryption into a new skb in the next patch, as we no longer have to worry about input vs output skb in the decrypt_done() handler.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
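To make the geometry adjustment concrete, here is a minimal user-space sketch (hypothetical types and sizes, not the kernel code) of what "moving the pointers" means for a TLS 1.2 AES-128-GCM record: advance the receive view past the 5-byte record header plus the 8-byte explicit nonce, and drop the header, nonce, and 16-byte authentication tag from the length. With no TLS 1.3 padding in play, this can be done before the crypto completes.

/* Hedged sketch (hypothetical types; not kernel code) of the record
 * geometry adjustment for TLS 1.2 AES-128-GCM, where a record is:
 * 5-byte header | 8-byte explicit nonce | ciphertext | 16-byte tag.
 */
#include <assert.h>
#include <stdio.h>

struct rx_msg {
        unsigned int offset;   /* start of the visible payload */
        unsigned int full_len; /* length of the visible payload */
};

int main(void)
{
        const unsigned int prepend_size  = 5 + 8;             /* header + nonce */
        const unsigned int overhead_size = prepend_size + 16; /* + auth tag */
        /* A 129-byte record wrapping 100 bytes of application data. */
        struct rx_msg rxm = { .offset = 0, .full_len = 129 };

        /* The patch performs this at submit time: decryption happens in
         * place, so trimming the view is all the "stripping" needed. */
        rxm.offset   += prepend_size;
        rxm.full_len -= overhead_size;

        assert(rxm.full_len == 100);
        printf("payload at offset %u, %u bytes\n", rxm.offset, rxm.full_len);
        return 0;
}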
This commit is contained in:
parent 6bd116c8c6
commit 6ececdc513
1 changed file with 10 additions and 39 deletions
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
@@ -184,39 +184,22 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
 	struct scatterlist *sgin = aead_req->src;
 	struct tls_sw_context_rx *ctx;
 	struct tls_context *tls_ctx;
-	struct tls_prot_info *prot;
 	struct scatterlist *sg;
-	struct sk_buff *skb;
 	unsigned int pages;
+	struct sock *sk;
 
-	skb = (struct sk_buff *)req->data;
-	tls_ctx = tls_get_ctx(skb->sk);
+	sk = (struct sock *)req->data;
+	tls_ctx = tls_get_ctx(sk);
 	ctx = tls_sw_ctx_rx(tls_ctx);
-	prot = &tls_ctx->prot_info;
 
 	/* Propagate if there was an err */
 	if (err) {
 		if (err == -EBADMSG)
-			TLS_INC_STATS(sock_net(skb->sk),
-				      LINUX_MIB_TLSDECRYPTERROR);
+			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
 		ctx->async_wait.err = err;
-		tls_err_abort(skb->sk, err);
-	} else {
-		struct strp_msg *rxm = strp_msg(skb);
-
-		/* No TLS 1.3 support with async crypto */
-		WARN_ON(prot->tail_size);
-
-		rxm->offset += prot->prepend_size;
-		rxm->full_len -= prot->overhead_size;
+		tls_err_abort(sk, err);
 	}
 
-	/* After using skb->sk to propagate sk through crypto async callback
-	 * we need to NULL it again.
-	 */
-	skb->sk = NULL;
-
-
 	/* Free the destination pages if skb was not decrypted inplace */
 	if (sgout != sgin) {
 		/* Skip the first S/G entry as it points to AAD */
@@ -236,7 +219,6 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
 }
 
 static int tls_do_decryption(struct sock *sk,
-			     struct sk_buff *skb,
 			     struct scatterlist *sgin,
 			     struct scatterlist *sgout,
 			     char *iv_recv,
@@ -256,16 +238,9 @@ static int tls_do_decryption(struct sock *sk,
 			       (u8 *)iv_recv);
 
 	if (darg->async) {
-		/* Using skb->sk to push sk through to crypto async callback
-		 * handler. This allows propagating errors up to the socket
-		 * if needed. It _must_ be cleared in the async handler
-		 * before consume_skb is called. We _know_ skb->sk is NULL
-		 * because it is a clone from strparser.
-		 */
-		skb->sk = sk;
 		aead_request_set_callback(aead_req,
 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
-					  tls_decrypt_done, skb);
+					  tls_decrypt_done, sk);
 		atomic_inc(&ctx->decrypt_pending);
 	} else {
 		aead_request_set_callback(aead_req,
@@ -1554,7 +1529,7 @@ fallback_to_reg_recv:
 	}
 
 	/* Prepare and submit AEAD request */
-	err = tls_do_decryption(sk, skb, sgin, sgout, dctx->iv,
+	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
 				data_len + prot->tail_size, aead_req, darg);
 	if (err)
 		goto exit_free_pages;
@@ -1617,11 +1592,8 @@ static int tls_rx_one_record(struct sock *sk, struct iov_iter *dest,
 		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
 		return err;
 	}
-	if (darg->async) {
-		if (darg->skb == ctx->recv_pkt)
-			ctx->recv_pkt = NULL;
-		goto decrypt_next;
-	}
+	if (darg->async)
+		goto decrypt_done;
 
 	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
 	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
@@ -1632,10 +1604,10 @@ static int tls_rx_one_record(struct sock *sk, struct iov_iter *dest,
 		return tls_rx_one_record(sk, dest, darg);
 	}
 
+decrypt_done:
 	if (darg->skb == ctx->recv_pkt)
 		ctx->recv_pkt = NULL;
 
-decrypt_done:
 	pad = tls_padding_length(prot, darg->skb, darg);
 	if (pad < 0) {
 		consume_skb(darg->skb);
@@ -1646,7 +1618,6 @@ decrypt_done:
 	rxm->full_len -= pad;
 	rxm->offset += prot->prepend_size;
 	rxm->full_len -= prot->overhead_size;
-decrypt_next:
 	tls_advance_record_sn(sk, prot, &tls_ctx->rx);
 
 	return 0;
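The other half of the change is visible in the tls_do_decryption() hunks: the context handed to the crypto completion is now the socket itself, rather than an skb that temporarily carried the socket in skb->sk. A minimal sketch of that callback-context pattern follows (simplified stand-in types and names; not the kernel crypto API):

/* Hedged sketch of passing the socket straight through the async
 * context (hypothetical types; not the kernel crypto API). The
 * callback receives exactly the pointer registered with the request,
 * so there is no skb->sk save/clear dance and no input-vs-output skb
 * to track in the completion handler. */
#include <stdio.h>

struct sock { int sk_err; };

struct async_req {
        void (*complete)(struct async_req *req, int err);
        void *data; /* opaque context echoed back to the callback */
};

static void decrypt_done(struct async_req *req, int err)
{
        struct sock *sk = req->data; /* was: an skb whose skb->sk held this */

        if (err)
                sk->sk_err = err; /* propagate the crypto error to the socket */
}

int main(void)
{
        struct sock sk = { 0 };
        struct async_req req = { .complete = decrypt_done, .data = &sk };

        /* Simulate async completion reporting -EBADMSG (-74 on Linux). */
        req.complete(&req, -74);
        printf("sk_err = %d\n", sk.sk_err);
        return 0;
}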