Mirror of https://github.com/Fishwaldo/Star64_linux.git
blk-mq: release crypto keyslot before reporting I/O complete
commit 9cd1e56667 upstream.

Once all I/O using a blk_crypto_key has completed, filesystems can call blk_crypto_evict_key(). However, the block layer currently doesn't call blk_crypto_put_keyslot() until the request is being freed, which happens after upper layers have been told (via bio_endio()) the I/O has completed. This causes a race condition where blk_crypto_evict_key() can see 'slot_refs != 0' without there being an actual bug. This makes __blk_crypto_evict_key() hit the 'WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)' and return without doing anything, eventually causing a use-after-free in blk_crypto_reprogram_all_keys(). (This is a very rare bug and has only been seen when per-file keys are being used with fscrypt.)

There are two options to fix this: either release the keyslot before bio_endio() is called on the request's last bio, or make __blk_crypto_evict_key() ignore slot_refs. Let's go with the first solution, since it preserves the ability to report bugs (via WARN_ON_ONCE) where a key is evicted while still in-use.

Fixes: a892c8d52c ("block: Inline encryption support for blk-mq")
Cc: stable@vger.kernel.org
Reviewed-by: Nathan Huckleberry <nhuck@google.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20230315183907.53675-2-ebiggers@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 641ee1bbe5
commit 8e3c5d68d0
5 changed files with 43 additions and 17 deletions
@@ -1421,6 +1421,13 @@ bool blk_update_request(struct request *req, blk_status_t error,
 		req->q->integrity.profile->complete_fn(req, nr_bytes);
 #endif
 
+	/*
+	 * Upper layers may call blk_crypto_evict_key() anytime after the last
+	 * bio_endio(). Therefore, the keyslot must be released before that.
+	 */
+	if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
+		__blk_crypto_rq_put_keyslot(req);
+
 	if (unlikely(error && !blk_rq_is_passthrough(req) &&
 		     !(req->rq_flags & RQF_QUIET)))
 		print_req_error(req, error, __func__);
@@ -60,6 +60,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
 	return rq->crypt_ctx;
 }
 
+static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
+{
+	return rq->crypt_keyslot;
+}
+
 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
 
 static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
@@ -93,6 +98,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
 	return false;
 }
 
+static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
+{
+	return false;
+}
+
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
 
 void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
@@ -127,14 +137,21 @@ static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
 	return true;
 }
 
-blk_status_t __blk_crypto_init_request(struct request *rq);
-static inline blk_status_t blk_crypto_init_request(struct request *rq)
+blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
+static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
 {
 	if (blk_crypto_rq_is_encrypted(rq))
-		return __blk_crypto_init_request(rq);
+		return __blk_crypto_rq_get_keyslot(rq);
 	return BLK_STS_OK;
 }
 
+void __blk_crypto_rq_put_keyslot(struct request *rq);
+static inline void blk_crypto_rq_put_keyslot(struct request *rq)
+{
+	if (blk_crypto_rq_has_keyslot(rq))
+		__blk_crypto_rq_put_keyslot(rq);
+}
+
 void __blk_crypto_free_request(struct request *rq);
 static inline void blk_crypto_free_request(struct request *rq)
 {
@@ -173,7 +190,7 @@ static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
 {
 	if (blk_crypto_rq_is_encrypted(rq))
-		return blk_crypto_init_request(rq);
+		return blk_crypto_rq_get_keyslot(rq);
 	return BLK_STS_OK;
 }
 
@@ -216,26 +216,26 @@ static bool bio_crypt_check_alignment(struct bio *bio)
 	return true;
 }
 
-blk_status_t __blk_crypto_init_request(struct request *rq)
+blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
 {
 	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
 					&rq->crypt_keyslot);
 }
 
-/**
- * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
- *
- * @rq: The request whose crypto fields to uninitialize.
- *
- * Completely uninitializes the crypto fields of a request. If a keyslot has
- * been programmed into some inline encryption hardware, that keyslot is
- * released. The rq->crypt_ctx is also freed.
- */
-void __blk_crypto_free_request(struct request *rq)
+void __blk_crypto_rq_put_keyslot(struct request *rq)
 {
 	blk_ksm_put_slot(rq->crypt_keyslot);
+	rq->crypt_keyslot = NULL;
+}
+
+void __blk_crypto_free_request(struct request *rq)
+{
+	/* The keyslot, if one was needed, should have been released earlier. */
+	if (WARN_ON_ONCE(rq->crypt_keyslot))
+		__blk_crypto_rq_put_keyslot(rq);
+
 	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
-	blk_crypto_rq_set_defaults(rq);
+	rq->crypt_ctx = NULL;
 }
 
 /**
@@ -818,6 +818,8 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (!blk_discard_mergable(req))
 		elv_merge_requests(q, req, next);
 
+	blk_crypto_rq_put_keyslot(next);
+
 	/*
 	 * 'next' is going away, so update stats accordingly
 	 */
@@ -2228,7 +2228,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 
 	blk_mq_bio_to_request(rq, bio, nr_segs);
 
-	ret = blk_crypto_init_request(rq);
+	ret = blk_crypto_rq_get_keyslot(rq);
 	if (ret != BLK_STS_OK) {
 		bio->bi_status = ret;
 		bio_endio(bio);