crypto: ccree - improve error handling
Pass the returned error code up to the higher-level functions instead of replacing it with a hard-coded -ENOMEM (a minimal sketch of the pattern follows the change summary below).

Signed-off-by: Hadar Gat <hadar.gat@arm.com>
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent a0d608ee5e
commit ccba2f1112

1 changed file with 35 additions and 39 deletions
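Every hunk below applies the same transformation: instead of testing a helper's return value and then reporting a hard-coded -ENOMEM, the code stores the helper's return value in rc and propagates it unchanged, so callers see the real cause of the failure. A minimal, self-contained C sketch of the before/after shape; the names map_helper, old_style and new_style are hypothetical stand-ins, not ccree driver code:

/*
 * Sketch only: illustrates the error-propagation pattern of this commit
 * with made-up helpers, compiled as ordinary userspace C.
 */
#include <errno.h>
#include <stdio.h>

/* Pretend DMA-mapping helper: returns 0 on success or a negative errno. */
static int map_helper(int simulated_result)
{
	return simulated_result;
}

/* Old shape: any failure is flattened to -ENOMEM, losing the real cause. */
static int old_style(int simulated_result)
{
	if (map_helper(simulated_result)) {
		return -ENOMEM;
	}
	return 0;
}

/* New shape: keep the helper's return code in rc and pass it up unchanged. */
static int new_style(int simulated_result)
{
	int rc;

	rc = map_helper(simulated_result);
	if (rc)
		return rc;
	return 0;
}

int main(void)
{
	/* If the helper fails with -EINVAL, only the new shape reports it. */
	printf("old: %d, new: %d\n", old_style(-EINVAL), new_style(-EINVAL));
	return 0;
}

The point of the change is that callers of these mapping routines now receive the helper's actual error code rather than an unconditional -ENOMEM.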
@@ -511,10 +511,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 	/* Map the src SGL */
 	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
-	if (rc) {
-		rc = -ENOMEM;
+	if (rc)
 		goto cipher_exit;
-	}
 	if (mapped_nents > 1)
 		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 
@@ -528,12 +526,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 		}
 	} else {
 		/* Map the dst sg */
-		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
-			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
-			      &dummy, &mapped_nents)) {
-			rc = -ENOMEM;
+		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			       &dummy, &mapped_nents);
+		if (rc)
 			goto cipher_exit;
-		}
 		if (mapped_nents > 1)
 			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 
@@ -1078,10 +1075,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 			       &areq_ctx->dst.nents,
 			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 			       &dst_mapped_nents);
-		if (rc) {
-			rc = -ENOMEM;
+		if (rc)
 			goto chain_data_exit;
-		}
 	}
 
 	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
@@ -1235,11 +1230,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 		}
 		areq_ctx->ccm_iv0_dma_addr = dma_addr;
 
-		if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
-					 &sg_data, req->assoclen)) {
-			rc = -ENOMEM;
+		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+					  &sg_data, req->assoclen);
+		if (rc)
 			goto aead_map_failure;
-		}
 	}
 
 	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
@@ -1299,10 +1293,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
 			LLI_MAX_NUM_OF_DATA_ENTRIES),
 		       &dummy, &mapped_nents);
-	if (rc) {
-		rc = -ENOMEM;
+	if (rc)
 		goto aead_map_failure;
-	}
 
 	if (areq_ctx->is_single_pass) {
 		/*
@@ -1386,6 +1378,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct buffer_array sg_data;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
 
@@ -1405,18 +1398,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 	/*TODO: copy data in case that buffer is enough for operation */
 	/* map the previous buffer */
 	if (*curr_buff_cnt) {
-		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
-				    &sg_data)) {
-			return -ENOMEM;
-		}
+		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				     &sg_data);
+		if (rc)
+			return rc;
 	}
 
 	if (src && nbytes > 0 && do_update) {
-		if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
-			      &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
-			      &dummy, &mapped_nents)) {
+		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			       &dummy, &mapped_nents);
+		if (rc)
 			goto unmap_curr_buff;
-		}
 		if (src && mapped_nents == 1 &&
 		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
 			memcpy(areq_ctx->buff_sg, src,
@@ -1435,7 +1428,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 		/* add the src data to the sg_data */
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
 				0, true, &areq_ctx->mlli_nents);
-		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
 			goto fail_unmap_din;
 	}
 	/* change the buffer index for the unmap function */
@@ -1451,7 +1445,7 @@ unmap_curr_buff:
 	if (*curr_buff_cnt)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 
-	return -ENOMEM;
+	return rc;
 }
 
 int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
@@ -1470,6 +1464,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	struct buffer_array sg_data;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	unsigned int swap_index = 0;
+	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
 
@@ -1514,21 +1509,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	}
 
 	if (*curr_buff_cnt) {
-		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
-				    &sg_data)) {
-			return -ENOMEM;
-		}
+		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				     &sg_data);
+		if (rc)
+			return rc;
 		/* change the buffer index for next operation */
 		swap_index = 1;
 	}
 
 	if (update_data_len > *curr_buff_cnt) {
-		if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
-			      DMA_TO_DEVICE, &areq_ctx->in_nents,
-			      LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
-			      &mapped_nents)) {
+		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+			       DMA_TO_DEVICE, &areq_ctx->in_nents,
+			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+			       &mapped_nents);
+		if (rc)
 			goto unmap_curr_buff;
-		}
 		if (mapped_nents == 1 &&
 		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
 			/* only one entry in the SG and no previous data */
@@ -1548,7 +1543,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
 				(update_data_len - *curr_buff_cnt), 0, true,
 				&areq_ctx->mlli_nents);
-		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
 			goto fail_unmap_din;
 	}
 	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1562,7 +1558,7 @@ unmap_curr_buff:
 	if (*curr_buff_cnt)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 
-	return -ENOMEM;
+	return rc;
 }
 
 void cc_unmap_hash_request(struct device *dev, void *ctx,