add patches for libkcapi tool

This commit is contained in:
Huan.Feng 2022-01-13 18:11:09 +08:00
parent 0efc2338f6
commit 1d1ebfc3c5
15 changed files with 1319 additions and 51 deletions

View file

@ -1916,6 +1916,25 @@ config CRYPTO_STATS
- encrypt/decrypt/sign/verify numbers for asymmetric operations
- generate/seed numbers for rng operations
config CRYPTO_USER_API_AKCIPHER
tristate "User-space interface for asymmetric key cipher algorithms"
depends on NET
select CRYPTO_AKCIPHER2
select CRYPTO_USER_API
help
This option enables the user-space interface for asymmetric
key cipher algorithms.
config CRYPTO_USER_API_KPP
tristate "User-space interface for key protocol primitives algorithms"
depends on NET
select CRYPTO_KPP2
select CRYPTO_USER_API
help
This option enables the user-spaces interface for key protocol
primitives algorithms. This covers Diffie-Hellman and EC
Diffie-Hellman.
config CRYPTO_HASH_INFO
bool

View file

@ -27,7 +27,12 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
dh_generic-y := dh.o
$(obj)/dhparameter-asn1.o: $(obj)/dhparameter.asn1.c $(obj)/dhparameter.asn1.h
$(obj)/dh_helper.o: $(obj)/dhparameter.asn1.h
clean-files += dhparameter.asn1.c dhparameter.asn1.h
dh_generic-y := dhparameter.asn1.o
dh_generic-y += dh.o
dh_generic-y += dh_helper.o
obj-$(CONFIG_CRYPTO_DH) += dh_generic.o
@ -171,6 +176,8 @@ obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
obj-$(CONFIG_CRYPTO_USER_API_AKCIPHER) += algif_akcipher.o
obj-$(CONFIG_CRYPTO_USER_API_KPP) += algif_kpp.o
obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
obj-$(CONFIG_CRYPTO_OFB) += ofb.o
obj-$(CONFIG_CRYPTO_ECC) += ecc.o

View file

@ -202,13 +202,17 @@ unlock:
return err;
}
static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen)
static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen,
int (*setkey)(void *private, const u8 *key,
unsigned int keylen))
{
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type = ask->type;
u8 *key;
int err;
if (!setkey)
return -ENOPROTOOPT;
key = sock_kmalloc(sk, keylen, GFP_KERNEL);
if (!key)
return -ENOMEM;
@ -217,8 +221,7 @@ static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen)
if (copy_from_sockptr(key, ukey, keylen))
goto out;
err = type->setkey(ask->private, key, keylen);
err = setkey(ask->private, key, keylen);
out:
sock_kzfree_s(sk, key, keylen);
@ -243,18 +246,23 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
if (level != SOL_ALG || !type)
goto unlock;
if (sock->state == SS_CONNECTED)
goto unlock;
switch (optname) {
case ALG_SET_KEY:
if (sock->state == SS_CONNECTED)
goto unlock;
if (!type->setkey)
goto unlock;
err = alg_setkey(sk, optval, optlen);
err = alg_setkey(sk, optval, optlen, type->setkey);
break;
case ALG_SET_PUBKEY:
err = alg_setkey(sk, optval, optlen, type->setpubkey);
break;
case ALG_SET_DH_PARAMETERS:
err = alg_setkey(sk, optval, optlen, type->dhparams);
break;
case ALG_SET_ECDH_CURVE:
err = alg_setkey(sk, optval, optlen, type->ecdhcurve);
break;
case ALG_SET_AEAD_AUTHSIZE:
if (sock->state == SS_CONNECTED)
goto unlock;
if (!type->setauthsize)
goto unlock;
err = type->setauthsize(ask->private, optlen);
@ -836,7 +844,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
struct af_alg_tsgl *sgl;
struct af_alg_control con = {};
long copied = 0;
bool enc = false;
int op = 0;
bool init = false;
int err = 0;
@ -847,11 +855,13 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
init = true;
switch (con.op) {
case ALG_OP_VERIFY:
case ALG_OP_SIGN:
case ALG_OP_ENCRYPT:
enc = true;
break;
case ALG_OP_DECRYPT:
enc = false;
case ALG_OP_KEYGEN:
case ALG_OP_SSGEN:
op = con.op;
break;
default:
return -EINVAL;
@ -875,7 +885,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
ctx->init = true;
if (init) {
ctx->enc = enc;
ctx->op = op;
if (con.iv)
memcpy(ctx->iv, con.iv->iv, ivsize);

View file

@ -55,7 +55,7 @@ static inline bool aead_sufficient_data(struct sock *sk)
* The minimum amount of memory needed for an AEAD cipher is
* the AAD and in case of decryption the tag.
*/
return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
return ctx->used >= ctx->aead_assoclen + (ctx->op ? 0 : as);
}
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
@ -71,6 +71,19 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
return af_alg_sendmsg(sock, msg, size, ivsize);
}
static inline int aead_cipher_op(struct af_alg_ctx *ctx,
struct af_alg_async_req *areq)
{
switch (ctx->op) {
case ALG_OP_ENCRYPT:
return crypto_aead_encrypt(&areq->cra_u.aead_req);
case ALG_OP_DECRYPT:
return crypto_aead_decrypt(&areq->cra_u.aead_req);
default:
return -EOPNOTSUPP;
}
}
static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
struct scatterlist *src,
struct scatterlist *dst, unsigned int len)
@ -138,7 +151,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* buffer provides the tag which is consumed resulting in only the
* plaintext without a buffer for the tag returned to the caller.
*/
if (ctx->enc)
if (ctx->op)
outlen = used + as;
else
outlen = used - as;
@ -212,7 +225,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Use the RX SGL as source (and destination) for crypto op. */
rsgl_src = areq->first_rsgl.sgl.sg;
if (ctx->enc) {
if (ctx->op == ALG_OP_ENCRYPT) {
/*
* Encryption operation - The in-place cipher operation is
* achieved by the following operation:
@ -228,7 +241,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
if (err)
goto free;
af_alg_pull_tsgl(sk, processed, NULL, 0);
} else {
} else if (ctx->op == ALG_OP_DECRYPT) {
/*
* Decryption operation - To achieve an in-place cipher
* operation, the following SGL structure is used:
@ -293,9 +306,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_SLEEP,
af_alg_async_cb, areq);
err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req);
err = aead_cipher_op(ctx, areq);
/* AIO operation in progress */
if (err == -EINPROGRESS)
return -EIOCBQUEUED;
@ -307,10 +319,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
err = crypto_wait_req(ctx->enc ?
crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req),
&ctx->wait);
err = crypto_wait_req(aead_cipher_op(ctx, areq), &ctx->wait);
}

468
crypto/algif_akcipher.c Normal file
View file

@ -0,0 +1,468 @@
/*
* algif_akcipher: User-space interface for asymmetric cipher algorithms
*
* Copyright (C) 2018 - 2020, Stephan Mueller <smueller@chronox.de>
*
* This file provides the user-space API for asymmetric ciphers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* The following concept of the memory management is used:
*
* The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
* filled by user space with the data submitted via sendpage/sendmsg. Filling
* up the TX SGL does not cause a crypto operation -- the data will only be
* tracked by the kernel. Upon receipt of one recvmsg call, the caller must
* provide a buffer which is tracked with the RX SGL.
*
* During the processing of the recvmsg operation, the cipher request is
* allocated and prepared. As part of the recvmsg operation, the processed
* TX buffers are extracted from the TX SGL into a separate SGL.
*
* After the completion of the crypto operation, the RX SGL and the cipher
* request is released. The extracted TX SGL parts are released together with
* the RX SGL release.
*/
#include <crypto/akcipher.h>
#include <crypto/if_alg.h>
#include <crypto/scatterwalk.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
struct akcipher_tfm {
struct crypto_akcipher *akcipher;
bool has_key;
};
static int akcipher_sendmsg(struct socket *sock, struct msghdr *msg,
size_t size)
{
return af_alg_sendmsg(sock, msg, size, 0);
}
static inline int akcipher_cipher_op(struct af_alg_ctx *ctx,
struct af_alg_async_req *areq)
{
switch (ctx->op) {
case ALG_OP_ENCRYPT:
return crypto_akcipher_encrypt(&areq->cra_u.akcipher_req);
case ALG_OP_DECRYPT:
return crypto_akcipher_decrypt(&areq->cra_u.akcipher_req);
case ALG_OP_SIGN:
return crypto_akcipher_sign(&areq->cra_u.akcipher_req);
case ALG_OP_VERIFY:
return crypto_akcipher_verify(&areq->cra_u.akcipher_req);
default:
return -EOPNOTSUPP;
}
}
static int _akcipher_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct akcipher_tfm *akc = pask->private;
struct crypto_akcipher *tfm = akc->akcipher;
struct af_alg_async_req *areq;
size_t len;
size_t used;
int err;
int maxsize;
if (!ctx->used) {
err = af_alg_wait_for_data(sk, flags, 0);
if (err)
return err;
}
maxsize = crypto_akcipher_maxsize(tfm);
if (maxsize < 0)
return maxsize;
/* Allocate cipher request for current operation. */
areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
crypto_akcipher_reqsize(tfm));
if (IS_ERR(areq))
return PTR_ERR(areq);
/* convert iovecs of output buffers into RX SGL */
err = af_alg_get_rsgl(sk, msg, flags, areq, maxsize, &len);
if (err)
goto free;
/* ensure output buffer is sufficiently large */
if (len < maxsize) {
err = -EMSGSIZE;
goto free;
}
/*
* Create a per request TX SGL for this request which tracks the
* SG entries from the global TX SGL.
*/
used = ctx->used;
areq->tsgl_entries = af_alg_count_tsgl(sk, used, 0);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
GFP_KERNEL);
if (!areq->tsgl) {
err = -ENOMEM;
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
af_alg_pull_tsgl(sk, used, areq->tsgl, 0);
/* Initialize the crypto operation */
akcipher_request_set_tfm(&areq->cra_u.akcipher_req, tfm);
akcipher_request_set_crypt(&areq->cra_u.akcipher_req, areq->tsgl,
areq->first_rsgl.sgl.sg, used, len);
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
sock_hold(sk);
areq->iocb = msg->msg_iocb;
/* Remember output size that will be generated. */
areq->outlen = areq->cra_u.akcipher_req.dst_len ?
areq->cra_u.akcipher_req.dst_len : len;
akcipher_request_set_callback(&areq->cra_u.akcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP,
af_alg_async_cb, areq);
err = akcipher_cipher_op(ctx, areq);
/* AIO operation in progress */
if (err == -EINPROGRESS || err == -EBUSY)
return -EIOCBQUEUED;
sock_put(sk);
} else {
/* Synchronous operation */
akcipher_request_set_callback(&areq->cra_u.akcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done,
&ctx->wait);
err = crypto_wait_req(akcipher_cipher_op(ctx, areq),
&ctx->wait);
}
free:
af_alg_free_resources(areq);
return err ? err : areq->cra_u.akcipher_req.dst_len;
}
static int akcipher_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct akcipher_tfm *akc = pask->private;
struct crypto_akcipher *tfm = akc->akcipher;
int ret = 0;
int err;
lock_sock(sk);
while (msg_data_left(msg)) {
err = _akcipher_recvmsg(sock, msg, ignored, flags);
/*
* This error covers -EIOCBQUEUED which implies that we can
* only handle one AIO request. If the caller wants to have
* multiple AIO requests in parallel, he must make multiple
* separate AIO calls.
*/
if (err <= 0) {
if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
ret = err;
goto out;
}
ret += err;
/*
* The caller must provide crypto_akcipher_maxsize per request.
* If he provides more, we conclude that multiple akcipher
* operations are requested.
*/
iov_iter_advance(&msg->msg_iter,
crypto_akcipher_maxsize(tfm) - err);
}
out:
af_alg_wmem_wakeup(sk);
release_sock(sk);
return ret;
}
static struct proto_ops algif_akcipher_ops = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.release = af_alg_release,
.sendmsg = akcipher_sendmsg,
.sendpage = af_alg_sendpage,
.recvmsg = akcipher_recvmsg,
.poll = af_alg_poll,
};
static int akcipher_check_key(struct socket *sock)
{
struct sock *psk;
struct alg_sock *pask;
struct akcipher_tfm *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
int err = 0;
lock_sock(sk);
if (!atomic_read(&ask->nokey_refcnt))
goto unlock_child;
psk = ask->parent;
pask = alg_sk(ask->parent);
tfm = pask->private;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
if (!tfm->has_key) {
err = -ENOKEY;
goto unlock;
}
atomic_dec(&pask->nokey_refcnt);
atomic_set(&ask->nokey_refcnt, 0);
err = 0;
unlock:
release_sock(psk);
unlock_child:
release_sock(sk);
return err;
}
static int akcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t size)
{
int err;
err = akcipher_check_key(sock);
if (err)
return err;
return akcipher_sendmsg(sock, msg, size);
}
static ssize_t akcipher_sendpage_nokey(struct socket *sock, struct page *page,
int offset, size_t size, int flags)
{
int err;
err = akcipher_check_key(sock);
if (err)
return err;
return af_alg_sendpage(sock, page, offset, size, flags);
}
static int akcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
int err;
err = akcipher_check_key(sock);
if (err)
return err;
return akcipher_recvmsg(sock, msg, ignored, flags);
}
static struct proto_ops algif_akcipher_ops_nokey = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.release = af_alg_release,
.sendmsg = akcipher_sendmsg_nokey,
.sendpage = akcipher_sendpage_nokey,
.recvmsg = akcipher_recvmsg_nokey,
.poll = af_alg_poll,
};
static void *akcipher_bind(const char *name, u32 type, u32 mask)
{
struct akcipher_tfm *tfm;
struct crypto_akcipher *akcipher;
tfm = kmalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
return ERR_PTR(-ENOMEM);
akcipher = crypto_alloc_akcipher(name, type, mask);
if (IS_ERR(akcipher)) {
kfree(tfm);
return ERR_CAST(akcipher);
}
tfm->akcipher = akcipher;
tfm->has_key = false;
return tfm;
}
static void akcipher_release(void *private)
{
struct akcipher_tfm *tfm = private;
struct crypto_akcipher *akcipher = tfm->akcipher;
crypto_free_akcipher(akcipher);
kfree(tfm);
}
static int akcipher_setprivkey(void *private, const u8 *key,
unsigned int keylen)
{
struct akcipher_tfm *tfm = private;
struct crypto_akcipher *akcipher = tfm->akcipher;
int err;
err = crypto_akcipher_set_priv_key(akcipher, key, keylen);
tfm->has_key = !err;
/* Return the maximum size of the akcipher operation. */
if (!err)
err = crypto_akcipher_maxsize(akcipher);
return err;
}
static int akcipher_setpubkey(void *private, const u8 *key, unsigned int keylen)
{
struct akcipher_tfm *tfm = private;
struct crypto_akcipher *akcipher = tfm->akcipher;
int err;
err = crypto_akcipher_set_pub_key(akcipher, key, keylen);
tfm->has_key = !err;
/* Return the maximum size of the akcipher operation. */
if (!err)
err = crypto_akcipher_maxsize(akcipher);
return err;
}
static void akcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
static int akcipher_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
unsigned int len = sizeof(*ctx);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
ctx->used = 0;
atomic_set(&ctx->rcvused, 0);
ctx->more = 0;
ctx->merge = 0;
ctx->op = 0;
crypto_init_wait(&ctx->wait);
ask->private = ctx;
sk->sk_destruct = akcipher_sock_destruct;
return 0;
}
static int akcipher_accept_parent(void *private, struct sock *sk)
{
struct akcipher_tfm *tfm = private;
if (!tfm->has_key)
return -ENOKEY;
return akcipher_accept_parent_nokey(private, sk);
}
static const struct af_alg_type algif_type_akcipher = {
.bind = akcipher_bind,
.release = akcipher_release,
.setkey = akcipher_setprivkey,
.setpubkey = akcipher_setpubkey,
.setauthsize = NULL,
.accept = akcipher_accept_parent,
.accept_nokey = akcipher_accept_parent_nokey,
.ops = &algif_akcipher_ops,
.ops_nokey = &algif_akcipher_ops_nokey,
.name = "akcipher",
.owner = THIS_MODULE
};
static int __init algif_akcipher_init(void)
{
return af_alg_register_type(&algif_type_akcipher);
}
static void __exit algif_akcipher_exit(void)
{
int err = af_alg_unregister_type(&algif_type_akcipher);
BUG_ON(err);
}
module_init(algif_akcipher_init);
module_exit(algif_akcipher_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("Asymmetric kernel crypto API user space interface");

606
crypto/algif_kpp.c Normal file
View file

@ -0,0 +1,606 @@
/*
* algif_kpp: User-space interface for key protocol primitives algorithms
*
* Copyright (C) 2018 - 2020, Stephan Mueller <smueller@chronox.de>
*
* This file provides the user-space API for key protocol primitives.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* The following concept of the memory management is used:
*
* The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
* filled by user space with the data submitted via sendpage/sendmsg. Filling
* up the TX SGL does not cause a crypto operation -- the data will only be
* tracked by the kernel. Upon receipt of one recvmsg call, the caller must
* provide a buffer which is tracked with the RX SGL.
*
* During the processing of the recvmsg operation, the cipher request is
* allocated and prepared. As part of the recvmsg operation, the processed
* TX buffers are extracted from the TX SGL into a separate SGL.
*
* After the completion of the crypto operation, the RX SGL and the cipher
* request is released. The extracted TX SGL parts are released together with
* the RX SGL release.
*/
#include <crypto/dh.h>
#include <crypto/ecdh.h>
#include <crypto/kpp.h>
#include <crypto/rng.h>
#include <crypto/if_alg.h>
#include <crypto/scatterwalk.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
struct kpp_tfm {
struct crypto_kpp *kpp;
bool has_key;
#define KPP_NO_PARAMS 0
#define KPP_DH_PARAMS 1
#define KPP_ECDH_PARAMS 2
int has_params; /* Type of KPP mechanism */
};
static int kpp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
return af_alg_sendmsg(sock, msg, size, 0);
}
static inline int kpp_cipher_op(struct af_alg_ctx *ctx,
struct af_alg_async_req *areq)
{
switch (ctx->op) {
case ALG_OP_KEYGEN:
return crypto_kpp_generate_public_key(&areq->cra_u.kpp_req);
case ALG_OP_SSGEN:
return crypto_kpp_compute_shared_secret(&areq->cra_u.kpp_req);
default:
return -EOPNOTSUPP;
}
}
static int _kpp_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct kpp_tfm *kpp = pask->private;
struct crypto_kpp *tfm = kpp->kpp;
struct af_alg_async_req *areq;
size_t len;
size_t used = 0;
int err;
int maxsize;
if (!ctx->used) {
err = af_alg_wait_for_data(sk, flags, 0);
if (err)
return err;
}
maxsize = crypto_kpp_maxsize(tfm);
if (maxsize < 0)
return maxsize;
/* Allocate cipher request for current operation. */
areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
crypto_kpp_reqsize(tfm));
if (IS_ERR(areq))
return PTR_ERR(areq);
/* convert iovecs of output buffers into RX SGL */
err = af_alg_get_rsgl(sk, msg, flags, areq, maxsize, &len);
if (err)
goto free;
/* ensure output buffer is sufficiently large */
if (len < maxsize) {
err = -EMSGSIZE;
goto free;
}
/*
* Create a per request TX SGL for this request which tracks the
* SG entries from the global TX SGL.
*/
if (ctx->op == ALG_OP_SSGEN) {
used = ctx->used;
areq->tsgl_entries = af_alg_count_tsgl(sk, used, 0);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(
sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
GFP_KERNEL);
if (!areq->tsgl) {
err = -ENOMEM;
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
af_alg_pull_tsgl(sk, used, areq->tsgl, 0);
}
/* Initialize the crypto operation */
kpp_request_set_input(&areq->cra_u.kpp_req, areq->tsgl, used);
kpp_request_set_output(&areq->cra_u.kpp_req, areq->first_rsgl.sgl.sg,
len);
kpp_request_set_tfm(&areq->cra_u.kpp_req, tfm);
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
sock_hold(sk);
areq->iocb = msg->msg_iocb;
/* Remember output size that will be generated. */
areq->outlen = len;
kpp_request_set_callback(&areq->cra_u.kpp_req,
CRYPTO_TFM_REQ_MAY_SLEEP,
af_alg_async_cb, areq);
err = kpp_cipher_op(ctx, areq);
/* AIO operation in progress */
if (err == -EINPROGRESS || err == -EBUSY)
return -EIOCBQUEUED;
sock_put(sk);
} else {
/* Synchronous operation */
kpp_request_set_callback(&areq->cra_u.kpp_req,
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done,
&ctx->wait);
err = crypto_wait_req(kpp_cipher_op(ctx, areq), &ctx->wait);
}
free:
af_alg_free_resources(areq);
return err ? err : len;
}
static int kpp_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct kpp_tfm *kpp = pask->private;
struct crypto_kpp *tfm = kpp->kpp;
int ret = 0;
int err;
lock_sock(sk);
while (msg_data_left(msg)) {
err = _kpp_recvmsg(sock, msg, ignored, flags);
/*
* This error covers -EIOCBQUEUED which implies that we can
* only handle one AIO request. If the caller wants to have
* multiple AIO requests in parallel, he must make multiple
* separate AIO calls.
*/
if (err <= 0) {
if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
ret = err;
goto out;
}
ret += err;
/*
* The caller must provide crypto_kpp_maxsize per request.
* If he provides more, we conclude that multiple kpp
* operations are requested.
*/
iov_iter_advance(&msg->msg_iter,
crypto_kpp_maxsize(tfm) - err);
}
out:
af_alg_wmem_wakeup(sk);
release_sock(sk);
return ret;
}
static struct proto_ops algif_kpp_ops = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.release = af_alg_release,
.sendmsg = kpp_sendmsg,
.sendpage = af_alg_sendpage,
.recvmsg = kpp_recvmsg,
.poll = af_alg_poll,
};
static int kpp_check_key(struct socket *sock)
{
struct sock *psk;
struct alg_sock *pask;
struct kpp_tfm *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
int err = 0;
lock_sock(sk);
if (!atomic_read(&ask->refcnt))
goto unlock_child;
psk = ask->parent;
pask = alg_sk(ask->parent);
tfm = pask->private;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
if (!tfm->has_key || (tfm->has_params == KPP_NO_PARAMS)) {
err = -ENOKEY;
goto unlock;
}
atomic_dec(&pask->refcnt);
atomic_set(&ask->refcnt, 0);
err = 0;
unlock:
release_sock(psk);
unlock_child:
release_sock(sk);
return err;
}
static int kpp_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t size)
{
int err;
err = kpp_check_key(sock);
if (err)
return err;
return kpp_sendmsg(sock, msg, size);
}
static ssize_t kpp_sendpage_nokey(struct socket *sock, struct page *page,
int offset, size_t size, int flags)
{
int err;
err = kpp_check_key(sock);
if (err)
return err;
return af_alg_sendpage(sock, page, offset, size, flags);
}
static int kpp_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
int err;
err = kpp_check_key(sock);
if (err)
return err;
return kpp_recvmsg(sock, msg, ignored, flags);
}
static struct proto_ops algif_kpp_ops_nokey = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.release = af_alg_release,
.sendmsg = kpp_sendmsg_nokey,
.sendpage = kpp_sendpage_nokey,
.recvmsg = kpp_recvmsg_nokey,
.poll = af_alg_poll,
};
static void *kpp_bind(const char *name, u32 type, u32 mask)
{
struct kpp_tfm *tfm;
struct crypto_kpp *kpp;
tfm = kmalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
return ERR_PTR(-ENOMEM);
kpp = crypto_alloc_kpp(name, type, mask);
if (IS_ERR(kpp)) {
kfree(tfm);
return ERR_CAST(kpp);
}
tfm->kpp = kpp;
tfm->has_key = false;
tfm->has_params = KPP_NO_PARAMS;
return tfm;
}
static void kpp_release(void *private)
{
struct kpp_tfm *tfm = private;
struct crypto_kpp *kpp = tfm->kpp;
crypto_free_kpp(kpp);
kfree(tfm);
}
static int kpp_dh_set_secret(struct crypto_kpp *tfm, struct dh *params)
{
char *packed_key = NULL;
unsigned int packed_key_len;
int ret;
packed_key_len = crypto_dh_key_len(params);
packed_key = kmalloc(packed_key_len, GFP_KERNEL);
if (!packed_key)
return -ENOMEM;
ret = crypto_dh_encode_key(packed_key, packed_key_len, params);
if (ret)
goto out;
ret = crypto_kpp_set_secret(tfm, packed_key, packed_key_len);
out:
kfree(packed_key);
return ret;
}
static int kpp_dh_set_privkey(struct crypto_kpp *tfm, const u8 *key,
unsigned int keylen)
{
struct dh params = {
.key = key,
.key_size = keylen,
.p = NULL,
.p_size = 0,
.g = NULL,
.g_size = 0,
};
return kpp_dh_set_secret(tfm, &params);
}
static int kpp_ecdh_set_secret(struct crypto_kpp *tfm, struct ecdh *params)
{
char *packed_key = NULL;
unsigned int packed_key_len;
int ret;
packed_key_len = crypto_ecdh_key_len(params);
packed_key = kmalloc(packed_key_len, GFP_KERNEL);
if (!packed_key)
return -ENOMEM;
ret = crypto_ecdh_encode_key(packed_key, packed_key_len, params);
if (ret)
goto out;
ret = crypto_kpp_set_secret(tfm, packed_key, packed_key_len);
out:
kfree(packed_key);
return ret;
}
static int kpp_ecdh_set_privkey(struct crypto_kpp *tfm, const u8 *key,
unsigned int keylen)
{
struct ecdh params = {
.curve_id = 0,
.key = key,
.key_size = keylen,
};
return kpp_ecdh_set_secret(tfm, &params);
}
static int kpp_setprivkey(void *private, const u8 *key, unsigned int keylen)
{
struct kpp_tfm *kpp = private;
struct crypto_kpp *tfm = kpp->kpp;
int err;
if (kpp->has_params == KPP_NO_PARAMS)
return -ENOKEY;
/* The DH code cannot generate private keys. ECDH can do that */
if ((!key || !keylen) && (kpp->has_params == KPP_DH_PARAMS)) {
kpp->has_key = false;
return -EOPNOTSUPP;
}
switch (kpp->has_params) {
case KPP_DH_PARAMS:
err = kpp_dh_set_privkey(tfm, key, keylen);
break;
case KPP_ECDH_PARAMS:
err = kpp_ecdh_set_privkey(tfm, key, keylen);
break;
default:
err = -EFAULT;
}
kpp->has_key = !err;
/* Return the maximum size of the kpp operation. */
if (!err)
err = crypto_kpp_maxsize(tfm);
return err;
}
static int kpp_dh_setparams_pkcs3(void *private, const u8 *params,
unsigned int paramslen)
{
struct kpp_tfm *kpp = private;
struct crypto_kpp *tfm = kpp->kpp;
int err;
/* If parameters were already set, disallow setting them again. */
if (kpp->has_params != KPP_NO_PARAMS)
return -EINVAL;
err = crypto_kpp_set_params(tfm, params, paramslen);
if (!err) {
kpp->has_params = KPP_DH_PARAMS;
/* Return the maximum size of the kpp operation. */
err = crypto_kpp_maxsize(tfm);
} else
kpp->has_params = KPP_NO_PARAMS;
return err;
}
static int kpp_ecdh_setcurve(void *private, const u8 *curveid,
unsigned int curveidlen)
{
struct kpp_tfm *kpp = private;
struct crypto_kpp *tfm = kpp->kpp;
int err;
struct ecdh params = {
.key = NULL,
.key_size = 0,
};
/* If parameters were already set, disallow setting them again. */
if (kpp->has_params != KPP_NO_PARAMS)
return -EINVAL;
if (curveidlen != sizeof(unsigned long))
return -EINVAL;
err = kstrtou16(curveid, 10, &params.curve_id);
if (err)
return err;
err = kpp_ecdh_set_secret(tfm, &params);
if (!err) {
kpp->has_params = KPP_ECDH_PARAMS;
/* Return the maximum size of the kpp operation. */
err = crypto_kpp_maxsize(tfm);
} else
kpp->has_params = KPP_NO_PARAMS;
return err;
}
static void kpp_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
static int kpp_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
unsigned int len = sizeof(*ctx);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
ctx->used = 0;
atomic_set(&ctx->rcvused, 0);
ctx->more = 0;
ctx->merge = 0;
ctx->op = 0;
crypto_init_wait(&ctx->wait);
ask->private = ctx;
sk->sk_destruct = kpp_sock_destruct;
return 0;
}
static int kpp_accept_parent(void *private, struct sock *sk)
{
struct kpp_tfm *tfm = private;
if (!tfm->has_key || (tfm->has_params == KPP_NO_PARAMS))
return -ENOKEY;
return kpp_accept_parent_nokey(private, sk);
}
static const struct af_alg_type algif_type_kpp = {
.bind = kpp_bind,
.release = kpp_release,
.setkey = kpp_setprivkey,
.setpubkey = NULL,
.dhparams = kpp_dh_setparams_pkcs3,
.ecdhcurve = kpp_ecdh_setcurve,
.setauthsize = NULL,
.accept = kpp_accept_parent,
.accept_nokey = kpp_accept_parent_nokey,
.ops = &algif_kpp_ops,
.ops_nokey = &algif_kpp_ops_nokey,
.name = "kpp",
.owner = THIS_MODULE
};
static int __init algif_kpp_init(void)
{
return af_alg_register_type(&algif_type_kpp);
}
static void __exit algif_kpp_exit(void)
{
int err = af_alg_unregister_type(&algif_type_kpp);
BUG_ON(err);
}
module_init(algif_kpp_init);
module_exit(algif_kpp_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("Key protocol primitives kernel crypto API user space interface");

View file

@ -47,6 +47,19 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
return af_alg_sendmsg(sock, msg, size, ivsize);
}
static inline int skcipher_cipher_op(struct af_alg_ctx *ctx,
struct af_alg_async_req *areq)
{
switch (ctx->op) {
case ALG_OP_ENCRYPT:
return crypto_skcipher_encrypt(&areq->cra_u.skcipher_req);
case ALG_OP_DECRYPT:
return crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
default:
return -EOPNOTSUPP;
}
}
static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@ -118,10 +131,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP,
af_alg_async_cb, areq);
err = ctx->enc ?
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
err = skcipher_cipher_op(ctx, areq);
/* AIO operation in progress */
if (err == -EINPROGRESS)
return -EIOCBQUEUED;
@ -133,10 +144,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
err = crypto_wait_req(ctx->enc ?
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
&ctx->wait);
err = crypto_wait_req(skcipher_cipher_op(ctx, areq),
&ctx->wait);
}

View file

@ -19,15 +19,25 @@ struct dh_ctx {
MPI xa; /* Value is guaranteed to be set. */
};
static void dh_clear_ctx(struct dh_ctx *ctx)
static inline void dh_clear_params(struct dh_ctx *ctx)
{
mpi_free(ctx->p);
mpi_free(ctx->q);
mpi_free(ctx->g);
}
static inline void dh_clear_key(struct dh_ctx *ctx)
{
mpi_free(ctx->xa);
memset(ctx, 0, sizeof(*ctx));
}
static void dh_clear_ctx(struct dh_ctx *ctx)
{
dh_clear_params(ctx);
dh_clear_key(ctx);
}
/*
* If base is g we compute the public key
* ya = g^xa mod p; [RFC2631 sec 2.1.1]
@ -52,6 +62,10 @@ static int dh_check_params_length(unsigned int p_len)
static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
{
/* If DH parameters are not given, do not check them. */
if (!params->p_size && !params->g_size)
return 0;
if (dh_check_params_length(params->p_size << 3))
return -EINVAL;
@ -72,6 +86,23 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
return 0;
}
static int dh_set_params_pkcs3(struct crypto_kpp *tfm, const void *param,
unsigned int param_len)
{
struct dh_ctx *ctx = dh_get_ctx(tfm);
struct dh parsed_params;
int ret;
/* Free the old parameter if any */
dh_clear_params(ctx);
ret = dh_parse_params_pkcs3(&parsed_params, param, param_len);
if (ret)
return ret;
return dh_set_params(ctx, &parsed_params);
}
static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
@ -79,7 +110,7 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
struct dh params;
/* Free the old MPI key if any */
dh_clear_ctx(ctx);
dh_clear_key(ctx);
if (crypto_dh_decode_key(buf, len, &params) < 0)
goto err_clear_ctx;
@ -158,7 +189,7 @@ static int dh_compute_value(struct kpp_request *req)
if (!val)
return -ENOMEM;
if (unlikely(!ctx->xa)) {
if (unlikely(!ctx->xa || !ctx->p || !ctx->g)) {
ret = -EINVAL;
goto err_free_val;
}
@ -246,6 +277,7 @@ static void dh_exit_tfm(struct crypto_kpp *tfm)
}
static struct kpp_alg dh = {
.set_params = dh_set_params_pkcs3,
.set_secret = dh_set_secret,
.generate_public_key = dh_compute_value,
.compute_shared_secret = dh_compute_value,

View file

@ -9,6 +9,7 @@
#include <linux/string.h>
#include <crypto/dh.h>
#include <crypto/kpp.h>
#include "dhparameter.asn1.h"
#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 4 * sizeof(int))
@ -49,16 +50,26 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
if (unlikely(!len))
return -EINVAL;
/* Prevention of out-of-bounds access in decode code path */
if ((!params->key && params->key_size) ||
(!params->p && params->p_size) ||
(!params->g && params->g_size))
return -EINVAL;
ptr = dh_pack_data(ptr, end, &secret, sizeof(secret));
ptr = dh_pack_data(ptr, end, &params->key_size,
sizeof(params->key_size));
ptr = dh_pack_data(ptr, end, &params->p_size, sizeof(params->p_size));
ptr = dh_pack_data(ptr, end, &params->q_size, sizeof(params->q_size));
ptr = dh_pack_data(ptr, end, &params->g_size, sizeof(params->g_size));
ptr = dh_pack_data(ptr, end, params->key, params->key_size);
ptr = dh_pack_data(ptr, end, params->p, params->p_size);
ptr = dh_pack_data(ptr, end, params->q, params->q_size);
ptr = dh_pack_data(ptr, end, params->g, params->g_size);
if (params->key)
ptr = dh_pack_data(ptr, end, params->key, params->key_size);
if (params->p)
ptr = dh_pack_data(ptr, end, params->p, params->p_size);
if (params->q)
ptr = dh_pack_data(ptr, end, params->q, params->q_size);
if (params->g)
ptr = dh_pack_data(ptr, end, params->g, params->g_size);
if (ptr != end)
return -EINVAL;
return 0;
@ -116,3 +127,41 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
int dh_get_p(void *context, size_t hdrlen, unsigned char tag, const void *value,
size_t vlen)
{
struct dh *dh = context;
/* invalid key provided */
if (!value || !vlen)
return -EINVAL;
dh->p = value;
dh->p_size = vlen;
return 0;
}
int dh_get_g(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct dh *dh = context;
/* invalid base provided */
if (!value || !dh->p_size || !vlen || vlen > dh->p_size)
return -EINVAL;
dh->g = value;
dh->g_size = vlen;
return 0;
}
int dh_parse_params_pkcs3(struct dh *dh, const void *param,
unsigned int param_len)
{
return asn1_ber_decoder(&dhparameter_decoder, dh, param, param_len);
}
EXPORT_SYMBOL_GPL(dh_parse_params_pkcs3);

4
crypto/dhparameter.asn1 Normal file
View file

@ -0,0 +1,4 @@
DHParameter ::= SEQUENCE {
prime INTEGER ({ dh_get_p }),
base INTEGER ({ dh_get_g })
}

View file

@ -32,10 +32,10 @@
* @g_size: Size of DH generator G
*/
struct dh {
void *key;
void *p;
void *q;
void *g;
const void *key;
const void *p;
const void *q;
const void *g;
unsigned int key_size;
unsigned int p_size;
unsigned int q_size;
@ -83,4 +83,19 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
*/
int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
/**
* dh_parse_params_pkcs3() - decodes the PKCS#3 BER encoded buffer of the DH
* parameters and stores in the provided struct dh,
* pointers to the raw p and g parameters as is, so
* that the caller can copy it or MPI parse it, etc.
*
* @dh: struct dh representation
* @param: DH parameters in BER format following PKCS#3
* @param_len: length of parameter buffer
*
* Return: 0 on success or error code in case of error
*/
int dh_parse_params_pkcs3(struct dh *dh, const void *param,
unsigned int param_len);
#endif

View file

@ -34,7 +34,8 @@
* @key_size: Size of the private ECDH key
*/
struct ecdh {
char *key;
unsigned short curve_id;
const char *key;
unsigned short key_size;
};

View file

@ -18,6 +18,8 @@
#include <crypto/aead.h>
#include <crypto/skcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#define ALG_MAX_PAGES 16
@ -46,7 +48,10 @@ struct af_alg_type {
void *(*bind)(const char *name, u32 type, u32 mask);
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
int (*setpubkey)(void *private, const u8 *key, unsigned int keylen);
int (*setentropy)(void *private, sockptr_t entropy, unsigned int len);
int (*dhparams)(void *private, const u8 *param, unsigned int paramlen);
int (*ecdhcurve)(void *private, const u8 *param, unsigned int paramlen);
int (*accept)(void *private, struct sock *sk);
int (*accept_nokey)(void *private, struct sock *sk);
int (*setauthsize)(void *private, unsigned int authsize);
@ -110,6 +115,8 @@ struct af_alg_async_req {
union {
struct aead_request aead_req;
struct skcipher_request skcipher_req;
struct akcipher_request akcipher_req;
struct kpp_request kpp_req;
} cra_u;
/* req ctx trails this struct */
@ -134,7 +141,7 @@ struct af_alg_async_req {
* @more: More data to be expected from user space?
* @merge: Shall new data from user space be merged into existing
* SG?
* @enc: Cryptographic operation to be performed when
* @op: Cryptographic operation to be performed when
* recvmsg is invoked.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
@ -152,7 +159,7 @@ struct af_alg_ctx {
bool more;
bool merge;
bool enc;
int op;
bool init;
unsigned int len;

View file

@ -69,6 +69,8 @@ struct crypto_kpp {
* @base: Common crypto API algorithm data structure
*/
struct kpp_alg {
int (*set_params)(struct crypto_kpp *tfm, const void *buffer,
unsigned int len);
int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
unsigned int len);
int (*generate_public_key)(struct kpp_request *req);
@ -265,6 +267,29 @@ struct kpp_secret {
unsigned short len;
};
/**
* crypto_kpp_set_params() - Set parameters needed for kpp operation
*
* Function invokes the specific kpp operation for a given alg.
*
* @tfm: tfm handle
* @buffer: Buffer holding the protocol specific representation of the
* parameters (e.g. PKCS#3 DER for DH)
* @len: Length of the parameter buffer.
*
* Return: zero on success; error code in case of error
*/
static inline int crypto_kpp_set_params(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
if (alg->set_params)
return alg->set_params(tfm, buffer, len);
else
return -EOPNOTSUPP;
}
/**
* crypto_kpp_set_secret() - Invoke kpp operation
*

View file

@ -52,9 +52,16 @@ struct af_alg_iv {
#define ALG_SET_AEAD_ASSOCLEN 4
#define ALG_SET_AEAD_AUTHSIZE 5
#define ALG_SET_DRBG_ENTROPY 6
#define ALG_SET_PUBKEY 7
#define ALG_SET_DH_PARAMETERS 8
#define ALG_SET_ECDH_CURVE 9
/* Operations */
#define ALG_OP_DECRYPT 0
#define ALG_OP_ENCRYPT 1
#define ALG_OP_SIGN 2
#define ALG_OP_VERIFY 3
#define ALG_OP_KEYGEN 4
#define ALG_OP_SSGEN 5
#endif /* _LINUX_IF_ALG_H */