kernel_samsung_a53x/drivers/crypto/virtio/virtio_crypto_common.h
zhenwei pi 6ffcb1bc73 virtio-crypto: use private buffer for control request
[ Upstream commit 0756ad15b1fef287d4d8fa11bc36ea77a5c42e4a ]

Originally, all control requests shared a single buffer (the ctrl, input
and ctrl_status fields in struct virtio_crypto). That allows a queue
depth of only 1, which limits the performance of the control queue.

In this patch, each request allocates its own buffer dynamically and
frees it once the request completes, which also shrinks the scope
protected by ctrl_lock. Increasing the control queue depth becomes
possible as a next step.
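
As a minimal sketch (the helper shape and variable names here are
illustrative, not the exact code in this patch), the per-request
pattern looks roughly like:

	struct virtio_crypto_ctrl_request *vc_ctrl_req;
	unsigned long flags;
	int err;

	/* Private buffer per request, instead of vcrypto->ctrl etc. */
	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req)
		return -ENOMEM;

	/* ... fill vc_ctrl_req->ctrl and build the sgs[] list ... */

	/* ctrl_lock now only covers the virtqueue add/kick. */
	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in,
				vc_ctrl_req, GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vcrypto->ctrl_vq);
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);

	/* ... wait for the host, check vc_ctrl_req->ctrl_status.status ... */
	kfree(vc_ctrl_req);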

A necessary comment already exists in the code, but it is worth repeating:
/*
 * Note: there are padding fields in the request; clear them to zero before
 * sending to the host to avoid divulging any information.
 * E.g., virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48]
 */
So use kzalloc to allocate the buffer for struct virtio_crypto_ctrl_request.
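
For instance, a destroy-session request (a hypothetical, abridged fill,
shown only to illustrate the point) writes just the meaningful fields;
the padding bytes stay zero from kzalloc():

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req)
		return -ENOMEM;

	vc_ctrl_req->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	vc_ctrl_req->ctrl.u.destroy_session.session_id =
		cpu_to_le64(session_id);
	/* ctrl.u.destroy_session.padding[48] is already all zeroes. */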

Potentially dereferencing uninitialized variables:
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>

Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Gonglei <arei.gonglei@huawei.com>
Reviewed-by: Gonglei <arei.gonglei@huawei.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
Message-Id: <20220506131627.180784-3-pizhenwei@bytedance.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Stable-dep-of: fed93fb62e05 ("crypto: virtio - Handle dataq logic with tasklet")
Signed-off-by: Sasha Levin <sashal@kernel.org>

/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Common header for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */
#ifndef _VIRTIO_CRYPTO_COMMON_H
#define _VIRTIO_CRYPTO_COMMON_H

#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <uapi/linux/virtio_crypto.h>

/* Internal representation of a data virtqueue */
struct data_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* To protect the vq operations for the dataq */
	spinlock_t lock;

	/* Name of the tx queue: dataq.$index */
	char name[32];

	struct crypto_engine *engine;
};
struct virtio_crypto {
	struct virtio_device *vdev;
	struct virtqueue *ctrl_vq;
	struct data_queue *data_vq;

	/* To protect the vq operations for the controlq */
	spinlock_t ctrl_lock;

	/* Maximum of data queues supported by the device */
	u32 max_data_queues;

	/* Number of queues currently used by the driver */
	u32 curr_queue;

	/*
	 * Specifies the services mask which the device supports,
	 * see VIRTIO_CRYPTO_SERVICE_*
	 */
	u32 crypto_services;

	/* Detailed algorithms mask */
	u32 cipher_algo_l;
	u32 cipher_algo_h;
	u32 hash_algo;
	u32 mac_algo_l;
	u32 mac_algo_h;
	u32 aead_algo;
	u32 akcipher_algo;

	/* Maximum length of cipher key */
	u32 max_cipher_key_len;
	/* Maximum length of authenticated key */
	u32 max_auth_key_len;
	/* Maximum size of a request */
	u64 max_size;

	unsigned long status;
	atomic_t ref_count;
	struct list_head list;
	struct module *owner;
	uint8_t dev_id;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;
};
struct virtio_crypto_sym_session_info {
	/* Backend session id, which comes from the host side */
	__u64 session_id;
};

/*
 * Note: there are padding fields in the request; clear them to zero before
 * sending to the host to avoid divulging any information.
 * E.g., virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48]
 */
struct virtio_crypto_ctrl_request {
	struct virtio_crypto_op_ctrl_req ctrl;
	struct virtio_crypto_session_input input;
	struct virtio_crypto_inhdr ctrl_status;
};
struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
		(struct virtio_crypto_request *vc_req, int len);

struct virtio_crypto_request {
	uint8_t status;
	struct virtio_crypto_op_data_req *req_data;
	struct scatterlist **sgs;
	struct data_queue *dataq;
	virtio_crypto_data_callback alg_cb;
};
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
				  uint32_t service,
				  uint32_t algo);
struct virtio_crypto *virtcrypto_get_dev_node(int node,
					      uint32_t service,
					      uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq);

void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
static inline int virtio_crypto_get_current_node(void)
{
	int cpu, node;

	cpu = get_cpu();
	node = topology_physical_package_id(cpu);
	put_cpu();

	return node;
}
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);

#endif /* _VIRTIO_CRYPTO_COMMON_H */