RDMA/mlx5: Set relaxed ordering when requested

Enable relaxed ordering in the mkey context when requested. As relaxed
ordering is not currently supported in UMR, disable UMR usage for
relaxed ordering MRs.

Link: https://lore.kernel.org/r/1578506740-22188-11-git-send-email-yishaih@mellanox.com
Signed-off-by: Michael Guralnik <michaelgur@mellanox.com>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent 811646998e
commit d6de0bb185

4 changed files with 23 additions and 5 deletions
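For context on the requesting side: an application asks for relaxed ordering through the MR access mask, which uverbs hands to the driver as IB_ACCESS_RELAXED_ORDERING. A minimal userspace sketch, assuming an rdma-core recent enough to define IBV_ACCESS_RELAXED_ORDERING (the PD and buffer handling are illustrative only):

#include <stdio.h>
#include <infiniband/verbs.h>

/* Register a buffer and hint that relaxed PCIe ordering is acceptable.
 * The flag is an optional access flag: a provider or device without
 * support may simply ignore it. */
static struct ibv_mr *reg_relaxed_mr(struct ibv_pd *pd, void *buf, size_t len)
{
        struct ibv_mr *mr;

        mr = ibv_reg_mr(pd, buf, len,
                        IBV_ACCESS_LOCAL_WRITE |
                        IBV_ACCESS_REMOTE_WRITE |
                        IBV_ACCESS_RELAXED_ORDERING);
        if (!mr)
                perror("ibv_reg_mr");
        return mr;
}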
@@ -1523,7 +1523,7 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
 u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
 
 static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
-                                       bool do_modify_atomic)
+                                       bool do_modify_atomic, int access_flags)
 {
         if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
                 return false;
@@ -1533,6 +1533,9 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
             MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
                 return false;
 
+        if (access_flags & IB_ACCESS_RELAXED_ORDERING)
+                return false;
+
         return true;
 }
 
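The effect of the new access_flags argument is easiest to see in isolation: relaxed ordering now vetoes UMR outright, on top of the existing capability checks. A stripped-down sketch of that decision, with a hypothetical caps struct and flag bit standing in for MLX5_CAP_GEN() and IB_ACCESS_RELAXED_ORDERING:

#include <stdbool.h>

#define RO_ACCESS_FLAG (1u << 20)       /* stand-in for IB_ACCESS_RELAXED_ORDERING */

/* Stand-in for the two MLX5_CAP_GEN() bits the real check consults. */
struct umr_caps {
        bool modify_entity_size_disabled;
        bool modify_atomic_disabled;
};

/* UMR is usable only if no disabling capability applies and the MR was
 * not requested with relaxed ordering, which UMR cannot express yet. */
static bool can_use_umr(const struct umr_caps *caps, bool do_modify_atomic,
                        unsigned int access_flags)
{
        if (caps->modify_entity_size_disabled)
                return false;
        if (do_modify_atomic && caps->modify_atomic_disabled)
                return false;
        if (access_flags & RO_ACCESS_FLAG)
                return false;
        return true;
}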
@@ -661,12 +661,21 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
                                           struct ib_pd *pd)
 {
+        struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
         MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
         MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
         MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
         MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
         MLX5_SET(mkc, mkc, lr, 1);
 
+        if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+                MLX5_SET(mkc, mkc, relaxed_ordering_write,
+                         !!(acc & IB_ACCESS_RELAXED_ORDERING));
+        if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+                MLX5_SET(mkc, mkc, relaxed_ordering_read,
+                         !!(acc & IB_ACCESS_RELAXED_ORDERING));
+
         MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
         MLX5_SET(mkc, mkc, qpn, 0xffffff);
         MLX5_SET64(mkc, mkc, start_addr, start_addr);
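The same capability-gated pair of MLX5_SET() calls appears again in the reg_create() hunk below. A hedged sketch of how that duplication could be folded into one helper (not part of this patch; it assumes the driver's own headers for MLX5_CAP_GEN, MLX5_SET and struct mlx5_ib_dev):

/* Hypothetical helper: program the mkey relaxed-ordering bits, but only
 * when the device actually reports the corresponding capability, exactly
 * as both call sites in this patch do by hand. */
static void set_mkc_relaxed_ordering(struct mlx5_ib_dev *dev, void *mkc,
                                     int access_flags)
{
        bool ro = access_flags & IB_ACCESS_RELAXED_ORDERING;

        if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
                MLX5_SET(mkc, mkc, relaxed_ordering_write, ro);
        if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
                MLX5_SET(mkc, mkc, relaxed_ordering_read, ro);
}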
@@ -1063,6 +1072,12 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
         mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
         MLX5_SET(mkc, mkc, free, !populate);
         MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+        if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+                MLX5_SET(mkc, mkc, relaxed_ordering_write,
+                         !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+        if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+                MLX5_SET(mkc, mkc, relaxed_ordering_read,
+                         !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
         MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
         MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
         MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
@@ -1251,7 +1266,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         if (err < 0)
                 return ERR_PTR(err);
 
-        use_umr = mlx5_ib_can_use_umr(dev, true);
+        use_umr = mlx5_ib_can_use_umr(dev, true, access_flags);
 
         if (order <= mr_cache_max_order(dev) && use_umr) {
                 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
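Because use_umr now also reflects the requested access flags, an MR registered with IB_ACCESS_RELAXED_ORDERING no longer qualifies for the UMR/MR-cache branch above and instead falls through to the regular non-UMR registration path.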
@@ -1419,7 +1434,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                         goto err;
         }
 
-        if (!mlx5_ib_can_use_umr(dev, true) ||
+        if (!mlx5_ib_can_use_umr(dev, true, access_flags) ||
             (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
                 /*
                  * UMR can't be used - MKey needs to be replaced.
@@ -380,7 +380,7 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
         memset(caps, 0, sizeof(*caps));
 
         if (!MLX5_CAP_GEN(dev->mdev, pg) ||
-            !mlx5_ib_can_use_umr(dev, true))
+            !mlx5_ib_can_use_umr(dev, true, 0))
                 return;
 
         caps->general_caps = IB_ODP_SUPPORT;
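Note that the ODP capability probe passes 0 for access_flags, so relaxed ordering does not factor into whether ODP support is advertised; only the UMR capability bits are considered here.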
@@ -4823,7 +4823,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
         bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
         u8 flags = 0;
 
-        if (!mlx5_ib_can_use_umr(dev, atomic)) {
+        if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
                 mlx5_ib_warn(to_mdev(qp->ibqp.device),
                              "Fast update of %s for MR is disabled\n",
                              (MLX5_CAP_GEN(dev->mdev,
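With the extra argument, a fast-register work request whose MR access flags include relaxed ordering now trips this check and triggers the warning above, consistent with UMR not yet supporting relaxed ordering.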