Commit 819b6028 authored by Jason Gunthorpe's avatar Jason Gunthorpe

Merge branch '32compat'

The design of the uAPI had intended all structs to share the same layout on 32
and 64 bit compiles. Unfortunately over the years some errors have crept in.

This series fixes all the incompatibilities. It goes along with a userspace
rdma-core series that causes the providers to use these structs directly and
then does various self-checks on the command formation.

Those checks were combined with output from pahole on 32 and 64 bit compiles
to confirm that the structure layouts are the same.

This series does not make implicit padding explicit, as long as the implicit
padding is the same on 32 and 64 bit compiles.

Finally, the issue is put to rest by using __aligned_u64 in the uapi headers.
If new code copies that type, and is checked in userspace, it is unlikely we
will see problems in the future.

There are two patches that break the ABI for a 32 bit kernel, one for rxe and
one for mlx4. Both patches have notes, but the overall feeling from Doug and me
is that providing compat is just too difficult and not necessary, since there
is no real user of a 32 bit userspace on a 32 bit kernel for various good
reasons.

The 32 bit userspace / 64 bit kernel case however does seem to have some real
users and does need to work as designed.

* 32compat:
  RDMA: Change all uapi headers to use __aligned_u64 instead of __u64
  RDMA/rxe: Fix uABI structure layouts for 32/64 compat
  RDMA/mlx4: Fix uABI structure layouts for 32/64 compat
  RDMA/qedr: Fix uABI structure layouts for 32/64 compat
  RDMA/ucma: Fix uABI structure layouts for 32/64 compat
  RDMA: Remove minor pahole differences between 32/64
parents f64705b8 26b99066
......@@ -382,7 +382,11 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
struct ucma_event *uevent;
int ret = 0;
if (out_len < sizeof uevent->resp)
/*
* Old 32 bit user space does not send the 4 byte padding in the
* reserved field. We don't care, allow it to keep working.
*/
if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
......@@ -417,7 +421,8 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
}
if (copy_to_user((void __user *)(unsigned long)cmd.response,
&uevent->resp, sizeof uevent->resp)) {
&uevent->resp,
min_t(size_t, out_len, sizeof(uevent->resp)))) {
ret = -EFAULT;
goto done;
}
......
......@@ -59,7 +59,11 @@
#include "rxe_verbs.h"
#include "rxe_loc.h"
#define RXE_UVERBS_ABI_VERSION (1)
/*
* Version 1 and Version 2 are identical on 64 bit machines, but on 32 bit
* machines Version 2 has a different struct layout.
*/
#define RXE_UVERBS_ABI_VERSION 2
#define IB_PHYS_STATE_LINK_UP (5)
#define IB_PHYS_STATE_LINK_DOWN (3)
......
......@@ -65,8 +65,8 @@ struct bnxt_re_pd_resp {
} __attribute__((packed, aligned(4)));
struct bnxt_re_cq_req {
__u64 cq_va;
__u64 cq_handle;
__aligned_u64 cq_va;
__aligned_u64 cq_handle;
};
struct bnxt_re_cq_resp {
......@@ -77,9 +77,9 @@ struct bnxt_re_cq_resp {
};
struct bnxt_re_qp_req {
__u64 qpsva;
__u64 qprva;
__u64 qp_handle;
__aligned_u64 qpsva;
__aligned_u64 qprva;
__aligned_u64 qp_handle;
};
struct bnxt_re_qp_resp {
......@@ -88,8 +88,8 @@ struct bnxt_re_qp_resp {
};
struct bnxt_re_srq_req {
__u64 srqva;
__u64 srq_handle;
__aligned_u64 srqva;
__aligned_u64 srq_handle;
};
struct bnxt_re_srq_resp {
......
......@@ -41,21 +41,21 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
* In particular do not use pointer types -- pass pointers in __u64
* In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct iwch_create_cq_req {
__u64 user_rptr_addr;
__aligned_u64 user_rptr_addr;
};
struct iwch_create_cq_resp_v0 {
__u64 key;
__aligned_u64 key;
__u32 cqid;
__u32 size_log2;
};
struct iwch_create_cq_resp {
__u64 key;
__aligned_u64 key;
__u32 cqid;
__u32 size_log2;
__u32 memsize;
......@@ -63,8 +63,8 @@ struct iwch_create_cq_resp {
};
struct iwch_create_qp_resp {
__u64 key;
__u64 db_key;
__aligned_u64 key;
__aligned_u64 db_key;
__u32 qpid;
__u32 size_log2;
__u32 sq_size_log2;
......
......@@ -41,13 +41,13 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
* In particular do not use pointer types -- pass pointers in __u64
* In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct c4iw_create_cq_resp {
__u64 key;
__u64 gts_key;
__u64 memsize;
__aligned_u64 key;
__aligned_u64 gts_key;
__aligned_u64 memsize;
__u32 cqid;
__u32 size;
__u32 qid_mask;
......@@ -59,13 +59,13 @@ enum {
};
struct c4iw_create_qp_resp {
__u64 ma_sync_key;
__u64 sq_key;
__u64 rq_key;
__u64 sq_db_gts_key;
__u64 rq_db_gts_key;
__u64 sq_memsize;
__u64 rq_memsize;
__aligned_u64 ma_sync_key;
__aligned_u64 sq_key;
__aligned_u64 rq_key;
__aligned_u64 sq_db_gts_key;
__aligned_u64 rq_db_gts_key;
__aligned_u64 sq_memsize;
__aligned_u64 rq_memsize;
__u32 sqid;
__u32 rqid;
__u32 sq_size;
......@@ -75,7 +75,7 @@ struct c4iw_create_qp_resp {
};
struct c4iw_alloc_ucontext_resp {
__u64 status_page_key;
__aligned_u64 status_page_key;
__u32 status_page_size;
__u32 reserved; /* explicit padding (optional for i386) */
};
......
......@@ -79,7 +79,7 @@ struct hfi1_user_info {
};
struct hfi1_ctxt_info {
__u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
__aligned_u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
__u32 rcvegr_size; /* size of each eager buffer */
__u16 num_active; /* number of active units */
__u16 unit; /* unit (chip) assigned to caller */
......@@ -98,9 +98,9 @@ struct hfi1_ctxt_info {
struct hfi1_tid_info {
/* virtual address of first page in transfer */
__u64 vaddr;
__aligned_u64 vaddr;
/* pointer to tid array. this array is big enough */
__u64 tidlist;
__aligned_u64 tidlist;
/* number of tids programmed by this request */
__u32 tidcnt;
/* length of transfer buffer programmed by this request */
......@@ -131,23 +131,23 @@ struct hfi1_base_info {
*/
__u32 bthqp;
/* PIO credit return address, */
__u64 sc_credits_addr;
__aligned_u64 sc_credits_addr;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
__u64 pio_bufbase_sop;
__aligned_u64 pio_bufbase_sop;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
__u64 pio_bufbase;
__aligned_u64 pio_bufbase;
/* address where receive buffer queue is mapped into */
__u64 rcvhdr_bufbase;
__aligned_u64 rcvhdr_bufbase;
/* base address of Eager receive buffers. */
__u64 rcvegr_bufbase;
__aligned_u64 rcvegr_bufbase;
/* base address of SDMA completion ring */
__u64 sdma_comp_bufbase;
__aligned_u64 sdma_comp_bufbase;
/*
* User register base for init code, not to be used directly by
* protocol or applications. Always maps real chip register space.
......@@ -155,20 +155,20 @@ struct hfi1_base_info {
* ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail,
* ur_rcvtidflow
*/
__u64 user_regbase;
__aligned_u64 user_regbase;
/* notification events */
__u64 events_bufbase;
__aligned_u64 events_bufbase;
/* status page */
__u64 status_bufbase;
__aligned_u64 status_bufbase;
/* rcvhdrtail update */
__u64 rcvhdrtail_base;
__aligned_u64 rcvhdrtail_base;
/*
* shared memory pages for subctxts if ctxt is shared; these cover
* all the processes in the group sharing a single context.
* all have enough space for the num_subcontexts value on this job.
*/
__u64 subctxt_uregbase;
__u64 subctxt_rcvegrbuf;
__u64 subctxt_rcvhdrbuf;
__aligned_u64 subctxt_uregbase;
__aligned_u64 subctxt_rcvegrbuf;
__aligned_u64 subctxt_rcvhdrbuf;
};
#endif /* _LINIUX__HFI1_IOCTL_H */
......@@ -177,8 +177,8 @@ struct hfi1_sdma_comp_entry {
* Device status and notifications from driver to user-space.
*/
struct hfi1_status {
__u64 dev; /* device/hw status bits */
__u64 port; /* port state and status bits */
__aligned_u64 dev; /* device/hw status bits */
__aligned_u64 port; /* port state and status bits */
char freezemsg[0];
};
......
......@@ -37,18 +37,18 @@
#include <linux/types.h>
struct hns_roce_ib_create_cq {
__u64 buf_addr;
__u64 db_addr;
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
};
struct hns_roce_ib_create_cq_resp {
__u64 cqn; /* Only 32 bits used, 64 for compat */
__u64 cap_flags;
__aligned_u64 cqn; /* Only 32 bits used, 64 for compat */
__aligned_u64 cap_flags;
};
struct hns_roce_ib_create_qp {
__u64 buf_addr;
__u64 db_addr;
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
......@@ -56,7 +56,7 @@ struct hns_roce_ib_create_qp {
};
struct hns_roce_ib_create_qp_resp {
__u64 cap_flags;
__aligned_u64 cap_flags;
};
struct hns_roce_ib_alloc_ucontext_resp {
......
......@@ -61,17 +61,17 @@ struct i40iw_alloc_pd_resp {
};
struct i40iw_create_cq_req {
__u64 user_cq_buffer;
__u64 user_shadow_area;
__aligned_u64 user_cq_buffer;
__aligned_u64 user_shadow_area;
};
struct i40iw_create_qp_req {
__u64 user_wqe_buffers;
__u64 user_compl_ctx;
__aligned_u64 user_wqe_buffers;
__aligned_u64 user_compl_ctx;
/* UDA QP PHB */
__u64 user_sq_phb; /* place for VA of the sq phb buff */
__u64 user_rq_phb; /* place for VA of the rq phb buff */
__aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */
__aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */
};
enum i40iw_memreg_type {
......
......@@ -73,8 +73,8 @@ struct ib_ucm_cmd_hdr {
};
struct ib_ucm_create_id {
__u64 uid;
__u64 response;
__aligned_u64 uid;
__aligned_u64 response;
};
struct ib_ucm_create_id_resp {
......@@ -82,7 +82,7 @@ struct ib_ucm_create_id_resp {
};
struct ib_ucm_destroy_id {
__u64 response;
__aligned_u64 response;
__u32 id;
__u32 reserved;
};
......@@ -92,7 +92,7 @@ struct ib_ucm_destroy_id_resp {
};
struct ib_ucm_attr_id {
__u64 response;
__aligned_u64 response;
__u32 id;
__u32 reserved;
};
......@@ -105,7 +105,7 @@ struct ib_ucm_attr_id_resp {
};
struct ib_ucm_init_qp_attr {
__u64 response;
__aligned_u64 response;
__u32 id;
__u32 qp_state;
};
......@@ -123,7 +123,7 @@ struct ib_ucm_notify {
};
struct ib_ucm_private_data {
__u64 data;
__aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
......@@ -135,9 +135,9 @@ struct ib_ucm_req {
__u32 qp_type;
__u32 psn;
__be64 sid;
__u64 data;
__u64 primary_path;
__u64 alternate_path;
__aligned_u64 data;
__aligned_u64 primary_path;
__aligned_u64 alternate_path;
__u8 len;
__u8 peer_to_peer;
__u8 responder_resources;
......@@ -153,8 +153,8 @@ struct ib_ucm_req {
};
struct ib_ucm_rep {
__u64 uid;
__u64 data;
__aligned_u64 uid;
__aligned_u64 data;
__u32 id;
__u32 qpn;
__u32 psn;
......@@ -172,15 +172,15 @@ struct ib_ucm_rep {
struct ib_ucm_info {
__u32 id;
__u32 status;
__u64 info;
__u64 data;
__aligned_u64 info;
__aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
};
struct ib_ucm_mra {
__u64 data;
__aligned_u64 data;
__u32 id;
__u8 len;
__u8 timeout;
......@@ -188,8 +188,8 @@ struct ib_ucm_mra {
};
struct ib_ucm_lap {
__u64 path;
__u64 data;
__aligned_u64 path;
__aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
......@@ -199,8 +199,8 @@ struct ib_ucm_sidr_req {
__u32 id;
__u32 timeout;
__be64 sid;
__u64 data;
__u64 path;
__aligned_u64 data;
__aligned_u64 path;
__u16 reserved_pkey;
__u8 len;
__u8 max_cm_retries;
......@@ -212,8 +212,8 @@ struct ib_ucm_sidr_rep {
__u32 qpn;
__u32 qkey;
__u32 status;
__u64 info;
__u64 data;
__aligned_u64 info;
__aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
......@@ -222,9 +222,9 @@ struct ib_ucm_sidr_rep {
* event notification ABI structures.
*/
struct ib_ucm_event_get {
__u64 response;
__u64 data;
__u64 info;
__aligned_u64 response;
__aligned_u64 data;
__aligned_u64 info;
__u8 data_len;
__u8 info_len;
__u8 reserved[6];
......@@ -303,7 +303,7 @@ struct ib_ucm_sidr_rep_event_resp {
#define IB_UCM_PRES_ALTERNATE 0x08
struct ib_ucm_event_resp {
__u64 uid;
__aligned_u64 uid;
__u32 id;
__u32 event;
__u32 present;
......
......@@ -143,7 +143,7 @@ struct ib_user_mad_hdr {
*/
struct ib_user_mad {
struct ib_user_mad_hdr hdr;
__u64 data[0];
__aligned_u64 data[0];
};
/*
......@@ -225,7 +225,7 @@ struct ib_user_mad_reg_req2 {
__u8 mgmt_class_version;
__u16 res;
__u32 flags;
__u64 method_mask[2];
__aligned_u64 method_mask[2];
__u32 oui;
__u8 rmpp_version;
__u8 reserved[3];
......
This diff is collapsed.
......@@ -77,8 +77,8 @@ struct mlx4_ib_alloc_pd_resp {
};
struct mlx4_ib_create_cq {
__u64 buf_addr;
__u64 db_addr;
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
};
struct mlx4_ib_create_cq_resp {
......@@ -87,12 +87,12 @@ struct mlx4_ib_create_cq_resp {
};
struct mlx4_ib_resize_cq {
__u64 buf_addr;
__aligned_u64 buf_addr;
};
struct mlx4_ib_create_srq {
__u64 buf_addr;
__u64 db_addr;
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
};
struct mlx4_ib_create_srq_resp {
......@@ -101,7 +101,7 @@ struct mlx4_ib_create_srq_resp {
};
struct mlx4_ib_create_qp_rss {
__u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
__aligned_u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
__u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
__u8 rx_hash_key[40];
......@@ -110,8 +110,8 @@ struct mlx4_ib_create_qp_rss {
};
struct mlx4_ib_create_qp {
__u64 buf_addr;
__u64 db_addr;
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
......@@ -120,8 +120,8 @@ struct mlx4_ib_create_qp {
};
struct mlx4_ib_create_wq {
__u64 buf_addr;
__u64 db_addr;
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
__u8 log_range_size;
__u8 reserved[3];
__u32 comp_mask;
......@@ -161,7 +161,7 @@ enum mlx4_ib_rx_hash_fields {
};
struct mlx4_ib_rss_caps {
__u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
__aligned_u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
};
......@@ -181,8 +181,9 @@ struct mlx4_ib_tso_caps {
struct mlx4_uverbs_ex_query_device_resp {
__u32 comp_mask;
__u32 response_length;
__u64 hca_core_clock_offset;
__aligned_u64 hca_core_clock_offset;
__u32 max_inl_recv_sz;
__u32 reserved;
struct mlx4_ib_rss_caps rss_caps;
struct mlx4_ib_tso_caps tso_caps;
};
......
......@@ -84,7 +84,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
__u8 reserved0;
__u16 reserved1;
__u32 reserved2;
__u64 lib_caps;
__aligned_u64 lib_caps;
};
enum mlx5_ib_alloc_ucontext_resp_mask {
......@@ -125,7 +125,7 @@ struct mlx5_ib_alloc_ucontext_resp {
__u8 cmds_supp_uhw;
__u8 eth_min_inline;
__u8 clock_info_versions;
__u64 hca_core_clock_offset;
__aligned_u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
__u32 num_dyn_bfregs;
......@@ -147,7 +147,7 @@ struct mlx5_ib_tso_caps {
};
struct mlx5_ib_rss_caps {
__u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 reserved[7];
};
......@@ -248,8 +248,8 @@ enum mlx5_ib_create_cq_flags {
};
struct mlx5_ib_create_cq {
__u64 buf_addr;
__u64 db_addr;
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
__u32 cqe_size;
__u8 cqe_comp_en;
__u8 cqe_comp_res_format;
......@@ -262,15 +262,15 @@ struct mlx5_ib_create_cq_resp {
};
struct mlx5_ib_resize_cq {
__u64 buf_addr;
__aligned_u64 buf_addr;
__u16 cqe_size;
__u16 reserved0;
__u32 reserved1;
};
struct mlx5_ib_create_srq {
__u64 buf_addr;
__u64 db_addr;
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
__u32 flags;
__u32 reserved0; /* explicit padding (optional on i386) */
__u32 uidx;
......@@ -283,8 +283,8 @@ struct mlx5_ib_create_srq_resp {
};
struct mlx5_ib_create_qp {
__u64 buf_addr;
__u64 db_addr;
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
__u32 sq_wqe_count;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
......@@ -292,8 +292,8 @@ struct mlx5_ib_create_qp {
__u32 uidx;
__u32 bfreg_index;
union {
__u64 sq_buf_addr;
__u64 access_key;
__aligned_u64 sq_buf_addr;
__aligned_u64 access_key;
};
};
......@@ -324,7 +324,7 @@ enum mlx5_rx_hash_fields {
};
struct mlx5_ib_create_qp_rss {
__u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 rx_key_len; /* valid only for Toeplitz */
__u8 reserved[6];
......@@ -349,8 +349,8 @@ enum mlx5_ib_create_wq_mask {
};
struct mlx5_ib_create_wq {
__u64 buf_addr;
__u64 db_addr;
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
__u32 user_index;
......@@ -402,13 +402,13 @@ struct mlx5_ib_modify_wq {
struct mlx5_ib_clock_info {
__u32 sign;
__u32 resv;
__u64 nsec;
__u64 cycles;
__u64 frac;
__aligned_u64 nsec;
__aligned_u64 cycles;
__aligned_u64 frac;
__u32 mult;
__u32 shift;
__u64 mask;
__u64 overflow_period;
__aligned_u64 mask;
__aligned_u64 overflow_period;
};
enum mlx5_ib_mmap_cmd {
......
......@@ -74,8 +74,8 @@ struct mthca_reg_mr {
struct mthca_create_cq {
__u32 lkey;
__u32 pdn;
__u64 arm_db_page;
__u64 set_db_page;
__aligned_u64 arm_db_page;
__aligned_u64 set_db_page;
__u32 arm_db_index;
__u32 set_db_index;
};
......@@ -93,7 +93,7 @@ struct mthca_resize_cq {
struct mthca_create_srq {
__u32 lkey;
__u32 db_index;
__u64 db_page;
__aligned_u64 db_page;
};
struct mthca_create_srq_resp {
......@@ -104,8 +104,8 @@ struct mthca_create_srq_resp {
struct mthca_create_qp {
__u32 lkey;
__u32 reserved;
__u64 sq_db_page;
__u64 rq_db_page;
__aligned_u64 sq_db_page;
__aligned_u64 rq_db_page;
__u32 sq_db_index;
__u32 rq_db_index;
};
......
......@@ -72,14 +72,14 @@ struct nes_alloc_pd_resp {
};
struct nes_create_cq_req {
__u64 user_cq_buffer;
__aligned_u64 user_cq_buffer;
__u32 mcrqf;
__u8 reserved[4];
};
struct nes_create_qp_req {
__u64 user_wqe_buffers;
__u64 user_qp_buffer;
__aligned_u64 user_wqe_buffers;
__aligned_u64 user_qp_buffer;
};
enum iwnes_memreg_type {
......
......@@ -55,13 +55,13 @@ struct ocrdma_alloc_ucontext_resp {
__u32 wqe_size;
__u32 max_inline_data;
__u32 dpp_wqe_size;
__u64 ah_tbl_page;
__aligned_u64 ah_tbl_page;
__u32 ah_tbl_len;
__u32 rqe_size;
__u8 fw_ver[32];
/* for future use/new features in progress */
__u64 rsvd1;
__u64 rsvd2;
__aligned_u64 rsvd1;
__aligned_u64 rsvd2;
};
struct ocrdma_alloc_pd_ureq {
......@@ -87,13 +87,13 @@ struct ocrdma_create_cq_uresp {
__u32 page_size;
__u32 num_pages;
__u32 max_hw_cqe;
__u64 page_addr[MAX_CQ_PAGES];
__u64 db_page_addr;
__aligned_u64 page_addr[MAX_CQ_PAGES];
__aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 phase_change;
/* for future use/new features in progress */
__u64 rsvd1;
__u64 rsvd2;
__aligned_u64 rsvd1;
__aligned_u64 rsvd2;
};
#define MAX_QP_PAGES 8
......@@ -115,9 +115,9 @@ struct ocrdma_create_qp_uresp {
__u32 rq_page_size;
__u32 num_sq_pages;
__u32 num_rq_pages;
__u64 sq_page_addr[MAX_QP_PAGES];
__u64 rq_page_addr[MAX_QP_PAGES];
__u64 db_page_addr;
__aligned_u64 sq_page_addr[MAX_QP_PAGES];
__aligned_u64 rq_page_addr[MAX_QP_PAGES];
__aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 dpp_credit;
__u32 dpp_offset;
......@@ -126,7 +126,7 @@ struct ocrdma_create_qp_uresp {
__u32 db_sq_offset;
__u32 db_rq_offset;
__u32 db_shift;
__u64 rsvd[11];
__aligned_u64 rsvd[11];
};
struct ocrdma_create_srq_uresp {
......@@ -137,16 +137,16 @@ struct ocrdma_create_srq_uresp {
__u32 rq_page_size;
__u32 num_rq_pages;
__u64 rq_page_addr[MAX_QP_PAGES];
__u64 db_page_addr;
__aligned_u64 rq_page_addr[MAX_QP_PAGES];
__aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 num_rqe_allocated;
__u32 db_rq_offset;
__u32 db_shift;
__u64 rsvd2;
__u64 rsvd3;
__aligned_u64 rsvd2;
__aligned_u64 rsvd3;
};
#endif /* OCRDMA_ABI_USER_H */
......@@ -40,7 +40,7 @@
/* user kernel communication data structures. */
struct qedr_alloc_ucontext_resp {
__u64 db_pa;
__aligned_u64 db_pa;
__u32 db_size;
__u32 max_send_wr;
......@@ -53,24 +53,27 @@ struct qedr_alloc_ucontext_resp {
__u8 dpm_enabled;
__u8 wids_enabled;
__u16 wid_count;
__u32 reserved;
};
struct qedr_alloc_pd_ureq {
__u64 rsvd1;
__aligned_u64 rsvd1;
};
struct qedr_alloc_pd_uresp {
__u32 pd_id;
__u32 reserved;
};
struct qedr_create_cq_ureq {
__u64 addr;
__u64 len;
__aligned_u64 addr;
__aligned_u64 len;
};
struct qedr_create_cq_uresp {
__u32 db_offset;
__u16 icid;
__u16 reserved;
};
struct qedr_create_qp_ureq {
......@@ -79,17 +82,17 @@ struct qedr_create_qp_ureq {
/* SQ */
/* user space virtual address of SQ buffer */
__u64 sq_addr;
__aligned_u64 sq_addr;
/* length of SQ buffer */
__u64 sq_len;
__aligned_u64 sq_len;
/* RQ */
/* user space virtual address of RQ buffer */
__u64 rq_addr;
__aligned_u64 rq_addr;
/* length of RQ buffer */
__u64 rq_len;
__aligned_u64 rq_len;
};
struct qedr_create_qp_uresp {
......@@ -105,6 +108,7 @@ struct qedr_create_qp_uresp {
__u16 rq_icid;
__u32 rq_db2_offset;
__u32 reserved;
};
#endif /* __QEDR_USER_H__ */
......@@ -80,8 +80,8 @@ struct rdma_ucm_cmd_hdr {
};
struct rdma_ucm_create_id {
__u64 uid;
__u64 response;
__aligned_u64 uid;
__aligned_u64 response;
__u16 ps;
__u8 qp_type;
__u8 reserved[5];
......@@ -92,7 +92,7 @@ struct rdma_ucm_create_id_resp {
};
struct rdma_ucm_destroy_id {
__u64 response;
__aligned_u64 response;
__u32 id;
__u32 reserved;
};
......@@ -102,7 +102,7 @@ struct rdma_ucm_destroy_id_resp {
};
struct rdma_ucm_bind_ip {
__u64 response;
__aligned_u64 response;
struct sockaddr_in6 addr;
__u32 id;
};
......@@ -143,13 +143,13 @@ enum {
};
struct rdma_ucm_query {
__u64 response;
__aligned_u64 response;
__u32 id;
__u32 option;
};
struct rdma_ucm_query_route_resp {
__u64 node_guid;
__aligned_u64 node_guid;
struct ib_user_path_rec ib_route[2];
struct sockaddr_in6 src_addr;
struct sockaddr_in6 dst_addr;
......@@ -159,7 +159,7 @@ struct rdma_ucm_query_route_resp {
};
struct rdma_ucm_query_addr_resp {
__u64 node_guid;
__aligned_u64 node_guid;
__u8 port_num;
__u8 reserved;
__u16 pkey;
......@@ -210,7 +210,7 @@ struct rdma_ucm_listen {
};
struct rdma_ucm_accept {
__u64 uid;
__aligned_u64 uid;
struct rdma_ucm_conn_param conn_param;
__u32 id;
__u32 reserved;
......@@ -228,7 +228,7 @@ struct rdma_ucm_disconnect {
};
struct rdma_ucm_init_qp_attr {
__u64 response;
__aligned_u64 response;
__u32 id;
__u32 qp_state;
};
......@@ -239,8 +239,8 @@ struct rdma_ucm_notify {
};
struct rdma_ucm_join_ip_mcast {
__u64 response; /* rdma_ucm_create_id_resp */
__u64 uid;
__aligned_u64 response; /* rdma_ucm_create_id_resp */
__aligned_u64 uid;
struct sockaddr_in6 addr;
__u32 id;
};
......@@ -253,8 +253,8 @@ enum {
};
struct rdma_ucm_join_mcast {
__u64 response; /* rdma_ucma_create_id_resp */
__u64 uid;
__aligned_u64 response; /* rdma_ucma_create_id_resp */
__aligned_u64 uid;
__u32 id;
__u16 addr_size;
__u16 join_flags;
......@@ -262,18 +262,23 @@ struct rdma_ucm_join_mcast {
};
struct rdma_ucm_get_event {
__u64 response;
__aligned_u64 response;
};
struct rdma_ucm_event_resp {
__u64 uid;
__aligned_u64 uid;
__u32 id;
__u32 event;
__u32 status;
/*
* NOTE: This union is not aligned to 8 bytes so none of the union
* members may contain a u64 or anything with higher alignment than 4.
*/
union {
struct rdma_ucm_conn_param conn;
struct rdma_ucm_ud_param ud;
} param;
__u32 reserved;
};
/* Option levels */
......@@ -291,7 +296,7 @@ enum {
};
struct rdma_ucm_set_option {
__u64 optval;
__aligned_u64 optval;
__u32 id;
__u32 level;
__u32 optname;
......@@ -299,7 +304,7 @@ struct rdma_ucm_set_option {
};
struct rdma_ucm_migrate_id {
__u64 response;
__aligned_u64 response;
__u32 id;
__u32 fd;
};
......
......@@ -58,6 +58,8 @@ struct rxe_global_route {
struct rxe_av {
__u8 port_num;
__u8 network_type;
__u16 reserved1;
__u32 reserved2;
struct rxe_global_route grh;
union {
struct sockaddr_in _sockaddr_in;
......@@ -66,7 +68,7 @@ struct rxe_av {
};
struct rxe_send_wr {
__u64 wr_id;
__aligned_u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
......@@ -76,36 +78,42 @@ struct rxe_send_wr {
} ex;
union {
struct {
__u64 remote_addr;
__aligned_u64 remote_addr;
__u32 rkey;
__u32 reserved;
} rdma;
struct {
__u64 remote_addr;
__u64 compare_add;
__u64 swap;
__aligned_u64 remote_addr;
__aligned_u64 compare_add;
__aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
struct {
__u32 remote_qpn;
__u32 remote_qkey;
__u16 pkey_index;
} ud;
/* reg is only used by the kernel and is not part of the uapi */
struct {
struct ib_mr *mr;
union {
struct ib_mr *mr;
__aligned_u64 reserved;
};
__u32 key;
int access;
__u32 access;
} reg;
} wr;
};
struct rxe_sge {
__u64 addr;
__aligned_u64 addr;
__u32 length;
__u32 lkey;
};
struct mminfo {
__u64 offset;
__aligned_u64 offset;
__u32 size;
__u32 pad;
};
......@@ -116,6 +124,7 @@ struct rxe_dma_info {
__u32 cur_sge;
__u32 num_sge;
__u32 sge_offset;
__u32 reserved;
union {
__u8 inline_data[0];
struct rxe_sge sge[0];
......@@ -127,7 +136,7 @@ struct rxe_send_wqe {
struct rxe_av av;
__u32 status;
__u32 state;
__u64 iova;
__aligned_u64 iova;
__u32 mask;
__u32 first_psn;
__u32 last_psn;
......@@ -138,7 +147,7 @@ struct rxe_send_wqe {
};
struct rxe_recv_wqe {
__u64 wr_id;
__aligned_u64 wr_id;
__u32 num_sge;
__u32 padding;
struct rxe_dma_info dma;
......@@ -160,10 +169,11 @@ struct rxe_create_qp_resp {
struct rxe_create_srq_resp {
struct mminfo mi;
__u32 srq_num;
__u32 reserved;
};
struct rxe_modify_srq_cmd {
__u64 mmap_info_addr;
__aligned_u64 mmap_info_addr;
};
#endif /* RDMA_USER_RXE_H */
......@@ -143,7 +143,7 @@ struct pvrdma_alloc_pd_resp {
};
struct pvrdma_create_cq {
__u64 buf_addr;
__aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
......@@ -154,13 +154,13 @@ struct pvrdma_create_cq_resp {
};
struct pvrdma_resize_cq {
__u64 buf_addr;
__aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
struct pvrdma_create_srq {
__u64 buf_addr;
__aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
......@@ -171,25 +171,25 @@ struct pvrdma_create_srq_resp {
};
struct pvrdma_create_qp {
__u64 rbuf_addr;
__u64 sbuf_addr;
__aligned_u64 rbuf_addr;
__aligned_u64 sbuf_addr;
__u32 rbuf_size;
__u32 sbuf_size;
__u64 qp_addr;
__aligned_u64 qp_addr;
};
/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
__u64 swap_val;
__u64 compare_val;
__u64 swap_mask;
__u64 compare_mask;
__aligned_u64 swap_val;
__aligned_u64 compare_val;
__aligned_u64 swap_mask;
__aligned_u64 compare_mask;
};
/* PVRDMA masked atomic fetch and add */
struct pvrdma_ex_fetch_add {
__u64 add_val;
__u64 field_boundary;
__aligned_u64 add_val;
__aligned_u64 field_boundary;
};
/* PVRDMA address vector. */
......@@ -207,14 +207,14 @@ struct pvrdma_av {
/* PVRDMA scatter/gather entry */
struct pvrdma_sge {
__u64 addr;
__aligned_u64 addr;
__u32 length;
__u32 lkey;
};
/* PVRDMA receive queue work request */
struct pvrdma_rq_wqe_hdr {
__u64 wr_id; /* wr id */
__aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
};
......@@ -222,7 +222,7 @@ struct pvrdma_rq_wqe_hdr {
/* PVRDMA send queue work request */
struct pvrdma_sq_wqe_hdr {
__u64 wr_id; /* wr id */
__aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
__u32 opcode; /* operation type */
......@@ -234,19 +234,19 @@ struct pvrdma_sq_wqe_hdr {
__u32 reserved;
union {
struct {
__u64 remote_addr;
__aligned_u64 remote_addr;
__u32 rkey;
__u8 reserved[4];
} rdma;
struct {
__u64 remote_addr;
__u64 compare_add;
__u64 swap;
__aligned_u64 remote_addr;
__aligned_u64 compare_add;
__aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
struct {
__u64 remote_addr;
__aligned_u64 remote_addr;
__u32 log_arg_sz;
__u32 rkey;
union {
......@@ -255,13 +255,14 @@ struct pvrdma_sq_wqe_hdr {
} wr_data;
} masked_atomics;
struct {
__u64 iova_start;
__u64 pl_pdir_dma;
__aligned_u64 iova_start;
__aligned_u64 pl_pdir_dma;
__u32 page_shift;
__u32 page_list_len;
__u32 length;
__u32 access_flags;
__u32 rkey;
__u32 reserved;
} fast_reg;
struct {
__u32 remote_qpn;
......@@ -274,8 +275,8 @@ struct pvrdma_sq_wqe_hdr {
/* Completion queue element. */
struct pvrdma_cqe {
__u64 wr_id;
__u64 qp;
__aligned_u64 wr_id;
__aligned_u64 qp;
__u32 opcode;
__u32 status;
__u32 byte_len;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment