net/bnxt: support Rx completion v3
P7 devices support a newer Rx completion version.
While similar to the previous generation, this Rx completion
provides some extra information for flow offload scenarios
in addition to the usual fields.

Signed-off-by: Ajit Khaparde <[email protected]>
ajitkhaparde committed Feb 20, 2024
1 parent fa287e4 commit 65d2b05
Showing 2 changed files with 177 additions and 2 deletions.
87 changes: 85 additions & 2 deletions drivers/net/bnxt/bnxt_rxr.c
@@ -553,6 +553,41 @@ bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
        return bnxt_ptype_table[index];
}

static void
bnxt_parse_pkt_type_v3(struct rte_mbuf *mbuf,
                       struct rx_pkt_cmpl *rxcmp_v1,
                       struct rx_pkt_cmpl_hi *rxcmp1_v1)
{
        uint32_t flags_type, flags2, meta;
        struct rx_pkt_v3_cmpl_hi *rxcmp1;
        struct rx_pkt_v3_cmpl *rxcmp;
        uint8_t index;

        rxcmp = (void *)rxcmp_v1;
        rxcmp1 = (void *)rxcmp1_v1;

        flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
        flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
        meta = rte_le_to_cpu_32(rxcmp->metadata1_payload_offset);

        /* TODO */
        /* Validate ptype table indexing at build time. */
        /* bnxt_check_ptype_constants_v3(); */

        /*
         * Index format:
         *     bit 0: Set if IP tunnel encapsulated packet.
         *     bit 1: Set if IPv6 packet, clear if IPv4.
         *     bit 2: Set if VLAN tag present.
         *     bits 3-6: Four-bit hardware packet type field.
         */
        index = BNXT_CMPL_V3_ITYPE_TO_IDX(flags_type) |
                BNXT_CMPL_V3_VLAN_TO_IDX(meta) |
                BNXT_CMPL_V3_IP_VER_TO_IDX(flags2);

        mbuf->packet_type = bnxt_ptype_table[index];
}
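For reference, the index composition above can be exercised in isolation. The toy program below only mirrors the bit layout documented in the comment (bit 0 tunnel, bit 1 IP version, bit 2 VLAN, bits 3-6 hardware packet type); the TOY_* shift names and the sample itype value are placeholders, not the driver's BNXT_CMPL_V3_*_TO_IDX macros or BNXT_PTYPE_TBL_* constants.

#include <stdint.h>
#include <stdio.h>

/* Placeholder shifts mirroring the comment above; not the driver's values. */
#define TOY_IDX_TUN_SFT    0 /* bit 0: tunnel encapsulated */
#define TOY_IDX_IP_VER_SFT 1 /* bit 1: IPv6 vs IPv4 */
#define TOY_IDX_VLAN_SFT   2 /* bit 2: VLAN tag present */
#define TOY_IDX_ITYPE_SFT  3 /* bits 3-6: hardware packet type */

static uint8_t toy_ptype_index(int tunnel, int ipv6, int vlan, uint8_t itype)
{
        return (uint8_t)((tunnel << TOY_IDX_TUN_SFT) |
                         (ipv6 << TOY_IDX_IP_VER_SFT) |
                         (vlan << TOY_IDX_VLAN_SFT) |
                         ((itype & 0xf) << TOY_IDX_ITYPE_SFT));
}

int main(void)
{
        /* VLAN-tagged, non-tunneled IPv6 packet with a hypothetical itype of 2:
         * 0 | (1 << 1) | (1 << 2) | (2 << 3) = 22 under this toy layout.
         */
        printf("index = %u\n", toy_ptype_index(0, 1, 1, 2));
        return 0;
}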

static void __rte_cold
bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
{
@@ -716,6 +751,43 @@ bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
        ptp->rx_timestamp = pkt_time;
}

static uint32_t
bnxt_ulp_set_mark_in_mbuf_v3(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
                             struct rte_mbuf *mbuf, uint32_t *vfr_flag)
{
        struct rx_pkt_v3_cmpl_hi *rxcmp1_v3 = (void *)rxcmp1;
        uint32_t flags2, meta, mark_id = 0;
        /* revisit the usage of gfid/lfid if mark action is supported.
         * for now, only VFR is using mark and the metadata is the SVIF
         * (a small number)
         */
        bool gfid = false;
        int rc = 0;

        flags2 = rte_le_to_cpu_32(rxcmp1_v3->flags2);

        switch (flags2 & RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_MASK) {
        case RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_CHDR_DATA:
                /* Only supporting Metadata for ulp now */
                meta = rxcmp1_v3->metadata2;
                break;
        default:
                goto skip_mark;
        }

        rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid, meta, vfr_flag, &mark_id);
        if (!rc) {
                /* Only supporting VFR for now, no Mark actions */
                if (vfr_flag && *vfr_flag)
                        return mark_id;
        }

skip_mark:
        mbuf->hash.fdir.hi = 0;

        return 0;
}

static uint32_t
bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
                          struct rte_mbuf *mbuf, uint32_t *vfr_flag)
@@ -892,7 +964,8 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
                *rx_pkt = mbuf;
                goto next_rx;
        } else if ((cmp_type != CMPL_BASE_TYPE_RX_L2) &&
                   (cmp_type != CMPL_BASE_TYPE_RX_L2_V2)) {
                   (cmp_type != CMPL_BASE_TYPE_RX_L2_V2) &&
                   (cmp_type != CMPL_BASE_TYPE_RX_L2_V3)) {
                rc = -EINVAL;
                goto next_rx;
        }
@@ -929,6 +1002,16 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
                     bp->ptp_all_rx_tstamp)
                bnxt_get_rx_ts_p5(rxq->bp, rxcmp1->reorder);

        if (cmp_type == CMPL_BASE_TYPE_RX_L2_V3) {
                bnxt_parse_csum_v3(mbuf, rxcmp1);
                bnxt_parse_pkt_type_v3(mbuf, rxcmp, rxcmp1);
                bnxt_rx_vlan_v3(mbuf, rxcmp, rxcmp1);
                if (BNXT_TRUFLOW_EN(bp))
                        mark_id = bnxt_ulp_set_mark_in_mbuf_v3(rxq->bp, rxcmp1,
                                                               mbuf, &vfr_flag);
                goto reuse_rx_mbuf;
        }

        if (cmp_type == CMPL_BASE_TYPE_RX_L2_V2) {
                bnxt_parse_csum_v2(mbuf, rxcmp1);
                bnxt_parse_pkt_type_v2(mbuf, rxcmp, rxcmp1);
@@ -1066,7 +1149,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE) {
                        PMD_DRV_LOG(ERR, "Rx flush done\n");
                } else if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
                           (CMP_TYPE(rxcmp) <= RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG)) {
                           (CMP_TYPE(rxcmp) <= CMPL_BASE_TYPE_RX_TPA_START_V3)) {
                        rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
                        if (!rc)
                                nb_rx_pkts++;
92 changes: 92 additions & 0 deletions drivers/net/bnxt/bnxt_rxr.h
@@ -386,4 +386,96 @@ bnxt_parse_pkt_type_v2(struct rte_mbuf *mbuf,

        mbuf->packet_type = pkt_type;
}

/* Thor2 specific code for RX completion parsing */
#define RX_PKT_V3_CMPL_FLAGS2_IP_TYPE_SFT 8
#define RX_PKT_V3_CMPL_METADATA1_VALID_SFT 15

#define BNXT_CMPL_V3_ITYPE_TO_IDX(ft) \
        (((ft) & RX_PKT_V3_CMPL_FLAGS_ITYPE_MASK) >> \
         (RX_PKT_V3_CMPL_FLAGS_ITYPE_SFT - BNXT_PTYPE_TBL_TYPE_SFT))

#define BNXT_CMPL_V3_VLAN_TO_IDX(meta) \
        (((meta) & (1 << RX_PKT_V3_CMPL_METADATA1_VALID_SFT)) >> \
         (RX_PKT_V3_CMPL_METADATA1_VALID_SFT - BNXT_PTYPE_TBL_VLAN_SFT))

#define BNXT_CMPL_V3_IP_VER_TO_IDX(f2) \
        (((f2) & RX_PKT_V3_CMPL_HI_FLAGS2_IP_TYPE) >> \
         (RX_PKT_V3_CMPL_FLAGS2_IP_TYPE_SFT - BNXT_PTYPE_TBL_IP_VER_SFT))

#define RX_CMP_V3_VLAN_VALID(rxcmp) \
        (((struct rx_pkt_v3_cmpl *)rxcmp)->metadata1_payload_offset & \
         RX_PKT_V3_CMPL_METADATA1_VALID)

#define RX_CMP_V3_METADATA0_VID(rxcmp1) \
        ((((struct rx_pkt_v3_cmpl_hi *)rxcmp1)->metadata0) & \
         (RX_PKT_V3_CMPL_HI_METADATA0_VID_MASK | \
          RX_PKT_V3_CMPL_HI_METADATA0_DE | \
          RX_PKT_V3_CMPL_HI_METADATA0_PRI_MASK))

static inline void bnxt_rx_vlan_v3(struct rte_mbuf *mbuf,
                                   struct rx_pkt_cmpl *rxcmp,
                                   struct rx_pkt_cmpl_hi *rxcmp1)
{
        if (RX_CMP_V3_VLAN_VALID(rxcmp)) {
                mbuf->vlan_tci = RX_CMP_V3_METADATA0_VID(rxcmp1);
                mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
        }
}
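Note that RX_CMP_V3_METADATA0_VID() keeps the PRI and DE bits along with the VID, so the value stored in mbuf->vlan_tci is a full 802.1Q TCI rather than a bare VLAN ID. A minimal consumer-side sketch of splitting such a TCI using the standard 802.1Q field layout (toy_split_tci is our name, not a DPDK or bnxt API):

#include <stdint.h>
#include <stdio.h>

/* Split an 802.1Q TCI as stored in mbuf->vlan_tci:
 * PCP in bits 15-13, DEI in bit 12, VID in bits 11-0.
 */
static void toy_split_tci(uint16_t tci)
{
        printf("pcp=%u dei=%u vid=%u\n",
               tci >> 13, (tci >> 12) & 0x1, tci & 0x0fff);
}

int main(void)
{
        toy_split_tci(0x6064); /* PCP 3, DEI 0, VID 100 */
        return 0;
}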

#define RX_CMP_V3_L4_CS_ERR(err) \
        (((err) & RX_PKT_CMPL_ERRORS_MASK) \
         & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR))
#define RX_CMP_V3_L3_CS_ERR(err) \
        (((err) & RX_PKT_CMPL_ERRORS_MASK) \
         & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
#define RX_CMP_V3_T_IP_CS_ERR(err) \
        (((err) & RX_PKT_CMPL_ERRORS_MASK) \
         & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR))
#define RX_CMP_V3_T_L4_CS_ERR(err) \
        (((err) & RX_PKT_CMPL_ERRORS_MASK) \
         & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))
#define RX_PKT_CMPL_CALC \
        (RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
         RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
         RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC | \
         RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)

static inline uint64_t
bnxt_parse_csum_fields_v3(uint32_t flags2, uint32_t error_v2)
{
        uint64_t ol_flags = 0;

        if (flags2 & RX_PKT_CMPL_CALC) {
                if (unlikely(RX_CMP_V3_L4_CS_ERR(error_v2)))
                        ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                else
                        ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                if (unlikely(RX_CMP_V3_L3_CS_ERR(error_v2)))
                        ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
                if (unlikely(RX_CMP_V3_T_L4_CS_ERR(error_v2)))
                        ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
                else
                        ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
                if (unlikely(RX_CMP_V3_T_IP_CS_ERR(error_v2)))
                        ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
                if (!(ol_flags & (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD)))
                        ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
        } else {
                /* Unknown is defined as 0 for all packet types, hence using below for all */
                ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
        }
        return ol_flags;
}
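The flags set above are read back by applications through the standard rte_mbuf checksum masks. A minimal consumer-side sketch for the inner L4 status (the helper name is ours and not part of this patch; the RTE_MBUF_F_RX_L4_CKSUM_* flags are standard rte_mbuf API):

#include <rte_mbuf.h>

/* Classify the inner L4 checksum status reported in a received mbuf. */
static inline const char *
toy_l4_csum_status(const struct rte_mbuf *m)
{
        switch (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) {
        case RTE_MBUF_F_RX_L4_CKSUM_GOOD:
                return "good";
        case RTE_MBUF_F_RX_L4_CKSUM_BAD:
                return "bad";
        case RTE_MBUF_F_RX_L4_CKSUM_NONE:
                return "not computed";
        default:
                return "unknown"; /* RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN */
        }
}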

static inline void
bnxt_parse_csum_v3(struct rte_mbuf *mbuf, struct rx_pkt_cmpl_hi *rxcmp1)
{
        struct rx_pkt_v3_cmpl_hi *v3_cmp =
                (struct rx_pkt_v3_cmpl_hi *)(rxcmp1);
        uint16_t error_v2 = rte_le_to_cpu_16(v3_cmp->errors_v2);
        uint32_t flags2 = rte_le_to_cpu_32(v3_cmp->flags2);

        mbuf->ol_flags |= bnxt_parse_csum_fields_v3(flags2, error_v2);
}
#endif /* _BNXT_RXR_H_ */
