#define RTE_ETHDEV_HAS_LRO_SUPPORT

#include <rte_compat.h>
#include <rte_config.h>

#include "rte_dev_info.h"

extern int rte_eth_dev_logtype;

#define RTE_ETHDEV_LOG(level, ...) \
    rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)

#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
    for (rte_eth_iterator_init(iter, devargs), \
         id = rte_eth_iterator_next(iter); \
         id != RTE_MAX_ETHPORTS; \
         id = rte_eth_iterator_next(iter))
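/*
 * Illustrative sketch (not part of the header): iterating over the ports
 * that match a devargs string. The function name, the devargs value and the
 * use of printf are assumptions for the example. If the loop is left early
 * with break, rte_eth_iterator_cleanup() must be called on the iterator.
 */
static void
list_matching_ports(const char *devargs)
{
    struct rte_dev_iterator iterator;
    uint16_t port_id;

    RTE_ETH_FOREACH_MATCHING_DEV(port_id, devargs, &iterator)
        printf("matched port %u\n", port_id);
}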
#define ETH_LINK_SPEED_AUTONEG  (0 <<  0)
#define ETH_LINK_SPEED_FIXED    (1 <<  0)
#define ETH_LINK_SPEED_10M_HD   (1 <<  1)
#define ETH_LINK_SPEED_10M      (1 <<  2)
#define ETH_LINK_SPEED_100M_HD  (1 <<  3)
#define ETH_LINK_SPEED_100M     (1 <<  4)
#define ETH_LINK_SPEED_1G       (1 <<  5)
#define ETH_LINK_SPEED_2_5G     (1 <<  6)
#define ETH_LINK_SPEED_5G       (1 <<  7)
#define ETH_LINK_SPEED_10G      (1 <<  8)
#define ETH_LINK_SPEED_20G      (1 <<  9)
#define ETH_LINK_SPEED_25G      (1 << 10)
#define ETH_LINK_SPEED_40G      (1 << 11)
#define ETH_LINK_SPEED_50G      (1 << 12)
#define ETH_LINK_SPEED_56G      (1 << 13)
#define ETH_LINK_SPEED_100G     (1 << 14)
#define ETH_LINK_SPEED_200G     (1 << 15)
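/*
 * Illustrative sketch (not part of the header): forcing a fixed 10G link
 * instead of autonegotiation by combining the speed bit-flags in
 * rte_eth_conf.link_speeds before rte_eth_dev_configure(). The queue
 * counts and the function name are placeholders.
 */
static int
configure_fixed_10g(uint16_t port_id)
{
    struct rte_eth_conf conf = { 0 };

    conf.link_speeds = ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G;
    return rte_eth_dev_configure(port_id, 1, 1, &conf);
}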
#define ETH_SPEED_NUM_NONE          0
#define ETH_SPEED_NUM_10M          10
#define ETH_SPEED_NUM_100M        100
#define ETH_SPEED_NUM_1G         1000
#define ETH_SPEED_NUM_2_5G       2500
#define ETH_SPEED_NUM_5G         5000
#define ETH_SPEED_NUM_10G       10000
#define ETH_SPEED_NUM_20G       20000
#define ETH_SPEED_NUM_25G       25000
#define ETH_SPEED_NUM_40G       40000
#define ETH_SPEED_NUM_50G       50000
#define ETH_SPEED_NUM_56G       56000
#define ETH_SPEED_NUM_100G     100000
#define ETH_SPEED_NUM_200G     200000
#define ETH_SPEED_NUM_UNKNOWN  UINT32_MAX

#define ETH_LINK_HALF_DUPLEX 0
#define ETH_LINK_FULL_DUPLEX 1
#define ETH_LINK_DOWN        0
#define ETH_LINK_UP          1
#define ETH_LINK_FIXED       0
#define ETH_LINK_AUTONEG     1
#define RTE_ETH_LINK_MAX_STR_LEN 40
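/*
 * Illustrative sketch (not part of the header): reading the link status
 * without waiting and formatting it into a RTE_ETH_LINK_MAX_STR_LEN buffer
 * with rte_eth_link_to_str(). printf is used only for illustration.
 */
static int
print_link(uint16_t port_id)
{
    struct rte_eth_link link;
    char text[RTE_ETH_LINK_MAX_STR_LEN];
    int ret;

    ret = rte_eth_link_get_nowait(port_id, &link);
    if (ret != 0)
        return ret;
    rte_eth_link_to_str(text, sizeof(text), &link);
    printf("Port %u: %s\n", port_id, text);
    return 0;
}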
#define ETH_MQ_RX_RSS_FLAG  0x1
#define ETH_MQ_RX_DCB_FLAG  0x2
#define ETH_MQ_RX_VMDQ_FLAG 0x4

#define ETH_RSS        ETH_MQ_RX_RSS
#define VMDQ_DCB       ETH_MQ_RX_VMDQ_DCB
#define ETH_DCB_RX     ETH_MQ_RX_DCB

#define ETH_DCB_NONE    ETH_MQ_TX_NONE
#define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
#define ETH_DCB_TX      ETH_MQ_TX_DCB
ETH_VLAN_TYPE_UNKNOWN = 0, /* first enumerator of enum rte_vlan_type */
#define RTE_ETH_FLOW_UNKNOWN             0
#define RTE_ETH_FLOW_RAW                 1
#define RTE_ETH_FLOW_IPV4                2
#define RTE_ETH_FLOW_FRAG_IPV4           3
#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP    4
#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP    5
#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP   6
#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER  7
#define RTE_ETH_FLOW_IPV6                8
#define RTE_ETH_FLOW_FRAG_IPV6           9
#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP   10
#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP   11
#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP  12
#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
#define RTE_ETH_FLOW_L2_PAYLOAD         14
#define RTE_ETH_FLOW_IPV6_EX            15
#define RTE_ETH_FLOW_IPV6_TCP_EX        16
#define RTE_ETH_FLOW_IPV6_UDP_EX        17
#define RTE_ETH_FLOW_PORT               18
#define RTE_ETH_FLOW_VXLAN              19
#define RTE_ETH_FLOW_GENEVE             20
#define RTE_ETH_FLOW_NVGRE              21
#define RTE_ETH_FLOW_VXLAN_GPE          22
#define RTE_ETH_FLOW_GTPU               23
#define RTE_ETH_FLOW_MAX                24

#define ETH_RSS_IPV4               (1ULL << 2)
#define ETH_RSS_FRAG_IPV4          (1ULL << 3)
#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
#define ETH_RSS_IPV6               (1ULL << 8)
#define ETH_RSS_FRAG_IPV6          (1ULL << 9)
#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
#define ETH_RSS_L2_PAYLOAD         (1ULL << 14)
#define ETH_RSS_IPV6_EX            (1ULL << 15)
#define ETH_RSS_IPV6_TCP_EX        (1ULL << 16)
#define ETH_RSS_IPV6_UDP_EX        (1ULL << 17)
#define ETH_RSS_PORT               (1ULL << 18)
#define ETH_RSS_VXLAN              (1ULL << 19)
#define ETH_RSS_GENEVE             (1ULL << 20)
#define ETH_RSS_NVGRE              (1ULL << 21)
#define ETH_RSS_GTPU               (1ULL << 23)
#define ETH_RSS_ETH                (1ULL << 24)
#define ETH_RSS_S_VLAN             (1ULL << 25)
#define ETH_RSS_C_VLAN             (1ULL << 26)
#define ETH_RSS_ESP                (1ULL << 27)
#define ETH_RSS_AH                 (1ULL << 28)
#define ETH_RSS_L2TPV3             (1ULL << 29)
#define ETH_RSS_PFCP               (1ULL << 30)
#define ETH_RSS_PPPOE              (1ULL << 31)
#define ETH_RSS_ECPRI              (1ULL << 32)

#define ETH_RSS_L3_SRC_ONLY        (1ULL << 63)
#define ETH_RSS_L3_DST_ONLY        (1ULL << 62)
#define ETH_RSS_L4_SRC_ONLY        (1ULL << 61)
#define ETH_RSS_L4_DST_ONLY        (1ULL << 60)
#define ETH_RSS_L2_SRC_ONLY        (1ULL << 59)
#define ETH_RSS_L2_DST_ONLY        (1ULL << 58)

#define RTE_ETH_RSS_L3_PRE32       (1ULL << 57)
#define RTE_ETH_RSS_L3_PRE40       (1ULL << 56)
#define RTE_ETH_RSS_L3_PRE48       (1ULL << 55)
#define RTE_ETH_RSS_L3_PRE56       (1ULL << 54)
#define RTE_ETH_RSS_L3_PRE64       (1ULL << 53)
#define RTE_ETH_RSS_L3_PRE96       (1ULL << 52)

#define ETH_RSS_LEVEL_PMD_DEFAULT  (0ULL << 50)
#define ETH_RSS_LEVEL_OUTERMOST    (1ULL << 50)
#define ETH_RSS_LEVEL_INNERMOST    (2ULL << 50)
#define ETH_RSS_LEVEL_MASK         (3ULL << 50)

#define ETH_RSS_LEVEL(rss_hf) ((rss_hf & ETH_RSS_LEVEL_MASK) >> 50)
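/*
 * Illustrative sketch (not part of the header): composing an rss_hf value
 * that hashes inner IPv4/UDP headers on the source address only. The chosen
 * protocols are assumptions; ETH_RSS_LEVEL(rss_hf) on this value yields 2,
 * i.e. the innermost level.
 */
static uint64_t
example_inner_src_only_hf(void)
{
    uint64_t rss_hf = ETH_RSS_NONFRAG_IPV4_UDP |
                      ETH_RSS_L3_SRC_ONLY |
                      ETH_RSS_LEVEL_INNERMOST;

    return rss_hf;
}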
static inline uint64_t
rte_eth_rss_hf_refine(uint64_t rss_hf)
{
    if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
        rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);

    if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
        rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);

    return rss_hf;
}
#define ETH_RSS_IPV6_PRE32 ( \
        ETH_RSS_IPV6 | \
        RTE_ETH_RSS_L3_PRE32)

#define ETH_RSS_IPV6_PRE40 ( \
        ETH_RSS_IPV6 | \
        RTE_ETH_RSS_L3_PRE40)

#define ETH_RSS_IPV6_PRE48 ( \
        ETH_RSS_IPV6 | \
        RTE_ETH_RSS_L3_PRE48)

#define ETH_RSS_IPV6_PRE56 ( \
        ETH_RSS_IPV6 | \
        RTE_ETH_RSS_L3_PRE56)

#define ETH_RSS_IPV6_PRE64 ( \
        ETH_RSS_IPV6 | \
        RTE_ETH_RSS_L3_PRE64)

#define ETH_RSS_IPV6_PRE96 ( \
        ETH_RSS_IPV6 | \
        RTE_ETH_RSS_L3_PRE96)
#define ETH_RSS_IPV6_PRE32_UDP ( \
        ETH_RSS_NONFRAG_IPV6_UDP | \
        RTE_ETH_RSS_L3_PRE32)

#define ETH_RSS_IPV6_PRE40_UDP ( \
        ETH_RSS_NONFRAG_IPV6_UDP | \
        RTE_ETH_RSS_L3_PRE40)

#define ETH_RSS_IPV6_PRE48_UDP ( \
        ETH_RSS_NONFRAG_IPV6_UDP | \
        RTE_ETH_RSS_L3_PRE48)

#define ETH_RSS_IPV6_PRE56_UDP ( \
        ETH_RSS_NONFRAG_IPV6_UDP | \
        RTE_ETH_RSS_L3_PRE56)

#define ETH_RSS_IPV6_PRE64_UDP ( \
        ETH_RSS_NONFRAG_IPV6_UDP | \
        RTE_ETH_RSS_L3_PRE64)

#define ETH_RSS_IPV6_PRE96_UDP ( \
        ETH_RSS_NONFRAG_IPV6_UDP | \
        RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IPV6_PRE32_TCP ( \
        ETH_RSS_NONFRAG_IPV6_TCP | \
        RTE_ETH_RSS_L3_PRE32)

#define ETH_RSS_IPV6_PRE40_TCP ( \
        ETH_RSS_NONFRAG_IPV6_TCP | \
        RTE_ETH_RSS_L3_PRE40)

#define ETH_RSS_IPV6_PRE48_TCP ( \
        ETH_RSS_NONFRAG_IPV6_TCP | \
        RTE_ETH_RSS_L3_PRE48)

#define ETH_RSS_IPV6_PRE56_TCP ( \
        ETH_RSS_NONFRAG_IPV6_TCP | \
        RTE_ETH_RSS_L3_PRE56)

#define ETH_RSS_IPV6_PRE64_TCP ( \
        ETH_RSS_NONFRAG_IPV6_TCP | \
        RTE_ETH_RSS_L3_PRE64)

#define ETH_RSS_IPV6_PRE96_TCP ( \
        ETH_RSS_NONFRAG_IPV6_TCP | \
        RTE_ETH_RSS_L3_PRE96)

#define ETH_RSS_IPV6_PRE32_SCTP ( \
        ETH_RSS_NONFRAG_IPV6_SCTP | \
        RTE_ETH_RSS_L3_PRE32)

#define ETH_RSS_IPV6_PRE40_SCTP ( \
        ETH_RSS_NONFRAG_IPV6_SCTP | \
        RTE_ETH_RSS_L3_PRE40)

#define ETH_RSS_IPV6_PRE48_SCTP ( \
        ETH_RSS_NONFRAG_IPV6_SCTP | \
        RTE_ETH_RSS_L3_PRE48)

#define ETH_RSS_IPV6_PRE56_SCTP ( \
        ETH_RSS_NONFRAG_IPV6_SCTP | \
        RTE_ETH_RSS_L3_PRE56)

#define ETH_RSS_IPV6_PRE64_SCTP ( \
        ETH_RSS_NONFRAG_IPV6_SCTP | \
        RTE_ETH_RSS_L3_PRE64)

#define ETH_RSS_IPV6_PRE96_SCTP ( \
        ETH_RSS_NONFRAG_IPV6_SCTP | \
        RTE_ETH_RSS_L3_PRE96)
#define ETH_RSS_IP ( \
    ETH_RSS_IPV4 | \
    ETH_RSS_FRAG_IPV4 | \
    ETH_RSS_NONFRAG_IPV4_OTHER | \
    ETH_RSS_IPV6 | \
    ETH_RSS_FRAG_IPV6 | \
    ETH_RSS_NONFRAG_IPV6_OTHER | \
    ETH_RSS_IPV6_EX)

#define ETH_RSS_UDP ( \
    ETH_RSS_NONFRAG_IPV4_UDP | \
    ETH_RSS_NONFRAG_IPV6_UDP | \
    ETH_RSS_IPV6_UDP_EX)

#define ETH_RSS_TCP ( \
    ETH_RSS_NONFRAG_IPV4_TCP | \
    ETH_RSS_NONFRAG_IPV6_TCP | \
    ETH_RSS_IPV6_TCP_EX)

#define ETH_RSS_SCTP ( \
    ETH_RSS_NONFRAG_IPV4_SCTP | \
    ETH_RSS_NONFRAG_IPV6_SCTP)

#define ETH_RSS_TUNNEL ( \
    ETH_RSS_VXLAN | \
    ETH_RSS_GENEVE | \
    ETH_RSS_NVGRE)

#define ETH_RSS_VLAN ( \
    ETH_RSS_S_VLAN | \
    ETH_RSS_C_VLAN)

/** Mask of valid RSS hash protocols */
#define ETH_RSS_PROTO_MASK ( \
    ETH_RSS_IPV4 | \
    ETH_RSS_FRAG_IPV4 | \
    ETH_RSS_NONFRAG_IPV4_TCP | \
    ETH_RSS_NONFRAG_IPV4_UDP | \
    ETH_RSS_NONFRAG_IPV4_SCTP | \
    ETH_RSS_NONFRAG_IPV4_OTHER | \
    ETH_RSS_IPV6 | \
    ETH_RSS_FRAG_IPV6 | \
    ETH_RSS_NONFRAG_IPV6_TCP | \
    ETH_RSS_NONFRAG_IPV6_UDP | \
    ETH_RSS_NONFRAG_IPV6_SCTP | \
    ETH_RSS_NONFRAG_IPV6_OTHER | \
    ETH_RSS_L2_PAYLOAD | \
    ETH_RSS_IPV6_EX | \
    ETH_RSS_IPV6_TCP_EX | \
    ETH_RSS_IPV6_UDP_EX | \
    ETH_RSS_PORT | \
    ETH_RSS_VXLAN | \
    ETH_RSS_GENEVE | \
    ETH_RSS_NVGRE)
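/*
 * Illustrative sketch (not part of the header): enabling RSS over the
 * IP/TCP/UDP protocols the device actually supports, as reported in
 * flow_type_rss_offloads by rte_eth_dev_info_get(). A NULL key keeps the
 * driver's default hash key; the function name is a placeholder.
 */
static int
enable_rss(uint16_t port_id)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_rss_conf rss_conf = { 0 }; /* rss_key == NULL: default key */
    int ret;

    ret = rte_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0)
        return ret;

    rss_conf.rss_hf = (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
                      dev_info.flow_type_rss_offloads;
    return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}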
#define ETH_RSS_RETA_SIZE_64  64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE   64

#define ETH_VMDQ_MAX_VLAN_FILTERS   64
#define ETH_DCB_NUM_USER_PRIORITIES 8
#define ETH_VMDQ_DCB_NUM_QUEUES     128
#define ETH_DCB_NUM_QUEUES          128

#define ETH_DCB_PG_SUPPORT  0x00000001
#define ETH_DCB_PFC_SUPPORT 0x00000002

#define ETH_VLAN_STRIP_OFFLOAD  0x0001
#define ETH_VLAN_FILTER_OFFLOAD 0x0002
#define ETH_VLAN_EXTEND_OFFLOAD 0x0004
#define ETH_QINQ_STRIP_OFFLOAD  0x0008

#define ETH_VLAN_STRIP_MASK  0x0001
#define ETH_VLAN_FILTER_MASK 0x0002
#define ETH_VLAN_EXTEND_MASK 0x0004
#define ETH_QINQ_STRIP_MASK  0x0008
#define ETH_VLAN_ID_MAX      0x0FFF

#define ETH_NUM_RECEIVE_MAC_ADDR 128

#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128

#define ETH_VMDQ_ACCEPT_UNTAG     0x0001
#define ETH_VMDQ_ACCEPT_HASH_MC   0x0002
#define ETH_VMDQ_ACCEPT_HASH_UC   0x0004
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010

#define ETH_MIRROR_MAX_VLANS 64

#define ETH_MIRROR_VIRTUAL_POOL_UP   0x01
#define ETH_MIRROR_UPLINK_PORT       0x02
#define ETH_MIRROR_DOWNLINK_PORT     0x04
#define ETH_MIRROR_VLAN              0x08
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
uint16_t reta[RTE_RETA_GROUP_SIZE]; /* group of 64 redirection table entries (member of struct rte_eth_rss_reta_entry64) */
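/*
 * Illustrative sketch (not part of the header): spreading a redirection
 * table of reta_size entries round-robin over nb_queues Rx queues with
 * rte_eth_dev_rss_reta_update(). Assumes reta_size is a multiple of
 * RTE_RETA_GROUP_SIZE and no larger than ETH_RSS_RETA_SIZE_512.
 */
static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
    struct rte_eth_rss_reta_entry64
        reta_conf[ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE] = { 0 };
    uint16_t i;

    for (i = 0; i < reta_size; i++) {
        reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
        reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
            i % nb_queues;
    }
    return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}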
struct rte_eth_dcb_rx_conf {
    enum rte_eth_nb_tcs nb_tcs;
    uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_vmdq_dcb_tx_conf {
    enum rte_eth_nb_pools nb_queue_pools;
    uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_dcb_tx_conf {
    enum rte_eth_nb_tcs nb_tcs;
    uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};

struct rte_eth_vmdq_tx_conf {
    enum rte_eth_nb_pools nb_queue_pools;
};

#define RTE_ETH_MAX_HAIRPIN_PEERS 32
enum rte_eth_tunnel_type {
    RTE_TUNNEL_TYPE_NONE = 0,
    RTE_TUNNEL_TYPE_VXLAN,
    RTE_TUNNEL_TYPE_GENEVE,
    RTE_TUNNEL_TYPE_TEREDO,
    RTE_TUNNEL_TYPE_NVGRE,
    RTE_TUNNEL_TYPE_IP_IN_GRE,
    RTE_L2_TUNNEL_TYPE_E_TAG,
    RTE_TUNNEL_TYPE_VXLAN_GPE,
    RTE_TUNNEL_TYPE_MAX,
};
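/*
 * Illustrative sketch (not part of the header): registering UDP destination
 * port 4789 as VXLAN so tunnel-aware classification and offloads apply to
 * it. The port number follows the usual VXLAN convention; the function name
 * is a placeholder.
 */
static int
add_vxlan_port(uint16_t port_id)
{
    struct rte_eth_udp_tunnel tunnel = {
        .udp_port = 4789,
        .prot_type = RTE_TUNNEL_TYPE_VXLAN,
    };

    return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}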
#define DEV_RX_OFFLOAD_VLAN_STRIP       0x00000001
#define DEV_RX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_RX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO          0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP       0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
#define DEV_RX_OFFLOAD_HEADER_SPLIT     0x00000100
#define DEV_RX_OFFLOAD_VLAN_FILTER      0x00000200
#define DEV_RX_OFFLOAD_VLAN_EXTEND      0x00000400
#define DEV_RX_OFFLOAD_JUMBO_FRAME      0x00000800
#define DEV_RX_OFFLOAD_SCATTER          0x00002000
#define DEV_RX_OFFLOAD_TIMESTAMP        0x00004000
#define DEV_RX_OFFLOAD_SECURITY         0x00008000
#define DEV_RX_OFFLOAD_KEEP_CRC         0x00010000
#define DEV_RX_OFFLOAD_SCTP_CKSUM       0x00020000
#define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM  0x00040000
#define DEV_RX_OFFLOAD_RSS_HASH         0x00080000
#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT 0x00100000

#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
                                 DEV_RX_OFFLOAD_UDP_CKSUM | \
                                 DEV_RX_OFFLOAD_TCP_CKSUM)
#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
                             DEV_RX_OFFLOAD_VLAN_FILTER | \
                             DEV_RX_OFFLOAD_VLAN_EXTEND | \
                             DEV_RX_OFFLOAD_QINQ_STRIP)
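/*
 * Illustrative sketch (not part of the header): enabling port-level Rx
 * checksum offload only when the device reports it, then configuring the
 * port. Queue counts and the function name are placeholders; error
 * handling is reduced to return codes.
 */
static int
configure_with_rx_csum(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf conf = { 0 };
    int ret;

    ret = rte_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0)
        return ret;

    if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM) ==
            DEV_RX_OFFLOAD_CHECKSUM)
        conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;

    return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}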
#define DEV_TX_OFFLOAD_VLAN_INSERT      0x00000001
#define DEV_TX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_TX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_TX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_TX_OFFLOAD_SCTP_CKSUM       0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO          0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO          0x00000040
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
#define DEV_TX_OFFLOAD_QINQ_INSERT      0x00000100
#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200
#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000
#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
#define DEV_TX_OFFLOAD_MULTI_SEGS       0x00008000
#define DEV_TX_OFFLOAD_MBUF_FAST_FREE   0x00010000
#define DEV_TX_OFFLOAD_SECURITY         0x00020000
#define DEV_TX_OFFLOAD_UDP_TNL_TSO      0x00040000
#define DEV_TX_OFFLOAD_IP_TNL_TSO       0x00080000
#define DEV_TX_OFFLOAD_OUTER_UDP_CKSUM  0x00100000
#define DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP 0x00200000
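/*
 * Illustrative sketch (not part of the header): selecting Tx offloads by
 * testing the capabilities reported in rte_eth_dev_info. Which offloads to
 * request is an application choice; the function name is a placeholder.
 */
static uint64_t
pick_tx_offloads(const struct rte_eth_dev_info *dev_info)
{
    uint64_t offloads = 0;

    if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
        offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
    if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS)
        offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
    return offloads;
}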
#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002

#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1

#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)

#define RTE_ETH_BURST_FLAG_PER_QUEUE (1ULL << 0)
#define RTE_ETH_BURST_MODE_INFO_SIZE 1024
#define RTE_ETH_XSTATS_NAME_SIZE 64

#define ETH_DCB_NUM_TCS   8
#define ETH_MAX_VMDQ_POOL 64
} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]; /* Rx queues per TC per pool (struct rte_eth_dcb_tc_queue_mapping) */
} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]; /* Tx queues per TC per pool (struct rte_eth_dcb_tc_queue_mapping) */
#define RTE_ETH_FEC_MODE_TO_CAPA(x) (1U << (x))
#define RTE_ETH_FEC_MODE_CAPA_MASK(x) (1U << (RTE_ETH_FEC_ ## x))

struct rte_eth_fec_capa {
    uint32_t speed; /**< Link speed (see ETH_SPEED_NUM_*) */
    uint32_t capa;  /**< FEC capabilities bitmask */
};

#define RTE_ETH_ALL RTE_MAX_ETHPORTS
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
    if (!rte_eth_dev_is_valid_port(port_id)) { \
        RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
        return retval; \
    } \
} while (0)

#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
    if (!rte_eth_dev_is_valid_port(port_id)) { \
        RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
        return; \
    } \
} while (0)
#define ETH_L2_TUNNEL_ENABLE_MASK     0x00000001
#define ETH_L2_TUNNEL_INSERTION_MASK  0x00000002
#define ETH_L2_TUNNEL_STRIPPING_MASK  0x00000004
#define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
    struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
    void *user_param);

typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
    struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
struct rte_eth_dev_sriov {
    uint8_t active;          /**< SRIOV is active with 16, 32 or 64 pools */
    uint8_t nb_q_per_pool;   /**< Rx queue number per pool */
    uint16_t def_vmdq_idx;   /**< Default pool num used for PF */
    uint16_t def_pool_q_idx; /**< Default pool queue start reg index */
};
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)

#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN

#define RTE_ETH_DEV_NO_OWNER 0

#define RTE_ETH_MAX_OWNER_NAME_LEN 64
struct rte_eth_dev_owner {
    uint64_t id;                           /**< The owner unique identifier */
    char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name */
};
#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE  0x0001
#define RTE_ETH_DEV_INTR_LSC              0x0002
#define RTE_ETH_DEV_BONDED_SLAVE          0x0004
#define RTE_ETH_DEV_INTR_RMV              0x0008
#define RTE_ETH_DEV_REPRESENTOR           0x0010
#define RTE_ETH_DEV_NOLIVE_MAC_ADDR       0x0020
#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id);
#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
    for (p = rte_eth_find_next_owned_by(0, o); \
         (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
         p = rte_eth_find_next_owned_by(p + 1, o))

#define RTE_ETH_FOREACH_DEV(p) \
    RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
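/*
 * Illustrative sketch (not part of the header): walking every ownerless,
 * usable port and printing its name. printf is used only for illustration;
 * the function name is a placeholder.
 */
static void
list_ports(void)
{
    char name[RTE_ETH_NAME_MAX_LEN];
    uint16_t port_id;

    RTE_ETH_FOREACH_DEV(port_id) {
        if (rte_eth_dev_get_name_by_port(port_id, name) == 0)
            printf("port %u: %s\n", port_id, name);
    }
}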
#define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
    for (port_id = rte_eth_find_next_of(0, parent); \
         port_id < RTE_MAX_ETHPORTS; \
         port_id = rte_eth_find_next_of(port_id + 1, parent))

#define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
    for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
         port_id < RTE_MAX_ETHPORTS; \
         port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
__rte_experimental int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner);
__rte_experimental int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id);
__rte_experimental int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner);
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool);
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf);
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf);
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf);
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction);
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size);
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx);
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx);
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size);
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num);
__rte_experimental int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num);
typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
    void *userdata);

struct rte_eth_dev_tx_buffer {
    buffer_tx_error_fn error_callback;
    void *error_userdata;
    uint16_t size;           /**< Size of buffer for buffered tx */
    uint16_t length;         /**< Number of packets in the array */
    struct rte_mbuf *pkts[]; /**< Pending packets to be sent on explicit flush or when full */
};

#define RTE_ETH_TX_BUFFER_SIZE(sz) \
    (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
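/*
 * Illustrative sketch (not part of the header): allocating and initializing
 * a Tx buffer able to hold 32 packets, sized with RTE_ETH_TX_BUFFER_SIZE().
 * rte_zmalloc_socket()/rte_free() come from rte_malloc.h; names and the
 * buffer size are assumptions.
 */
static struct rte_eth_dev_tx_buffer *
alloc_tx_buffer(int socket_id)
{
    struct rte_eth_dev_tx_buffer *buf;

    buf = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0,
                             socket_id);
    if (buf != NULL && rte_eth_tx_buffer_init(buf, 32) != 0) {
        rte_free(buf);
        return NULL;
    }
    return buf;
}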
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata);
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data);
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num);
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size);
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size);
struct rte_eth_rxtx_callback;

const struct rte_eth_rxtx_callback *rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param);
const struct rte_eth_rxtx_callback *rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param);
const struct rte_eth_rxtx_callback *rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param);
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb);
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb);
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr);
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags);
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp);
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc);
static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
        struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
    struct rte_eth_dev *dev = &rte_eth_devices[port_id];
    uint16_t nb_rx;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
    RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
    RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

    if (queue_id >= dev->data->nb_rx_queues) {
        RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
        return 0;
    }
#endif
    nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
                                 rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
    struct rte_eth_rxtx_callback *cb;

    /* The callback list was published with __ATOMIC_RELEASE; a relaxed
     * load is sufficient given the load dependency on cb->fn/cb->next.
     */
    cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
            __ATOMIC_RELAXED);

    if (unlikely(cb != NULL)) {
        do {
            nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
                              nb_pkts, cb->param);
            cb = cb->next;
        } while (cb != NULL);
    }
#endif

    rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
    return nb_rx;
}
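/*
 * Illustrative sketch (not part of the header): a minimal Rx polling step
 * built on rte_eth_rx_burst(). The burst size and the "process then free"
 * handling are placeholders for real application logic.
 */
static void
poll_rx_queue(uint16_t port_id, uint16_t queue_id)
{
    struct rte_mbuf *bufs[32];
    uint16_t nb_rx, i;

    nb_rx = rte_eth_rx_burst(port_id, queue_id, bufs, RTE_DIM(bufs));
    for (i = 0; i < nb_rx; i++)
        rte_pktmbuf_free(bufs[i]); /* placeholder: process the packet here */
}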
static inline int
rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
    struct rte_eth_dev *dev;

    RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
    dev = &rte_eth_devices[port_id];
    RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP);
    if (queue_id >= dev->data->nb_rx_queues ||
        dev->data->rx_queues[queue_id] == NULL)
        return -EINVAL;

    return (int)(*dev->rx_queue_count)(dev, queue_id);
}
__rte_deprecated
static inline int
rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
    struct rte_eth_dev *dev = &rte_eth_devices[port_id];
    RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
    RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_done, -ENOTSUP);
    return (*dev->rx_descriptor_done)(dev->data->rx_queues[queue_id], offset);
}
#define RTE_ETH_RX_DESC_AVAIL   0
#define RTE_ETH_RX_DESC_DONE    1
#define RTE_ETH_RX_DESC_UNAVAIL 2
static inline int
rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
    uint16_t offset)
{
    struct rte_eth_dev *dev;
    void *rxq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
    RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
    dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
    if (queue_id >= dev->data->nb_rx_queues)
        return -ENODEV;
#endif
    RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP);
    rxq = dev->data->rx_queues[queue_id];

    return (*dev->rx_descriptor_status)(rxq, offset);
}
#define RTE_ETH_TX_DESC_FULL    0
#define RTE_ETH_TX_DESC_DONE    1
#define RTE_ETH_TX_DESC_UNAVAIL 2
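/*
 * Illustrative sketch (not part of the header): probing a Tx ring slot to
 * see whether the hardware has completed it. A DONE status means the
 * descriptor (and its mbuf) has been transmitted; the offset is
 * application-chosen.
 */
static int
tx_slot_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
    return rte_eth_tx_descriptor_status(port_id, queue_id, offset) ==
           RTE_ETH_TX_DESC_DONE;
}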
static inline int
rte_eth_tx_descriptor_status(uint16_t port_id, uint16_t queue_id,
    uint16_t offset)
{
    struct rte_eth_dev *dev;
    void *txq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
    RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
    dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
    if (queue_id >= dev->data->nb_tx_queues)
        return -ENODEV;
#endif
    RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP);
    txq = dev->data->tx_queues[queue_id];

    return (*dev->tx_descriptor_status)(txq, offset);
}
static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
        struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
    struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
    RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
    RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

    if (queue_id >= dev->data->nb_tx_queues) {
        RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
        return 0;
    }
#endif

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
    struct rte_eth_rxtx_callback *cb;

    /* The callback list was published with __ATOMIC_RELEASE; a relaxed
     * load is sufficient given the load dependency on cb->fn/cb->next.
     */
    cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
            __ATOMIC_RELAXED);

    if (unlikely(cb != NULL)) {
        do {
            nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
                    cb->param);
            cb = cb->next;
        } while (cb != NULL);
    }
#endif

    rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
    return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
}
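/*
 * Illustrative sketch (not part of the header): transmitting a burst and
 * freeing whatever the driver did not accept, so mbufs are never leaked on
 * a full Tx ring. Names are placeholders.
 */
static void
send_burst(uint16_t port_id, uint16_t queue_id,
           struct rte_mbuf **pkts, uint16_t nb_pkts)
{
    uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

    while (nb_tx < nb_pkts)
        rte_pktmbuf_free(pkts[nb_tx++]);
}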
#ifndef RTE_ETHDEV_TX_PREPARE_NOOP

static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
        struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
    struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
    if (!rte_eth_dev_is_valid_port(port_id)) {
        RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
        rte_errno = ENODEV;
        return 0;
    }
#endif

    dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
    if (queue_id >= dev->data->nb_tx_queues) {
        RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
        rte_errno = EINVAL;
        return 0;
    }
#endif

    if (!dev->tx_pkt_prepare)
        return nb_pkts;

    return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
            tx_pkts, nb_pkts);
}

#else

/* With RTE_ETHDEV_TX_PREPARE_NOOP defined, tx_prepare is a pass-through. */
static inline uint16_t
rte_eth_tx_prepare(__rte_unused uint16_t port_id, __rte_unused uint16_t queue_id,
        __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
    return nb_pkts;
}

#endif
static inline uint16_t
rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
        struct rte_eth_dev_tx_buffer *buffer)
{
    uint16_t sent;
    uint16_t to_send = buffer->length;

    if (to_send == 0)
        return 0;

    sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
    buffer->length = 0;

    /* All packets sent, or to be dealt with by error callback below */
    if (unlikely(sent != to_send))
        buffer->error_callback(&buffer->pkts[sent],
                               (uint16_t)(to_send - sent),
                               buffer->error_userdata);

    return sent;
}
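/*
 * Illustrative sketch (not part of the header): buffering packets for a
 * queue with rte_eth_tx_buffer() and flushing explicitly at the end of a
 * processing cycle. The buffer is assumed to have been set up with
 * rte_eth_tx_buffer_init(); names are placeholders.
 */
static uint16_t
buffered_send(uint16_t port_id, uint16_t queue_id,
              struct rte_eth_dev_tx_buffer *tx_buffer,
              struct rte_mbuf **pkts, uint16_t nb_pkts)
{
    uint16_t sent = 0;
    uint16_t i;

    for (i = 0; i < nb_pkts; i++)
        sent += rte_eth_tx_buffer(port_id, queue_id, tx_buffer, pkts[i]);

    sent += rte_eth_tx_buffer_flush(port_id, queue_id, tx_buffer);
    return sent;
}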
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
enum rte_eth_event_ipsec_subtype { RTE_ETH_EVENT_IPSEC_UNKNOWN = 0, RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW, RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY, RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY, RTE_ETH_EVENT_IPSEC_MAX }
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
int rte_eth_dev_set_link_down(uint16_t port_id)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
RTE_ETH_EVENT_INTR_RESET, RTE_ETH_EVENT_FLOW_AGED, RTE_ETH_EVENT_QUEUE_STATE (values of enum rte_eth_event_type)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_is_valid_port(uint16_t port_id)
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_allmulticast_disable(uint16_t port_id)
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
__rte_experimental int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
__rte_experimental int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
static __rte_deprecated int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
__rte_experimental int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_promiscuous_get(uint16_t port_id)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_set_link_up(uint16_t port_id)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
uint16_t rte_eth_find_next(uint16_t port_id)
__rte_experimental int rte_eth_dev_owner_new(uint64_t *owner_id)
int rte_eth_allmulticast_get(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
int rte_eth_allmulticast_enable(uint16_t port_id)
int rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_timesync_enable(uint16_t port_id)
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_dev_is_removed(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_timesync_disable(uint16_t port_id)
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
__rte_experimental int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
__rte_experimental int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
int rte_eth_led_on(uint16_t port_id)
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
__rte_experimental uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
__rte_experimental int rte_eth_dev_owner_delete(const uint64_t owner_id)
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_close(uint16_t port_id)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
RTE_FDIR_NO_REPORT_STATUS, RTE_FDIR_REPORT_STATUS_ALWAYS (values of enum rte_fdir_status_mode)
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
static int rte_eth_tx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
__extension__ struct rte_eth_link __rte_aligned(8)
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_xstats_reset(uint16_t port_id)
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
__rte_experimental uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
uint16_t rte_eth_dev_count_total(void)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
rx_adv_conf (anonymous struct member of struct rte_eth_conf)
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
struct rte_eth_txmode txmode
struct rte_eth_rxmode rxmode
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
tx_adv_conf (anonymous union member of struct rte_eth_conf)
struct rte_fdir_conf fdir_conf
uint32_t dcb_capability_en
struct rte_intr_conf intr_conf
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
struct rte_eth_rss_conf rss_conf
struct rte_eth_dcb_tx_conf dcb_tx_conf
struct rte_eth_dcb_rx_conf dcb_rx_conf
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]
struct rte_eth_dcb_tc_queue_mapping tc_queue
uint8_t tc_bws[ETH_DCB_NUM_TCS]
tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS] (anonymous struct member of struct rte_eth_dcb_tc_queue_mapping)
tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS] (anonymous struct member of struct rte_eth_dcb_tc_queue_mapping)
uint32_t max_hash_mac_addrs
struct rte_eth_desc_lim rx_desc_lim
struct rte_eth_txconf default_txconf
struct rte_device * device
struct rte_eth_rxconf default_rxconf
uint32_t max_lro_pkt_size
uint64_t tx_queue_offload_capa
struct rte_eth_desc_lim tx_desc_lim
uint64_t flow_type_rss_offloads
struct rte_eth_dev_portconf default_txportconf
struct rte_eth_dev_portconf default_rxportconf
struct rte_eth_switch_info switch_info
struct rte_eth_rxseg_capa rx_seg_capa
uint64_t rx_queue_offload_capa
const uint32_t * dev_flags
enum rte_eth_event_ipsec_subtype subtype
enum rte_eth_fc_mode mode
uint8_t mac_ctrl_frame_fwd
struct rte_eth_vlan_mirror vlan
struct rte_eth_fc_conf fc
struct rte_eth_thresh rx_thresh
uint8_t rx_deferred_start
union rte_eth_rxseg * rx_seg
uint32_t max_lro_pkt_size
enum rte_eth_rx_mq_mode mq_mode
struct rte_eth_rxconf conf
__extension__ uint32_t multi_pools
uint32_t offset_align_log2
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
uint8_t tx_deferred_start
struct rte_eth_thresh tx_thresh
__extension__ uint8_t hw_vlan_insert_pvid
__extension__ uint8_t hw_vlan_reject_tagged
__extension__ uint8_t hw_vlan_reject_untagged
enum rte_eth_tx_mq_mode mq_mode
struct rte_eth_txconf conf
uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]
enum rte_eth_nb_pools nb_queue_pools
pool_map[ETH_VMDQ_MAX_VLAN_FILTERS] (anonymous struct member of struct rte_eth_vmdq_dcb_conf)
uint8_t enable_default_pool
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]
enum rte_eth_nb_pools nb_queue_pools
uint8_t enable_default_pool
pool_map[ETH_VMDQ_MAX_VLAN_FILTERS] (anonymous struct member of struct rte_eth_vmdq_rx_conf)
char name[RTE_ETH_XSTATS_NAME_SIZE]
enum rte_fdir_status_mode status
enum rte_fdir_pballoc_type pballoc
struct rte_eth_fdir_flex_conf flex_conf