DPDK 22.11.4
rte_ethdev.h
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
3 */
4
5#ifndef _RTE_ETHDEV_H_
6#define _RTE_ETHDEV_H_
7
148#ifdef __cplusplus
149extern "C" {
150#endif
151
152#include <stdint.h>
153
154/* Use this macro to check if LRO API is supported */
155#define RTE_ETHDEV_HAS_LRO_SUPPORT
156
157/* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
158#ifdef RTE_LIBRTE_ETHDEV_DEBUG
159#define RTE_ETHDEV_DEBUG_RX
160#define RTE_ETHDEV_DEBUG_TX
161#endif
162
163#include <rte_cman.h>
164#include <rte_compat.h>
165#include <rte_log.h>
166#include <rte_interrupts.h>
167#include <rte_dev.h>
168#include <rte_devargs.h>
169#include <rte_bitops.h>
170#include <rte_errno.h>
171#include <rte_common.h>
172#include <rte_config.h>
173#include <rte_power_intrinsics.h>
174
175#include "rte_ethdev_trace_fp.h"
176#include "rte_dev_info.h"
177
178extern int rte_eth_dev_logtype;
179
180#define RTE_ETHDEV_LOG(level, ...) \
181 rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
182
183struct rte_mbuf;
184
201int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
202
218
232
246#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
247 for (rte_eth_iterator_init(iter, devargs), \
248 id = rte_eth_iterator_next(iter); \
249 id != RTE_MAX_ETHPORTS; \
250 id = rte_eth_iterator_next(iter))
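/*
 * Illustrative sketch (not part of the header): walking the ports that match
 * a devargs string with the iterator macro above. The match string and the
 * use of printf() from <stdio.h> are example choices only; if the loop is
 * broken before completion, rte_eth_iterator_cleanup() should be called on
 * the iterator.
 */
static void example_list_matching_ports(void)
{
        struct rte_dev_iterator iterator;
        uint16_t port_id;

        RTE_ETH_FOREACH_MATCHING_DEV(port_id, "bus=pci", &iterator)
                printf("matched port %u\n", port_id);
}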
251
262 uint64_t ipackets;
263 uint64_t opackets;
264 uint64_t ibytes;
265 uint64_t obytes;
270 uint64_t imissed;
271 uint64_t ierrors;
272 uint64_t oerrors;
273 uint64_t rx_nombuf;
274 /* Queue stats are limited to max 256 queues */
276 uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
278 uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
280 uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
282 uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
284 uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285};
286
290#define RTE_ETH_LINK_SPEED_AUTONEG 0
291#define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
292#define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
293#define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
294#define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
295#define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
296#define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
297#define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
298#define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
299#define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
300#define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
301#define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
302#define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
303#define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
304#define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
305#define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
306#define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
312#define RTE_ETH_SPEED_NUM_NONE 0
313#define RTE_ETH_SPEED_NUM_10M 10
314#define RTE_ETH_SPEED_NUM_100M 100
315#define RTE_ETH_SPEED_NUM_1G 1000
316#define RTE_ETH_SPEED_NUM_2_5G 2500
317#define RTE_ETH_SPEED_NUM_5G 5000
318#define RTE_ETH_SPEED_NUM_10G 10000
319#define RTE_ETH_SPEED_NUM_20G 20000
320#define RTE_ETH_SPEED_NUM_25G 25000
321#define RTE_ETH_SPEED_NUM_40G 40000
322#define RTE_ETH_SPEED_NUM_50G 50000
323#define RTE_ETH_SPEED_NUM_56G 56000
324#define RTE_ETH_SPEED_NUM_100G 100000
325#define RTE_ETH_SPEED_NUM_200G 200000
326#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
332__extension__
334 uint32_t link_speed;
335 uint16_t link_duplex : 1;
336 uint16_t link_autoneg : 1;
337 uint16_t link_status : 1;
338} __rte_aligned(8);
343#define RTE_ETH_LINK_HALF_DUPLEX 0
344#define RTE_ETH_LINK_FULL_DUPLEX 1
345#define RTE_ETH_LINK_DOWN 0
346#define RTE_ETH_LINK_UP 1
347#define RTE_ETH_LINK_FIXED 0
348#define RTE_ETH_LINK_AUTONEG 1
349#define RTE_ETH_LINK_MAX_STR_LEN 40
357 uint8_t pthresh;
358 uint8_t hthresh;
359 uint8_t wthresh;
360};
361
365#define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
366#define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
367#define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
377
384
394};
395
405};
406
413 uint32_t mtu;
421 uint64_t offloads;
422
423 uint64_t reserved_64s[2];
424 void *reserved_ptrs[2];
425};
426
432 RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
435 RTE_ETH_VLAN_TYPE_MAX,
436};
437
443 uint64_t ids[64];
444};
445
464 uint8_t *rss_key;
465 uint8_t rss_key_len;
466 uint64_t rss_hf;
467};
468
469/*
470 * A packet can be identified by hardware as one of several flow types.
471 * Different NIC hardware may support different sets of flow types.
472 * Basically, the NIC hardware classifies the flow type at as deep a protocol
473 * layer as possible, and exclusively. For example, if a packet is identified
474 * as 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not match any other flow type,
475 * even though it is also an IPv4 packet.
476 */
477#define RTE_ETH_FLOW_UNKNOWN 0
478#define RTE_ETH_FLOW_RAW 1
479#define RTE_ETH_FLOW_IPV4 2
480#define RTE_ETH_FLOW_FRAG_IPV4 3
481#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
482#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
483#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
484#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
485#define RTE_ETH_FLOW_IPV6 8
486#define RTE_ETH_FLOW_FRAG_IPV6 9
487#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
488#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
489#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
490#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
491#define RTE_ETH_FLOW_L2_PAYLOAD 14
492#define RTE_ETH_FLOW_IPV6_EX 15
493#define RTE_ETH_FLOW_IPV6_TCP_EX 16
494#define RTE_ETH_FLOW_IPV6_UDP_EX 17
496#define RTE_ETH_FLOW_PORT 18
497#define RTE_ETH_FLOW_VXLAN 19
498#define RTE_ETH_FLOW_GENEVE 20
499#define RTE_ETH_FLOW_NVGRE 21
500#define RTE_ETH_FLOW_VXLAN_GPE 22
501#define RTE_ETH_FLOW_GTPU 23
502#define RTE_ETH_FLOW_MAX 24
503
504/*
505 * The macros below define RSS offload types. They can be used to
506 * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
507 */
508#define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
509#define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
510#define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
511#define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
512#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
513#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
514#define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
515#define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
516#define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
517#define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
518#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
519#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
520#define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
521#define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
522#define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
523#define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
524#define RTE_ETH_RSS_PORT RTE_BIT64(18)
525#define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
526#define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
527#define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
528#define RTE_ETH_RSS_GTPU RTE_BIT64(23)
529#define RTE_ETH_RSS_ETH RTE_BIT64(24)
530#define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
531#define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
532#define RTE_ETH_RSS_ESP RTE_BIT64(27)
533#define RTE_ETH_RSS_AH RTE_BIT64(28)
534#define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
535#define RTE_ETH_RSS_PFCP RTE_BIT64(30)
536#define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
537#define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
538#define RTE_ETH_RSS_MPLS RTE_BIT64(33)
539#define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
540
553#define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
554
555#define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
556
557/*
558 * The following macros are combined with the RTE_ETH_RSS_* types above for
559 * more specific input set selection. These bits are allocated starting
560 * from the high end of the 64-bit value.
561 * Note: using an RTE_ETH_RSS_* type without SRC/DST_ONLY means that
562 * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
563 * the same level are set simultaneously, the result is the same as setting
564 * neither of them.
565 */
566#define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
567#define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
568#define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
569#define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
570#define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
571#define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
572
573/*
574 * Select only an IPv6 address prefix as the RSS input set, as described in
575 * https://tools.ietf.org/html/rfc6052
576 * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
577 * RTE_ETH_RSS_NONFRAG_IPV6_TCP or RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
578 */
579#define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
580#define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
581#define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
582#define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
583#define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
584#define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
585
586/*
587 * Combine the following macros with the layer selections above
588 * to choose the inner layers, the outer layers, or both for RSS computation.
589 * Bits 50 and 51 are reserved for this.
590 */
591
599#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
600
605#define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
606
611#define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
612#define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
613
614#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
615
626static inline uint64_t
627rte_eth_rss_hf_refine(uint64_t rss_hf)
628{
629 if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
630 rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
631
632 if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
633 rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
634
635 return rss_hf;
636}
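/*
 * Illustrative sketch (not part of the header): composing an rss_hf value from
 * the RSS offload types and input-set modifiers defined above, then
 * normalising it with rte_eth_rss_hf_refine(). The chosen types are example
 * values only.
 */
static inline uint64_t example_build_rss_hf(void)
{
        uint64_t rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                          RTE_ETH_RSS_NONFRAG_IPV6_TCP |
                          RTE_ETH_RSS_L3_SRC_ONLY; /* hash on source L3 address only */

        /* Adding RTE_ETH_RSS_L3_DST_ONLY as well would cancel both modifiers,
         * which is exactly what rte_eth_rss_hf_refine() enforces. */
        return rte_eth_rss_hf_refine(rss_hf);
}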
637
638#define RTE_ETH_RSS_IPV6_PRE32 ( \
639 RTE_ETH_RSS_IPV6 | \
640 RTE_ETH_RSS_L3_PRE32)
641
642#define RTE_ETH_RSS_IPV6_PRE40 ( \
643 RTE_ETH_RSS_IPV6 | \
644 RTE_ETH_RSS_L3_PRE40)
645
646#define RTE_ETH_RSS_IPV6_PRE48 ( \
647 RTE_ETH_RSS_IPV6 | \
648 RTE_ETH_RSS_L3_PRE48)
649
650#define RTE_ETH_RSS_IPV6_PRE56 ( \
651 RTE_ETH_RSS_IPV6 | \
652 RTE_ETH_RSS_L3_PRE56)
653
654#define RTE_ETH_RSS_IPV6_PRE64 ( \
655 RTE_ETH_RSS_IPV6 | \
656 RTE_ETH_RSS_L3_PRE64)
657
658#define RTE_ETH_RSS_IPV6_PRE96 ( \
659 RTE_ETH_RSS_IPV6 | \
660 RTE_ETH_RSS_L3_PRE96)
661
662#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
663 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
664 RTE_ETH_RSS_L3_PRE32)
665
666#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
667 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
668 RTE_ETH_RSS_L3_PRE40)
669
670#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
671 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
672 RTE_ETH_RSS_L3_PRE48)
673
674#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
675 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
676 RTE_ETH_RSS_L3_PRE56)
677
678#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
679 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
680 RTE_ETH_RSS_L3_PRE64)
681
682#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
683 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
684 RTE_ETH_RSS_L3_PRE96)
685
686#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
687 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
688 RTE_ETH_RSS_L3_PRE32)
689
690#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
691 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
692 RTE_ETH_RSS_L3_PRE40)
693
694#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
695 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
696 RTE_ETH_RSS_L3_PRE48)
697
698#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
699 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
700 RTE_ETH_RSS_L3_PRE56)
701
702#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
703 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
704 RTE_ETH_RSS_L3_PRE64)
705
706#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
707 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
708 RTE_ETH_RSS_L3_PRE96)
709
710#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
711 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
712 RTE_ETH_RSS_L3_PRE32)
713
714#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
715 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
716 RTE_ETH_RSS_L3_PRE40)
717
718#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
719 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
720 RTE_ETH_RSS_L3_PRE48)
721
722#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
723 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
724 RTE_ETH_RSS_L3_PRE56)
725
726#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
727 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
728 RTE_ETH_RSS_L3_PRE64)
729
730#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
731 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
732 RTE_ETH_RSS_L3_PRE96)
733
734#define RTE_ETH_RSS_IP ( \
735 RTE_ETH_RSS_IPV4 | \
736 RTE_ETH_RSS_FRAG_IPV4 | \
737 RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
738 RTE_ETH_RSS_IPV6 | \
739 RTE_ETH_RSS_FRAG_IPV6 | \
740 RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
741 RTE_ETH_RSS_IPV6_EX)
742
743#define RTE_ETH_RSS_UDP ( \
744 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
745 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
746 RTE_ETH_RSS_IPV6_UDP_EX)
747
748#define RTE_ETH_RSS_TCP ( \
749 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
750 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
751 RTE_ETH_RSS_IPV6_TCP_EX)
752
753#define RTE_ETH_RSS_SCTP ( \
754 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
755 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
756
757#define RTE_ETH_RSS_TUNNEL ( \
758 RTE_ETH_RSS_VXLAN | \
759 RTE_ETH_RSS_GENEVE | \
760 RTE_ETH_RSS_NVGRE)
761
762#define RTE_ETH_RSS_VLAN ( \
763 RTE_ETH_RSS_S_VLAN | \
764 RTE_ETH_RSS_C_VLAN)
765
767#define RTE_ETH_RSS_PROTO_MASK ( \
768 RTE_ETH_RSS_IPV4 | \
769 RTE_ETH_RSS_FRAG_IPV4 | \
770 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
771 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
772 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
773 RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
774 RTE_ETH_RSS_IPV6 | \
775 RTE_ETH_RSS_FRAG_IPV6 | \
776 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
777 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
778 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
779 RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
780 RTE_ETH_RSS_L2_PAYLOAD | \
781 RTE_ETH_RSS_IPV6_EX | \
782 RTE_ETH_RSS_IPV6_TCP_EX | \
783 RTE_ETH_RSS_IPV6_UDP_EX | \
784 RTE_ETH_RSS_PORT | \
785 RTE_ETH_RSS_VXLAN | \
786 RTE_ETH_RSS_GENEVE | \
787 RTE_ETH_RSS_NVGRE | \
788 RTE_ETH_RSS_MPLS)
789
790/*
791 * Definitions used for redirection table entry size.
792 * Some RSS RETA sizes may not be supported by some drivers; check the
793 * documentation or the description of the relevant functions for more details.
794 */
795#define RTE_ETH_RSS_RETA_SIZE_64 64
796#define RTE_ETH_RSS_RETA_SIZE_128 128
797#define RTE_ETH_RSS_RETA_SIZE_256 256
798#define RTE_ETH_RSS_RETA_SIZE_512 512
799#define RTE_ETH_RETA_GROUP_SIZE 64
800
802#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
803#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
804#define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
805#define RTE_ETH_DCB_NUM_QUEUES 128
809#define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
810#define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
814#define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
815#define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
816#define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
817#define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
819#define RTE_ETH_VLAN_STRIP_MASK 0x0001
820#define RTE_ETH_VLAN_FILTER_MASK 0x0002
821#define RTE_ETH_VLAN_EXTEND_MASK 0x0004
822#define RTE_ETH_QINQ_STRIP_MASK 0x0008
823#define RTE_ETH_VLAN_ID_MAX 0x0FFF
826/* Definitions used for receive MAC address */
827#define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
829/* Definitions used for unicast hash */
830#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
836#define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
838#define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
840#define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
842#define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
844#define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
855 uint64_t mask;
857 uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
858};
859
866 RTE_ETH_8_TCS = 8
868
877 RTE_ETH_64_POOLS = 64
879
880/* This structure may be extended in the future. */
881struct rte_eth_dcb_rx_conf {
882 enum rte_eth_nb_tcs nb_tcs;
884 uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
885};
886
887struct rte_eth_vmdq_dcb_tx_conf {
888 enum rte_eth_nb_pools nb_queue_pools;
890 uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
891};
892
893struct rte_eth_dcb_tx_conf {
894 enum rte_eth_nb_tcs nb_tcs;
896 uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
897};
898
899struct rte_eth_vmdq_tx_conf {
900 enum rte_eth_nb_pools nb_queue_pools;
901};
902
917 uint8_t default_pool;
918 uint8_t nb_pool_maps;
919 struct {
920 uint16_t vlan_id;
921 uint64_t pools;
925};
926
948 uint8_t default_pool;
950 uint8_t nb_pool_maps;
951 uint32_t rx_mode;
952 struct {
953 uint16_t vlan_id;
954 uint64_t pools;
956};
957
968 uint64_t offloads;
969
970 uint16_t pvid;
971 __extension__
972 uint8_t
978
979 uint64_t reserved_64s[2];
980 void *reserved_ptrs[2];
981};
982
1044 struct rte_mempool *mp;
1045 uint16_t length;
1046 uint16_t offset;
1058 uint32_t proto_hdr;
1059};
1060
1068 /* The settings for buffer split offload. */
1069 struct rte_eth_rxseg_split split;
1070 /* Other feature settings should be added here. */
1071};
1072
1079 uint8_t rx_drop_en;
1081 uint16_t rx_nseg;
1088 uint16_t share_group;
1089 uint16_t share_qid;
1095 uint64_t offloads;
1104
1125 uint16_t rx_nmempool;
1127 uint64_t reserved_64s[2];
1128 void *reserved_ptrs[2];
1129};
1130
1136 uint16_t tx_rs_thresh;
1146 uint64_t offloads;
1147
1148 uint64_t reserved_64s[2];
1149 void *reserved_ptrs[2];
1150};
1151
1164
1169 uint32_t rte_memory:1;
1170
1171 uint32_t reserved:30;
1172};
1173
1184 uint16_t max_rx_2_tx;
1186 uint16_t max_tx_2_rx;
1187 uint16_t max_nb_desc;
1190};
1191
1192#define RTE_ETH_MAX_HAIRPIN_PEERS 32
1193
1201 uint16_t port;
1202 uint16_t queue;
1203};
1204
1212 uint32_t peer_count:16;
1223 uint32_t tx_explicit:1;
1224
1236 uint32_t manual_bind:1;
1237
1250
1262 uint32_t use_rte_memory:1;
1263
1274 uint32_t force_memory:1;
1275
1276 uint32_t reserved:11;
1278 struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1279};
1280
1285 uint16_t nb_max;
1286 uint16_t nb_min;
1287 uint16_t nb_align;
1297 uint16_t nb_seg_max;
1298
1311};
1312
1322
1329 uint32_t high_water;
1330 uint32_t low_water;
1331 uint16_t pause_time;
1332 uint16_t send_xon;
1335 uint8_t autoneg;
1336};
1337
1345 uint8_t priority;
1346};
1347
1358 uint8_t tc_max;
1361};
1362
1383 struct {
1384 uint16_t tx_qid;
1388 uint8_t tc;
1389 } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1390
1391 struct {
1392 uint16_t pause_time;
1393 uint16_t rx_qid;
1397 uint8_t tc;
1398 } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1399};
1400
1406 RTE_ETH_TUNNEL_TYPE_NONE = 0,
1407 RTE_ETH_TUNNEL_TYPE_VXLAN,
1408 RTE_ETH_TUNNEL_TYPE_GENEVE,
1409 RTE_ETH_TUNNEL_TYPE_TEREDO,
1410 RTE_ETH_TUNNEL_TYPE_NVGRE,
1411 RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1412 RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1413 RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1414 RTE_ETH_TUNNEL_TYPE_ECPRI,
1415 RTE_ETH_TUNNEL_TYPE_MAX,
1416};
1417
1418/* Deprecated API file for rte_eth_dev_filter_* functions */
1419#include "rte_eth_ctrl.h"
1420
1431 uint16_t udp_port;
1432 uint8_t prot_type;
1433};
1434
1440 uint32_t lsc:1;
1442 uint32_t rxq:1;
1444 uint32_t rmv:1;
1445};
1446
1447#define rte_intr_conf rte_eth_intr_conf
1448
1455 uint32_t link_speeds;
1464 uint32_t lpbk_mode;
1469 struct {
1474 struct rte_eth_dcb_rx_conf dcb_rx_conf;
1478 union {
1480 struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1482 struct rte_eth_dcb_tx_conf dcb_tx_conf;
1484 struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1490};
1491
1495#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1496#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1497#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1498#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1499#define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1500#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1501#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1502#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1503#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1504#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1505#define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1511#define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1512#define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1513#define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1514#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1515#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1516#define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1517#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1518
1519#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1520 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1521 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1522#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1523 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1524 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1525 RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1526
1527/*
1528 * If new Rx offload capabilities are defined, they must also be
1529 * mentioned in rte_rx_offload_names in the rte_ethdev.c file.
1530 */
1531
1535#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1536#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1537#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1538#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1539#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1540#define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1541#define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1542#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1543#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1544#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1545#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1546#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1547#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1548#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1553#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1555#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1561#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1562#define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1568#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1574#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1576#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1582#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1583/*
1584 * If new Tx offload capabilities are defined, they must also be
1585 * mentioned in rte_tx_offload_names in the rte_ethdev.c file.
1586 */
1587
1592#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1594#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1604#define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1606#define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1608#define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1611/*
1612 * Fallback default preferred Rx/Tx port parameters.
1613 * These are used if an application requests default parameters
1614 * but the PMD does not provide preferred values.
1615 */
1616#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1617#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1618#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1619#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1620
1627 uint16_t burst_size;
1628 uint16_t ring_size;
1629 uint16_t nb_queues;
1630};
1631
1636#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1637
1642 const char *name;
1643 uint16_t domain_id;
1651 uint16_t port_id;
1657 uint16_t rx_domain;
1658};
1659
1667 __extension__
1668 uint32_t multi_pools:1;
1669 uint32_t offset_allowed:1;
1671 uint16_t max_nseg;
1672 uint16_t reserved;
1673};
1674
1687};
1688
1709};
1710
1717 struct rte_device *device;
1718 const char *driver_name;
1719 unsigned int if_index;
1721 uint16_t min_mtu;
1722 uint16_t max_mtu;
1723 const uint32_t *dev_flags;
1725 uint32_t max_rx_pktlen;
1728 uint16_t max_rx_queues;
1729 uint16_t max_tx_queues;
1730 uint32_t max_mac_addrs;
1733 uint16_t max_vfs;
1745 uint16_t reta_size;
1756 uint32_t speed_capa;
1758 uint16_t nb_rx_queues;
1759 uint16_t nb_tx_queues;
1772 uint64_t dev_capa;
1780
1781 uint64_t reserved_64s[2];
1782 void *reserved_ptrs[2];
1783};
1784
1786#define RTE_ETH_QUEUE_STATE_STOPPED 0
1787#define RTE_ETH_QUEUE_STATE_STARTED 1
1788#define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1796 struct rte_mempool *mp;
1799 uint8_t queue_state;
1800 uint16_t nb_desc;
1801 uint16_t rx_buf_size;
1810
1817 uint16_t nb_desc;
1818 uint8_t queue_state;
1820
1821/* Generic Burst mode flag definition; values can be ORed. */
1822
1828#define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1829
1835 uint64_t flags;
1837#define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1839};
1840
1842#define RTE_ETH_XSTATS_NAME_SIZE 64
1843
1854 uint64_t id;
1855 uint64_t value;
1856};
1857
1874};
1875
1876#define RTE_ETH_DCB_NUM_TCS 8
1877#define RTE_ETH_MAX_VMDQ_POOL 64
1878
1885 struct {
1886 uint16_t base;
1887 uint16_t nb_queue;
1888 } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1890 struct {
1891 uint16_t base;
1892 uint16_t nb_queue;
1893 } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1894};
1895
1901 uint8_t nb_tcs;
1903 uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
1906};
1907
1917};
1918
1919/* Translate from FEC mode to FEC capa */
1920#define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
1921
1922/* This macro indicates FEC capa mask */
1923#define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
1924
1925/* A structure used to get capabilities per link speed */
1926struct rte_eth_fec_capa {
1927 uint32_t speed;
1928 uint32_t capa;
1929};
1930
1931#define RTE_ETH_ALL RTE_MAX_ETHPORTS
1932
1933/* Macros to check for valid port */
1934#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1935 if (!rte_eth_dev_is_valid_port(port_id)) { \
1936 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1937 return retval; \
1938 } \
1939} while (0)
1940
1941#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1942 if (!rte_eth_dev_is_valid_port(port_id)) { \
1943 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1944 return; \
1945 } \
1946} while (0)
1947
1970typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1971 struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1972 void *user_param);
1973
1994typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1995 struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1996
2007};
2008
2009struct rte_eth_dev_sriov {
2010 uint8_t active;
2011 uint8_t nb_q_per_pool;
2012 uint16_t def_vmdq_idx;
2013 uint16_t def_pool_q_idx;
2014};
2015#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2016
2017#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2018
2019#define RTE_ETH_DEV_NO_OWNER 0
2020
2021#define RTE_ETH_MAX_OWNER_NAME_LEN 64
2022
2023struct rte_eth_dev_owner {
2024 uint64_t id;
2025 char name[RTE_ETH_MAX_OWNER_NAME_LEN];
2026};
2027
2033#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2035#define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2037#define RTE_ETH_DEV_BONDED_SLAVE RTE_BIT32(2)
2039#define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2041#define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2043#define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2048#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2062uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2063 const uint64_t owner_id);
2064
2068#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2069 for (p = rte_eth_find_next_owned_by(0, o); \
2070 (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2071 p = rte_eth_find_next_owned_by(p + 1, o))
2072
2081uint16_t rte_eth_find_next(uint16_t port_id);
2082
2086#define RTE_ETH_FOREACH_DEV(p) \
2087 RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
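/*
 * Illustrative sketch (not part of the header): visiting every valid,
 * ownerless port with the macro above.
 */
static unsigned int example_count_ports(void)
{
        unsigned int count = 0;
        uint16_t port_id;

        RTE_ETH_FOREACH_DEV(port_id)
                count++;
        return count;
}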
2088
2100uint16_t
2101rte_eth_find_next_of(uint16_t port_id_start,
2102 const struct rte_device *parent);
2103
2112#define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2113 for (port_id = rte_eth_find_next_of(0, parent); \
2114 port_id < RTE_MAX_ETHPORTS; \
2115 port_id = rte_eth_find_next_of(port_id + 1, parent))
2116
2128uint16_t
2129rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2130
2141#define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2142 for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2143 port_id < RTE_MAX_ETHPORTS; \
2144 port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2145
2156int rte_eth_dev_owner_new(uint64_t *owner_id);
2157
2168int rte_eth_dev_owner_set(const uint16_t port_id,
2169 const struct rte_eth_dev_owner *owner);
2170
2181int rte_eth_dev_owner_unset(const uint16_t port_id,
2182 const uint64_t owner_id);
2183
2192int rte_eth_dev_owner_delete(const uint64_t owner_id);
2193
2204int rte_eth_dev_owner_get(const uint16_t port_id,
2205 struct rte_eth_dev_owner *owner);
2206
2218
2228
2240uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2241
2250const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2251
2260const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2261
2273__rte_experimental
2274const char *rte_eth_dev_capability_name(uint64_t capability);
2275
2315int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2316 uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2317
2326int
2327rte_eth_dev_is_removed(uint16_t port_id);
2328
2391int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2392 uint16_t nb_rx_desc, unsigned int socket_id,
2393 const struct rte_eth_rxconf *rx_conf,
2394 struct rte_mempool *mb_pool);
2395
2423__rte_experimental
2425 (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2426 const struct rte_eth_hairpin_conf *conf);
2427
2476int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2477 uint16_t nb_tx_desc, unsigned int socket_id,
2478 const struct rte_eth_txconf *tx_conf);
2479
2505__rte_experimental
2507 (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2508 const struct rte_eth_hairpin_conf *conf);
2509
2536__rte_experimental
2537int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2538 size_t len, uint32_t direction);
2539
2562__rte_experimental
2563int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2564
2589__rte_experimental
2590int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2591
2604int rte_eth_dev_socket_id(uint16_t port_id);
2605
2615int rte_eth_dev_is_valid_port(uint16_t port_id);
2616
2634int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2635
2652int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2653
2671int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2672
2689int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2690
2711int rte_eth_dev_start(uint16_t port_id);
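/*
 * Illustrative sketch (not part of the header): minimal single-queue bring-up
 * of a port using the functions declared above. The descriptor count is an
 * example value, default queue configurations are requested by passing NULL,
 * and a real application would first consult rte_eth_dev_info_get().
 */
static int example_port_init(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_conf port_conf = { .link_speeds = RTE_ETH_LINK_SPEED_AUTONEG };
        int ret;

        ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
        if (ret != 0)
                return ret;

        ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
                        rte_eth_dev_socket_id(port_id), NULL, mb_pool);
        if (ret < 0)
                return ret;

        ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
                        rte_eth_dev_socket_id(port_id), NULL);
        if (ret < 0)
                return ret;

        return rte_eth_dev_start(port_id);
}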
2712
2724int rte_eth_dev_stop(uint16_t port_id);
2725
2738int rte_eth_dev_set_link_up(uint16_t port_id);
2739
2749int rte_eth_dev_set_link_down(uint16_t port_id);
2750
2761int rte_eth_dev_close(uint16_t port_id);
2762
2800int rte_eth_dev_reset(uint16_t port_id);
2801
2813int rte_eth_promiscuous_enable(uint16_t port_id);
2814
2826int rte_eth_promiscuous_disable(uint16_t port_id);
2827
2838int rte_eth_promiscuous_get(uint16_t port_id);
2839
2851int rte_eth_allmulticast_enable(uint16_t port_id);
2852
2864int rte_eth_allmulticast_disable(uint16_t port_id);
2865
2876int rte_eth_allmulticast_get(uint16_t port_id);
2877
2895int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2896
2911int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2912
2926__rte_experimental
2928
2947__rte_experimental
2948int rte_eth_link_to_str(char *str, size_t len,
2949 const struct rte_eth_link *eth_link);
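/*
 * Illustrative sketch (not part of the header): querying the link without
 * waiting and formatting the result with rte_eth_link_to_str(). printf()
 * from <stdio.h> is assumed.
 */
static void example_print_link(uint16_t port_id)
{
        struct rte_eth_link link;
        char text[RTE_ETH_LINK_MAX_STR_LEN];

        if (rte_eth_link_get_nowait(port_id, &link) == 0) {
                rte_eth_link_to_str(text, sizeof(text), &link);
                printf("Port %u: %s\n", port_id, text);
        }
}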
2950
2968int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
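/*
 * Illustrative sketch (not part of the header): reading the basic counters
 * from struct rte_eth_stats. printf() from <stdio.h> and PRIu64 from
 * <inttypes.h> are assumed.
 */
static void example_print_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) == 0)
                printf("Port %u: %" PRIu64 " Rx, %" PRIu64 " Tx, %" PRIu64 " Rx missed\n",
                                port_id, stats.ipackets, stats.opackets, stats.imissed);
}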
2969
2981int rte_eth_stats_reset(uint16_t port_id);
2982
3012int rte_eth_xstats_get_names(uint16_t port_id,
3013 struct rte_eth_xstat_name *xstats_names,
3014 unsigned int size);
3015
3049int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3050 unsigned int n);
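/*
 * Illustrative sketch (not part of the header): sizing, fetching and printing
 * all extended statistics. malloc()/free() from <stdlib.h>, printf() from
 * <stdio.h> and PRIu64 from <inttypes.h> are assumed; error handling is
 * abbreviated.
 */
static void example_dump_xstats(uint16_t port_id)
{
        int i, n = rte_eth_xstats_get(port_id, NULL, 0);
        struct rte_eth_xstat *xstats;
        struct rte_eth_xstat_name *names;

        if (n <= 0)
                return;
        xstats = malloc(sizeof(*xstats) * n);
        names = malloc(sizeof(*names) * n);
        if (xstats != NULL && names != NULL &&
                        rte_eth_xstats_get(port_id, xstats, n) == n &&
                        rte_eth_xstats_get_names(port_id, names, n) == n) {
                for (i = 0; i < n; i++)
                        printf("%s: %" PRIu64 "\n", names[i].name, xstats[i].value);
        }
        free(xstats);
        free(names);
}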
3051
3076int
3078 struct rte_eth_xstat_name *xstats_names, unsigned int size,
3079 uint64_t *ids);
3080
3105int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3106 uint64_t *values, unsigned int size);
3107
3127int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3128 uint64_t *id);
3129
3142int rte_eth_xstats_reset(uint16_t port_id);
3143
3163 uint16_t tx_queue_id, uint8_t stat_idx);
3164
3184 uint16_t rx_queue_id,
3185 uint8_t stat_idx);
3186
3200int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3201
3222__rte_experimental
3223int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3224 unsigned int num);
3225
3269int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3270
3286__rte_experimental
3287int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3288
3309int rte_eth_dev_fw_version_get(uint16_t port_id,
3310 char *fw_version, size_t fw_size);
3311
3351int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3352 uint32_t *ptypes, int num);
3383int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3384 uint32_t *set_ptypes, unsigned int num);
3385
3398int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3399
3417int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3418
3438int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3439
3458int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3459 int on);
3460
3478 enum rte_vlan_type vlan_type,
3479 uint16_t tag_type);
3480
3498int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3499
3513int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3514
3529int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3530
3556__rte_experimental
3557int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3558 uint8_t avail_thresh);
3559
3586__rte_experimental
3587int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3588 uint8_t *avail_thresh);
3589
3590typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3591 void *userdata);
3592
3598 buffer_tx_error_fn error_callback;
3599 void *error_userdata;
3600 uint16_t size;
3601 uint16_t length;
3603 struct rte_mbuf *pkts[];
3604};
3605
3612#define RTE_ETH_TX_BUFFER_SIZE(sz) \
3613 (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3614
3625int
3626rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
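/*
 * Illustrative sketch (not part of the header): allocating a Tx buffer sized
 * with RTE_ETH_TX_BUFFER_SIZE() and initialising it. rte_zmalloc_socket() and
 * rte_free() from <rte_malloc.h> and rte_socket_id() from <rte_lcore.h> are
 * assumed; 32 packets is an example capacity.
 */
static struct rte_eth_dev_tx_buffer *example_tx_buffer_alloc(void)
{
        struct rte_eth_dev_tx_buffer *buffer;

        buffer = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0,
                        rte_socket_id());
        if (buffer != NULL && rte_eth_tx_buffer_init(buffer, 32) != 0) {
                rte_free(buffer);
                buffer = NULL;
        }
        return buffer;
}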
3627
3652int
3654 buffer_tx_error_fn callback, void *userdata);
3655
3678void
3679rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3680 void *userdata);
3681
3705void
3706rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3707 void *userdata);
3708
3734int
3735rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3736
3769};
3770
3790};
3791
3810 uint64_t metadata;
3811};
3812
3847
3872 uint64_t metadata;
3873};
3874
3952
3954typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
3955 enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3956
3975 enum rte_eth_event_type event,
3976 rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3977
3997 enum rte_eth_event_type event,
3998 rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3999
4021int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4022
4043int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4044
4062int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4063
4085int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4086 int epfd, int op, void *data);
4087
4102int
4103rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4104
4118int rte_eth_led_on(uint16_t port_id);
4119
4133int rte_eth_led_off(uint16_t port_id);
4134
4163__rte_experimental
4164int rte_eth_fec_get_capability(uint16_t port_id,
4165 struct rte_eth_fec_capa *speed_fec_capa,
4166 unsigned int num);
4167
4188__rte_experimental
4189int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4190
4214__rte_experimental
4215int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4216
4231int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4232 struct rte_eth_fc_conf *fc_conf);
4233
4248int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4249 struct rte_eth_fc_conf *fc_conf);
4250
4267 struct rte_eth_pfc_conf *pfc_conf);
4268
4287int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4288 uint32_t pool);
4289
4307__rte_experimental
4309 struct rte_eth_pfc_queue_info *pfc_queue_info);
4310
4334__rte_experimental
4336 struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4337
4352int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4353 struct rte_ether_addr *mac_addr);
4354
4373 struct rte_ether_addr *mac_addr);
4374
4392int rte_eth_dev_rss_reta_update(uint16_t port_id,
4393 struct rte_eth_rss_reta_entry64 *reta_conf,
4394 uint16_t reta_size);
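/*
 * Illustrative sketch (not part of the header): spreading a 128-entry
 * redirection table across RTE_ETH_RETA_GROUP_SIZE-sized groups and
 * programming it. memset() from <string.h> is assumed; the RETA size must
 * match what rte_eth_dev_info_get() reports for the port.
 */
static int example_reta_spread(uint16_t port_id, uint16_t nb_rx_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
                                                  RTE_ETH_RETA_GROUP_SIZE];
        uint16_t i;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
                reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
                                RTE_BIT64(i % RTE_ETH_RETA_GROUP_SIZE);
                reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
                                i % nb_rx_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                        RTE_ETH_RSS_RETA_SIZE_128);
}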
4395
4414int rte_eth_dev_rss_reta_query(uint16_t port_id,
4415 struct rte_eth_rss_reta_entry64 *reta_conf,
4416 uint16_t reta_size);
4417
4437int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4438 uint8_t on);
4439
4458int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4459
4476int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4477 uint32_t tx_rate);
4478
4493int rte_eth_dev_rss_hash_update(uint16_t port_id,
4494 struct rte_eth_rss_conf *rss_conf);
4495
4511int
4513 struct rte_eth_rss_conf *rss_conf);
4514
4539int
4541 struct rte_eth_udp_tunnel *tunnel_udp);
4542
4562int
4564 struct rte_eth_udp_tunnel *tunnel_udp);
4565
4580int rte_eth_dev_get_dcb_info(uint16_t port_id,
4581 struct rte_eth_dcb_info *dcb_info);
4582
4583struct rte_eth_rxtx_callback;
4584
4610const struct rte_eth_rxtx_callback *
4611rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4612 rte_rx_callback_fn fn, void *user_param);
4613
4640const struct rte_eth_rxtx_callback *
4641rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4642 rte_rx_callback_fn fn, void *user_param);
4643
4669const struct rte_eth_rxtx_callback *
4670rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4671 rte_tx_callback_fn fn, void *user_param);
4672
4706int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4707 const struct rte_eth_rxtx_callback *user_cb);
4708
4742int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4743 const struct rte_eth_rxtx_callback *user_cb);
4744
4764int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4765 struct rte_eth_rxq_info *qinfo);
4766
4786int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4787 struct rte_eth_txq_info *qinfo);
4788
4807int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4808 struct rte_eth_burst_mode *mode);
4809
4828int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4829 struct rte_eth_burst_mode *mode);
4830
4851__rte_experimental
4852int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
4853 struct rte_power_monitor_cond *pmc);
4854
4873int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
4874
4887int rte_eth_dev_get_eeprom_length(uint16_t port_id);
4888
4905int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4906
4923int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4924
4943__rte_experimental
4944int
4946 struct rte_eth_dev_module_info *modinfo);
4947
4967__rte_experimental
4968int
4970 struct rte_dev_eeprom_info *info);
4971
4991int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4992 struct rte_ether_addr *mc_addr_set,
4993 uint32_t nb_mc_addr);
4994
5007int rte_eth_timesync_enable(uint16_t port_id);
5008
5021int rte_eth_timesync_disable(uint16_t port_id);
5022
5042 struct timespec *timestamp, uint32_t flags);
5043
5060 struct timespec *timestamp);
5061
5079int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5080
5096int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5097
5116int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5117
5163__rte_experimental
5164int
5165rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5166
5182int
5183rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5184
5200int
5201rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5202
5220 uint16_t *nb_rx_desc,
5221 uint16_t *nb_tx_desc);
5222
5237int
5238rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5239
5249void *
5250rte_eth_dev_get_sec_ctx(uint16_t port_id);
5251
5267__rte_experimental
5269 struct rte_eth_hairpin_cap *cap);
5270
5280 int pf;
5281 __extension__
5282 union {
5283 int vf;
5284 int sf;
5285 };
5286 uint32_t id_base;
5287 uint32_t id_end;
5288 char name[RTE_DEV_NAME_MAX_LEN];
5289};
5290
5298 uint16_t controller;
5299 uint16_t pf;
5301 uint32_t nb_ranges;
5303};
5304
5328__rte_experimental
5329int rte_eth_representor_info_get(uint16_t port_id,
5330 struct rte_eth_representor_info *info);
5331
5333#define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5334
5336#define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5337
5339#define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5340
5380int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5381
5383#define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5385#define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5386
5397 uint32_t timeout_ms;
5399 uint16_t max_frags;
5404 uint16_t flags;
5405};
5406
5427__rte_experimental
5429 struct rte_eth_ip_reassembly_params *capa);
5430
5452__rte_experimental
5454 struct rte_eth_ip_reassembly_params *conf);
5455
5485__rte_experimental
5487 const struct rte_eth_ip_reassembly_params *conf);
5488
5496typedef struct {
5503 uint16_t time_spent;
5505 uint16_t nb_frags;
5507
5526__rte_experimental
5527int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5528
5552__rte_experimental
5553int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5554 uint16_t offset, uint16_t num, FILE *file);
5555
5579__rte_experimental
5580int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5581 uint16_t offset, uint16_t num, FILE *file);
5582
5583
5584/* Congestion management */
5585
5595};
5596
5618 uint8_t rsvd[8];
5619};
5620
5632 union {
5639 uint16_t rx_queue;
5647 } obj_param;
5648 union {
5662 } mode_param;
5663};
5664
5682__rte_experimental
5683int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
5684
5702__rte_experimental
5703int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
5704
5721__rte_experimental
5722int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
5723
5744__rte_experimental
5745int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
5746
5747#include <rte_ethdev_core.h>
5748
5772uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
5773 struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
5774 void *opaque);
5775
5863static inline uint16_t
5864rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
5865 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
5866{
5867 uint16_t nb_rx;
5868 struct rte_eth_fp_ops *p;
5869 void *qd;
5870
5871#ifdef RTE_ETHDEV_DEBUG_RX
5872 if (port_id >= RTE_MAX_ETHPORTS ||
5873 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5874 RTE_ETHDEV_LOG(ERR,
5875 "Invalid port_id=%u or queue_id=%u\n",
5876 port_id, queue_id);
5877 return 0;
5878 }
5879#endif
5880
5881 /* fetch pointer to queue data */
5882 p = &rte_eth_fp_ops[port_id];
5883 qd = p->rxq.data[queue_id];
5884
5885#ifdef RTE_ETHDEV_DEBUG_RX
5886 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
5887
5888 if (qd == NULL) {
5889 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
5890 queue_id, port_id);
5891 return 0;
5892 }
5893#endif
5894
5895 nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
5896
5897#ifdef RTE_ETHDEV_RXTX_CALLBACKS
5898 {
5899 void *cb;
5900
5901 /* __ATOMIC_RELEASE memory order was used when the
5902 * callback was inserted into the list.
5903 * Since there is a clear dependency between loading
5904 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
5905 * not required.
5906 */
5907 cb = __atomic_load_n((void **)&p->rxq.clbk[queue_id],
5908 __ATOMIC_RELAXED);
5909 if (unlikely(cb != NULL))
5910 nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
5911 rx_pkts, nb_rx, nb_pkts, cb);
5912 }
5913#endif
5914
5915 rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
5916 return nb_rx;
5917}
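/*
 * Illustrative sketch (not part of the header): a minimal receive loop built
 * on rte_eth_rx_burst(). rte_pktmbuf_free() from <rte_mbuf.h> is assumed and
 * stands in for real packet processing; the burst size is an example value.
 */
static void example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *pkts[32];
        uint16_t nb_rx, i;

        nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
        for (i = 0; i < nb_rx; i++)
                rte_pktmbuf_free(pkts[i]);
}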
5918
5936static inline int
5937rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
5938{
5939 struct rte_eth_fp_ops *p;
5940 void *qd;
5941
5942#ifdef RTE_ETHDEV_DEBUG_RX
5943 if (port_id >= RTE_MAX_ETHPORTS ||
5944 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5945 RTE_ETHDEV_LOG(ERR,
5946 "Invalid port_id=%u or queue_id=%u\n",
5947 port_id, queue_id);
5948 return -EINVAL;
5949 }
5950#endif
5951
5952 /* fetch pointer to queue data */
5953 p = &rte_eth_fp_ops[port_id];
5954 qd = p->rxq.data[queue_id];
5955
5956#ifdef RTE_ETHDEV_DEBUG_RX
5957 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5958 if (qd == NULL)
5959 return -EINVAL;
5960#endif
5961
5962 if (*p->rx_queue_count == NULL)
5963 return -ENOTSUP;
5964 return (int)(*p->rx_queue_count)(qd);
5965}
5966
5970#define RTE_ETH_RX_DESC_AVAIL 0
5971#define RTE_ETH_RX_DESC_DONE 1
5972#define RTE_ETH_RX_DESC_UNAVAIL 2
6008static inline int
6009rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6010 uint16_t offset)
6011{
6012 struct rte_eth_fp_ops *p;
6013 void *qd;
6014
6015#ifdef RTE_ETHDEV_DEBUG_RX
6016 if (port_id >= RTE_MAX_ETHPORTS ||
6017 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6018 RTE_ETHDEV_LOG(ERR,
6019 "Invalid port_id=%u or queue_id=%u\n",
6020 port_id, queue_id);
6021 return -EINVAL;
6022 }
6023#endif
6024
6025 /* fetch pointer to queue data */
6026 p = &rte_eth_fp_ops[port_id];
6027 qd = p->rxq.data[queue_id];
6028
6029#ifdef RTE_ETHDEV_DEBUG_RX
6030 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6031 if (qd == NULL)
6032 return -ENODEV;
6033#endif
6034 if (*p->rx_descriptor_status == NULL)
6035 return -ENOTSUP;
6036 return (*p->rx_descriptor_status)(qd, offset);
6037}
6038
6042#define RTE_ETH_TX_DESC_FULL 0
6043#define RTE_ETH_TX_DESC_DONE 1
6044#define RTE_ETH_TX_DESC_UNAVAIL 2
6080static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6081 uint16_t queue_id, uint16_t offset)
6082{
6083 struct rte_eth_fp_ops *p;
6084 void *qd;
6085
6086#ifdef RTE_ETHDEV_DEBUG_TX
6087 if (port_id >= RTE_MAX_ETHPORTS ||
6088 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6089 RTE_ETHDEV_LOG(ERR,
6090 "Invalid port_id=%u or queue_id=%u\n",
6091 port_id, queue_id);
6092 return -EINVAL;
6093 }
6094#endif
6095
6096 /* fetch pointer to queue data */
6097 p = &rte_eth_fp_ops[port_id];
6098 qd = p->txq.data[queue_id];
6099
6100#ifdef RTE_ETHDEV_DEBUG_TX
6101 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6102 if (qd == NULL)
6103 return -ENODEV;
6104#endif
6105 if (*p->tx_descriptor_status == NULL)
6106 return -ENOTSUP;
6107 return (*p->tx_descriptor_status)(qd, offset);
6108}
6109
6129uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6130 struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6131
6203static inline uint16_t
6204rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6205 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6206{
6207 struct rte_eth_fp_ops *p;
6208 void *qd;
6209
6210#ifdef RTE_ETHDEV_DEBUG_TX
6211 if (port_id >= RTE_MAX_ETHPORTS ||
6212 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6213 RTE_ETHDEV_LOG(ERR,
6214 "Invalid port_id=%u or queue_id=%u\n",
6215 port_id, queue_id);
6216 return 0;
6217 }
6218#endif
6219
6220 /* fetch pointer to queue data */
6221 p = &rte_eth_fp_ops[port_id];
6222 qd = p->txq.data[queue_id];
6223
6224#ifdef RTE_ETHDEV_DEBUG_TX
6225 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6226
6227 if (qd == NULL) {
6228 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6229 queue_id, port_id);
6230 return 0;
6231 }
6232#endif
6233
6234#ifdef RTE_ETHDEV_RXTX_CALLBACKS
6235 {
6236 void *cb;
6237
6238 /* __ATOMIC_RELEASE memory order was used when the
6239 * callback was inserted into the list.
6240 * Since there is a clear dependency between loading
6241 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
6242 * not required.
6243 */
6244 cb = __atomic_load_n((void **)&p->txq.clbk[queue_id],
6245 __ATOMIC_RELAXED);
6246 if (unlikely(cb != NULL))
6247 nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6248 tx_pkts, nb_pkts, cb);
6249 }
6250#endif
6251
6252 nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6253
6254 rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6255 return nb_pkts;
6256}
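/*
 * Illustrative sketch (not part of the header): transmitting a burst and
 * freeing whatever the driver could not enqueue, a common pattern when the
 * application does not retry. rte_pktmbuf_free() from <rte_mbuf.h> is assumed.
 */
static void example_tx_send(uint16_t port_id, uint16_t queue_id,
                struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

        while (nb_tx < nb_pkts)
                rte_pktmbuf_free(pkts[nb_tx++]);
}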
6257
6312#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6313
6314static inline uint16_t
6315rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6316 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6317{
6318 struct rte_eth_fp_ops *p;
6319 void *qd;
6320
6321#ifdef RTE_ETHDEV_DEBUG_TX
6322 if (port_id >= RTE_MAX_ETHPORTS ||
6323 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6324 RTE_ETHDEV_LOG(ERR,
6325 "Invalid port_id=%u or queue_id=%u\n",
6326 port_id, queue_id);
6327 rte_errno = ENODEV;
6328 return 0;
6329 }
6330#endif
6331
6332 /* fetch pointer to queue data */
6333 p = &rte_eth_fp_ops[port_id];
6334 qd = p->txq.data[queue_id];
6335
6336#ifdef RTE_ETHDEV_DEBUG_TX
6337 if (!rte_eth_dev_is_valid_port(port_id)) {
6338 RTE_ETHDEV_LOG(ERR, "Invalid Tx port_id=%u\n", port_id);
6339 rte_errno = ENODEV;
6340 return 0;
6341 }
6342 if (qd == NULL) {
6343 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6344 queue_id, port_id);
6345 rte_errno = EINVAL;
6346 return 0;
6347 }
6348#endif
6349
6350 if (!p->tx_pkt_prepare)
6351 return nb_pkts;
6352
6353 return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6354}
6355
6356#else
6357
6358/*
6359 * Native NOOP operation for compilation targets which do not require any
6360 * preparation steps, and where a functional NOOP may introduce an unnecessary
6361 * performance drop.
6362 *
6363 * Generally it is not a good idea to turn this on globally, and it should not
6364 * be used if the behavior of tx_prepare can change.
6365 */
6366
6367static inline uint16_t
6368rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6369 __rte_unused uint16_t queue_id,
6370 __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6371{
6372 return nb_pkts;
6373}
6374
6375#endif
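/*
 * Illustrative sketch (not part of the header): validating offload metadata
 * with rte_eth_tx_prepare() before handing the burst to rte_eth_tx_burst().
 * On failure, nb_prep indexes the first offending mbuf and rte_errno is set;
 * here only the prepared prefix is sent.
 */
static uint16_t example_prepare_and_send(uint16_t port_id, uint16_t queue_id,
                struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

        return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}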
6376
6399static inline uint16_t
6400rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6401 struct rte_eth_dev_tx_buffer *buffer)
6402{
6403 uint16_t sent;
6404 uint16_t to_send = buffer->length;
6405
6406 if (to_send == 0)
6407 return 0;
6408
6409 sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6410
6411 buffer->length = 0;
6412
6413 /* All packets sent, or to be dealt with by callback below */
6414 if (unlikely(sent != to_send))
6415 buffer->error_callback(&buffer->pkts[sent],
6416 (uint16_t)(to_send - sent),
6417 buffer->error_userdata);
6418
6419 return sent;
6420}
6421
6452static __rte_always_inline uint16_t
6453rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6454 struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6455{
6456 buffer->pkts[buffer->length++] = tx_pkt;
6457 if (buffer->length < buffer->size)
6458 return 0;
6459
6460 return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6461}
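/*
 * Illustrative sketch (not part of the header): buffering packets with
 * rte_eth_tx_buffer() and pushing any remainder out with
 * rte_eth_tx_buffer_flush() at the end of the cycle. `buffer` is assumed to
 * have been set up with rte_eth_tx_buffer_init().
 */
static void example_buffered_tx(uint16_t port_id, uint16_t queue_id,
                struct rte_eth_dev_tx_buffer *buffer,
                struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t i;

        for (i = 0; i < nb_pkts; i++)
                rte_eth_tx_buffer(port_id, queue_id, buffer, pkts[i]);

        rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
}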
6462
6491__rte_experimental
6492int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num);
6493
6494#ifdef __cplusplus
6495}
6496#endif
6497
6498#endif /* _RTE_ETHDEV_H_ */
__rte_experimental int rte_eth_ip_reassembly_capability_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *capa)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_timesync_disable(uint16_t port_id)
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1970
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
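A short capability query via rte_eth_dev_info_get(), printing a few of the fields indexed below; which fields matter is application specific:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
print_port_caps(uint16_t port_id)
{
    struct rte_eth_dev_info dev_info;

    if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
        return;
    printf("port %u: driver %s, max %u Rx / %u Tx queues, "
           "Rx offload capa 0x%" PRIx64 ", Tx offload capa 0x%" PRIx64 "\n",
           port_id, dev_info.driver_name,
           dev_info.max_rx_queues, dev_info.max_tx_queues,
           dev_info.rx_offload_capa, dev_info.tx_offload_capa);
}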
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6315
rte_eth_tunnel_type
Definition: rte_ethdev.h:1405
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
int rte_eth_promiscuous_disable(uint16_t port_id)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_owner_delete(const uint64_t owner_id)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6204
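A transmit helper around the inline rte_eth_tx_burst(); packets the driver did not accept remain owned by the caller, so this sketch simply frees them (a real application might retry instead):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
tx_send_burst(uint16_t port_id, uint16_t queue_id,
              struct rte_mbuf **pkts, uint16_t nb_pkts)
{
    uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

    /* Drop whatever could not be enqueued on the Tx ring. */
    while (nb_tx < nb_pkts)
        rte_pktmbuf_free(pkts[nb_tx++]);
}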
int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:3954
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
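A small sketch that clamps a requested MTU to the min_mtu/max_mtu range reported by rte_eth_dev_info_get() before calling rte_eth_dev_set_mtu(); whether the MTU may be changed on a running port is driver specific:

#include <rte_ethdev.h>

static int
set_mtu_clamped(uint16_t port_id, uint16_t mtu)
{
    struct rte_eth_dev_info dev_info;
    int rc = rte_eth_dev_info_get(port_id, &dev_info);

    if (rc != 0)
        return rc;
    if (mtu < dev_info.min_mtu)
        mtu = dev_info.min_mtu;
    else if (mtu > dev_info.max_mtu)
        mtu = dev_info.max_mtu;
    return rte_eth_dev_set_mtu(port_id, mtu);
}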
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, struct rte_eth_pfc_queue_info *pfc_queue_info)
__rte_experimental int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
#define RTE_ETH_MQ_RX_DCB_FLAG
Definition: rte_ethdev.h:366
uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
rte_eth_fc_mode
Definition: rte_ethdev.h:1316
@ RTE_ETH_FC_TX_PAUSE
Definition: rte_ethdev.h:1319
@ RTE_ETH_FC_RX_PAUSE
Definition: rte_ethdev.h:1318
@ RTE_ETH_FC_NONE
Definition: rte_ethdev.h:1317
@ RTE_ETH_FC_FULL
Definition: rte_ethdev.h:1320
rte_eth_event_macsec_type
Definition: rte_ethdev.h:3775
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP
Definition: rte_ethdev.h:3781
@ RTE_ETH_EVENT_MACSEC_SA_NOT_VALID
Definition: rte_ethdev.h:3789
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP
Definition: rte_ethdev.h:3783
@ RTE_ETH_EVENT_MACSEC_UNKNOWN
Definition: rte_ethdev.h:3777
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP
Definition: rte_ethdev.h:3785
@ RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR
Definition: rte_ethdev.h:3779
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP
Definition: rte_ethdev.h:3787
int rte_eth_led_on(uint16_t port_id)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
__rte_experimental int rte_eth_ip_reassembly_conf_set(uint16_t port_id, const struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, uint8_t avail_thresh)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
__rte_experimental int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
uint32_t link_speed
Definition: rte_ethdev.h:334
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, struct rte_eth_pfc_queue_conf *pfc_queue_conf)
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
#define RTE_ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:365
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
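A link-state-change callback sketch matching the rte_eth_dev_cb_fn signature; rte_eth_link_to_str() is experimental, so the build must allow experimental API, and the 64-byte buffer is an illustrative choice:

#include <stdio.h>
#include <rte_ethdev.h>

static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
             void *cb_arg, void *ret_param)
{
    struct rte_eth_link link;
    char text[64];  /* illustrative buffer size */

    (void)event; (void)cb_arg; (void)ret_param;

    /* Non-blocking query, safe from the event context. */
    if (rte_eth_link_get_nowait(port_id, &link) == 0) {
        rte_eth_link_to_str(text, sizeof(text), &link);
        printf("port %u link change: %s\n", port_id, text);
    }
    return 0;
}

/* Registered once during initialisation, e.g.:
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 lsc_event_cb, NULL);
 * LSC interrupts must also be enabled through rte_eth_conf.intr_conf. */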
int rte_eth_dev_close(uint16_t port_id)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_ip_reassembly_conf_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
#define RTE_ETH_MQ_RX_VMDQ_FLAG
Definition: rte_ethdev.h:367
__rte_experimental int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
__rte_experimental int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:6400
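A buffered-transmit sketch around rte_eth_tx_buffer_init() and rte_eth_tx_buffer_flush(); the RTE_ETH_TX_BUFFER_SIZE() sizing macro, the inline rte_eth_tx_buffer() enqueue helper and the rte_zmalloc() allocation are assumed from the wider DPDK API rather than taken from the index above:

#include <rte_ethdev.h>
#include <rte_malloc.h>

#define TX_BUF_PKTS 32  /* illustrative buffer depth */

static struct rte_eth_dev_tx_buffer *
tx_buffer_create(void)
{
    struct rte_eth_dev_tx_buffer *buf;

    buf = rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(TX_BUF_PKTS), 0);
    if (buf != NULL && rte_eth_tx_buffer_init(buf, TX_BUF_PKTS) != 0) {
        rte_free(buf);
        buf = NULL;
    }
    return buf;
}

/* Datapath usage: rte_eth_tx_buffer(port, queue, buf, mbuf) accumulates
 * packets and sends a full buffer automatically; rte_eth_tx_buffer_flush(port,
 * queue, buf) pushes out whatever is pending, typically once per poll loop. */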
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6009
static int rte_eth_tx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6080
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
int rte_eth_dev_owner_new(uint64_t *owner_id)
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
__rte_experimental int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, uint8_t *avail_thresh)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
__extension__ struct rte_eth_link __rte_aligned(8)
int rte_eth_xstats_reset(uint16_t port_id)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
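A basic counter dump with rte_eth_stats_get(), printing the aggregate fields defined earlier in this file:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
print_basic_stats(uint16_t port_id)
{
    struct rte_eth_stats stats;

    if (rte_eth_stats_get(port_id, &stats) != 0)
        return;
    printf("port %u: rx %" PRIu64 " pkts (%" PRIu64 " missed, %" PRIu64
           " errors, %" PRIu64 " alloc failures), tx %" PRIu64
           " pkts (%" PRIu64 " errors)\n",
           port_id, stats.ipackets, stats.imissed, stats.ierrors,
           stats.rx_nombuf, stats.opackets, stats.oerrors);
}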
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint32_t tx_rate)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
rte_vlan_type
Definition: rte_ethdev.h:431
@ RTE_ETH_VLAN_TYPE_OUTER
Definition: rte_ethdev.h:434
@ RTE_ETH_VLAN_TYPE_INNER
Definition: rte_ethdev.h:433
__rte_experimental int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, unsigned int num)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1994
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
uint16_t rte_eth_dev_count_total(void)
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1842
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:5937
__rte_experimental int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config)
rte_eth_nb_tcs
Definition: rte_ethdev.h:864
@ RTE_ETH_4_TCS
Definition: rte_ethdev.h:865
@ RTE_ETH_8_TCS
Definition: rte_ethdev.h:866
__rte_experimental int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
Definition: rte_ethdev.h:1838
uint8_t rsvd_mode_params[4]
Definition: rte_ethdev.h:5661
enum rte_eth_cman_obj obj
Definition: rte_ethdev.h:5629
struct rte_cman_red_params red
Definition: rte_ethdev.h:5654
uint8_t rsvd_obj_params[4]
Definition: rte_ethdev.h:5646
enum rte_cman_mode mode
Definition: rte_ethdev.h:5631
uint8_t rsvd[8]
Definition: rte_ethdev.h:5618
uint64_t modes_supported
Definition: rte_ethdev.h:5608
uint64_t objs_supported
Definition: rte_ethdev.h:5613
struct rte_eth_intr_conf intr_conf
Definition: rte_ethdev.h:1489
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:1476
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:1463
union rte_eth_conf::@116 tx_adv_conf
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:1462
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:1472
uint32_t lpbk_mode
Definition: rte_ethdev.h:1464
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1488
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:1480
uint32_t link_speeds
Definition: rte_ethdev.h:1455
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:1470
struct rte_eth_conf::@115 rx_adv_conf
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:1482
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:1474
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:1484
uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]
Definition: rte_ethdev.h:1903
uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:1902
struct rte_eth_dcb_tc_queue_mapping tc_queue
Definition: rte_ethdev.h:1905
struct rte_eth_dcb_tc_queue_mapping::@117 tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
struct rte_eth_dcb_tc_queue_mapping::@118 tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1310
uint16_t nb_seg_max
Definition: rte_ethdev.h:1297
uint16_t nb_align
Definition: rte_ethdev.h:1287
uint32_t max_hash_mac_addrs
Definition: rte_ethdev.h:1732
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1754
unsigned int if_index
Definition: rte_ethdev.h:1719
uint16_t max_rx_queues
Definition: rte_ethdev.h:1728
uint64_t dev_capa
Definition: rte_ethdev.h:1772
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1752
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1724
uint16_t max_tx_queues
Definition: rte_ethdev.h:1729
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1750
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1734
struct rte_device * device
Definition: rte_ethdev.h:1717
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1749
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1759
enum rte_eth_err_handle_mode err_handle_mode
Definition: rte_ethdev.h:1779
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1725
uint16_t max_mtu
Definition: rte_ethdev.h:1722
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1727
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1751
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1782
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1781
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1743
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1753
uint16_t min_mtu
Definition: rte_ethdev.h:1721
uint16_t reta_size
Definition: rte_ethdev.h:1745
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1755
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1748
uint16_t max_rx_mempools
Definition: rte_ethdev.h:1766
uint16_t max_vfs
Definition: rte_ethdev.h:1733
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1770
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1739
const char * driver_name
Definition: rte_ethdev.h:1718
uint8_t hash_key_size
Definition: rte_ethdev.h:1746
uint32_t speed_capa
Definition: rte_ethdev.h:1756
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1768
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1777
struct rte_eth_rxseg_capa rx_seg_capa
Definition: rte_ethdev.h:1735
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1741
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1737
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1758
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1730
const uint32_t * dev_flags
Definition: rte_ethdev.h:1723
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3603
enum rte_eth_event_ipsec_subtype subtype
Definition: rte_ethdev.h:3854
enum rte_eth_event_macsec_type type
Definition: rte_ethdev.h:3798
enum rte_eth_event_macsec_subtype subtype
Definition: rte_ethdev.h:3800
uint32_t low_water
Definition: rte_ethdev.h:1330
uint16_t send_xon
Definition: rte_ethdev.h:1332
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1333
uint32_t high_water
Definition: rte_ethdev.h:1329
uint16_t pause_time
Definition: rte_ethdev.h:1331
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1334
uint16_t max_nb_queues
Definition: rte_ethdev.h:1182
struct rte_eth_hairpin_queue_cap tx_cap
Definition: rte_ethdev.h:1189
struct rte_eth_hairpin_queue_cap rx_cap
Definition: rte_ethdev.h:1188
uint32_t use_locked_device_memory
Definition: rte_ethdev.h:1249
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:1344
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1381
enum rte_eth_fc_mode mode_capa
Definition: rte_ethdev.h:1360
struct rte_eth_representor_range ranges[]
Definition: rte_ethdev.h:5302
enum rte_eth_representor_type type
Definition: rte_ethdev.h:5278
char name[RTE_DEV_NAME_MAX_LEN]
Definition: rte_ethdev.h:5288
uint8_t * rss_key
Definition: rte_ethdev.h:464
uint8_t rss_key_len
Definition: rte_ethdev.h:465
uint64_t rss_hf
Definition: rte_ethdev.h:466
uint16_t reta[RTE_ETH_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:857
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:1077
uint64_t offloads
Definition: rte_ethdev.h:1095
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1128
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1127
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1080
uint16_t share_group
Definition: rte_ethdev.h:1088
uint8_t rx_drop_en
Definition: rte_ethdev.h:1079
uint16_t share_qid
Definition: rte_ethdev.h:1089
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1103
struct rte_mempool ** rx_mempools
Definition: rte_ethdev.h:1124
uint16_t rx_nseg
Definition: rte_ethdev.h:1081
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1078
uint32_t mtu
Definition: rte_ethdev.h:413
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:415
uint64_t offloads
Definition: rte_ethdev.h:421
void * reserved_ptrs[2]
Definition: rte_ethdev.h:424
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:423
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:412
struct rte_eth_rxconf conf
Definition: rte_ethdev.h:1797
uint8_t scattered_rx
Definition: rte_ethdev.h:1798
struct rte_mempool * mp
Definition: rte_ethdev.h:1796
uint8_t queue_state
Definition: rte_ethdev.h:1799
uint8_t avail_thresh
Definition: rte_ethdev.h:1808
uint16_t nb_desc
Definition: rte_ethdev.h:1800
uint16_t rx_buf_size
Definition: rte_ethdev.h:1801
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1668
uint32_t offset_allowed
Definition: rte_ethdev.h:1669
uint32_t offset_align_log2
Definition: rte_ethdev.h:1670
struct rte_mempool * mp
Definition: rte_ethdev.h:1044
uint64_t imissed
Definition: rte_ethdev.h:270
uint64_t obytes
Definition: rte_ethdev.h:265
uint64_t opackets
Definition: rte_ethdev.h:263
uint64_t rx_nombuf
Definition: rte_ethdev.h:273
uint64_t ibytes
Definition: rte_ethdev.h:264
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:280
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:278
uint64_t ierrors
Definition: rte_ethdev.h:271
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:284
uint64_t ipackets
Definition: rte_ethdev.h:262
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:276
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:282
uint64_t oerrors
Definition: rte_ethdev.h:272
const char * name
Definition: rte_ethdev.h:1642
uint8_t hthresh
Definition: rte_ethdev.h:358
uint8_t pthresh
Definition: rte_ethdev.h:357
uint8_t wthresh
Definition: rte_ethdev.h:359
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1140
uint64_t offloads
Definition: rte_ethdev.h:1146
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1149
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1148
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:1135
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1136
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1137
uint64_t offloads
Definition: rte_ethdev.h:968
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:977
void * reserved_ptrs[2]
Definition: rte_ethdev.h:980
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:973
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:979
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:975
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:962
struct rte_eth_txconf conf
Definition: rte_ethdev.h:1816
uint8_t queue_state
Definition: rte_ethdev.h:1818
uint16_t nb_desc
Definition: rte_ethdev.h:1817
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:915
uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:924
struct rte_eth_vmdq_dcb_conf::@111 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
uint8_t enable_default_pool
Definition: rte_ethdev.h:916
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:946
struct rte_eth_vmdq_rx_conf::@112 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
uint8_t enable_default_pool
Definition: rte_ethdev.h:947
uint8_t enable_loop_back
Definition: rte_ethdev.h:949
char name[RTE_ETH_XSTATS_NAME_SIZE]
Definition: rte_ethdev.h:1873
uint64_t value
Definition: rte_ethdev.h:1855
uint64_t id
Definition: rte_ethdev.h:1854