80 FatalError(
"Error creating thread %s: you do not have "
81 "support for DPDK enabled, on Linux host please recompile "
99 #define MIN_ZERO_POLL_COUNT 10U
100 #define MIN_ZERO_POLL_COUNT_TO_SLEEP 10U
101 #define MINIMUM_SLEEP_TIME_US 1U
102 #define STANDARD_SLEEP_TIME_US 100U
103 #define MAX_EPOLL_TIMEOUT_MS 500U
104 static rte_spinlock_t intr_lock[RTE_MAX_ETHPORTS];
109 typedef struct DPDKThreadVars_ {
118 uint16_t capture_dpdk_packets;
119 uint16_t capture_dpdk_rx_errs;
120 uint16_t capture_dpdk_imissed;
121 uint16_t capture_dpdk_rx_no_mbufs;
122 uint16_t capture_dpdk_ierrors;
123 uint16_t capture_dpdk_tx_errs;
128 uint16_t out_port_id;
136 int32_t port_socket_id;
137 struct rte_mempool *pkt_mempool;
138 struct rte_mbuf *received_mbufs[BURST_SIZE];
143 static void ReceiveDPDKThreadExitStats(
ThreadVars *,
void *);
151 static void DPDKFreeMbufArray(
struct rte_mbuf **mbuf_array, uint16_t mbuf_cnt, uint16_t
offset);
152 static bool InterruptsRXEnable(uint16_t port_id, uint16_t queue_id)
154 uint32_t event_data = port_id << UINT16_WIDTH | queue_id;
155 int32_t ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
156 RTE_INTR_EVENT_ADD, (
void *)((uintptr_t)event_data));
159 SCLogError(
"%s-Q%d: failed to enable interrupt mode: %s", DPDKGetPortNameByPortID(port_id),
160 queue_id, rte_strerror(-ret));
166 static inline uint32_t InterruptsSleepHeuristic(uint32_t no_pkt_polls_count)
168 if (no_pkt_polls_count < MIN_ZERO_POLL_COUNT_TO_SLEEP)
169 return MINIMUM_SLEEP_TIME_US;
171 return STANDARD_SLEEP_TIME_US;
174 static inline void InterruptsTurnOnOff(uint16_t port_id, uint16_t queue_id,
bool on)
176 rte_spinlock_lock(&(intr_lock[port_id]));
179 rte_eth_dev_rx_intr_enable(port_id, queue_id);
181 rte_eth_dev_rx_intr_disable(port_id, queue_id);
183 rte_spinlock_unlock(&(intr_lock[port_id]));
/**
 * \brief Free mbuf_cnt mbufs from mbuf_array, starting at index offset.
 *
 * Fix: the loop previously ran `i < mbuf_cnt`, but the call site passes
 * mbuf_cnt as a *count* of remaining mbufs (nb_rx - i - 1) together with a
 * non-zero offset (i + 1); that under-iterated and leaked the trailing
 * mbufs whenever offset > 0. Iterate [offset, offset + mbuf_cnt) instead.
 *
 * \param mbuf_array array holding the mbufs
 * \param mbuf_cnt   number of mbufs to free
 * \param offset     first index to free
 */
static inline void DPDKFreeMbufArray(
        struct rte_mbuf **mbuf_array, uint16_t mbuf_cnt, uint16_t offset)
{
    for (int i = offset; i < mbuf_cnt + offset; i++) {
        rte_pktmbuf_free(mbuf_array[i]);
    }
}
194 static void DevicePostStartPMDSpecificActions(DPDKThreadVars *ptv,
const char *driver_name)
196 if (strcmp(driver_name,
"net_bonding") == 0)
197 driver_name = BondingDeviceDriverGet(ptv->port_id);
198 if (strcmp(driver_name,
"net_i40e") == 0)
199 i40eDeviceSetRSS(ptv->port_id, ptv->threads, ptv->livedev->dev);
200 else if (strcmp(driver_name,
"net_ixgbe") == 0)
201 ixgbeDeviceSetRSS(ptv->port_id, ptv->threads, ptv->livedev->dev);
202 else if (strcmp(driver_name,
"net_ice") == 0)
203 iceDeviceSetRSS(ptv->port_id, ptv->threads, ptv->livedev->dev);
204 else if (strcmp(driver_name,
"mlx5_pci") == 0)
205 mlx5DeviceSetRSS(ptv->port_id, ptv->threads, ptv->livedev->dev);
208 static void DevicePreClosePMDSpecificActions(DPDKThreadVars *ptv,
const char *driver_name)
210 if (strcmp(driver_name,
"net_bonding") == 0) {
211 driver_name = BondingDeviceDriverGet(ptv->port_id);
215 #
if RTE_VERSION > RTE_VERSION_NUM(20, 0, 0, 0)
216 strcmp(driver_name,
"net_i40e") == 0 ||
218 strcmp(driver_name,
"net_ixgbe") == 0 || strcmp(driver_name,
"net_ice") == 0 ||
219 strcmp(driver_name,
"mlx5_pci") == 0) {
221 struct rte_flow_error flush_error = { 0 };
222 int32_t retval = rte_flow_flush(ptv->port_id, &flush_error);
224 SCLogError(
"%s: unable to flush rte_flow rules: %s Flush error msg: %s",
225 ptv->livedev->dev, rte_strerror(-retval), flush_error.message);
/**
 * \brief Determine the NUMA node of the CPU the calling thread runs on.
 *
 * Only implemented for Linux (sched_getcpu + numa_node_of_cpu); on other
 * hosts a warning is logged and the lookup is reported as unavailable.
 *
 * \return NUMA node id on success, -1 when it cannot be determined
 */
static int GetNumaNode(void)
{
    int cpu = 0;
    int node = -1;

#if defined(__linux__)
    cpu = sched_getcpu();
    node = numa_node_of_cpu(cpu);
#else
    SCLogWarning("NUMA node retrieval is not supported on this OS.");
#endif

    return node;
}
281 static inline void DPDKDumpCounters(DPDKThreadVars *ptv)
286 if (ptv->queue_id == 0) {
287 struct rte_eth_stats eth_stats;
288 int retval = rte_eth_stats_get(ptv->port_id, ð_stats);
290 SCLogError(
"%s: failed to get stats: %s", ptv->livedev->dev, rte_strerror(-retval));
295 ptv->pkts + eth_stats.imissed + eth_stats.ierrors + eth_stats.rx_nombuf);
297 eth_stats.ipackets + eth_stats.imissed + eth_stats.ierrors + eth_stats.rx_nombuf);
299 eth_stats.imissed + eth_stats.ierrors + eth_stats.rx_nombuf);
300 StatsSetUI64(ptv->tv, ptv->capture_dpdk_imissed, eth_stats.imissed);
301 StatsSetUI64(ptv->tv, ptv->capture_dpdk_rx_no_mbufs, eth_stats.rx_nombuf);
302 StatsSetUI64(ptv->tv, ptv->capture_dpdk_ierrors, eth_stats.ierrors);
303 StatsSetUI64(ptv->tv, ptv->capture_dpdk_tx_errs, eth_stats.oerrors);
305 ptv->livedev->drop, eth_stats.imissed + eth_stats.ierrors + eth_stats.rx_nombuf);
307 StatsSetUI64(ptv->tv, ptv->capture_dpdk_packets, ptv->pkts);
/* Release callback for packets captured via DPDK: in copy (TAP/IPS) modes the
 * backing mbuf is transmitted on the peer port, otherwise it is freed.
 * NOTE(review): several interior lines are elided in this view — the exact
 * copy-mode condition and retry structure must be confirmed against the full
 * file. */
311 static void DPDKReleasePacket(
Packet *p)
/* On Intel PMDs, MLDv2 reports (ICMPv6 type 143) are presumably excluded from
 * retransmission — TODO confirm rationale against the full condition. */
320 #
if defined(RTE_LIBRTE_I40E_PMD) || defined(RTE_LIBRTE_IXGBE_PMD) || defined(RTE_LIBRTE_ICE_PMD)
321 && !(PacketIsICMPv6(p) && PacketGetICMPv6(p)->
type == 143)
/* first transmit attempt: burst of exactly one mbuf on the peered port/queue */
326 rte_eth_tx_burst(p->dpdk_v.out_port_id, p->dpdk_v.out_queue_id, &p->dpdk_v.mbuf, 1);
/* second attempt — a repeated transmit can sometimes get the packet out */
333 retval = rte_eth_tx_burst(
334 p->dpdk_v.out_port_id, p->dpdk_v.out_queue_id, &p->dpdk_v.mbuf, 1);
/* both attempts failed: log and drop the mbuf so it is not leaked */
336 SCLogDebug(
"Unable to transmit the packet on port %u queue %u",
337 p->dpdk_v.out_port_id, p->dpdk_v.out_queue_id);
338 rte_pktmbuf_free(p->dpdk_v.mbuf);
339 p->dpdk_v.mbuf = NULL;
/* non-copy path: the mbuf is simply returned to its mempool */
343 rte_pktmbuf_free(p->dpdk_v.mbuf);
344 p->dpdk_v.mbuf = NULL;
358 rte_eth_stats_reset(ptv->port_id);
359 rte_eth_xstats_reset(ptv->port_id);
361 if (ptv->intr_enabled && !InterruptsRXEnable(ptv->port_id, ptv->queue_id))
367 static inline void LoopHandleTimeoutOnIdle(
ThreadVars *
tv)
369 static thread_local uint64_t last_timeout_msec = 0;
372 if (msecs > last_timeout_msec + 100) {
373 TmThreadsCaptureHandleTimeout(
tv, NULL);
374 last_timeout_msec = msecs;
382 static inline bool RXPacketCountHeuristic(
ThreadVars *
tv, DPDKThreadVars *ptv, uint16_t nb_rx)
384 static thread_local uint32_t zero_pkt_polls_cnt = 0;
387 zero_pkt_polls_cnt = 0;
391 LoopHandleTimeoutOnIdle(
tv);
392 if (!ptv->intr_enabled)
395 zero_pkt_polls_cnt++;
396 if (zero_pkt_polls_cnt <= MIN_ZERO_POLL_COUNT)
399 uint32_t pwd_idle_hint = InterruptsSleepHeuristic(zero_pkt_polls_cnt);
400 if (pwd_idle_hint < STANDARD_SLEEP_TIME_US) {
401 rte_delay_us(pwd_idle_hint);
403 InterruptsTurnOnOff(ptv->port_id, ptv->queue_id,
true);
404 struct rte_epoll_event event;
405 rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, MAX_EPOLL_TIMEOUT_MS);
406 InterruptsTurnOnOff(ptv->port_id, ptv->queue_id,
false);
/* Build a Suricata Packet around a received mbuf, wiring up the DPDK-specific
 * release metadata and honoring hardware checksum offload results.
 * NOTE(review): the allocation of `p` and the checksum-mode branching are
 * elided in this view — confirm against the full file. */
417 static inline Packet *PacketInitFromMbuf(DPDKThreadVars *ptv,
struct rte_mbuf *mbuf)
/* the Packet does not copy mbuf data; it references the mbuf directly */
430 p->dpdk_v.mbuf = mbuf;
/* remember where/how to send or free the mbuf when the packet is released */
432 p->dpdk_v.copy_mode = ptv->copy_mode;
433 p->dpdk_v.out_port_id = ptv->out_port_id;
434 p->dpdk_v.out_queue_id = ptv->queue_id;
/* inspect HW RX checksum offload flags reported by the NIC */
440 uint64_t ol_flags = p->dpdk_v.mbuf->ol_flags;
/* both IP and L4 checksums verified good in hardware: skip SW validation */
441 if ((ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) == RTE_MBUF_F_RX_IP_CKSUM_GOOD &&
442 (ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) == RTE_MBUF_F_RX_L4_CKSUM_GOOD) {
443 SCLogDebug(
"HW detected GOOD IP and L4 chsum, ignoring validation");
/* hardware flagged a bad IP checksum — presumably marks the packet invalid;
 * handling elided here, TODO confirm */
446 if ((ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) == RTE_MBUF_F_RX_IP_CKSUM_BAD) {
/* hardware flagged a bad L4 (TCP/UDP) checksum — handling elided here */
452 if ((ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) == RTE_MBUF_F_RX_L4_CKSUM_BAD) {
463 static inline void DPDKSegmentedMbufWarning(
struct rte_mbuf *mbuf)
465 static thread_local
bool segmented_mbufs_warned =
false;
466 if (!segmented_mbufs_warned && !rte_pktmbuf_is_contiguous(mbuf)) {
467 char warn_s[] =
"Segmented mbufs detected! Redmine Ticket #6012 "
468 "Check your configuration or report the issue";
469 enum rte_proc_type_t eal_t = rte_eal_process_type();
470 if (eal_t == RTE_PROC_SECONDARY) {
472 "try to increase mbuf size in your primary application",
474 }
else if (eal_t == RTE_PROC_PRIMARY) {
476 "try to increase MTU in your suricata.yaml",
480 segmented_mbufs_warned =
true;
/* Coordinated worker shutdown: all workers check in and wait for each other,
 * then the queue-0 worker stops the port(s) and final stats are flushed.
 * NOTE(review): the check-in increment, wait-loop body, and the condition
 * guarding the peer-port stop are elided in this view — confirm against the
 * full file. */
484 static void HandleShutdown(DPDKThreadVars *ptv)
/* barrier: spin until every worker of this interface has checked in */
488 while (
SC_ATOMIC_GET(ptv->workers_sync->worker_checked_in) < ptv->workers_sync->worker_cnt) {
/* only the first queue's worker performs the port-wide teardown */
491 if (ptv->queue_id == 0) {
/* stop the peered (copy-mode) port first so the peer stops sending to us;
 * presumably guarded by a copy-mode check on an elided line — TODO confirm */
498 rte_eth_dev_stop(ptv->out_port_id);
/* then stop our own capture port */
501 rte_eth_dev_stop(ptv->port_id);
/* publish the final counter values before the thread exits */
504 DPDKDumpCounters(ptv);
507 static void PeriodicDPDKDumpCounters(DPDKThreadVars *ptv)
509 static thread_local
SCTime_t last_dump = { 0 };
512 if (current_time.
secs != last_dump.secs) {
513 DPDKDumpCounters(ptv);
514 last_dump = current_time;
524 DPDKThreadVars *ptv = (DPDKThreadVars *)data;
525 ptv->slot = ((
TmSlot *)slot)->slot_next;
526 TmEcode ret = ReceiveDPDKLoopInit(
tv, ptv);
537 rte_eth_rx_burst(ptv->port_id, ptv->queue_id, ptv->received_mbufs, BURST_SIZE);
538 if (RXPacketCountHeuristic(
tv, ptv, nb_rx)) {
542 ptv->pkts += (uint64_t)nb_rx;
543 for (uint16_t i = 0; i < nb_rx; i++) {
544 Packet *p = PacketInitFromMbuf(ptv, ptv->received_mbufs[i]);
546 rte_pktmbuf_free(ptv->received_mbufs[i]);
549 DPDKSegmentedMbufWarning(ptv->received_mbufs[i]);
550 PacketSetData(p, rte_pktmbuf_mtod(p->dpdk_v.mbuf, uint8_t *),
551 rte_pktmbuf_pkt_len(p->dpdk_v.mbuf));
552 if (TmThreadsSlotProcessPkt(ptv->tv, ptv->slot, p) !=
TM_ECODE_OK) {
554 DPDKFreeMbufArray(ptv->received_mbufs, nb_rx - i - 1, i + 1);
559 PeriodicDPDKDumpCounters(ptv);
577 int retval, thread_numa;
578 DPDKThreadVars *ptv = NULL;
581 if (initdata == NULL) {
582 SCLogError(
"DPDK configuration is NULL in thread initialization");
586 ptv =
SCCalloc(1,
sizeof(DPDKThreadVars));
604 ptv->copy_mode = dpdk_config->copy_mode;
605 ptv->checksum_mode = dpdk_config->checksum_mode;
607 ptv->threads = dpdk_config->threads;
608 ptv->intr_enabled = (dpdk_config->flags &
DPDK_IRQ_MODE) ?
true :
false;
609 ptv->port_id = dpdk_config->port_id;
610 ptv->out_port_id = dpdk_config->out_port_id;
611 ptv->port_socket_id = dpdk_config->socket_id;
613 ptv->pkt_mempool = dpdk_config->pkt_mempool;
614 dpdk_config->pkt_mempool = NULL;
616 thread_numa = GetNumaNode();
617 if (thread_numa >= 0 && ptv->port_socket_id != SOCKET_ID_ANY &&
618 thread_numa != ptv->port_socket_id) {
620 SCLogPerf(
"%s: NIC is on NUMA %d, thread on NUMA %d", dpdk_config->iface,
621 ptv->port_socket_id, thread_numa);
624 ptv->workers_sync = dpdk_config->workers_sync;
626 ptv->queue_id = queue_id;
629 if (queue_id == dpdk_config->threads - 1) {
630 retval = rte_eth_dev_start(ptv->port_id);
632 SCLogError(
"%s: error (%s) during device startup", dpdk_config->iface,
633 rte_strerror(-retval));
637 struct rte_eth_dev_info dev_info;
638 retval = rte_eth_dev_info_get(ptv->port_id, &dev_info);
640 SCLogError(
"%s: error (%s) when getting device info", dpdk_config->iface,
641 rte_strerror(-retval));
646 DevicePostStartPMDSpecificActions(ptv, dev_info.driver_name);
648 uint16_t inconsistent_numa_cnt =
SC_ATOMIC_GET(dpdk_config->inconsistent_numa_cnt);
649 if (inconsistent_numa_cnt > 0 && ptv->port_socket_id != SOCKET_ID_ANY) {
650 SCLogWarning(
"%s: NIC is on NUMA %d, %u threads on different NUMA node(s)",
651 dpdk_config->iface, ptv->port_socket_id, inconsistent_numa_cnt);
652 }
else if (ptv->port_socket_id == SOCKET_ID_ANY && rte_socket_count() > 1) {
654 "%s: unable to determine NIC's NUMA node, degraded performance can be expected",
657 if (ptv->intr_enabled) {
658 rte_spinlock_init(&intr_lock[ptv->port_id]);
663 dpdk_config->DerefFunc(dpdk_config);
667 if (dpdk_config != NULL)
668 dpdk_config->DerefFunc(dpdk_config);
674 static void PrintDPDKPortXstats(uint32_t port_id,
const char *port_name)
676 struct rte_eth_xstat *xstats;
677 struct rte_eth_xstat_name *xstats_names;
679 int32_t
len = rte_eth_xstats_get(port_id, NULL, 0);
681 FatalError(
"Error (%s) getting count of rte_eth_xstats failed on port %s",
682 rte_strerror(-
len), port_name);
686 FatalError(
"Failed to allocate memory for the rte_eth_xstat structure");
688 int32_t ret = rte_eth_xstats_get(port_id, xstats,
len);
689 if (ret < 0 || ret >
len) {
691 FatalError(
"Error (%s) getting rte_eth_xstats failed on port %s", rte_strerror(-ret),
694 xstats_names =
SCCalloc(
len,
sizeof(*xstats_names));
695 if (xstats_names == NULL) {
697 FatalError(
"Failed to allocate memory for the rte_eth_xstat_name array");
699 ret = rte_eth_xstats_get_names(port_id, xstats_names,
len);
700 if (ret < 0 || ret >
len) {
703 FatalError(
"Error (%s) getting names of rte_eth_xstats failed on port %s",
704 rte_strerror(-ret), port_name);
706 for (int32_t i = 0; i <
len; i++) {
707 if (xstats[i].value > 0)
708 SCLogPerf(
"Port %u (%s) - %s: %" PRIu64, port_id, port_name, xstats_names[i].
name,
721 static void ReceiveDPDKThreadExitStats(
ThreadVars *
tv,
void *data)
725 DPDKThreadVars *ptv = (DPDKThreadVars *)data;
727 if (ptv->queue_id == 0) {
728 struct rte_eth_stats eth_stats;
729 PrintDPDKPortXstats(ptv->port_id, ptv->livedev->dev);
730 retval = rte_eth_stats_get(ptv->port_id, ð_stats);
732 SCLogError(
"%s: failed to get stats (%s)", ptv->livedev->dev, strerror(-retval));
735 SCLogPerf(
"%s: total RX stats: packets %" PRIu64
" bytes: %" PRIu64
" missed: %" PRIu64
736 " errors: %" PRIu64
" nombufs: %" PRIu64,
737 ptv->livedev->dev, eth_stats.ipackets, eth_stats.ibytes, eth_stats.imissed,
738 eth_stats.ierrors, eth_stats.rx_nombuf);
740 SCLogPerf(
"%s: total TX stats: packets %" PRIu64
" bytes: %" PRIu64
" errors: %" PRIu64,
741 ptv->livedev->dev, eth_stats.opackets, eth_stats.obytes, eth_stats.oerrors);
744 DPDKDumpCounters(ptv);
756 DPDKThreadVars *ptv = (DPDKThreadVars *)data;
758 if (ptv->queue_id == 0) {
759 struct rte_eth_dev_info dev_info;
760 int retval = rte_eth_dev_info_get(ptv->port_id, &dev_info);
762 SCLogError(
"%s: error (%s) when getting device info", ptv->livedev->dev,
763 rte_strerror(-retval));
767 DevicePreClosePMDSpecificActions(ptv, dev_info.driver_name);
769 if (ptv->workers_sync) {
770 SCFree(ptv->workers_sync);
774 ptv->pkt_mempool = NULL;