/* RSS hash key: the repeating 0x6D, 0x5A byte pattern is the widely used key for symmetric
 * RSS (Toeplitz) hashing, so both directions of a flow hash to the same queue. */
#define RSS_HKEY_LEN 40
uint8_t rss_hkey[] = { 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D,
    0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D,
    0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A };

/* Round x up to the nearest multiple of y. */
#define ROUNDUP(x, y) ((((x) + ((y)-1)) / (y)) * (y))
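/* Forward declarations: helpers that build the EAL argument vector and the per-interface
 * configuration setters used by ConfigLoad() further below. */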
static char *AllocArgument(size_t arg_len);
static char *AllocAndSetArgument(const char *arg);
static char *AllocAndSetOption(const char *arg);

static void ArgumentsInit(struct Arguments *args, unsigned capacity);
static void ArgumentsCleanup(struct Arguments *args);
static void ArgumentsAdd(struct Arguments *args, char *value);
static void ArgumentsAddOptionAndArgument(struct Arguments *args, const char *opt, const char *arg);
static void InitEal(void);

static void ConfigSetIface(DPDKIfaceConfig *iconf, const char *entry_str);
static int ConfigSetThreads(DPDKIfaceConfig *iconf, const char *entry_str);
static int ConfigSetRxQueues(DPDKIfaceConfig *iconf, uint16_t nb_queues);
static int ConfigSetTxQueues(DPDKIfaceConfig *iconf, uint16_t nb_queues);
static int ConfigSetMempoolSize(DPDKIfaceConfig *iconf, intmax_t entry_int);
static int ConfigSetMempoolCacheSize(DPDKIfaceConfig *iconf, const char *entry_str);
static int ConfigSetRxDescriptors(DPDKIfaceConfig *iconf, intmax_t entry_int);
static int ConfigSetTxDescriptors(DPDKIfaceConfig *iconf, intmax_t entry_int);
static bool ConfigSetPromiscuousMode(DPDKIfaceConfig *iconf, int entry_bool);
static int ConfigSetChecksumChecks(DPDKIfaceConfig *iconf, int entry_bool);
static int ConfigSetChecksumOffload(DPDKIfaceConfig *iconf, int entry_bool);
static int ConfigSetCopyIface(DPDKIfaceConfig *iconf, const char *entry_str);
static int ConfigSetCopyMode(DPDKIfaceConfig *iconf, const char *entry_str);
static int ConfigSetCopyIfaceSettings(DPDKIfaceConfig *iconf, const char *iface, const char *mode);

static void DeviceInitPortConf(const DPDKIfaceConfig *iconf,
        const struct rte_eth_dev_info *dev_info, struct rte_eth_conf *port_conf);
static int DeviceConfigureQueues(DPDKIfaceConfig *iconf, const struct rte_eth_dev_info *dev_info,
        const struct rte_eth_conf *port_conf);

static void *ParseDpdkConfigAndConfigureDevice(const char *iface);
static void DPDKDerefConfig(void *conf);
#define DPDK_CONFIG_DEFAULT_THREADS "auto"
#define DPDK_CONFIG_DEFAULT_MEMPOOL_SIZE 65535
#define DPDK_CONFIG_DEFAULT_MEMPOOL_CACHE_SIZE "auto"
#define DPDK_CONFIG_DEFAULT_RX_DESCRIPTORS 1024
#define DPDK_CONFIG_DEFAULT_TX_DESCRIPTORS 1024
#define DPDK_CONFIG_DEFAULT_RSS_HASH_FUNCTIONS RTE_ETH_RSS_IP
#define DPDK_CONFIG_DEFAULT_MTU 1500
#define DPDK_CONFIG_DEFAULT_PROMISCUOUS_MODE 1
#define DPDK_CONFIG_DEFAULT_MULTICAST_MODE 1
#define DPDK_CONFIG_DEFAULT_CHECKSUM_VALIDATION 1
#define DPDK_CONFIG_DEFAULT_CHECKSUM_VALIDATION_OFFLOAD 1
#define DPDK_CONFIG_DEFAULT_COPY_MODE "none"
#define DPDK_CONFIG_DEFAULT_COPY_INTERFACE "none"
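/* The dpdk_yaml table below stores the per-interface option names as they appear under the
 * dpdk.interfaces section of suricata.yaml; ConfigLoad() looks each key up and calls the
 * matching ConfigSet* function. Illustrative YAML sketch (the "interface" key and the example
 * values are assumptions for illustration, not taken from this file):
 *
 *   dpdk:
 *     interfaces:
 *       - interface: 0000:3b:00.0
 *         threads: auto
 *         promisc: true
 *         mempool-size: 65535
 *         copy-mode: none
 */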
    .promisc = "promisc",
    .multicast = "multicast",
    .checksum_checks = "checksum-checks",
    .checksum_checks_offload = "checksum-checks-offload",
    .rss_hf = "rss-hash-functions",
    .mempool_size = "mempool-size",
    .mempool_cache_size = "mempool-cache-size",
    .rx_descriptors = "rx-descriptors",
    .tx_descriptors = "tx-descriptors",
    .copy_mode = "copy-mode",
    .copy_iface = "copy-iface",
static int GreatestDivisorUpTo(uint32_t num, uint32_t max_num)
    for (int i = max_num; i >= 2; i--) {
static char *AllocArgument(size_t arg_len)
    ptr = (char *)SCCalloc(arg_len, sizeof(char));
        FatalError("Could not allocate memory for an argument");
static char *AllocAndSetArgument(const char *arg)
        FatalError("Passed argument is NULL in DPDK config initialization");

    size_t arg_len = strlen(arg);
    ptr = AllocArgument(arg_len);
    strlcpy(ptr, arg, arg_len + 1);
static char *AllocAndSetOption(const char *arg)
        FatalError("Passed option is NULL in DPDK config initialization");

    size_t arg_len = strlen(arg);
    uint8_t is_long_arg = arg_len > 1;
    const char *dash_prefix = is_long_arg ? "--" : "-";
    size_t full_len = arg_len + strlen(dash_prefix);

    ptr = AllocArgument(full_len);
    strlcpy(ptr, dash_prefix, strlen(dash_prefix) + 1);
    strlcat(ptr, arg, full_len + 1);
static void ArgumentsInit(struct Arguments *args, unsigned capacity)
    args->argv = SCCalloc(capacity, sizeof(*args->argv));
    if (args->argv == NULL)
        FatalError("Could not allocate memory for Arguments structure");

    args->capacity = capacity;
static void ArgumentsCleanup(struct Arguments *args)
    for (int i = 0; i < args->argc; i++) {
        if (args->argv[i] != NULL) {
            args->argv[i] = NULL;
static void ArgumentsAdd(struct Arguments *args, char *value)
    if (args->argc + 1 > args->capacity)
        FatalError("No capacity for more arguments (Max: %" PRIu32 ")", EAL_ARGS);

    args->argv[args->argc++] = value;
static void ArgumentsAddOptionAndArgument(struct Arguments *args, const char *opt, const char *arg)
    option = AllocAndSetOption(opt);
    ArgumentsAdd(args, option);

    if (arg == NULL || arg[0] == '\0')

    argument = AllocAndSetArgument(arg);
    ArgumentsAdd(args, argument);
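/* Build argv for DPDK from the EAL parameters section of suricata.yaml ("suricata" is passed as
 * argv[0]) and hand it to rte_eal_init(); the temporary Arguments array is released afterwards. */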
static void InitEal(void)
    struct Arguments args;

    if (eal_params == NULL) {
        FatalError("DPDK EAL parameters not found in the config");

    ArgumentsInit(&args, EAL_ARGS);
    ArgumentsAdd(&args, AllocAndSetArgument("suricata"));

        const char *key = param->name;

            ArgumentsAddOptionAndArgument(&args, key, (const char *)val->val);

        ArgumentsAddOptionAndArgument(&args, param->name, param->val);

    eal_argv = SCCalloc(args.argc, sizeof(*args.argv));
    if (eal_argv == NULL) {
        FatalError("Failed to allocate memory for the array of DPDK EAL arguments");
    memcpy(eal_argv, args.argv, args.argc * sizeof(*args.argv));

    rte_log_set_global_level(RTE_LOG_WARNING);
    retval = rte_eal_init(args.argc, eal_argv);

    ArgumentsCleanup(&args);

        FatalError("DPDK EAL initialization error: %s", rte_strerror(-retval));
static void DPDKDerefConfig(void *conf)
    if (iconf->pkt_mempool != NULL) {
        rte_mempool_free(iconf->pkt_mempool);

        FatalError("Could not allocate memory for DPDKIfaceConfig");

    ptr->pkt_mempool = NULL;
    ptr->out_port_id = -1;

    ptr->DerefFunc = DPDKDerefConfig;
static void ConfigSetIface(DPDKIfaceConfig *iconf, const char *entry_str)
    if (entry_str == NULL || entry_str[0] == '\0')
        FatalError("Interface name in DPDK config is NULL or empty");

    retval = rte_eth_dev_get_port_by_name(entry_str, &iconf->port_id);
        FatalError("Interface \"%s\": %s", entry_str, rte_strerror(-retval));

    strlcpy(iconf->iface, entry_str, sizeof(iconf->iface));
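/* Resolve the "threads" setting: "auto" spreads the worker CPU cores with configured affinity
 * across the DPDK interfaces (tracked in the static remaining_auto_cpus counter), while an
 * explicit value must be a positive number. */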
static int ConfigSetThreads(DPDKIfaceConfig *iconf, const char *entry_str)
    static int32_t remaining_auto_cpus = -1;
        SCLogError("DPDK runmode requires configured thread affinity");

        SCLogError("Specify worker-cpu-set list in the threading section");

        SCLogError("Specify management-cpu-set list in the threading section");

                "\"all\" specified in worker CPU cores affinity, excluding management threads");
        UtilAffinityCpusExclude(wtaf, mtaf);

    if (sched_cpus == 0) {
        SCLogError("No worker CPU cores with configured affinity were configured");
    } else if (UtilAffinityCpusOverlap(wtaf, mtaf) != 0) {
        SCLogWarning("Worker threads should not overlap with management threads in the CPU core "
                     "affinity configuration");

    if (active_runmode && !strcmp("single", active_runmode)) {

    if (entry_str == NULL) {
        SCLogError("Number of threads for interface \"%s\" not specified", iconf->iface);

    if (strcmp(entry_str, "auto") == 0) {
        if (iconf->threads == 0) {
            SCLogError("Not enough worker CPU cores with affinity were configured");

        if (remaining_auto_cpus > 0) {
            remaining_auto_cpus--;
        } else if (remaining_auto_cpus == -1) {
            if (remaining_auto_cpus > 0) {
                remaining_auto_cpus--;

        SCLogConfig("%s: auto-assigned %u threads", iconf->iface, iconf->threads);

        SCLogError("Threads entry for interface %s contains non-numerical characters - \"%s\"",
                iconf->iface, entry_str);

    if (iconf->threads <= 0) {
        SCLogError("%s: positive number of threads required", iconf->iface);
static int ConfigSetRxQueues(DPDKIfaceConfig *iconf, uint16_t nb_queues)
    iconf->nb_rx_queues = nb_queues;
    if (iconf->nb_rx_queues < 1) {
        SCLogError("%s: positive number of RX queues is required", iconf->iface);
static int ConfigSetTxQueues(DPDKIfaceConfig *iconf, uint16_t nb_queues)
    iconf->nb_tx_queues = nb_queues;
    if (iconf->nb_tx_queues < 1) {
        SCLogError("%s: positive number of TX queues is required", iconf->iface);
static int ConfigSetMempoolSize(DPDKIfaceConfig *iconf, intmax_t entry_int)
    if (entry_int <= 0) {
        SCLogError("%s: positive memory pool size is required", iconf->iface);

    iconf->mempool_size = entry_int;
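/* Mempool cache size: "auto" (or an empty entry) picks the greatest divisor of the mempool size
 * below a ceiling derived from RTE_MEMPOOL_CACHE_MAX_SIZE and mempool_size / 1.5; an explicit
 * value must lie in (0, RTE_MEMPOOL_CACHE_MAX_SIZE]. */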
static int ConfigSetMempoolCacheSize(DPDKIfaceConfig *iconf, const char *entry_str)
    if (entry_str == NULL || entry_str[0] == '\0' || strcmp(entry_str, "auto") == 0) {

        if (iconf->mempool_size == 0) {
            SCLogError("%s: cannot calculate mempool cache size of a mempool with size %d",
                    iconf->iface, iconf->mempool_size);

        uint32_t max_cache_size = MAX(RTE_MEMPOOL_CACHE_MAX_SIZE, iconf->mempool_size / 1.5);
        iconf->mempool_cache_size = GreatestDivisorUpTo(iconf->mempool_size, max_cache_size);

        SCLogError("%s: mempool cache size entry contains non-numerical characters - \"%s\"",
                iconf->iface, entry_str);

    if (iconf->mempool_cache_size <= 0 || iconf->mempool_cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
        SCLogError("%s: mempool cache size requires a positive number smaller than %" PRIu32,
                iconf->iface, RTE_MEMPOOL_CACHE_MAX_SIZE);
static int ConfigSetRxDescriptors(DPDKIfaceConfig *iconf, intmax_t entry_int)
    if (entry_int <= 0) {
        SCLogError("%s: positive number of RX descriptors is required", iconf->iface);

    iconf->nb_rx_desc = entry_int;
static int ConfigSetTxDescriptors(DPDKIfaceConfig *iconf, intmax_t entry_int)
    if (entry_int <= 0) {
        SCLogError("%s: positive number of TX descriptors is required", iconf->iface);

    iconf->nb_tx_desc = entry_int;
static int ConfigSetRSSHashFunctions(DPDKIfaceConfig *iconf, const char *entry_str)
    if (entry_str == NULL || entry_str[0] == '\0' || strcmp(entry_str, "auto") == 0) {
        iconf->rss_hf = DPDK_CONFIG_DEFAULT_RSS_HASH_FUNCTIONS;

        SCLogError("%s: RSS hash functions entry contains non-numerical characters - \"%s\"",
                iconf->iface, entry_str);
    if (entry_int < RTE_ETHER_MIN_MTU || entry_int > RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
        SCLogError("%s: MTU size can only be between %" PRIu32 " and %" PRIu32, iconf->iface,
                RTE_ETHER_MIN_MTU, RTE_ETHER_MAX_JUMBO_FRAME_LEN);

    iconf->mtu = entry_int;
static bool ConfigSetPromiscuousMode(DPDKIfaceConfig *iconf, int entry_bool)

static int ConfigSetChecksumChecks(DPDKIfaceConfig *iconf, int entry_bool)

static int ConfigSetChecksumOffload(DPDKIfaceConfig *iconf, int entry_bool)
static int ConfigSetCopyIface(DPDKIfaceConfig *iconf, const char *entry_str)
    if (entry_str == NULL || entry_str[0] == '\0' || strcmp(entry_str, "none") == 0) {
        iconf->out_iface = NULL;

    retval = rte_eth_dev_get_port_by_name(entry_str, &iconf->out_port_id);
        SCLogError("%s: name of the copy interface (%s) is invalid (err %s)", iconf->iface,
                entry_str, rte_strerror(-retval));

    iconf->out_iface = entry_str;
static int ConfigSetCopyMode(DPDKIfaceConfig *iconf, const char *entry_str)
    if (entry_str == NULL) {
        SCLogWarning("%s: no copy mode specified, changing to %s ", iconf->iface,
                DPDK_CONFIG_DEFAULT_COPY_MODE);
        entry_str = DPDK_CONFIG_DEFAULT_COPY_MODE;

    if (strcmp(entry_str, "none") != 0 && strcmp(entry_str, "tap") != 0 &&
            strcmp(entry_str, "ips") != 0) {
        SCLogWarning("%s: copy mode \"%s\" is not one of the possible values (none|tap|ips). "
                entry_str, iconf->iface, DPDK_CONFIG_DEFAULT_COPY_MODE);
        entry_str = DPDK_CONFIG_DEFAULT_COPY_MODE;

    if (strcmp(entry_str, "none") == 0) {
    } else if (strcmp(entry_str, "tap") == 0) {
    } else if (strcmp(entry_str, "ips") == 0) {
static int ConfigSetCopyIfaceSettings(DPDKIfaceConfig *iconf, const char *iface, const char *mode)
    retval = ConfigSetCopyIface(iconf, iface);

    retval = ConfigSetCopyMode(iconf, mode);

        if (iconf->out_iface != NULL)
            iconf->out_iface = NULL;

        if (iconf->out_iface == NULL || strlen(iconf->out_iface) <= 0) {
            SCLogError("%s: copy mode enabled but interface not set", iconf->iface);
    const char *entry_str = NULL;
    intmax_t entry_int = 0;
    const char *copy_iface_str = NULL;
    const char *copy_mode_str = NULL;

    ConfigSetIface(iconf, iface);

        FatalError("failed to find DPDK configuration for the interface %s", iconf->iface);

                    ? ConfigSetThreads(iconf, DPDK_CONFIG_DEFAULT_THREADS)
                    : ConfigSetThreads(iconf, entry_str);

    retval = ConfigSetRxQueues(iconf, (uint16_t)iconf->threads);

    retval = ConfigSetTxQueues(iconf, (uint16_t)iconf->threads);

                     if_root, if_default, dpdk_yaml.mempool_size, &entry_int) != 1
                    ? ConfigSetMempoolSize(iconf, DPDK_CONFIG_DEFAULT_MEMPOOL_SIZE)
                    : ConfigSetMempoolSize(iconf, entry_int);

                    ? ConfigSetMempoolCacheSize(iconf, DPDK_CONFIG_DEFAULT_MEMPOOL_CACHE_SIZE)
                    : ConfigSetMempoolCacheSize(iconf, entry_str);

                    ? ConfigSetRxDescriptors(iconf, DPDK_CONFIG_DEFAULT_RX_DESCRIPTORS)
                    : ConfigSetRxDescriptors(iconf, entry_int);

                    ? ConfigSetTxDescriptors(iconf, DPDK_CONFIG_DEFAULT_TX_DESCRIPTORS)
                    : ConfigSetTxDescriptors(iconf, entry_int);

                    ? ConfigSetMtu(iconf, DPDK_CONFIG_DEFAULT_MTU)
                    : ConfigSetMtu(iconf, entry_int);

                    ? ConfigSetRSSHashFunctions(iconf, NULL)
                    : ConfigSetRSSHashFunctions(iconf, entry_str);

                     if_root, if_default, dpdk_yaml.promisc, &entry_bool) != 1
                    ? ConfigSetPromiscuousMode(iconf, DPDK_CONFIG_DEFAULT_PROMISCUOUS_MODE)
                    : ConfigSetPromiscuousMode(iconf, entry_bool);

                     if_root, if_default, dpdk_yaml.multicast, &entry_bool) != 1
                    ? ConfigSetMulticast(iconf, DPDK_CONFIG_DEFAULT_MULTICAST_MODE)
                    : ConfigSetMulticast(iconf, entry_bool);

                    ? ConfigSetChecksumChecks(iconf, DPDK_CONFIG_DEFAULT_CHECKSUM_VALIDATION)
                    : ConfigSetChecksumChecks(iconf, entry_bool);

                    ? ConfigSetChecksumOffload(iconf, DPDK_CONFIG_DEFAULT_CHECKSUM_VALIDATION_OFFLOAD)
                    : ConfigSetChecksumOffload(iconf, entry_bool);

            if_root, if_default, dpdk_yaml.copy_iface, &copy_iface_str);

    retval = ConfigSetCopyIfaceSettings(iconf, copy_iface_str, copy_mode_str);
static int32_t ConfigValidateThreads(uint16_t iface_threads)
    static uint32_t total_cpus = 0;
    total_cpus += iface_threads;

        SCLogError("Specify worker-cpu-set list in the threading section");

        SCLogError("Interfaces requested more cores than configured in the threading section "
                   "(requested %d configured %d",
    retval = ConfigLoad(iconf, iface);
    if (retval < 0 || ConfigValidateThreads(iconf->threads) != 0) {
        iconf->DerefFunc(iconf);
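/* Some PMDs need driver-specific RSS settings (hash key and/or hash functions) to spread flows
 * the way Suricata expects; apply them based on the driver name. */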
static void DeviceSetPMDSpecificRSS(struct rte_eth_rss_conf *rss_conf, const char *driver_name)
    if (strcmp(driver_name, "net_i40e") == 0)
        i40eDeviceSetRSSConf(rss_conf);
    if (strcmp(driver_name, "net_ice") == 0)
        iceDeviceSetRSSHashFunction(&rss_conf->rss_hf);
    if (strcmp(driver_name, "net_ixgbe") == 0)
        ixgbeDeviceSetRSSHashFunction(&rss_conf->rss_hf);
    if (strcmp(driver_name, "net_e1000_igb") == 0)
        rss_conf->rss_hf = (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX);
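/* DumpRSSFlags() logs, bit by bit, which RSS hash functions were requested versus what the
 * device actually supports; it is called when the requested rss_hf has to be masked by
 * dev_info->flow_type_rss_offloads. */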
static int GetFirstSetBitPosition(uint64_t bits)
    for (uint64_t i = 0; i < 64; i++) {
static void DumpRSSFlags(const uint64_t requested, const uint64_t actual)
    SCLogConfig(
            "RTE_ETH_RSS_IP %sset", ((requested & RTE_ETH_RSS_IP) == RTE_ETH_RSS_IP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_TCP %sset",
            ((requested & RTE_ETH_RSS_TCP) == RTE_ETH_RSS_TCP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_UDP %sset",
            ((requested & RTE_ETH_RSS_UDP) == RTE_ETH_RSS_UDP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_SCTP %sset",
            ((requested & RTE_ETH_RSS_SCTP) == RTE_ETH_RSS_SCTP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_TUNNEL %sset",
            ((requested & RTE_ETH_RSS_TUNNEL) == RTE_ETH_RSS_TUNNEL) ? "" : "NOT ");

    SCLogConfig("RTE_ETH_RSS_IPV4 (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_IPV4), (requested & RTE_ETH_RSS_IPV4) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_FRAG_IPV4 (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_FRAG_IPV4),
            (requested & RTE_ETH_RSS_FRAG_IPV4) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV4_TCP (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_NONFRAG_IPV4_TCP),
            (requested & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV4_UDP (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_NONFRAG_IPV4_UDP),
            (requested & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV4_SCTP (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_NONFRAG_IPV4_SCTP),
            (requested & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV4_OTHER (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_NONFRAG_IPV4_OTHER),
            (requested & RTE_ETH_RSS_NONFRAG_IPV4_OTHER) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_IPV6 (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_IPV6), (requested & RTE_ETH_RSS_IPV6) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_FRAG_IPV6 (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_FRAG_IPV6),
            (requested & RTE_ETH_RSS_FRAG_IPV6) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV6_TCP (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_NONFRAG_IPV6_TCP),
            (requested & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV6_UDP (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_NONFRAG_IPV6_UDP),
            (requested & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV6_SCTP (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_NONFRAG_IPV6_SCTP),
            (requested & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV6_OTHER (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_NONFRAG_IPV6_OTHER),
            (requested & RTE_ETH_RSS_NONFRAG_IPV6_OTHER) ? "" : "NOT ");

    SCLogConfig("RTE_ETH_RSS_L2_PAYLOAD (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_L2_PAYLOAD),
            (requested & RTE_ETH_RSS_L2_PAYLOAD) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_IPV6_EX (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_IPV6_EX),
            (requested & RTE_ETH_RSS_IPV6_EX) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_IPV6_TCP_EX (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_IPV6_TCP_EX),
            (requested & RTE_ETH_RSS_IPV6_TCP_EX) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_IPV6_UDP_EX (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_IPV6_UDP_EX),
            (requested & RTE_ETH_RSS_IPV6_UDP_EX) ? "" : "NOT ");

    SCLogConfig("RTE_ETH_RSS_PORT (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_PORT), (requested & RTE_ETH_RSS_PORT) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_VXLAN (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_VXLAN),
            (requested & RTE_ETH_RSS_VXLAN) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NVGRE (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_NVGRE),
            (requested & RTE_ETH_RSS_NVGRE) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_GTPU (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_GTPU), (requested & RTE_ETH_RSS_GTPU) ? "" : "NOT ");

    SCLogConfig("RTE_ETH_RSS_L3_SRC_ONLY (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_L3_SRC_ONLY),
            (requested & RTE_ETH_RSS_L3_SRC_ONLY) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_L3_DST_ONLY (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_L3_DST_ONLY),
            (requested & RTE_ETH_RSS_L3_DST_ONLY) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_L4_SRC_ONLY (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_L4_SRC_ONLY),
            (requested & RTE_ETH_RSS_L4_SRC_ONLY) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_L4_DST_ONLY (Bit position: %d) %sset",
            GetFirstSetBitPosition(RTE_ETH_RSS_L4_DST_ONLY),
            (requested & RTE_ETH_RSS_L4_DST_ONLY) ? "" : "NOT ");

    SCLogConfig(
            "RTE_ETH_RSS_IP %sset", ((actual & RTE_ETH_RSS_IP) == RTE_ETH_RSS_IP) ? "" : "NOT ");
    SCLogConfig(
            "RTE_ETH_RSS_TCP %sset", ((actual & RTE_ETH_RSS_TCP) == RTE_ETH_RSS_TCP) ? "" : "NOT ");
    SCLogConfig(
            "RTE_ETH_RSS_UDP %sset", ((actual & RTE_ETH_RSS_UDP) == RTE_ETH_RSS_UDP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_SCTP %sset",
            ((actual & RTE_ETH_RSS_SCTP) == RTE_ETH_RSS_SCTP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_TUNNEL %sset",
            ((actual & RTE_ETH_RSS_TUNNEL) == RTE_ETH_RSS_TUNNEL) ? "" : "NOT ");

    SCLogConfig("RTE_ETH_RSS_IPV4 %sset", (actual & RTE_ETH_RSS_IPV4) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_FRAG_IPV4 %sset", (actual & RTE_ETH_RSS_FRAG_IPV4) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV4_TCP %sset",
            (actual & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV4_UDP %sset",
            (actual & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV4_SCTP %sset",
            (actual & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV4_OTHER %sset",
            (actual & RTE_ETH_RSS_NONFRAG_IPV4_OTHER) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_IPV6 %sset", (actual & RTE_ETH_RSS_IPV6) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_FRAG_IPV6 %sset", (actual & RTE_ETH_RSS_FRAG_IPV6) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV6_TCP %sset",
            (actual & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV6_UDP %sset",
            (actual & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV6_SCTP %sset",
            (actual & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NONFRAG_IPV6_OTHER %sset",
            (actual & RTE_ETH_RSS_NONFRAG_IPV6_OTHER) ? "" : "NOT ");

    SCLogConfig("RTE_ETH_RSS_L2_PAYLOAD %sset", (actual & RTE_ETH_RSS_L2_PAYLOAD) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_IPV6_EX %sset", (actual & RTE_ETH_RSS_IPV6_EX) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_IPV6_TCP_EX %sset", (actual & RTE_ETH_RSS_IPV6_TCP_EX) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_IPV6_UDP_EX %sset", (actual & RTE_ETH_RSS_IPV6_UDP_EX) ? "" : "NOT ");

    SCLogConfig("RTE_ETH_RSS_PORT %sset", (actual & RTE_ETH_RSS_PORT) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_VXLAN %sset", (actual & RTE_ETH_RSS_VXLAN) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_NVGRE %sset", (actual & RTE_ETH_RSS_NVGRE) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_GTPU %sset", (actual & RTE_ETH_RSS_GTPU) ? "" : "NOT ");

    SCLogConfig("RTE_ETH_RSS_L3_SRC_ONLY %sset", (actual & RTE_ETH_RSS_L3_SRC_ONLY) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_L3_DST_ONLY %sset", (actual & RTE_ETH_RSS_L3_DST_ONLY) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_L4_SRC_ONLY %sset", (actual & RTE_ETH_RSS_L4_SRC_ONLY) ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RSS_L4_DST_ONLY %sset", (actual & RTE_ETH_RSS_L4_DST_ONLY) ? "" : "NOT ");
static void DumpRXOffloadCapabilities(const uint64_t rx_offld_capa)
    SCLogConfig("RTE_ETH_RX_OFFLOAD_VLAN_STRIP - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_IPV4_CKSUM - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_UDP_CKSUM - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_TCP_CKSUM - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_TCP_CKSUM ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_TCP_LRO - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_QINQ_STRIP - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_QINQ_STRIP ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_MACSEC_STRIP - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_MACSEC_STRIP ? "" : "NOT ");
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
    SCLogConfig("RTE_ETH_RX_OFFLOAD_HEADER_SPLIT - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT ? "" : "NOT ");
#endif
    SCLogConfig("RTE_ETH_RX_OFFLOAD_VLAN_FILTER - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_VLAN_EXTEND - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_SCATTER - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_SCATTER ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_TIMESTAMP - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_SECURITY - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_SECURITY ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_KEEP_CRC - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_SCTP_CKSUM - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM ? "" : "NOT ");
    SCLogConfig("RTE_ETH_RX_OFFLOAD_RSS_HASH - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH ? "" : "NOT ");
#if RTE_VERSION >= RTE_VERSION_NUM(20, 11, 0, 0)
    SCLogConfig("RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT - %savailable",
            rx_offld_capa & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT ? "" : "NOT ");
static int DeviceValidateMTU(const DPDKIfaceConfig *iconf, const struct rte_eth_dev_info *dev_info)
    if (iconf->mtu > dev_info->max_mtu || iconf->mtu < dev_info->min_mtu) {
                "Min MTU: %" PRIu16 " Max MTU: %" PRIu16,
                iconf->iface, dev_info->min_mtu, dev_info->max_mtu);

#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
    if (iconf->mtu > RTE_ETHER_MAX_LEN &&
            !(dev_info->rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME)) {
        SCLogError("%s: jumbo frames not supported, set MTU to 1500", iconf->iface);
static void DeviceSetMTU(struct rte_eth_conf *port_conf, uint16_t mtu)
#if RTE_VERSION >= RTE_VERSION_NUM(21, 11, 0, 0)
    port_conf->rxmode.mtu = mtu;
#else
    port_conf->rxmode.max_rx_pkt_len = mtu;
    if (mtu > RTE_ETHER_MAX_LEN) {
        port_conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
static int32_t DeviceSetSocketID(uint16_t port_id, int32_t *socket_id)
    int retval = rte_eth_dev_socket_id(port_id);
    *socket_id = retval;

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0) // DPDK API changed since 22.11
    retval = -rte_errno;

    if (retval == SOCKET_ID_ANY)
static void DeviceInitPortConf(const DPDKIfaceConfig *iconf,
        const struct rte_eth_dev_info *dev_info, struct rte_eth_conf *port_conf)
    DumpRXOffloadCapabilities(dev_info->rx_offload_capa);
    *port_conf = (struct rte_eth_conf){
        .rxmode = {
                .mq_mode = RTE_ETH_MQ_RX_NONE,
        },
        .txmode = {
                .mq_mode = RTE_ETH_MQ_TX_NONE,
        },
    };

    if (dev_info->rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
        if (iconf->nb_rx_queues > 1) {
            SCLogConfig("%s: RSS enabled for %d queues", iconf->iface, iconf->nb_rx_queues);
            port_conf->rx_adv_conf.rss_conf = (struct rte_eth_rss_conf){
                .rss_key = rss_hkey,
                .rss_key_len = RSS_HKEY_LEN,
                .rss_hf = iconf->rss_hf,
            };

            const char *dev_driver = dev_info->driver_name;
            if (strcmp(dev_info->driver_name, "net_bonding") == 0) {
                dev_driver = BondingDeviceDriverGet(iconf->port_id);

            DeviceSetPMDSpecificRSS(&port_conf->rx_adv_conf.rss_conf, dev_driver);

            uint64_t rss_hf_tmp =
                    port_conf->rx_adv_conf.rss_conf.rss_hf & dev_info->flow_type_rss_offloads;
            if (port_conf->rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
                DumpRSSFlags(port_conf->rx_adv_conf.rss_conf.rss_hf, rss_hf_tmp);

                SCLogWarning("%s: modified RSS hash function based on hardware support: "
                             "requested:%#" PRIx64 ", configured:%#" PRIx64,
                        iconf->iface, port_conf->rx_adv_conf.rss_conf.rss_hf, rss_hf_tmp);
                port_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf_tmp;

            port_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;

            port_conf->rx_adv_conf.rss_conf.rss_key = NULL;
            port_conf->rx_adv_conf.rss_conf.rss_hf = 0;

        SCLogConfig("%s: RSS not supported", iconf->iface);

        SCLogConfig("%s: checksum validation disabled", iconf->iface);
    } else if ((dev_info->rx_offload_capa & RTE_ETH_RX_OFFLOAD_CHECKSUM) ==
               RTE_ETH_RX_OFFLOAD_CHECKSUM) {
            SCLogConfig("%s: IP, TCP and UDP checksum validation offloaded", iconf->iface);
            port_conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;

            SCLogConfig("%s: checksum validation enabled (but can be offloaded)", iconf->iface);

    DeviceSetMTU(port_conf, iconf->mtu);

    if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
        port_conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
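/* DeviceConfigureQueues() creates the per-interface pktmbuf pool and sets up all RX/TX queues.
 * The mbuf sizing below works out as follows for the default MTU of 1500 (RTE_ETHER_CRC_LEN = 4,
 * RTE_ETHER_HDR_LEN = 14, plus 4 bytes of room for a VLAN tag): 1500 + 4 + 14 + 4 = 1522,
 * rounded up to 2048 by ROUNDUP(, 1024), plus RTE_PKTMBUF_HEADROOM (typically 128,
 * build-time configurable), i.e. 2176 bytes per mbuf. */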
static int DeviceConfigureQueues(DPDKIfaceConfig *iconf, const struct rte_eth_dev_info *dev_info,
        const struct rte_eth_conf *port_conf)
    struct rte_eth_rxconf rxq_conf;
    struct rte_eth_txconf txq_conf;

    char mempool_name[64];
    snprintf(mempool_name, 64, "mempool_%.20s", iconf->iface);

    mtu_size = iconf->mtu + RTE_ETHER_CRC_LEN + RTE_ETHER_HDR_LEN + 4;
    mbuf_size = ROUNDUP(mtu_size, 1024) + RTE_PKTMBUF_HEADROOM;
    SCLogConfig("%s: creating packet mbuf pool %s of size %d, cache size %d, mbuf size %d",
            iconf->iface, mempool_name, iconf->mempool_size, iconf->mempool_cache_size, mbuf_size);

    iconf->pkt_mempool = rte_pktmbuf_pool_create(mempool_name, iconf->mempool_size,
            iconf->mempool_cache_size, 0, mbuf_size, (int)iconf->socket_id);
    if (iconf->pkt_mempool == NULL) {
        retval = -rte_errno;
        SCLogError("%s: rte_pktmbuf_pool_create failed with code %d (mempool: %s) - %s",
                iconf->iface, rte_errno, mempool_name, rte_strerror(rte_errno));

    for (uint16_t queue_id = 0; queue_id < iconf->nb_rx_queues; queue_id++) {
        rxq_conf = dev_info->default_rxconf;
        rxq_conf.offloads = port_conf->rxmode.offloads;
        rxq_conf.rx_thresh.hthresh = 0;
        rxq_conf.rx_thresh.pthresh = 0;
        rxq_conf.rx_thresh.wthresh = 0;
        rxq_conf.rx_free_thresh = 0;
        rxq_conf.rx_drop_en = 0;
        SCLogConfig("%s: rx queue setup: queue:%d port:%d rx_desc:%d tx_desc:%d rx: hthresh: %d "
                    "pthresh %d wthresh %d free_thresh %d drop_en %d offloads %lu",
                iconf->iface, queue_id, iconf->port_id, iconf->nb_rx_desc, iconf->nb_tx_desc,
                rxq_conf.rx_thresh.hthresh, rxq_conf.rx_thresh.pthresh, rxq_conf.rx_thresh.wthresh,
                rxq_conf.rx_free_thresh, rxq_conf.rx_drop_en, rxq_conf.offloads);

        retval = rte_eth_rx_queue_setup(iconf->port_id, queue_id, iconf->nb_rx_desc,
                iconf->socket_id, &rxq_conf, iconf->pkt_mempool);
            rte_mempool_free(iconf->pkt_mempool);
                    "%s: rte_eth_rx_queue_setup failed with code %d for device queue %u of port %u",
                    iconf->iface, retval, queue_id, iconf->port_id);

    for (uint16_t queue_id = 0; queue_id < iconf->nb_tx_queues; queue_id++) {
        txq_conf = dev_info->default_txconf;
        txq_conf.offloads = port_conf->txmode.offloads;
        SCLogConfig("%s: tx queue setup: queue:%d port:%d", iconf->iface, queue_id, iconf->port_id);
        retval = rte_eth_tx_queue_setup(
                iconf->port_id, queue_id, iconf->nb_tx_desc, iconf->socket_id, &txq_conf);
            rte_mempool_free(iconf->pkt_mempool);
                    "%s: rte_eth_tx_queue_setup failed with code %d for device queue %u of port %u",
                    iconf->iface, retval, queue_id, iconf->port_id);
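/* When a copy (IPS/TAP) interface is configured, it must mirror this interface: our RX queue
 * count must equal its TX queue count, MTU and copy mode must match, and its copy-iface must
 * point back at this interface. */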
    ConfigInit(&out_iconf);
    if (out_iconf == NULL) {
        FatalError("Copy interface of the interface \"%s\" is NULL", iconf->iface);

    retval = ConfigLoad(out_iconf, iconf->out_iface);
        SCLogError("%s: failed to load config of interface", iconf->out_iface);
        out_iconf->DerefFunc(out_iconf);

    if (iconf->nb_rx_queues != out_iconf->nb_tx_queues) {
        SCLogError("%s: configured %d RX queues but copy interface %s has %d TX queues"
                   " - number of queues must be equal",
                iconf->iface, iconf->nb_rx_queues, out_iconf->iface, out_iconf->nb_tx_queues);
        out_iconf->DerefFunc(out_iconf);
    } else if (iconf->mtu != out_iconf->mtu) {
        SCLogError("%s: configured MTU of %d but copy interface %s has MTU set to %d"
                   " - MTU must be equal",
                iconf->iface, iconf->mtu, out_iconf->iface, out_iconf->mtu);
        out_iconf->DerefFunc(out_iconf);
    } else if (iconf->copy_mode != out_iconf->copy_mode) {
        SCLogError("%s: copy modes of interfaces %s and %s are not equal", iconf->iface,
                iconf->iface, out_iconf->iface);
        out_iconf->DerefFunc(out_iconf);
    } else if (strcmp(iconf->iface, out_iconf->out_iface) != 0) {
        SCLogError("%s: copy interface of %s is not set to %s", iconf->iface, out_iconf->iface,
        out_iconf->DerefFunc(out_iconf);

    out_iconf->DerefFunc(out_iconf);
    if (iconf->out_iface != NULL) {
        retval = rte_eth_dev_get_port_by_name(iconf->out_iface, &iconf->out_port_id);
            SCLogError("%s: failed to obtain out iface %s port id (err=%d)", iconf->iface,
                    iconf->out_iface, retval);

        int32_t out_port_socket_id;
        retval = DeviceSetSocketID(iconf->out_port_id, &out_port_socket_id);
            SCLogError("%s: invalid socket id (err=%d)", iconf->out_iface, retval);

        if (iconf->socket_id != out_port_socket_id) {
            SCLogWarning("%s: out iface %s is not on the same NUMA node", iconf->iface,

    retval = DeviceValidateOutIfaceConfig(iconf);

        SCLogInfo("%s: DPDK IPS mode activated: %s->%s", iconf->iface, iconf->iface,
        SCLogInfo("%s: DPDK TAP mode activated: %s->%s", iconf->iface, iconf->iface,
static int32_t DeviceVerifyPostConfigure(
        const DPDKIfaceConfig *iconf, const struct rte_eth_dev_info *dev_info)
    struct rte_eth_dev_info post_conf_dev_info = { 0 };
    int32_t ret = rte_eth_dev_info_get(iconf->port_id, &post_conf_dev_info);
        SCLogError("%s: getting device info failed (err: %s)", iconf->iface, rte_strerror(-ret));

    if (dev_info->flow_type_rss_offloads != post_conf_dev_info.flow_type_rss_offloads ||
            dev_info->rx_offload_capa != post_conf_dev_info.rx_offload_capa ||
            dev_info->tx_offload_capa != post_conf_dev_info.tx_offload_capa ||
            dev_info->max_rx_queues != post_conf_dev_info.max_rx_queues ||
            dev_info->max_tx_queues != post_conf_dev_info.max_tx_queues ||
            dev_info->max_mtu != post_conf_dev_info.max_mtu) {
        SCLogWarning("%s: device information severely changed after configuration, reconfiguring",

    if (strcmp(dev_info->driver_name, "net_bonding") == 0) {
        ret = BondingAllDevicesSameDriver(iconf->port_id);
            SCLogError("%s: bond port uses port with different DPDK drivers", iconf->iface);
    int32_t retval = rte_eth_dev_get_port_by_name(iconf->iface, &(iconf->port_id));
        SCLogError("%s: getting port id failed (err: %s)", iconf->iface, rte_strerror(-retval));

    if (!rte_eth_dev_is_valid_port(iconf->port_id)) {
        SCLogError("%s: specified port %d is invalid", iconf->iface, iconf->port_id);

    retval = DeviceSetSocketID(iconf->port_id, &iconf->socket_id);
        SCLogError("%s: invalid socket id (err: %s)", iconf->iface, rte_strerror(-retval));

    struct rte_eth_dev_info dev_info = { 0 };
    retval = rte_eth_dev_info_get(iconf->port_id, &dev_info);
        SCLogError("%s: getting device info failed (err: %s)", iconf->iface, rte_strerror(-retval));

    if (iconf->nb_rx_queues > dev_info.max_rx_queues) {
        SCLogError("%s: configured RX queues %u is higher than device maximum (%" PRIu16 ")",
                iconf->iface, iconf->nb_rx_queues, dev_info.max_rx_queues);

    if (iconf->nb_tx_queues > dev_info.max_tx_queues) {
        SCLogError("%s: configured TX queues %u is higher than device maximum (%" PRIu16 ")",
                iconf->iface, iconf->nb_tx_queues, dev_info.max_tx_queues);

    retval = DeviceValidateMTU(iconf, &dev_info);

    struct rte_eth_conf port_conf = { 0 };
    DeviceInitPortConf(iconf, &dev_info, &port_conf);
    if (port_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) {

    retval = rte_eth_dev_configure(
            iconf->port_id, iconf->nb_rx_queues, iconf->nb_tx_queues, &port_conf);
        SCLogError("%s: failed to configure the device (port %u, err %s)", iconf->iface,
                iconf->port_id, rte_strerror(-retval));

    retval = DeviceVerifyPostConfigure(iconf, &dev_info);

    retval = rte_eth_dev_adjust_nb_rx_tx_desc(
            iconf->port_id, &iconf->nb_rx_desc, &iconf->nb_tx_desc);
        SCLogError("%s: failed to adjust device queue descriptors (port %u, err %d)", iconf->iface,
                iconf->port_id, retval);

    retval = iconf->flags & DPDK_MULTICAST ? rte_eth_allmulticast_enable(iconf->port_id)
                                           : rte_eth_allmulticast_disable(iconf->port_id);
    if (retval == -ENOTSUP) {
        retval = rte_eth_allmulticast_get(iconf->port_id);
            SCLogError("%s: Allmulticast setting of port (%" PRIu16
                       ") can not be configured. Set it to %s",
                    iconf->iface, iconf->port_id, retval == 1 ? "true" : "false");
        } else if (retval < 0) {
            SCLogError("%s: failed to get multicast mode (port %u, err %d)", iconf->iface,
                    iconf->port_id, retval);
    } else if (retval < 0) {
        SCLogError("%s: error when changing multicast setting (port %u err %d)", iconf->iface,
                iconf->port_id, retval);

    retval = iconf->flags & DPDK_PROMISC ? rte_eth_promiscuous_enable(iconf->port_id)
                                         : rte_eth_promiscuous_disable(iconf->port_id);
    if (retval == -ENOTSUP) {
        retval = rte_eth_promiscuous_get(iconf->port_id);
            SCLogError("%s: promiscuous setting of port (%" PRIu16
                       ") can not be configured. Set it to %s",
                    iconf->iface, iconf->port_id, retval == 1 ? "true" : "false");
        } else if (retval < 0) {
            SCLogError("%s: failed to get promiscuous mode (port %u, err=%d)", iconf->iface,
                    iconf->port_id, retval);
    } else if (retval < 0) {
        SCLogError("%s: error when changing promiscuous setting (port %u, err %d)", iconf->iface,
                iconf->port_id, retval);

    SCLogConfig("%s: setting MTU to %d", iconf->iface, iconf->mtu);
    retval = rte_eth_dev_set_mtu(iconf->port_id, iconf->mtu);
    if (retval == -ENOTSUP) {
        SCLogWarning("%s: changing MTU on port %u is not supported, ignoring the setting",
                iconf->iface, iconf->port_id);

        retval = rte_eth_dev_get_mtu(iconf->port_id, &iconf->mtu);
            SCLogError("%s: failed to retrieve MTU (port %u, err %d)", iconf->iface, iconf->port_id,
    } else if (retval < 0) {
        SCLogError("%s: failed to set MTU to %u (port %u, err %d)", iconf->iface, iconf->mtu,
                iconf->port_id, retval);

    retval = DeviceConfigureQueues(iconf, &dev_info, &port_conf);

    retval = DeviceConfigureIPS(iconf);
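/* Per-interface entry point: parse the YAML configuration, configure the device (retrying once
 * when DeviceConfigure() returns -EAGAIN), and attach the created mempool to the live device.
 * On failure the EAL is cleaned up before bailing out. */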
static void *ParseDpdkConfigAndConfigureDevice(const char *iface)
    if (iconf == NULL) {
        FatalError("DPDK configuration could not be parsed");

    retval = DeviceConfigure(iconf);
    if (retval == -EAGAIN) {
        retval = DeviceConfigure(iconf);

        iconf->DerefFunc(iconf);
        retval = rte_eal_cleanup();
            FatalError("EAL cleanup failed: %s", strerror(-retval));

        FatalError("%s: failed to configure", iface);

    if (ldev_instance == NULL) {
        FatalError("Device %s is not registered as a live device", iface);

    ldev_instance->dpdk_vars.pkt_mp = iconf->pkt_mempool;
static int DPDKConfigGetThreadsCount(void *conf)
    return dpdk_conf->threads;
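/* Walk all configured DPDK interfaces and decide whether the run mode is IPS: "ips" copy-mode
 * may not be mixed with IDS/TAP copy-modes across interfaces. */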
static int DPDKRunModeIsIPS(void)
    const char dpdk_node_query[] = "dpdk.interfaces";
    if (dpdk_node == NULL) {
        FatalError("Unable to get %s configuration node", dpdk_node_query);

    const char default_iface[] = "default";
    bool has_ips = false;
    bool has_ids = false;
    for (int ldev = 0; ldev < nlive; ldev++) {
        if (live_dev == NULL)
            FatalError("Unable to get device id %d from LiveDevice list", ldev);

        if (if_root == NULL) {
            if (if_default == NULL)
                FatalError("Unable to get %s or %s interface", live_dev, default_iface);

            if_root = if_default;

        const char *copymodestr = NULL;
            if (strcmp(copymodestr, "ips") == 0) {

        if (has_ids && has_ips) {
            FatalError("Copy-mode of interface %s mixes with the previously set copy-modes "
                       "(only IDS/TAP and IPS copy-mode combinations are allowed in DPDK",
static void DPDKRunModeEnableIPS(void)
    if (DPDKRunModeIsIPS()) {

            "Workers DPDK mode, each thread does all"
            " tasks from acquisition to logging",

    SCLogDebug("RunModeIdsDpdkWorkers initialised");