    SCLogError("Error creating thread %s: you do not have support for Napatech adapter "
               "enabled; please recompile with --enable-napatech",
            tv->name);
#ifdef NAPATECH_ENABLE_BYPASS
static int NapatechBypassCallback(Packet *p);
#endif
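/* NapatechBypassCallback(), declared above, is registered per packet as the
 * BypassPacketsFlow callback.  It only flags the packet for bypass; the
 * hardware flow itself is programmed later, in NapatechReleasePacket(), once
 * the verdict is known. */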
static TmEcode NapatechStreamInit(void)
{
    int i;

    /* ... allocation of the NUMA detection counters; on failure: */
    FatalError("Failed to allocate memory for numa detection array: %s", strerror(errno));

    for (i = 0; i <= numa_max_node(); ++i) {
static TmEcode NapatechStreamDeInit(void)
#ifdef NAPATECH_ENABLE_BYPASS

/* Packed on-wire overlays used to pull addresses and ports out of the raw
 * packet bytes in ProgramFlow() below. */
struct ipv4_hdr {
    uint8_t version_ihl;
    uint8_t type_of_service;
    uint16_t total_length;
    uint16_t packet_id;
    uint16_t fragment_offset;
    uint8_t time_to_live;
    uint8_t next_proto_id;
    uint16_t hdr_checksum;
    uint32_t src_addr;
    uint32_t dst_addr;
} __attribute__((__packed__));

struct ipv6_hdr {
    uint32_t vtc_flow;    /* version, traffic class and flow label */
    uint16_t payload_len;
    uint8_t proto;        /* next header */
    uint8_t hop_limits;
    uint8_t src_addr[16];
    uint8_t dst_addr[16];
} __attribute__((__packed__));

struct udp_hdr {
    uint16_t src_port;
    uint16_t dst_port;
    uint16_t dgram_len;
    uint16_t dgram_cksum;
} __attribute__((__packed__));
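/* The TCP overlay is not shown in this excerpt; a sketch in the same
 * packed-layout style (ProgramFlow() below only touches the two ports): */
struct tcp_hdr {
    uint16_t src_port;
    uint16_t dst_port;
    uint32_t sent_seq;
    uint32_t recv_ack;
    uint8_t data_off;
    uint8_t tcp_flags;
    uint16_t rx_win;
    uint16_t cksum;
    uint16_t tcp_urp;
} __attribute__((__packed__));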
/* The hardware classifier assigns each packet a "color" describing which
 * filters it matched; these bits identify the protocol layers present. */
#define RTE_PTYPE_L2_ETHER 0x10000000
#define RTE_PTYPE_L3_IPV4  0x01000000
#define RTE_PTYPE_L3_IPV6  0x04000000
#define RTE_PTYPE_L4_TCP   0x00100000
#define RTE_PTYPE_L4_UDP   0x00200000

/* Masks for extracting the layer-3 and layer-4 protocol values from the color. */
#define RTE_PTYPE_L3_MASK 0x0f000000
#define RTE_PTYPE_L4_MASK 0x00f00000

/* Set when the packet arrived on a SPAN (mirror) port. */
#define COLOR_IS_SPAN 0x00001000
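/* Worked example: a TCP-over-IPv4 packet mirrored in on a SPAN port carries
 * color RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP |
 * COLOR_IS_SPAN = 0x11101000; masking with RTE_PTYPE_L3_MASK recovers
 * RTE_PTYPE_L3_IPV4 and RTE_PTYPE_L4_MASK recovers RTE_PTYPE_L4_TCP. */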
static int is_inline = 0;

/* Map of each port to its inline peer.  Note that "{ -1 }" initializes only
 * element 0; the packet loop explicitly resets every entry to -1 before the
 * pairing is configured. */
static int inline_port_map[MAX_PORTS] = { -1 };
    if ((inline_port_map[port] == -1) && (inline_port_map[peer] == -1)) {
        inline_port_map[port] = peer;
        inline_port_map[peer] = port;
    } else {
        SCLogError("Port pairing is already configured.");
        return 0;
    }
    static int port_adapter_map[MAX_PORTS] = { -1 };
    int status;
    NtInfo_t h_info;
    NtInfoStream_t h_info_stream;

    /* Note: "{ -1 }" only sets element 0 to -1; the remaining entries start
     * at 0, so the cache test below only forces a lookup for port 0. */
    if (unlikely(port_adapter_map[port] == -1)) {
        if ((status = NT_InfoOpen(&h_info_stream, "ExampleInfo")) != NT_SUCCESS) {
            /* ... */
        }

        /* Read which adapter the port belongs to. */
        h_info.cmd = NT_INFO_CMD_READ_PORT_V9;
        h_info.u.port_v9.portNo = (uint8_t)port;
        if ((status = NT_InfoRead(h_info_stream, &h_info)) != NT_SUCCESS) {
            /* ... */
            NT_InfoClose(h_info_stream);
            /* ... */
        }
        port_adapter_map[port] = h_info.u.port_v9.data.adapterNo;
    }
    return port_adapter_map[port];
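/* Usage sketch (mirrors the packet loop below): pick the flow-programming
 * stream belonging to the adapter that owns the receive port, e.g.
 *
 *     NtFlowStream_t fs = flow_stream[NapatechGetAdapter(p->ntpv.dyn3->rxPort)];
 */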
static int CompareIPv6Addr(uint8_t addr_a[16], uint8_t addr_b[16])
{
    uint16_t pos;
    for (pos = 0; pos < 16; ++pos) {
        if (addr_a[pos] < addr_b[pos]) {
            return -1;
        } else if (addr_a[pos] > addr_b[pos]) {
            return 1;
        }
        /* bytes equal - check the next position */
    }

    /* the addresses are equal */
    return 0;
}
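/* Equivalent to the sign of memcmp(addr_a, addr_b, 16): the addresses are in
 * network byte order, so byte-wise comparison is lexicographic. */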
static NtFlowStream_t InitFlowStream(int adapter, int stream_id)
{
    int status;
    NtFlowStream_t hFlowStream;
    NtFlowAttr_t attr;
    char flow_name[80];

    NT_FlowOpenAttrInit(&attr);
    NT_FlowOpenAttrSetAdapterNo(&attr, adapter);

    snprintf(flow_name, sizeof(flow_name), "Flow_stream_%d", stream_id);
    SCLogDebug("Opening flow programming stream: %s", flow_name);
    if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) {
        SCLogWarning("Napatech bypass functionality not supported by the FPGA version on adapter "
                     "%d - disabling support.",
                adapter);
        return NULL;
    }

    return hFlowStream;
}
static int ProgramFlow(Packet *p, int inline_mode)
{
    NtFlow_t flow_match;
    memset(&flow_match, 0, sizeof(flow_match));

    NapatechPacketVars *ntpv = &(p->ntpv);

    /* The hardware decoder "colors" each packet according to the protocols it
     * contains and the port it arrived on; recover that color from the packet
     * descriptor. */
    uint32_t packet_type = ((ntpv->dyn3->color_hi << 14) & 0xFFFFC000) | ntpv->dyn3->color_lo;
    uint8_t *packet = (uint8_t *)ntpv->dyn3 + ntpv->dyn3->descrLength;

    uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK;
    uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK;
    uint32_t is_span = packet_type & COLOR_IS_SPAN;

    /* On SPAN ports both directions arrive on one port, and the hardware
     * swaps source and destination whenever src > dst; the flow table must be
     * programmed with the fields ordered the same way to match. */
    uint32_t do_swap = 0;

    struct IPv4Tuple4 v4Tuple;
    struct IPv6Tuple4 v6Tuple;
    struct ipv4_hdr *pIPv4_hdr = NULL;
    struct ipv6_hdr *pIPv6_hdr = NULL;
    switch (layer3) {
        case RTE_PTYPE_L3_IPV4: {
            pIPv4_hdr = (struct ipv4_hdr *)(packet + ntpv->dyn3->offset0);
            if (!is_span) {
                v4Tuple.sa = pIPv4_hdr->src_addr;
                v4Tuple.da = pIPv4_hdr->dst_addr;
            } else {
                do_swap = (htonl(pIPv4_hdr->src_addr) > htonl(pIPv4_hdr->dst_addr));
                if (!do_swap) {
                    /* already in order */
                    v4Tuple.sa = pIPv4_hdr->src_addr;
                    v4Tuple.da = pIPv4_hdr->dst_addr;
                } else {
                    /* swap source and destination */
                    v4Tuple.sa = pIPv4_hdr->dst_addr;
                    v4Tuple.da = pIPv4_hdr->src_addr;
                }
            }
            break;
        }
        case RTE_PTYPE_L3_IPV6: {
            pIPv6_hdr = (struct ipv6_hdr *)(packet + ntpv->dyn3->offset0);
            do_swap = (CompareIPv6Addr(pIPv6_hdr->src_addr, pIPv6_hdr->dst_addr) > 0);

            if (!is_span) {
                memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
                memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
            } else {
                if (!do_swap) {
                    /* already in order */
                    memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
                    memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
                } else {
                    /* swap source and destination */
                    memcpy(&(v6Tuple.sa), pIPv6_hdr->dst_addr, 16);
                    memcpy(&(v6Tuple.da), pIPv6_hdr->src_addr, 16);
                }
            }
            break;
        }
        default:
            /* only IPv4 and IPv6 flows can be keyed */
            return 0;
    }
    switch (layer4) {
        case RTE_PTYPE_L4_TCP: {
            struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(packet + ntpv->dyn3->offset1);
            if (layer3 == RTE_PTYPE_L3_IPV4) {
                if (!is_span) {
                    v4Tuple.dp = tcp_hdr->dst_port;
                    v4Tuple.sp = tcp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
                } else {
                    if (!do_swap) {
                        v4Tuple.sp = tcp_hdr->src_port;
                        v4Tuple.dp = tcp_hdr->dst_port;
                    } else {
                        v4Tuple.sp = tcp_hdr->dst_port;
                        v4Tuple.dp = tcp_hdr->src_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
                }
                memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
            } else {
                if (!is_span) {
                    v6Tuple.dp = tcp_hdr->dst_port;
                    v6Tuple.sp = tcp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
                } else {
                    if (!do_swap) {
                        v6Tuple.sp = tcp_hdr->src_port;
                        v6Tuple.dp = tcp_hdr->dst_port;
                    } else {
                        v6Tuple.dp = tcp_hdr->src_port;
                        v6Tuple.sp = tcp_hdr->dst_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
                }
                memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
            }
            flow_match.ipProtocolField = 6; /* IPPROTO_TCP */
            break;
        }
        case RTE_PTYPE_L4_UDP: {
            struct udp_hdr *udp_hdr = (struct udp_hdr *)(packet + ntpv->dyn3->offset1);
            if (layer3 == RTE_PTYPE_L3_IPV4) {
                if (!is_span) {
                    v4Tuple.dp = udp_hdr->dst_port;
                    v4Tuple.sp = udp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
                } else {
                    if (!do_swap) {
                        v4Tuple.sp = udp_hdr->src_port;
                        v4Tuple.dp = udp_hdr->dst_port;
                    } else {
                        v4Tuple.dp = udp_hdr->src_port;
                        v4Tuple.sp = udp_hdr->dst_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
                }
                memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
            } else {
                if (!is_span) {
                    v6Tuple.dp = udp_hdr->dst_port;
                    v6Tuple.sp = udp_hdr->src_port;
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
                } else {
                    if (!do_swap) {
                        v6Tuple.sp = udp_hdr->src_port;
                        v6Tuple.dp = udp_hdr->dst_port;
                    } else {
                        v6Tuple.dp = udp_hdr->src_port;
                        v6Tuple.sp = udp_hdr->dst_port;
                    }
                    flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
                }
                memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
            }
            flow_match.ipProtocolField = 17; /* IPPROTO_UDP */
            break;
        }
        default:
            /* non-TCP/UDP flows are not offloaded */
            return 0;
    }
    /* A drop verdict programs a drop flow.  A pass verdict programs a pass
     * flow when running inline; otherwise the traffic need not be forwarded,
     * so it is dropped in hardware as well. */
    if (PacketCheckAction(p, ACTION_DROP)) {
        flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
    } else if (inline_mode) {
        flow_match.keySetId = NAPATECH_FLOWTYPE_PASS;
    } else {
        flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
    }
    if (NT_FlowWrite(ntpv->flow_stream, &flow_match, -1) != NT_SUCCESS) {
        /* ... */
    }
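    /* The record written above carries the canonicalized 4-tuple in keyData,
     * the L4 protocol in ipProtocolField, and the drop/pass decision in
     * keySetId; once accepted, all further packets of the flow are handled in
     * hardware without reaching Suricata. */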
static int NapatechBypassCallback(Packet *p)
{
    /* The verdict is not known at this point; just mark the packet so the
     * flow is programmed when Suricata releases it. */
    p->ntpv.bypass = 1;
    return 1;
}
        FatalError("Failed to allocate memory for NAPATECH thread vars.");
    *data = (void *)ntv;
static void NapatechReleasePacket(struct Packet_ *p)
{
#ifdef NAPATECH_ENABLE_BYPASS
    /* If the verdict is drop, zero the wire length so the packet is not
     * transmitted when the buffer is handed back. */
    if (is_inline && PacketCheckAction(p, ACTION_DROP)) {
        p->ntpv.dyn3->wireLength = 0;
    }

    /* Program the hardware bypass here rather than in the bypass callback:
     * the verdict is only known by the time the packet is released. */
    if (p->ntpv.bypass == 1) {
        ProgramFlow(p, is_inline);
    }
#endif
static int GetNumaNode(void)
{
    int cpu = 0;
    int node = 0;

#if defined(__linux__)
    cpu = sched_getcpu();
    node = numa_node_of_cpu(cpu);
#else
    SCLogWarning("Auto configuration of NUMA node is not supported on this OS.");
#endif

    return node;
}
static void RecommendNUMAConfig(void)
{
    char *buffer, *p;
    int set_cpu_affinity = 0;

    p = buffer = SCCalloc(sizeof(char), (32 * (numa_max_node() + 1) + 1));
    if (buffer == NULL) {
        FatalError("Failed to allocate memory for temporary buffer: %s", strerror(errno));
    }

    if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) {
        set_cpu_affinity = 0;
    }

    if (set_cpu_affinity) {
        SCLogPerf("Minimum host buffers that should be defined in ntservice.ini:");
        for (int i = 0; i <= numa_max_node(); ++i) {
            p += snprintf(p, 32, "%s[%d, 16, %d]", (i == 0 ? "" : ","),
                    SC_ATOMIC_GET(stream_count[i]), i);
        }
        SCLogPerf("E.g.: HostBuffersRx=%s", buffer);
    }

    SCFree(buffer);
}
TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot)
{
    int32_t status;
    char error_buffer[100];
    uint64_t pkt_ts;
    NtNetBuf_t packet_buffer;
    NapatechThreadVars *ntv = (NapatechThreadVars *)data;
    uint64_t hba_pkt_drops = 0;
    uint64_t hba_byte_drops = 0;
    uint16_t hba_pkt = 0;
    int numa_node = -1;
    int set_cpu_affinity = 0;
    int is_autoconfig = 0;
#ifdef NAPATECH_ENABLE_BYPASS
    NtFlowStream_t flow_stream[MAX_ADAPTERS] = { 0 };
    /* Open one flow-programming stream per adapter so the handle matching the
     * port a packet arrived on can be found quickly. */
    for (int adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
        flow_stream[adapter] = InitFlowStream(adapter, ntv->stream_id);
    }
#endif
    if (ConfGetBool("napatech.auto-config", &is_autoconfig) == 0) {
        is_autoconfig = 0;
    }

    if (is_autoconfig) {
        numa_node = GetNumaNode();

        if (numa_node <= numa_max_node()) {
            SC_ATOMIC_ADD(stream_count[numa_node], 1);
        }

        if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) {
            set_cpu_affinity = 0;
        }

        if (set_cpu_affinity) {
            /* ... */
        }
        /* ... */
        RecommendNUMAConfig();
#ifdef NAPATECH_ENABLE_BYPASS
        if (ConfGetBool("napatech.inline", &is_inline) == 0) {
            is_inline = 0;
        }

        /* Initialize the port map before the traffic filters are set up. */
        for (int i = 0; i < MAX_PORTS; ++i) {
            inline_port_map[i] = -1;
        }
#endif
        /* ... */
        if (status == 0x20002061) {
            FatalError("Check host buffer configuration in ntservice.ini"
                       " or try running /opt/napatech3/bin/ntpl -e "
                       "\"delete=all\" to clean up stream NUMA config.");
        } else if (status == 0x20000008) {
            FatalError("Check napatech.ports in the suricata config file.");
        }
    SCLogNotice("Napatech packet input engine started.");

    SCLogInfo("Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d, stream: %3u",
            sched_getcpu(), numa_node, ntv->stream_id);
        FatalError("Failed to allocate memory for NAPATECH stream counter.");
    }
    snprintf(s_hbad_pkt, 32, "nt%d.hba_drop", ntv->stream_id);
    if ((status = NT_NetRxOpen(&(ntv->rx_stream), "SuricataStream",
                 NT_NET_INTERFACE_PACKET, ntv->stream_id, ntv->hba)) != NT_SUCCESS) {
        status = NT_NetRxGet(ntv->rx_stream, &packet_buffer, 1000);
        if (unlikely(status == NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) {
            if (status == NT_STATUS_TIMEOUT) {
                /* tell the threading layer the capture is idle */
                TmThreadsCaptureHandleTimeout(tv, NULL);
            }
            continue;
        } else if (unlikely(status != NT_SUCCESS)) {
            SCLogInfo("Failed to read from Napatech Stream %d: %s",
                    ntv->stream_id, error_buffer);
            break;
        }
            NT_NetRxRelease(ntv->rx_stream, packet_buffer);
#ifdef NAPATECH_ENABLE_BYPASS
        p->ntpv.bypass = 0;
#endif
        pkt_ts = NT_NET_GET_PKT_TIMESTAMP(packet_buffer);

        /* Handle the different timestamp forms the Napatech cards can use.
         * NT_TIMESTAMP_TYPE_NATIVE is not supported: its epoch is 0 rather
         * than 1/1/1970 as with NATIVE_UNIX. */
        switch (NT_NET_GET_PKT_TIMESTAMP_TYPE(packet_buffer)) {
            case NT_TIMESTAMP_TYPE_NATIVE_UNIX:
                p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts / 100000000),
                        ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0));
                break;
            case NT_TIMESTAMP_TYPE_PCAP:
                p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts >> 32), pkt_ts & 0xFFFFFFFF);
                break;
            case NT_TIMESTAMP_TYPE_PCAP_NANOTIME:
                p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts >> 32),
                        ((pkt_ts & 0xFFFFFFFF) / 1000) + ((pkt_ts % 1000) > 500 ? 1 : 0));
                break;
            case NT_TIMESTAMP_TYPE_NATIVE_NDIS:
                /* 11644473600 is the number of seconds between 1/1/1601 and 1/1/1970 */
                p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS((pkt_ts / 100000000) - 11644473600),
                        ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0));
                break;
            default:
                SCLogError("Packet from Napatech Stream: %u does not have a supported timestamp "
                           "format",
                        ntv->stream_id);
                NT_NetRxRelease(ntv->rx_stream, packet_buffer);
                SCReturnInt(TM_ECODE_FAILED);
        }
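        /* NATIVE_UNIX and NATIVE_NDIS timestamps tick every 10 ns, hence
         * /100000000 for seconds and /100 for microseconds, with the "> 50"
         * term rounding the sub-microsecond remainder; the PCAP forms keep
         * seconds in the upper 32 bits and the fractional part in the
         * lower 32. */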
        NtNetRx_t stat_cmd;
        stat_cmd.cmd = NT_NETRX_READ_CMD_STREAM_DROP;

        /* update the host-buffer-allowance drop counters */
        if (unlikely((status = NT_NetRxRead(ntv->rx_stream, &stat_cmd)) != NT_SUCCESS)) {
            SCLogInfo("Couldn't retrieve drop statistics from the RX stream: %u",
                    ntv->stream_id);
        } else {
            hba_pkt_drops = stat_cmd.u.streamDrop.pktsDropped;
        }
#ifdef NAPATECH_ENABLE_BYPASS
        p->ntpv.dyn3 = _NT_NET_GET_PKT_DESCR_PTR_DYN3(packet_buffer);
        p->BypassPacketsFlow = (NapatechIsBypassSupported() ? NapatechBypassCallback : NULL);
        NT_NET_SET_PKT_TXPORT(packet_buffer, inline_port_map[p->ntpv.dyn3->rxPort]);
        if (unlikely(PacketSetData(p, (uint8_t *)NT_NET_GET_PKT_L2_PTR(packet_buffer),
                     NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) {
            TmqhOutputPacketpool(ntv->tv, p);
            SCReturnInt(TM_ECODE_FAILED);
        }
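        /* PacketSetData() points the Packet at the NT host buffer instead of
         * copying, so the buffer can only go back to NTService once the
         * packet is released through NapatechReleasePacket(). */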
        SCLogInfo("Host Buffer Allowance Drops - pkts: %" PRIu64 ", bytes: %" PRIu64,
                hba_pkt_drops, hba_byte_drops);
    SCLogInfo("nt%lu - pkts: %lu; drop: %lu (%5.2f%%); bytes: %lu",
    SCLogInfo("--- Total Packets: %ld Total Dropped: %ld (%5.2f%%)",
#ifdef NAPATECH_ENABLE_BYPASS
    SCLogInfo("--- BypassCB - Total: %ld, UDP: %ld, TCP: %ld, Unhandled: %ld",
            SCLogError("Datalink type %" PRId32 " not yet supported in module NapatechDecode",
                    p->datalink);
    *data = (void *)dtv;