source-napatech.c
1 /* Copyright (C) 2012-2020 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 
18 /**
19  * \file
20  *
21  * \author nPulse Technologies, LLC.
22  * \author Matt Keeler <mk@npulsetech.com>
23  *
24  * Support for NAPATECH adapter with the 3GD Driver/API.
25  * Requires libntapi from Napatech A/S.
26  *
27  */
28 #include "suricata-common.h"
29 #include "suricata.h"
30 #include "threadvars.h"
31 #include "util-optimize.h"
32 #include "tm-queuehandlers.h"
33 #include "tm-threads.h"
34 #include "tm-modules.h"
35 #include "util-privs.h"
36 #include "tmqh-packetpool.h"
37 #include "util-napatech.h"
38 #include "source-napatech.h"
39 
40 #ifndef HAVE_NAPATECH
41 
42 TmEcode NoNapatechSupportExit(ThreadVars*, const void*, void**);
43 
44 void TmModuleNapatechStreamRegister(void)
45 {
46  tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream";
47  tmm_modules[TMM_RECEIVENAPATECH].ThreadInit = NoNapatechSupportExit;
53 }
54 
55 void TmModuleNapatechDecodeRegister(void)
56 {
57  tmm_modules[TMM_DECODENAPATECH].name = "NapatechDecode";
58  tmm_modules[TMM_DECODENAPATECH].ThreadInit = NoNapatechSupportExit;
65 }
66 
67 TmEcode NoNapatechSupportExit(ThreadVars *tv, const void *initdata, void **data)
68 {
70  "Error creating thread %s: you do not have support for Napatech adapter "
71  "enabled please recompile with --enable-napatech",
72  tv->name);
73  exit(EXIT_FAILURE);
74 }
75 
76 #else /* Implied we do have NAPATECH support */
77 
78 
79 #include <numa.h>
80 #include <nt.h>
81 
82 extern int max_pending_packets;
83 
84 typedef struct NapatechThreadVars_
85 {
86  ThreadVars *tv;
87  NtNetStreamRx_t rx_stream;
88  uint16_t stream_id;
89  int hba;
90  TmSlot *slot;
91 } NapatechThreadVars;
92 
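/* Note: the "hba" member carries the host buffer allowance configured for the
 * stream; it is passed to NT_NetRxOpen() in NapatechPacketLoop() and, when
 * positive, enables the per-stream "hba_drop" counter and the
 * "Host Buffer Allowance Drops" summary printed when the loop exits. */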
93 #ifdef NAPATECH_ENABLE_BYPASS
94 static int NapatechBypassCallback(Packet *p);
95 #endif
96 
97 TmEcode NapatechStreamThreadInit(ThreadVars *, const void *, void **);
98 void NapatechStreamThreadExitStats(ThreadVars *, void *);
99 TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot);
100 
101 TmEcode NapatechDecodeThreadInit(ThreadVars *, const void *, void **);
102 TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data);
103 TmEcode NapatechDecode(ThreadVars *, Packet *, void *);
104 
105 /* These are used as the threads are exiting to get a comprehensive count of
106  * all the packets received and dropped.
107  */
108 SC_ATOMIC_DECLARE(uint64_t, total_packets);
109 SC_ATOMIC_DECLARE(uint64_t, total_drops);
110 SC_ATOMIC_DECLARE(uint16_t, total_tallied);
111 
112 /* Streams are counted as they are instantiated in order to know when all threads
113  * are running. */
114 SC_ATOMIC_DECLARE(uint16_t, stream_count);
115 
116 SC_ATOMIC_DECLARE(uint16_t, numa0_count);
117 SC_ATOMIC_DECLARE(uint16_t, numa1_count);
118 SC_ATOMIC_DECLARE(uint16_t, numa2_count);
119 SC_ATOMIC_DECLARE(uint16_t, numa3_count);
120 
121 SC_ATOMIC_DECLARE(uint64_t, flow_callback_cnt);
122 SC_ATOMIC_DECLARE(uint64_t, flow_callback_handled_pkts);
123 SC_ATOMIC_DECLARE(uint64_t, flow_callback_udp_pkts);
124 SC_ATOMIC_DECLARE(uint64_t, flow_callback_tcp_pkts);
125 SC_ATOMIC_DECLARE(uint64_t, flow_callback_unhandled_pkts);
126 
127 /**
128  * \brief Register the Napatech receiver (reader) module.
129  */
130 void TmModuleNapatechStreamRegister(void)
131 {
132  tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream";
133  tmm_modules[TMM_RECEIVENAPATECH].ThreadInit = NapatechStreamThreadInit;
134  tmm_modules[TMM_RECEIVENAPATECH].Func = NULL;
135  tmm_modules[TMM_RECEIVENAPATECH].PktAcqLoop = NapatechPacketLoop;
136  tmm_modules[TMM_RECEIVENAPATECH].PktAcqBreakLoop = NULL;
137  tmm_modules[TMM_RECEIVENAPATECH].ThreadExitPrintStats = NapatechStreamThreadExitStats;
138  tmm_modules[TMM_RECEIVENAPATECH].ThreadDeinit = NapatechStreamThreadDeinit;
139  tmm_modules[TMM_RECEIVENAPATECH].RegisterTests = NULL;
140  tmm_modules[TMM_RECEIVENAPATECH].cap_flags = SC_CAP_NET_ADMIN | SC_CAP_NET_RAW;
141  tmm_modules[TMM_RECEIVENAPATECH].flags = TM_FLAG_RECEIVE_TM;
142 
143  SC_ATOMIC_INIT(total_packets);
144  SC_ATOMIC_INIT(total_drops);
145  SC_ATOMIC_INIT(total_tallied);
146  SC_ATOMIC_INIT(stream_count);
147 
148  SC_ATOMIC_INIT(numa0_count);
149  SC_ATOMIC_INIT(numa1_count);
150  SC_ATOMIC_INIT(numa2_count);
151  SC_ATOMIC_INIT(numa3_count);
152 
153  SC_ATOMIC_INIT(flow_callback_cnt);
154  SC_ATOMIC_INIT(flow_callback_handled_pkts);
155  SC_ATOMIC_INIT(flow_callback_udp_pkts);
156  SC_ATOMIC_INIT(flow_callback_tcp_pkts);
157  SC_ATOMIC_INIT(flow_callback_unhandled_pkts);
158 }
159 
160 /**
161  * \brief Register the Napatech decoder module.
162  */
163 void TmModuleNapatechDecodeRegister(void)
164 {
165  tmm_modules[TMM_DECODENAPATECH].name = "NapatechDecode";
166  tmm_modules[TMM_DECODENAPATECH].ThreadInit = NapatechDecodeThreadInit;
167  tmm_modules[TMM_DECODENAPATECH].Func = NapatechDecode;
168  tmm_modules[TMM_DECODENAPATECH].ThreadExitPrintStats = NULL;
169  tmm_modules[TMM_DECODENAPATECH].ThreadDeinit = NapatechDecodeThreadDeinit;
170  tmm_modules[TMM_DECODENAPATECH].RegisterTests = NULL;
171  tmm_modules[TMM_DECODENAPATECH].cap_flags = 0;
172  tmm_modules[TMM_DECODENAPATECH].flags = TM_FLAG_DECODE_TM;
173 }
174 
175 #ifdef NAPATECH_ENABLE_BYPASS
176 /**
177  * \brief template of IPv4 header
178  */
179 struct ipv4_hdr
180 {
181  uint8_t version_ihl; /**< version and header length */
182  uint8_t type_of_service; /**< type of service */
183  uint16_t total_length; /**< length of packet */
184  uint16_t packet_id; /**< packet ID */
185  uint16_t fragment_offset; /**< fragmentation offset */
186  uint8_t time_to_live; /**< time to live */
187  uint8_t next_proto_id; /**< protocol ID */
188  uint16_t hdr_checksum; /**< header checksum */
189  uint32_t src_addr; /**< source address */
190  uint32_t dst_addr; /**< destination address */
191 } __attribute__ ((__packed__));
192 
193 /**
194  * \brief template of IPv6 header
195  */
196 struct ipv6_hdr
197 {
198  uint32_t vtc_flow; /**< IP version, traffic class & flow label. */
199  uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */
200  uint8_t proto; /**< Protocol, next header. */
201  uint8_t hop_limits; /**< Hop limits. */
202  uint8_t src_addr[16]; /**< IP address of source host. */
203  uint8_t dst_addr[16]; /**< IP address of destination host(s). */
204 } __attribute__ ((__packed__));
205 
206 /**
207  * \brief template of UDP header
208  */
209 struct udp_hdr
210 {
211  uint16_t src_port; /**< UDP source port. */
212  uint16_t dst_port; /**< UDP destination port. */
213  uint16_t dgram_len; /**< UDP datagram length */
214  uint16_t dgram_cksum; /**< UDP datagram checksum */
215 } __attribute__ ((__packed__));
216 
217 /**
218  * \brief template of TCP header
219  */
220 struct tcp_hdr
221 {
222  uint16_t src_port; /**< TCP source port. */
223  uint16_t dst_port; /**< TCP destination port. */
224  uint32_t sent_seq; /**< TX data sequence number. */
225  uint32_t recv_ack; /**< RX data acknowledgement sequence number. */
226  uint8_t data_off; /**< Data offset. */
227  uint8_t tcp_flags; /**< TCP flags */
228  uint16_t rx_win; /**< RX flow control window. */
229  uint16_t cksum; /**< TCP checksum. */
230  uint16_t tcp_urp; /**< TCP urgent pointer, if any. */
231 } __attribute__ ((__packed__));
232 
233 
234 /* The hardware will assign a "color" value indicating what filters are matched
235  * by a given packet. These constants indicate what bits are set in the color
236  * field for different protocols
237  *
238  */
239 #define RTE_PTYPE_L2_ETHER 0x10000000
240 #define RTE_PTYPE_L3_IPV4 0x01000000
241 #define RTE_PTYPE_L3_IPV6 0x04000000
242 #define RTE_PTYPE_L4_TCP 0x00100000
243 #define RTE_PTYPE_L4_UDP 0x00200000
244 
245 /* These masks are used to extract layer 3 and layer 4 protocol
246  * values from the color field in the packet descriptor.
247  */
248 #define RTE_PTYPE_L3_MASK 0x0f000000
249 #define RTE_PTYPE_L4_MASK 0x00f00000
250 
251 #define COLOR_IS_SPAN 0x00001000
252 
253 static int is_inline = 0;
254 static int inline_port_map[MAX_PORTS] = { -1 };
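/* Note: the "{ -1 }" initializer only sets element 0 of inline_port_map; the
 * whole map is explicitly reset to -1 in NapatechPacketLoop() before any port
 * pairings are programmed through NapatechSetPortmap(). */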
255 
256 /**
257  * \brief Binds two ports together for inline operation.
258  *
259  * Records the port/peer pairing used to forward packets when running inline.
260  *
261  * \param port one of the ports in a pairing.
262  * \param peer the other port in a pairing.
263  * \return 1 if the pairing was recorded, 0 if a pairing was already configured.
264  *
265  */
266 int NapatechSetPortmap(int port, int peer)
267 {
268  if ((inline_port_map[port] == -1) && (inline_port_map[peer] == -1)) {
269  inline_port_map[port] = peer;
270  inline_port_map[peer] = port;
271  } else {
273  "Port pairing is already configured.");
274  return 0;
275  }
276  return 1;
277 }
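/* Illustrative usage (hypothetical port numbers): NapatechSetPortmap(0, 1)
 * pairs ports 0 and 1 so that, in inline mode, a packet received on one port
 * is transmitted on its peer via NT_NET_SET_PKT_TXPORT() in the packet loop. */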
278 
279 /**
280  * \brief Returns the ID of the adapter
281  *
282  * Get the ID of an adapter on which a given port resides.
283  *
284  * \param port for which adapter ID is requested.
285  * \return ID of the adapter.
286  *
287  */
288 int NapatechGetAdapter(uint8_t port)
289 {
290  static int port_adapter_map[MAX_PORTS] = { -1 };
291  int status;
292  NtInfo_t h_info; /* Info handle */
293  NtInfoStream_t h_info_stream; /* Info stream handle */
294 
295  if (unlikely(port_adapter_map[port] == -1)) {
296  if ((status = NT_InfoOpen(&h_info_stream, "ExampleInfo")) != NT_SUCCESS) {
298  return -1;
299  }
300  /* Read the system info */
301  h_info.cmd = NT_INFO_CMD_READ_PORT_V9;
302  h_info.u.port_v9.portNo = (uint8_t) port;
303  if ((status = NT_InfoRead(h_info_stream, &h_info)) != NT_SUCCESS) {
304  /* Get the status code as text */
306  NT_InfoClose(h_info_stream);
307  return -1;
308  }
309  port_adapter_map[port] = h_info.u.port_v9.data.adapterNo;
310  }
311  return port_adapter_map[port];
312 }
313 
314 /**
315  * \brief IPv4 4-tuple convenience structure
316  */
317 struct IPv4Tuple4
318 {
319  uint32_t sa; /*!< Source address */
320  uint32_t da; /*!< Destination address */
321  uint16_t sp; /*!< Source port */
322  uint16_t dp; /*!< Destination port */
323 };
324 
325 /**
326  * \brief IPv6 4-tuple convenience structure
327  */
328 struct IPv6Tuple4
329 {
330  uint8_t sa[16]; /*!< Source address */
331  uint8_t da[16]; /*!< Destination address */
332  uint16_t sp; /*!< Source port */
333  uint16_t dp; /*!< Destination port */
334 };
335 
336 
337 /**
338  * \brief Compares two IPv6 addresses byte-by-byte (network byte order).
339  *
340  *
341  * \param addr_a The first address to compare
342  * \param addr_b The second address to compare
343  *
344  * \return -1 if addr_a < addr_b
345  * 1 if addr_a > addr_b
346  * 0 if addr_a == addr_b
347  */
348 static int CompareIPv6Addr(uint8_t addr_a[16], uint8_t addr_b[16]) {
349  uint16_t pos;
350  for (pos = 0; pos < 16; ++pos) {
351  if (addr_a[pos] < addr_b[pos]) {
352  return -1;
353  } else if (addr_a[pos] > addr_b[pos]) {
354  return 1;
355  } /* else they are equal - check next position*/
356  }
357 
358  /* if we get here the addresses are equal */
359  return 0;
360 }
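/* This behaves like memcmp(addr_a, addr_b, 16) with the result normalized
 * to -1, 0 or 1, comparing the addresses in network byte order. */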
361 
362 /**
363  * \brief Initializes the FlowStreams used to program flow data.
364  *
365  * Opens a FlowStream on the adapter associated with the rx port. This
366  * FlowStream is subsequently used to program the adapter with
367  * flows to bypass.
368  *
369  * \return the flow stream handle, NULL if failure.
370  */
371 static NtFlowStream_t InitFlowStream(int adapter, int stream_id)
372 {
373  int status;
374  NtFlowStream_t hFlowStream;
375 
376  NtFlowAttr_t attr;
377  char flow_name[80];
378 
379  NT_FlowOpenAttrInit(&attr);
380  NT_FlowOpenAttrSetAdapterNo(&attr, adapter);
381 
382  snprintf(flow_name, sizeof(flow_name), "Flow_stream_%d", stream_id );
383  SCLogDebug("Opening flow programming stream: %s", flow_name);
384  if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) {
386  "Napatech bypass functionality not supported by the FPGA version on adapter %d - disabling support.",
387  adapter);
388  return NULL;
389  }
390  return hFlowStream;
391 }
392 
393 /**
394  * \brief Programs the Napatech hardware flow tables to bypass a flow.
395  *
396  * Sets up the flow tables on the Napatech card so that subsequent
397  * packets from this flow are bypassed in hardware.
398  *
399  * \param p packet containing information about the flow to be bypassed
400  * \param is_inline indicates if Suricata is being run in inline mode.
401  *
402  * \return 1 on success, 0 on failure.
403  *
404  */
405 static int ProgramFlow(Packet *p, int is_inline)
406 {
407  NtFlow_t flow_match;
408  memset(&flow_match, 0, sizeof(flow_match));
409 
410  NapatechPacketVars *ntpv = &(p->ntpv);
411 
412  /*
413  * The hardware decoder will "color" the packets according to the protocols
414  * in the packet and the port the packet arrived on. packet_type gets
415  * these bits and we mask out layer3, layer4, and is_span to determine
416  * the protocols and if the packet is coming in from a SPAN port.
417  */
418  uint32_t packet_type = ((ntpv->dyn3->color_hi << 14) & 0xFFFFC000) | ntpv->dyn3->color_lo;
419  uint8_t *packet = (uint8_t *) ntpv->dyn3 + ntpv->dyn3->descrLength;
420 
421  uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK;
422  uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK;
423  uint32_t is_span = packet_type & COLOR_IS_SPAN;
424 
425  /*
426  * When we're programming the flows to arrive on a span port,
427  * where upstream and downstream packets arrive on the same port,
428  * the hardware is configured to swap the source and dest
429  * fields if the src addr > dest addr. We need to program the
430  * flow tables to match. We'll compare addresses and set
431  * do_swap accordingly.
432  */
433 
434  uint32_t do_swap = 0;
435 
436  SC_ATOMIC_ADD(flow_callback_cnt, 1);
437 
438  /* Only bypass TCP and UDP */
439  if (PKT_IS_TCP(p)) {
440  SC_ATOMIC_ADD(flow_callback_tcp_pkts, 1);
441  } else if (PKT_IS_UDP(p)) {
442  SC_ATOMIC_ADD(flow_callback_udp_pkts, 1);
443  } else {
444  SC_ATOMIC_ADD(flow_callback_unhandled_pkts, 1);
445  }
446 
447  struct IPv4Tuple4 v4Tuple;
448  struct IPv6Tuple4 v6Tuple;
449  struct ipv4_hdr *pIPv4_hdr = NULL;
450  struct ipv6_hdr *pIPv6_hdr = NULL;
451 
452  switch (layer3) {
453  case RTE_PTYPE_L3_IPV4:
454  {
455  pIPv4_hdr = (struct ipv4_hdr *) (packet + ntpv->dyn3->offset0);
456  if (!is_span) {
457  v4Tuple.sa = pIPv4_hdr->src_addr;
458  v4Tuple.da = pIPv4_hdr->dst_addr;
459  } else {
460  do_swap = (htonl(pIPv4_hdr->src_addr) > htonl(pIPv4_hdr->dst_addr));
461  if (!do_swap) {
462  /* already in order */
463  v4Tuple.sa = pIPv4_hdr->src_addr;
464  v4Tuple.da = pIPv4_hdr->dst_addr;
465  } else { /* swap */
466  v4Tuple.sa = pIPv4_hdr->dst_addr;
467  v4Tuple.da = pIPv4_hdr->src_addr;
468  }
469  }
470  break;
471  }
472  case RTE_PTYPE_L3_IPV6:
473  {
474  pIPv6_hdr = (struct ipv6_hdr *) (packet + ntpv->dyn3->offset0);
475  do_swap = (CompareIPv6Addr(pIPv6_hdr->src_addr, pIPv6_hdr->dst_addr) > 0);
476 
477  if (!is_span) {
478  memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
479  memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
480  } else {
481  /* sort src/dest address before programming */
482  if (!do_swap) {
483  /* already in order */
484  memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
485  memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
486  } else { /* swap the addresses */
487  memcpy(&(v6Tuple.sa), pIPv6_hdr->dst_addr, 16);
488  memcpy(&(v6Tuple.da), pIPv6_hdr->src_addr, 16);
489  }
490  }
491  break;
492  }
493  default:
494  {
495  return 0;
496  }
497  }
498 
499  switch (layer4) {
500  case RTE_PTYPE_L4_TCP:
501  {
502  struct tcp_hdr *tcp_hdr = (struct tcp_hdr *) (packet + ntpv->dyn3->offset1);
503  if (layer3 == RTE_PTYPE_L3_IPV4) {
504  if (!is_span) {
505  v4Tuple.dp = tcp_hdr->dst_port;
506  v4Tuple.sp = tcp_hdr->src_port;
507  flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
508  } else {
509  if (!do_swap) {
510  v4Tuple.sp = tcp_hdr->src_port;
511  v4Tuple.dp = tcp_hdr->dst_port;
512  } else {
513  v4Tuple.sp = tcp_hdr->dst_port;
514  v4Tuple.dp = tcp_hdr->src_port;
515  }
516  flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
517  }
518  memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
519  } else {
520  if (!is_span) {
521  v6Tuple.dp = tcp_hdr->dst_port;
522  v6Tuple.sp = tcp_hdr->src_port;
523  flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
524  } else {
525  if (!do_swap) {
526  v6Tuple.sp = tcp_hdr->src_port;
527  v6Tuple.dp = tcp_hdr->dst_port;
528  } else {
529  v6Tuple.dp = tcp_hdr->src_port;
530  v6Tuple.sp = tcp_hdr->dst_port;
531  }
532  flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
533  }
534  memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
535  }
536  flow_match.ipProtocolField = 6;
537  break;
538  }
539  case RTE_PTYPE_L4_UDP:
540  {
541  struct udp_hdr *udp_hdr = (struct udp_hdr *) (packet + ntpv->dyn3->offset1);
542  if (layer3 == RTE_PTYPE_L3_IPV4) {
543  if (!is_span) {
544  v4Tuple.dp = udp_hdr->dst_port;
545  v4Tuple.sp = udp_hdr->src_port;
546  flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
547  } else {
548  if (!do_swap) {
549  v4Tuple.sp = udp_hdr->src_port;
550  v4Tuple.dp = udp_hdr->dst_port;
551  } else {
552  v4Tuple.dp = udp_hdr->src_port;
553  v4Tuple.sp = udp_hdr->dst_port;
554  }
555  flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
556  }
557  memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
558  } else { /* layer3 is IPV6 */
559  if (!is_span) {
560  v6Tuple.dp = udp_hdr->dst_port;
561  v6Tuple.sp = udp_hdr->src_port;
562  flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
563  } else {
564  if (!do_swap) {
565  v6Tuple.sp = udp_hdr->src_port;
566  v6Tuple.dp = udp_hdr->dst_port;
567  } else {
568  v6Tuple.dp = udp_hdr->src_port;
569  v6Tuple.sp = udp_hdr->dst_port;
570  }
571  flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
572  }
573  memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
574  }
575  flow_match.ipProtocolField = 17;
576  break;
577  }
578  default:
579  {
580  return 0;
581  }
582  }
583 
584  flow_match.op = 1; /* program flow */
585  flow_match.gfi = 1; /* Generate FlowInfo records */
586  flow_match.tau = 1; /* tcp automatic unlearn */
587 
588  if (PACKET_TEST_ACTION(p, ACTION_DROP)) {
589  flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
590  } else {
591  if (is_inline) {
592  flow_match.keySetId = NAPATECH_FLOWTYPE_PASS;
593  } else {
594  flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
595  }
596  }
597 
598  if (NT_FlowWrite(ntpv->flow_stream, &flow_match, -1) != NT_SUCCESS) {
599  if (!(suricata_ctl_flags & SURICATA_STOP)) {
600  SCLogError(SC_ERR_NAPATECH_OPEN_FAILED,"NT_FlowWrite failed!.");
601  exit(EXIT_FAILURE);
602  }
603  }
604 
605  return 1;
606 }
607 
608 /**
609  * \brief Callback from Suricata when a flow that should be bypassed
610  * is identified.
611  */
612 
613 static int NapatechBypassCallback(Packet *p)
614 {
615  NapatechPacketVars *ntpv = &(p->ntpv);
616 
617  /*
618  * Since, at this point, we don't know what action to take,
619  * simply mark this packet as one that should be
620  * bypassed when the packet is returned by suricata with a
621  * pass/drop verdict.
622  */
623  ntpv->bypass = 1;
624 
625  return 1;
626 }
627 
628 #endif
629 
630 /**
631  * \brief Initialize the Napatech receiver thread, generate a single
632  * NapatechThreadVar structure for each thread, this will
633  * contain a NtNetStreamRx_t stream handle which is used when the
634  * thread executes to acquire the packets.
635  *
636  * \param tv pointer to the ThreadVars for this thread
637  * \param initdata pointer to the NapatechStreamDevConf for this stream,
638  * as provided by the user configuration.
639  *
640  * For now, we assume that we have only a single name for the NAPATECH
641  * adapter.
642  *
643  * \param data output pointer that gets populated with the NapatechThreadVars
644  *
645  */
646 TmEcode NapatechStreamThreadInit(ThreadVars *tv, const void *initdata, void **data)
647 {
648  SCEnter();
649  struct NapatechStreamDevConf *conf = (struct NapatechStreamDevConf *) initdata;
650  uint16_t stream_id = conf->stream_id;
651  *data = NULL;
652 
653  NapatechThreadVars *ntv = SCCalloc(1, sizeof (NapatechThreadVars));
654  if (unlikely(ntv == NULL)) {
655  SCLogError(SC_ERR_MEM_ALLOC, "Failed to allocate memory for NAPATECH thread vars.");
656  exit(EXIT_FAILURE);
657  }
658 
659  memset(ntv, 0, sizeof (NapatechThreadVars));
660  ntv->stream_id = stream_id;
661  ntv->tv = tv;
662  ntv->hba = conf->hba;
663  SCLogDebug("Started processing packets from NAPATECH Stream: %lu", ntv->stream_id);
664 
665  *data = (void *) ntv;
666  SCReturnInt(TM_ECODE_OK);
667 }
668 
669 /**
670  * \brief Callback to indicate that the packet buffer can be returned to the hardware.
671  *
672  * Called when Suricata is done processing the packet. Before the Napatech
673  * buffer is returned to the hardware, this checks the verdict to see if the
674  * packet should be dropped and, if the flow is to be bypassed, programs the flow hardware.
675  *
676  *
677  * \param p Packet to return to the system.
678  *
679  */
680 static void NapatechReleasePacket(struct Packet_ *p)
681 {
682  /*
683  * If the packet is to be dropped we need to set the wirelength
684  * before releasing the Napatech buffer back to NTService.
685  */
686  if (is_inline && PACKET_TEST_ACTION(p, ACTION_DROP)) {
687  p->ntpv.dyn3->wireLength = 0;
688  }
689 
690 #ifdef NAPATECH_ENABLE_BYPASS
691  /*
692  * If this flow is to be programmed for hardware bypass we do it now. This is done
693  * here because the action is not available in the packet structure at the time of the
694  * bypass callback and it needs to be done before we release the packet structure.
695  */
696  if (p->ntpv.bypass == 1) {
697  ProgramFlow(p, is_inline);
698  }
699 #endif
700 
701  NT_NetRxRelease(p->ntpv.rx_stream, p->ntpv.nt_packet_buf);
702  PacketFreeOrRelease(p);
703 }
704 
705 /**
706  * \brief Returns the NUMA node associated with the currently running thread.
707  *
708  * \return ID of the NUMA node.
709  *
710  */
711 static int GetNumaNode(void)
712 {
713  int cpu = 0;
714  int node = 0;
715 
716 #if defined(__linux__)
717  cpu = sched_getcpu();
718  node = numa_node_of_cpu(cpu);
719 #else
721  "Auto configuration of NUMA node is not supported on this OS.");
722 #endif
723 
724  return node;
725 }
726 
727 /**
728  * \brief Outputs hints on the optimal host-buffer configuration to aid tuning.
729  *
730  * \param log_level the log level at which the recommendations are emitted.
731  *
732  */
733 static void RecommendNUMAConfig(SCLogLevel log_level)
734 {
735  char string0[16];
736  char string1[16];
737  char string2[16];
738  char string3[16];
739  int set_cpu_affinity = 0;
740 
741  if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) {
742  set_cpu_affinity = 0;
743  }
744 
745  if (set_cpu_affinity) {
746  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
747  "Minimum host buffers that should be defined in ntservice.ini:");
748 
749  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, " NUMA Node 0: %d",
750  (SC_ATOMIC_GET(numa0_count)));
751 
752  if (numa_max_node() >= 1)
753  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
754  " NUMA Node 1: %d ", (SC_ATOMIC_GET(numa1_count)));
755 
756  if (numa_max_node() >= 2)
757  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
758  " NUMA Node 2: %d ", (SC_ATOMIC_GET(numa2_count)));
759 
760  if (numa_max_node() >= 3)
761  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
762  " NUMA Node 3: %d ", (SC_ATOMIC_GET(numa3_count)));
763 
764  snprintf(string0, 16, "[%d, 16, 0]", SC_ATOMIC_GET(numa0_count));
765  snprintf(string1, 16, (numa_max_node() >= 1 ? ",[%d, 16, 1]" : ""),
766  SC_ATOMIC_GET(numa1_count));
767  snprintf(string2, 16, (numa_max_node() >= 2 ? ",[%d, 16, 2]" : ""),
768  SC_ATOMIC_GET(numa2_count));
769  snprintf(string3, 16, (numa_max_node() >= 3 ? ",[%d, 16, 3]" : ""),
770  SC_ATOMIC_GET(numa3_count));
771 
772  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
773  "E.g.: HostBuffersRx=%s%s%s%s", string0, string1, string2,
774  string3);
775  } else if (log_level == SC_LOG_ERROR) {
777  "Or, try running /opt/napatech3/bin/ntpl -e \"delete=all\" to clean-up stream NUMA config.");
778  }
779 }
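/* Illustrative example (hypothetical counts): with four streams pinned to NUMA
 * node 0 and four to node 1, the recommendation printed above corresponds to an
 * ntservice.ini line such as
 *     HostBuffersRx=[4, 16, 0],[4, 16, 1]
 * i.e. one [host buffers, buffer size, NUMA node] triple per node, matching the
 * "[%d, 16, n]" fragments assembled by this function. */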
780 
781 /**
782  * \brief Main Napatech packet processing loop
783  *
784  * \param tv pointer to the ThreadVars for this thread
785  * \param data Pointer to NapatechThreadVars with data specific to Napatech
786  * \param slot TMSlot where this instance is running.
787  *
788  */
789 TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot)
790 {
791  int32_t status;
792  char error_buffer[100];
793  uint64_t pkt_ts;
794  NtNetBuf_t packet_buffer;
795  NapatechThreadVars *ntv = (NapatechThreadVars *) data;
796  uint64_t hba_pkt_drops = 0;
797  uint64_t hba_byte_drops = 0;
798  uint16_t hba_pkt = 0;
799  int numa_node = -1;
800  int set_cpu_affinity = 0;
801  int closer = 0;
802  int is_autoconfig = 0;
803 
804  /* This just keeps the startup output more orderly. */
805  usleep(200000 * ntv->stream_id);
806 
807 #ifdef NAPATECH_ENABLE_BYPASS
808  NtFlowStream_t flow_stream[MAX_ADAPTERS] = { 0 };
809 
810  /* Get a FlowStream handle for each adapter so we can efficiently find the
811  * correct handle corresponding to the port on which a packet is received.
812  */
813  int adapter = 0;
814  for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
815  flow_stream[adapter] = InitFlowStream(adapter, ntv->stream_id);
816  }
817 #endif
818 
819  if (ConfGetBool("napatech.auto-config", &is_autoconfig) == 0) {
820  is_autoconfig = 0;
821  }
822 
823  if (is_autoconfig) {
824  numa_node = GetNumaNode();
825  switch (numa_node) {
826  case 0:
827  SC_ATOMIC_ADD(numa0_count, 1);
828  break;
829  case 1:
830  SC_ATOMIC_ADD(numa1_count, 1);
831  break;
832  case 2:
833  SC_ATOMIC_ADD(numa2_count, 1);
834  break;
835  case 3:
836  SC_ATOMIC_ADD(numa3_count, 1);
837  break;
838  default:
839  break;
840  }
841 
842  if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) {
843  set_cpu_affinity = 0;
844  }
845 
846  if (set_cpu_affinity) {
847  NapatechSetupNuma(ntv->stream_id, numa_node);
848  }
849 
850  numa_node = GetNumaNode();
851  SC_ATOMIC_ADD(stream_count, 1);
852  if (SC_ATOMIC_GET(stream_count) == NapatechGetNumConfiguredStreams()) {
853 
854  if (ConfGetBool("napatech.inline", &is_inline) == 0) {
855  is_inline = 0;
856  }
857 
858 #ifdef NAPATECH_ENABLE_BYPASS
859  /* Initialize the port map before we setup traffic filters */
860  for (int i = 0; i < MAX_PORTS; ++i) {
861  inline_port_map[i] = -1;
862  }
863 #endif
864  /* The last thread to run sets up and deletes the streams */
865  status = NapatechSetupTraffic(NapatechGetNumFirstStream(),
866  NapatechGetNumLastStream());
867 
868  closer = 1;
869 
870  if (status == 0x20002061) {
872  "Check host buffer configuration in ntservice.ini.");
873  RecommendNUMAConfig(SC_LOG_ERROR);
874  exit(EXIT_FAILURE);
875 
876  } else if (status == 0x20000008) {
878  "Check napatech.ports in the suricata config file.");
879  exit(EXIT_FAILURE);
880  }
881  RecommendNUMAConfig(SC_LOG_PERF);
882  SCLogNotice("Napatech packet input engine started.");
883  }
884  } // is_autoconfig
885 
886  SCLogInfo(
887  "Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d stream: %3u ",
888  sched_getcpu(), numa_node, ntv->stream_id);
889 
890  if (ntv->hba > 0) {
891  char *s_hbad_pkt = SCCalloc(1, 32);
892  if (unlikely(s_hbad_pkt == NULL)) {
894  "Failed to allocate memory for NAPATECH stream counter.");
895  exit(EXIT_FAILURE);
896  }
897  snprintf(s_hbad_pkt, 32, "nt%d.hba_drop", ntv->stream_id);
898  hba_pkt = StatsRegisterCounter(s_hbad_pkt, tv);
900  StatsSetUI64(tv, hba_pkt, 0);
901  }
902  SCLogDebug("Opening NAPATECH Stream: %lu for processing", ntv->stream_id);
903 
904  if ((status = NT_NetRxOpen(&(ntv->rx_stream), "SuricataStream",
905  NT_NET_INTERFACE_PACKET, ntv->stream_id, ntv->hba)) != NT_SUCCESS) {
906 
907  NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
908  SCFree(ntv);
909  SCReturnInt(TM_ECODE_FAILED);
910  }
911  TmSlot *s = (TmSlot *) slot;
912  ntv->slot = s->slot_next;
913 
914  while (!(suricata_ctl_flags & SURICATA_STOP)) {
915  /* make sure we have at least one packet in the packet pool, to prevent
916  * us from alloc'ing packets at line rate */
917  PacketPoolWait();
918 
919  /* Napatech returns packets 1 at a time */
920  status = NT_NetRxGet(ntv->rx_stream, &packet_buffer, 1000);
921  if (unlikely(
922  status == NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) {
923  if (status == NT_STATUS_TIMEOUT) {
924  TmThreadsCaptureHandleTimeout(tv, NULL);
925  }
926  continue;
927  } else if (unlikely(status != NT_SUCCESS)) {
929  SCLogInfo("Failed to read from Napatech Stream %d: %s",
930  ntv->stream_id, error_buffer);
931  break;
932  }
933 
934  Packet *p = PacketGetFromQueueOrAlloc();
935 #ifdef NAPATECH_ENABLE_BYPASS
936  p->ntpv.bypass = 0;
937 #endif
938 
939  p->ntpv.rx_stream = ntv->rx_stream;
940 
941  if (unlikely(p == NULL)) {
942  NT_NetRxRelease(ntv->rx_stream, packet_buffer);
943  SCReturnInt(TM_ECODE_FAILED);
944  }
945 
946  pkt_ts = NT_NET_GET_PKT_TIMESTAMP(packet_buffer);
947 
948  /*
949  * Handle the different timestamp forms that the Napatech cards could use.
950  * NT_TIMESTAMP_TYPE_NATIVE is not supported due to having a base
951  * of 0 as opposed to NATIVE_UNIX which has a base of 1/1/1970.
952  */
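/* As the divisors below imply, the NATIVE_UNIX and NATIVE_NDIS timestamps are
 * expressed in 10 ns units (divide by 100000000 for seconds, by 100 for
 * microseconds), while the PCAP forms pack the seconds into the upper 32 bits
 * of the 64-bit value. */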
953  switch (NT_NET_GET_PKT_TIMESTAMP_TYPE(packet_buffer)) {
954  case NT_TIMESTAMP_TYPE_NATIVE_UNIX:
955  p->ts.tv_sec = pkt_ts / 100000000;
956  p->ts.tv_usec = ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0);
957  break;
958  case NT_TIMESTAMP_TYPE_PCAP:
959  p->ts.tv_sec = pkt_ts >> 32;
960  p->ts.tv_usec = pkt_ts & 0xFFFFFFFF;
961  break;
962  case NT_TIMESTAMP_TYPE_PCAP_NANOTIME:
963  p->ts.tv_sec = pkt_ts >> 32;
964  p->ts.tv_usec = ((pkt_ts & 0xFFFFFFFF) / 1000) + ((pkt_ts % 1000) > 500 ? 1 : 0);
965  break;
966  case NT_TIMESTAMP_TYPE_NATIVE_NDIS:
967  /* number of seconds between 1/1/1601 and 1/1/1970 */
968  p->ts.tv_sec = (pkt_ts / 100000000) - 11644473600;
969  p->ts.tv_usec = ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0);
970  break;
971  default:
973  "Packet from Napatech Stream: %u does not have a supported timestamp format",
974  ntv->stream_id);
975  NT_NetRxRelease(ntv->rx_stream, packet_buffer);
976  SCReturnInt(TM_ECODE_FAILED);
977  }
978 
979  if (unlikely(ntv->hba > 0)) {
980  NtNetRx_t stat_cmd;
981  stat_cmd.cmd = NT_NETRX_READ_CMD_STREAM_DROP;
982  /* Update drop counter */
983  if (unlikely((status = NT_NetRxRead(ntv->rx_stream, &stat_cmd)) != NT_SUCCESS)) {
985  SCLogInfo("Couldn't retrieve drop statistics from the RX stream: %u",
986  ntv->stream_id);
987  } else {
988  hba_pkt_drops = stat_cmd.u.streamDrop.pktsDropped;
989 
990  StatsSetUI64(tv, hba_pkt, hba_pkt_drops);
991  }
993  }
994 
995 #ifdef NAPATECH_ENABLE_BYPASS
996  p->ntpv.dyn3 = _NT_NET_GET_PKT_DESCR_PTR_DYN3(packet_buffer);
997  p->BypassPacketsFlow = (NapatechIsBypassSupported() ? NapatechBypassCallback : NULL);
998  NT_NET_SET_PKT_TXPORT(packet_buffer, inline_port_map[p->ntpv.dyn3->rxPort]);
999  p->ntpv.flow_stream = flow_stream[NapatechGetAdapter(p->ntpv.dyn3->rxPort)];
1000 
1001 #endif
1002 
1003  p->ReleasePacket = NapatechReleasePacket;
1004  p->ntpv.nt_packet_buf = packet_buffer;
1005  p->ntpv.stream_id = ntv->stream_id;
1006  p->datalink = LINKTYPE_ETHERNET;
1007 
1008  if (unlikely(PacketSetData(p, (uint8_t *)NT_NET_GET_PKT_L2_PTR(packet_buffer), NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) {
1009  TmqhOutputPacketpool(ntv->tv, p);
1010  NT_NetRxRelease(ntv->rx_stream, packet_buffer);
1011  SCReturnInt(TM_ECODE_FAILED);
1012  }
1013 
1014  if (unlikely(TmThreadsSlotProcessPkt(ntv->tv, ntv->slot, p) != TM_ECODE_OK)) {
1015  NT_NetRxRelease(ntv->rx_stream, packet_buffer);
1016  SCReturnInt(TM_ECODE_FAILED);
1017  }
1018 
1019  /*
1020  * At this point the packet and the Napatech Packet Buffer have been returned
1021  * to the system in the NapatechReleasePacket() Callback.
1022  */
1023 
1025  } // while
1026 
1027  if (closer) {
1028  NapatechDeleteFilters();
1029  }
1030 
1031  if (unlikely(ntv->hba > 0)) {
1032  SCLogInfo("Host Buffer Allowance Drops - pkts: %ld, bytes: %ld", hba_pkt_drops, hba_byte_drops);
1033  }
1034 
1035  SCReturnInt(TM_ECODE_OK);
1036 }
1037 
1038 /**
1039  * \brief Print some stats to the log at program exit.
1040  *
1041  * \param tv Pointer to ThreadVars.
1042  * \param data Pointer to data, NapatechThreadVars.
1043  */
1044 void NapatechStreamThreadExitStats(ThreadVars *tv, void *data)
1045 {
1046  NapatechThreadVars *ntv = (NapatechThreadVars *) data;
1047  NapatechCurrentStats stat = NapatechGetCurrentStats(ntv->stream_id);
1048 
1049  double percent = 0;
1050  if (stat.current_drop_packets > 0)
1051  percent = (((double) stat.current_drop_packets)
1052  / (stat.current_packets + stat.current_drop_packets)) * 100;
1053 
1054  SCLogInfo("nt%lu - pkts: %lu; drop: %lu (%5.2f%%); bytes: %lu",
1055  (uint64_t) ntv->stream_id, stat.current_packets,
1056  stat.current_drop_packets, percent, stat.current_bytes);
1057 
1058  SC_ATOMIC_ADD(total_packets, stat.current_packets);
1059  SC_ATOMIC_ADD(total_drops, stat.current_drop_packets);
1060  SC_ATOMIC_ADD(total_tallied, 1);
1061 
1062  if (SC_ATOMIC_GET(total_tallied) == NapatechGetNumConfiguredStreams()) {
1063  if (SC_ATOMIC_GET(total_drops) > 0)
1064  percent = (((double) SC_ATOMIC_GET(total_drops)) / (SC_ATOMIC_GET(total_packets)
1065  + SC_ATOMIC_GET(total_drops))) * 100;
1066 
1067  SCLogInfo(" ");
1068  SCLogInfo("--- Total Packets: %ld Total Dropped: %ld (%5.2f%%)",
1069  SC_ATOMIC_GET(total_packets), SC_ATOMIC_GET(total_drops), percent);
1070 
1071 #ifdef NAPATECH_ENABLE_BYPASS
1072  SCLogInfo("--- BypassCB - Total: %ld, UDP: %ld, TCP: %ld, Unhandled: %ld",
1073  SC_ATOMIC_GET(flow_callback_cnt),
1074  SC_ATOMIC_GET(flow_callback_udp_pkts),
1075  SC_ATOMIC_GET(flow_callback_tcp_pkts),
1076  SC_ATOMIC_GET(flow_callback_unhandled_pkts));
1077 #endif
1078  }
1079 }
1080 
1081 /**
1082  * \brief Deinitializes the NAPATECH card.
1083  * \param tv pointer to ThreadVars
1084  * \param data pointer that gets cast into NapatechThreadVars
1085  */
1086 TmEcode NapatechStreamThreadDeinit(ThreadVars *tv, void *data)
1087 {
1088  SCEnter();
1089  NapatechThreadVars *ntv = (NapatechThreadVars *) data;
1090 
1091  SCLogDebug("Closing Napatech Stream: %d", ntv->stream_id);
1092  NT_NetRxClose(ntv->rx_stream);
1093 
1094  SCReturnInt(TM_ECODE_OK);
1095 }
1096 
1097 /**
1098  * \brief This function passes off to link type decoders.
1099  *
1100  * NapatechDecode decodes packets from Napatech and passes
1101  * them off to the proper link type decoder.
1102  *
1103  * \param t pointer to ThreadVars
1104  * \param p pointer to the current packet
1105  * \param data pointer that gets cast into DecodeThreadVars
1106  */
1107 TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data)
1108 {
1109  SCEnter();
1110 
1111  DecodeThreadVars *dtv = (DecodeThreadVars *) data;
1112 
1113  BUG_ON(PKT_IS_PSEUDOPKT(p));
1114 
1115  // update counters
1116  DecodeUpdatePacketCounters(tv, dtv, p);
1117 
1118  switch (p->datalink) {
1119  case LINKTYPE_ETHERNET:
1120  DecodeEthernet(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p));
1121  break;
1122  default:
1124  "Datalink type %" PRId32 " not yet supported in module NapatechDecode",
1125  p->datalink);
1126  break;
1127  }
1128 
1129  PacketDecodeFinalize(tv, dtv, p);
1130  SCReturnInt(TM_ECODE_OK);
1131 }
1132 
1133 /**
1134  * \brief Initialization of Napatech Thread.
1135  *
1136  * \param t pointer to ThreadVars
1137  * \param initdata - unused.
1138  * \param data pointer that gets cast into DecoderThreadVars
1139  */
1140 TmEcode NapatechDecodeThreadInit(ThreadVars *tv, const void *initdata, void **data)
1141 {
1142  SCEnter();
1143  DecodeThreadVars *dtv = NULL;
1144  dtv = DecodeThreadVarsAlloc(tv);
1145  if (dtv == NULL) {
1146  SCReturnInt(TM_ECODE_FAILED);
1147  }
1148 
1149  DecodeRegisterPerfCounters(dtv, tv);
1150  *data = (void *) dtv;
1151  SCReturnInt(TM_ECODE_OK);
1152 }
1153 
1154 /**
1155  * \brief Deinitialization of Napatech Thread.
1156  *
1157  * \param tv pointer to ThreadVars
1158  * \param data pointer that gets cast into DecoderThreadVars
1159  */
1160 TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data)
1161 {
1162  if (data != NULL) {
1163  DecodeThreadVarsFree(tv, data);
1164  }
1165  SCReturnInt(TM_ECODE_OK);
1166 }
1167 
1168 #endif /* HAVE_NAPATECH */