source-napatech.c
1 /* Copyright (C) 2012-2020 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 
18 /**
19  * \file
20  *
21  * \author nPulse Technologies, LLC.
22  * \author Matt Keeler <mk@npulsetech.com>
23  *
24  * Support for NAPATECH adapter with the 3GD Driver/API.
25  * Requires libntapi from Napatech A/S.
26  *
27  */
28 #include "suricata-common.h"
29 #include "decode.h"
30 #include "packet.h"
31 #include "suricata.h"
32 #include "threadvars.h"
33 #include "util-datalink.h"
34 #include "util-optimize.h"
35 #include "tm-queuehandlers.h"
36 #include "tm-threads.h"
37 #include "tm-modules.h"
38 #include "util-privs.h"
39 #include "tmqh-packetpool.h"
40 #include "util-napatech.h"
41 #include "source-napatech.h"
42 
43 #ifndef HAVE_NAPATECH
44 
45 TmEcode NoNapatechSupportExit(ThreadVars*, const void*, void**);
46 
47 void TmModuleNapatechStreamRegister(void)
48 {
49  tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream";
50  tmm_modules[TMM_RECEIVENAPATECH].ThreadInit = NoNapatechSupportExit;
55 }
56 
57 void TmModuleNapatechDecodeRegister(void)
58 {
59  tmm_modules[TMM_DECODENAPATECH].name = "NapatechDecode";
60  tmm_modules[TMM_DECODENAPATECH].ThreadInit = NoNapatechSupportExit;
66 }
67 
68 TmEcode NoNapatechSupportExit(ThreadVars *tv, const void *initdata, void **data)
69 {
70  SCLogError(SC_ERR_NAPATECH_NOSUPPORT,
71  "Error creating thread %s: you do not have support for Napatech adapter "
72  "enabled please recompile with --enable-napatech",
73  tv->name);
74  exit(EXIT_FAILURE);
75 }
76 
77 #else /* Implied we do have NAPATECH support */
78 
79 
80 #include <numa.h>
81 #include <nt.h>
82 
83 extern int max_pending_packets;
84 
85 typedef struct NapatechThreadVars_
86 {
87  ThreadVars *tv;
88  NtNetStreamRx_t rx_stream;
89  uint16_t stream_id;
90  int hba;
91  TmSlot *slot;
92 } NapatechThreadVars;
93 
94 #ifdef NAPATECH_ENABLE_BYPASS
95 static int NapatechBypassCallback(Packet *p);
96 #endif
97 
98 TmEcode NapatechStreamThreadInit(ThreadVars *, const void *, void **);
100 TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot);
101 
102 TmEcode NapatechDecodeThreadInit(ThreadVars *, const void *, void **);
105 
106 /* These are used as the threads are exiting to get a comprehensive count of
107  * all the packets received and dropped.
108  */
109 SC_ATOMIC_DECLARE(uint64_t, total_packets);
110 SC_ATOMIC_DECLARE(uint64_t, total_drops);
111 SC_ATOMIC_DECLARE(uint16_t, total_tallied);
112 
113 /* Streams are counted as they are instantiated in order to know when all threads
114  * are running. */
115 SC_ATOMIC_DECLARE(uint16_t, stream_count);
116 
117 SC_ATOMIC_DECLARE(uint16_t, numa0_count);
118 SC_ATOMIC_DECLARE(uint16_t, numa1_count);
119 SC_ATOMIC_DECLARE(uint16_t, numa2_count);
120 SC_ATOMIC_DECLARE(uint16_t, numa3_count);
121 
122 SC_ATOMIC_DECLARE(uint64_t, flow_callback_cnt);
123 SC_ATOMIC_DECLARE(uint64_t, flow_callback_handled_pkts);
124 SC_ATOMIC_DECLARE(uint64_t, flow_callback_udp_pkts);
125 SC_ATOMIC_DECLARE(uint64_t, flow_callback_tcp_pkts);
126 SC_ATOMIC_DECLARE(uint64_t, flow_callback_unhandled_pkts);
127 
128 /**
129  * \brief Register the Napatech receiver (reader) module.
130  */
131 void TmModuleNapatechStreamRegister(void)
132 {
133  tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream";
142 
143  SC_ATOMIC_INIT(total_packets);
144  SC_ATOMIC_INIT(total_drops);
145  SC_ATOMIC_INIT(total_tallied);
146  SC_ATOMIC_INIT(stream_count);
147 
148  SC_ATOMIC_INIT(numa0_count);
149  SC_ATOMIC_INIT(numa1_count);
150  SC_ATOMIC_INIT(numa2_count);
151  SC_ATOMIC_INIT(numa3_count);
152 
153  SC_ATOMIC_INIT(flow_callback_cnt);
154  SC_ATOMIC_INIT(flow_callback_handled_pkts);
155  SC_ATOMIC_INIT(flow_callback_udp_pkts);
156  SC_ATOMIC_INIT(flow_callback_tcp_pkts);
157  SC_ATOMIC_INIT(flow_callback_unhandled_pkts);
158 }
159 
160 /**
161  * \brief Register the Napatech decoder module.
162  */
163 void TmModuleNapatechDecodeRegister(void)
164 {
165  tmm_modules[TMM_DECODENAPATECH].name = "NapatechDecode";
172 }
173 
174 #ifdef NAPATECH_ENABLE_BYPASS
175 /**
176  * \brief template of IPv4 header
177  */
178 struct ipv4_hdr
179 {
180  uint8_t version_ihl; /**< version and header length */
181  uint8_t type_of_service; /**< type of service */
182  uint16_t total_length; /**< length of packet */
183  uint16_t packet_id; /**< packet ID */
184  uint16_t fragment_offset; /**< fragmentation offset */
185  uint8_t time_to_live; /**< time to live */
186  uint8_t next_proto_id; /**< protocol ID */
187  uint16_t hdr_checksum; /**< header checksum */
188  uint32_t src_addr; /**< source address */
189  uint32_t dst_addr; /**< destination address */
190 } __attribute__ ((__packed__));
191 
192 /**
193  * \brief template of IPv6 header
194  */
195 struct ipv6_hdr
196 {
197  uint32_t vtc_flow; /**< IP version, traffic class & flow label. */
198  uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */
199  uint8_t proto; /**< Protocol, next header. */
200  uint8_t hop_limits; /**< Hop limits. */
201  uint8_t src_addr[16]; /**< IP address of source host. */
202  uint8_t dst_addr[16]; /**< IP address of destination host(s). */
203 } __attribute__ ((__packed__));
204 
205 /**
206  * \brief template of UDP header
207  */
208 struct udp_hdr
209 {
210  uint16_t src_port; /**< UDP source port. */
211  uint16_t dst_port; /**< UDP destination port. */
212  uint16_t dgram_len; /**< UDP datagram length */
213  uint16_t dgram_cksum; /**< UDP datagram checksum */
214 } __attribute__ ((__packed__));
215 
216 /**
217  * \brief template of TCP header
218  */
219 struct tcp_hdr
220 {
221  uint16_t src_port; /**< TCP source port. */
222  uint16_t dst_port; /**< TCP destination port. */
223  uint32_t sent_seq; /**< TX data sequence number. */
224  uint32_t recv_ack; /**< RX data acknowledgement sequence number. */
225  uint8_t data_off; /**< Data offset. */
226  uint8_t tcp_flags; /**< TCP flags */
227  uint16_t rx_win; /**< RX flow control window. */
228  uint16_t cksum; /**< TCP checksum. */
229  uint16_t tcp_urp; /**< TCP urgent pointer, if any. */
230 } __attribute__ ((__packed__));
231 
232 
233 /* The hardware will assign a "color" value indicating what filters are matched
234  * by a given packet. These constants indicate what bits are set in the color
235  * field for different protocols
236  *
237  */
238 #define RTE_PTYPE_L2_ETHER 0x10000000
239 #define RTE_PTYPE_L3_IPV4 0x01000000
240 #define RTE_PTYPE_L3_IPV6 0x04000000
241 #define RTE_PTYPE_L4_TCP 0x00100000
242 #define RTE_PTYPE_L4_UDP 0x00200000
243 
244 /* These masks are used to extract layer 3 and layer 4 protocol
245  * values from the color field in the packet descriptor.
246  */
247 #define RTE_PTYPE_L3_MASK 0x0f000000
248 #define RTE_PTYPE_L4_MASK 0x00f00000
249 
250 #define COLOR_IS_SPAN 0x00001000
251 
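/*
 * A minimal sketch of how these bits are used: the color value is
 * reassembled from the descriptor's color_hi/color_lo fields (see
 * ProgramFlow() below) and then masked to classify the packet.
 *
 *     uint32_t packet_type = ((dyn3->color_hi << 14) & 0xFFFFC000) | dyn3->color_lo;
 *     uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK;  // RTE_PTYPE_L3_IPV4 or _IPV6
 *     uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK;  // RTE_PTYPE_L4_TCP or _UDP
 *     int from_span   = (packet_type & COLOR_IS_SPAN) != 0;
 */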
252 static int is_inline = 0;
253 static int inline_port_map[MAX_PORTS] = { -1 };
254 
255 /**
256  * \brief Binds two ports together for inline operation.
257  *
258  * Records the pairing so that packets received on one port are transmitted on its peer.
259  *
260  * \param port one of the ports in a pairing.
261  * \param peer the other port in a pairing.
262  * \return 1 if the pairing was recorded, 0 if a pairing was already configured.
263  *
264  */
265 int NapatechSetPortmap(int port, int peer)
266 {
267  if ((inline_port_map[port] == -1) && (inline_port_map[peer] == -1)) {
268  inline_port_map[port] = peer;
269  inline_port_map[peer] = port;
270  } else {
271  SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG,
272  "Port pairing is already configured.");
273  return 0;
274  }
275  return 1;
276 }
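/*
 * Usage sketch: the port numbers below are hypothetical and would normally
 * come from the napatech inline port-pair configuration.
 *
 *     if (NapatechSetPortmap(0, 1) == 0) {
 *         SCLogError(SC_ERR_NAPATECH_PARSE_CONFIG,
 *                 "Ports 0 and 1 are already paired.");
 *     }
 */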
277 
278 /**
279  * \brief Returns the ID of the adapter
280  *
281  * Get the ID of an adapter on which a given port resides.
282  *
283  * \param port the port for which the adapter ID is requested.
284  * \return ID of the adapter.
285  *
286  */
287 int NapatechGetAdapter(uint8_t port)
288 {
289  static int port_adapter_map[MAX_PORTS] = { -1 };
290  int status;
291  NtInfo_t h_info; /* Info handle */
292  NtInfoStream_t h_info_stream; /* Info stream handle */
293 
294  if (unlikely(port_adapter_map[port] == -1)) {
295  if ((status = NT_InfoOpen(&h_info_stream, "ExampleInfo")) != NT_SUCCESS) {
296  NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
297  return -1;
298  }
299  /* Read the system info */
300  h_info.cmd = NT_INFO_CMD_READ_PORT_V9;
301  h_info.u.port_v9.portNo = (uint8_t) port;
302  if ((status = NT_InfoRead(h_info_stream, &h_info)) != NT_SUCCESS) {
303  /* Get the status code as text */
304  NAPATECH_ERROR(SC_ERR_NAPATECH_OPEN_FAILED, status);
305  NT_InfoClose(h_info_stream);
306  return -1;
307  }
308  port_adapter_map[port] = h_info.u.port_v9.data.adapterNo;
309  }
310  return port_adapter_map[port];
311 }
312 
313 /**
314  * \brief IPv4 4-tuple convenience structure
315  */
316 struct IPv4Tuple4
317 {
318  uint32_t sa; /*!< Source address */
319  uint32_t da; /*!< Destination address */
320  uint16_t sp; /*!< Source port */
321  uint16_t dp; /*!< Destination port */
322 };
323 
324 /**
325  * \brief IPv6 4-tuple convenience structure
326  */
327 struct IPv6Tuple4
328 {
329  uint8_t sa[16]; /*!< Source address */
330  uint8_t da[16]; /*!< Destination address */
331  uint16_t sp; /*!< Source port */
332  uint16_t dp; /*!< Destination port */
333 };
334 
335 
336 /**
337  * \brief Compares the byte order value of two IPv6 addresses.
338  *
339  *
340  * \param addr_a The first address to compare
341  * \param addr_b The second address to compare
342  *
343  * \return -1 if addr_a < addr_b
344  * 1 if addr_a > addr_b
345  * 0 if addr_a == addr_b
346  */
347 static int CompareIPv6Addr(uint8_t addr_a[16], uint8_t addr_b[16]) {
348  uint16_t pos;
349  for (pos = 0; pos < 16; ++pos) {
350  if (addr_a[pos] < addr_b[pos]) {
351  return -1;
352  } else if (addr_a[pos] > addr_b[pos]) {
353  return 1;
354  } /* else they are equal - check next position*/
355  }
356 
357  /* if we get here the addresses are equal */
358  return 0;
359 }
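/*
 * Usage sketch, mirroring how ProgramFlow() below decides whether the IPv6
 * source/destination addresses must be swapped before building a SPAN key:
 *
 *     uint32_t do_swap = (CompareIPv6Addr(pIPv6_hdr->src_addr, pIPv6_hdr->dst_addr) > 0);
 */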
360 
361 /**
362  * \brief Initializes the FlowStreams used to program flow data.
363  *
364  * Opens a FlowStream on the adapter associated with the rx port. This
365  * FlowStream is subsequently used to program the adapter with
366  * flows to bypass.
367  *
368  * \return the flow stream handle, NULL if failure.
369  */
370 static NtFlowStream_t InitFlowStream(int adapter, int stream_id)
371 {
372  int status;
373  NtFlowStream_t hFlowStream;
374 
375  NtFlowAttr_t attr;
376  char flow_name[80];
377 
378  NT_FlowOpenAttrInit(&attr);
379  NT_FlowOpenAttrSetAdapterNo(&attr, adapter);
380 
381  snprintf(flow_name, sizeof(flow_name), "Flow_stream_%d", stream_id );
382  SCLogDebug("Opening flow programming stream: %s", flow_name);
383  if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) {
384  SCLogWarning(SC_WARN_COMPATIBILITY,
385  "Napatech bypass functionality not supported by the FPGA version on adapter %d - disabling support.",
386  adapter);
387  return NULL;
388  }
389  return hFlowStream;
390 }
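/*
 * Usage sketch, mirroring the per-adapter setup done in NapatechPacketLoop()
 * below so a packet's rx port can be mapped to the right flow stream handle:
 *
 *     NtFlowStream_t flow_stream[MAX_ADAPTERS] = { 0 };
 *     for (int adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
 *         flow_stream[adapter] = InitFlowStream(adapter, stream_id);
 *     }
 */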
391 
392 /**
393  * \brief Programs a flow into the hardware flow tables on the Napatech adapter.
394  *
395  * Sets up the flow tables on the Napatech card so that subsequent packets
396  * from this flow are bypassed (or dropped) in hardware.
397  *
398  * \param p packet containing information about the flow to be bypassed
399  * \param is_inline indicates if Suricata is being run in inline mode.
400  *
401  * \return 1 on success, 0 on failure.
402  *
403  */
404 static int ProgramFlow(Packet *p, int is_inline)
405 {
406  NtFlow_t flow_match;
407  memset(&flow_match, 0, sizeof(flow_match));
408 
409  NapatechPacketVars *ntpv = &(p->ntpv);
410 
411  /*
412  * The hardware decoder will "color" the packets according to the protocols
413  * in the packet and the port the packet arrived on. packet_type gets
414  * these bits and we mask out layer3, layer4, and is_span to determine
415  * the protocols and if the packet is coming in from a SPAN port.
416  */
417  uint32_t packet_type = ((ntpv->dyn3->color_hi << 14) & 0xFFFFC000) | ntpv->dyn3->color_lo;
418  uint8_t *packet = (uint8_t *) ntpv->dyn3 + ntpv->dyn3->descrLength;
419 
420  uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK;
421  uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK;
422  uint32_t is_span = packet_type & COLOR_IS_SPAN;
423 
424  /*
425  * When we're programming flows for packets that arrive on a SPAN port,
426  * where upstream and downstream packets arrive on the same port,
427  * the hardware is configured to swap the source and dest
428  * fields if the src addr > dest addr. We need to program the
429  * flow tables to match, so we compare addresses and set
430  * do_swap accordingly.
431  */
432 
433  uint32_t do_swap = 0;
434 
435  SC_ATOMIC_ADD(flow_callback_cnt, 1);
436 
437  /* Only bypass TCP and UDP */
438  if (PKT_IS_TCP(p)) {
439  SC_ATOMIC_ADD(flow_callback_tcp_pkts, 1);
440  } else if (PKT_IS_UDP(p)) {
441  SC_ATOMIC_ADD(flow_callback_udp_pkts, 1);
442  } else {
443  SC_ATOMIC_ADD(flow_callback_unhandled_pkts, 1);
444  }
445 
446  struct IPv4Tuple4 v4Tuple;
447  struct IPv6Tuple4 v6Tuple;
448  struct ipv4_hdr *pIPv4_hdr = NULL;
449  struct ipv6_hdr *pIPv6_hdr = NULL;
450 
451  switch (layer3) {
452  case RTE_PTYPE_L3_IPV4:
453  {
454  pIPv4_hdr = (struct ipv4_hdr *) (packet + ntpv->dyn3->offset0);
455  if (!is_span) {
456  v4Tuple.sa = pIPv4_hdr->src_addr;
457  v4Tuple.da = pIPv4_hdr->dst_addr;
458  } else {
459  do_swap = (htonl(pIPv4_hdr->src_addr) > htonl(pIPv4_hdr->dst_addr));
460  if (!do_swap) {
461  /* already in order */
462  v4Tuple.sa = pIPv4_hdr->src_addr;
463  v4Tuple.da = pIPv4_hdr->dst_addr;
464  } else { /* swap */
465  v4Tuple.sa = pIPv4_hdr->dst_addr;
466  v4Tuple.da = pIPv4_hdr->src_addr;
467  }
468  }
469  break;
470  }
471  case RTE_PTYPE_L3_IPV6:
472  {
473  pIPv6_hdr = (struct ipv6_hdr *) (packet + ntpv->dyn3->offset0);
474  do_swap = (CompareIPv6Addr(pIPv6_hdr->src_addr, pIPv6_hdr->dst_addr) > 0);
475 
476  if (!is_span) {
477  memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
478  memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
479  } else {
480  /* sort src/dest address before programming */
481  if (!do_swap) {
482  /* already in order */
483  memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
484  memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
485  } else { /* swap the addresses */
486  memcpy(&(v6Tuple.sa), pIPv6_hdr->dst_addr, 16);
487  memcpy(&(v6Tuple.da), pIPv6_hdr->src_addr, 16);
488  }
489  }
490  break;
491  }
492  default:
493  {
494  return 0;
495  }
496  }
497 
498  switch (layer4) {
499  case RTE_PTYPE_L4_TCP:
500  {
501  struct tcp_hdr *tcp_hdr = (struct tcp_hdr *) (packet + ntpv->dyn3->offset1);
502  if (layer3 == RTE_PTYPE_L3_IPV4) {
503  if (!is_span) {
504  v4Tuple.dp = tcp_hdr->dst_port;
505  v4Tuple.sp = tcp_hdr->src_port;
506  flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
507  } else {
508  if (!do_swap) {
509  v4Tuple.sp = tcp_hdr->src_port;
510  v4Tuple.dp = tcp_hdr->dst_port;
511  } else {
512  v4Tuple.sp = tcp_hdr->dst_port;
513  v4Tuple.dp = tcp_hdr->src_port;
514  }
515  flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
516  }
517  memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
518  } else {
519  if (!is_span) {
520  v6Tuple.dp = tcp_hdr->dst_port;
521  v6Tuple.sp = tcp_hdr->src_port;
522  flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
523  } else {
524  if (!do_swap) {
525  v6Tuple.sp = tcp_hdr->src_port;
526  v6Tuple.dp = tcp_hdr->dst_port;
527  } else {
528  v6Tuple.dp = tcp_hdr->src_port;
529  v6Tuple.sp = tcp_hdr->dst_port;
530  }
531  flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
532  }
533  memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
534  }
535  flow_match.ipProtocolField = 6;
536  break;
537  }
538  case RTE_PTYPE_L4_UDP:
539  {
540  struct udp_hdr *udp_hdr = (struct udp_hdr *) (packet + ntpv->dyn3->offset1);
541  if (layer3 == RTE_PTYPE_L3_IPV4) {
542  if (!is_span) {
543  v4Tuple.dp = udp_hdr->dst_port;
544  v4Tuple.sp = udp_hdr->src_port;
545  flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
546  } else {
547  if (!do_swap) {
548  v4Tuple.sp = udp_hdr->src_port;
549  v4Tuple.dp = udp_hdr->dst_port;
550  } else {
551  v4Tuple.dp = udp_hdr->src_port;
552  v4Tuple.sp = udp_hdr->dst_port;
553  }
554  flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
555  }
556  memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
557  } else { /* layer3 is IPV6 */
558  if (!is_span) {
559  v6Tuple.dp = udp_hdr->dst_port;
560  v6Tuple.sp = udp_hdr->src_port;
561  flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
562  } else {
563  if (!do_swap) {
564  v6Tuple.sp = udp_hdr->src_port;
565  v6Tuple.dp = udp_hdr->dst_port;
566  } else {
567  v6Tuple.dp = udp_hdr->src_port;
568  v6Tuple.sp = udp_hdr->dst_port;
569  }
570  flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
571  }
572  memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
573  }
574  flow_match.ipProtocolField = 17;
575  break;
576  }
577  default:
578  {
579  return 0;
580  }
581  }
582 
583  flow_match.op = 1; /* program flow */
584  flow_match.gfi = 1; /* Generate FlowInfo records */
585  flow_match.tau = 1; /* tcp automatic unlearn */
586 
587  if (PacketCheckAction(p, ACTION_DROP)) {
588  flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
589  } else {
590  if (is_inline) {
591  flow_match.keySetId = NAPATECH_FLOWTYPE_PASS;
592  } else {
593  flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
594  }
595  }
596 
597  if (NT_FlowWrite(ntpv->flow_stream, &flow_match, -1) != NT_SUCCESS) {
598  if (!(suricata_ctl_flags & SURICATA_STOP)) {
599  SCLogError(SC_ERR_NAPATECH_OPEN_FAILED, "NT_FlowWrite failed!");
600  exit(EXIT_FAILURE);
601  }
602  }
603 
604  return 1;
605 }
606 
607 /**
608  * \brief Callback from Suricata when a flow that should be bypassed
609  * is identified.
610  */
611 
612 static int NapatechBypassCallback(Packet *p)
613 {
614  NapatechPacketVars *ntpv = &(p->ntpv);
615 
616  /*
617  * Since we don't know at this point what action will be taken,
618  * simply mark this packet as one that should be
619  * bypassed when it is returned by Suricata with a
620  * pass/drop verdict.
621  */
622  ntpv->bypass = 1;
623 
624  return 1;
625 }
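/*
 * Usage sketch: NapatechPacketLoop() below installs this callback on each
 * packet when hardware bypass is supported, so the flow is only programmed
 * once the verdict is known (in NapatechReleasePacket()):
 *
 *     p->BypassPacketsFlow = (NapatechIsBypassSupported() ? NapatechBypassCallback : NULL);
 */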
626 
627 #endif
628 
629 /**
630  * \brief Initialize the Napatech receiver thread. Generates a single
631  * NapatechThreadVars structure for each thread; this will
632  * contain a NtNetStreamRx_t stream handle which is used when the
633  * thread executes to acquire the packets.
634  *
635  * \param tv pointer to the ThreadVars for this thread
636  * \param initdata stream configuration (NapatechStreamDevConf) passed in
637  * from the user/runmode.
638  *
639  * For now, we assume that we have only a single name for the NAPATECH
640  * adapter.
641  *
642  * \param data output pointer that gets populated with the NapatechThreadVars.
643  *
644  */
645 TmEcode NapatechStreamThreadInit(ThreadVars *tv, const void *initdata, void **data)
646 {
647  SCEnter();
648  struct NapatechStreamDevConf *conf = (struct NapatechStreamDevConf *) initdata;
649  uint16_t stream_id = conf->stream_id;
650  *data = NULL;
651 
652  NapatechThreadVars *ntv = SCCalloc(1, sizeof (NapatechThreadVars));
653  if (unlikely(ntv == NULL)) {
654  FatalError(SC_ERR_FATAL,
655  "Failed to allocate memory for NAPATECH thread vars.");
656  }
657 
658  memset(ntv, 0, sizeof (NapatechThreadVars));
659  ntv->stream_id = stream_id;
660  ntv->tv = tv;
661  ntv->hba = conf->hba;
662 
664 
665  SCLogDebug("Started processing packets from NAPATECH Stream: %lu", ntv->stream_id);
666 
667  *data = (void *) ntv;
668  SCReturnInt(TM_ECODE_OK);
669 }
670 
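/*
 * Sketch of the initdata this thread expects; the runmode normally builds
 * it, and the values shown here are hypothetical:
 *
 *     struct NapatechStreamDevConf *conf = SCCalloc(1, sizeof(struct NapatechStreamDevConf));
 *     if (conf != NULL) {
 *         conf->stream_id = 0;  // hypothetical stream number
 *         conf->hba = -1;       // host buffer allowance disabled
 *     }
 */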
671 /**
672  * \brief Callback to indicate that the packet buffer can be returned to the hardware.
673  *
674  * Called when Suricata is done processing the packet. Before the Napatech
675  * packet buffer is released, this checks the action to see if the packet
676  * should be dropped and programs the flow hardware if the flow is to be bypassed.
677  *
678  *
679  * \param p Packet to return to the system.
680  *
681  */
682 static void NapatechReleasePacket(struct Packet_ *p)
683 {
684  /*
685  * If the packet is to be dropped we need to set the wirelength
686  * before releasing the Napatech buffer back to NTService.
687  */
688 #ifdef NAPATECH_ENABLE_BYPASS
689  if (is_inline && PacketCheckAction(p, ACTION_DROP)) {
690  p->ntpv.dyn3->wireLength = 0;
691  }
692 
693  /*
694  * If this flow is to be programmed for hardware bypass we do it now. This is done
695  * here because the action is not available in the packet structure at the time of the
696  * bypass callback and it needs to be done before we release the packet structure.
697  */
698  if (p->ntpv.bypass == 1) {
699  ProgramFlow(p, is_inline);
700  }
701 #endif
702 
703  NT_NetRxRelease(p->ntpv.rx_stream, p->ntpv.nt_packet_buf);
704  PacketFreeOrRelease(p);
705 }
706 
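/*
 * Usage sketch: NapatechPacketLoop() below installs this callback together
 * with the buffer handle before handing the packet to the pipeline:
 *
 *     p->ReleasePacket = NapatechReleasePacket;
 *     p->ntpv.nt_packet_buf = packet_buffer;
 *     p->ntpv.stream_id = ntv->stream_id;
 */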
707 /**
708  * \brief Returns the NUMA node associated with the currently running thread.
709  *
710  * \return ID of the NUMA node.
711  *
712  */
713 static int GetNumaNode(void)
714 {
715  int cpu = 0;
716  int node = 0;
717 
718 #if defined(__linux__)
719  cpu = sched_getcpu();
720  node = numa_node_of_cpu(cpu);
721 #else
723  "Auto configuration of NUMA node is not supported on this OS.");
724 #endif
725 
726  return node;
727 }
728 
729 /**
730  * \brief Outputs hints on the optimal host-buffer configuration to aid tuning.
731  *
732  * \param log_level log level at which the recommendations should be logged.
733  *
734  */
735 static void RecommendNUMAConfig(SCLogLevel log_level)
736 {
737  char string0[16];
738  char string1[16];
739  char string2[16];
740  char string3[16];
741  int set_cpu_affinity = 0;
742 
743  if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) {
744  set_cpu_affinity = 0;
745  }
746 
747  if (set_cpu_affinity) {
748  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
749  "Minimum host buffers that should be defined in ntservice.ini:");
750 
751  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, " NUMA Node 0: %d",
752  (SC_ATOMIC_GET(numa0_count)));
753 
754  if (numa_max_node() >= 1)
755  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
756  " NUMA Node 1: %d ", (SC_ATOMIC_GET(numa1_count)));
757 
758  if (numa_max_node() >= 2)
759  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
760  " NUMA Node 2: %d ", (SC_ATOMIC_GET(numa2_count)));
761 
762  if (numa_max_node() >= 3)
763  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
764  " NUMA Node 3: %d ", (SC_ATOMIC_GET(numa3_count)));
765 
766  snprintf(string0, 16, "[%d, 16, 0]", SC_ATOMIC_GET(numa0_count));
767  snprintf(string1, 16, (numa_max_node() >= 1 ? ",[%d, 16, 1]" : ""),
768  SC_ATOMIC_GET(numa1_count));
769  snprintf(string2, 16, (numa_max_node() >= 2 ? ",[%d, 16, 2]" : ""),
770  SC_ATOMIC_GET(numa2_count));
771  snprintf(string3, 16, (numa_max_node() >= 3 ? ",[%d, 16, 3]" : ""),
772  SC_ATOMIC_GET(numa3_count));
773 
774  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
775  "E.g.: HostBuffersRx=%s%s%s%s", string0, string1, string2,
776  string3);
777  } else if (log_level == SC_LOG_ERROR) {
778  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
779  "Or, try running /opt/napatech3/bin/ntpl -e \"delete=all\" to clean-up stream NUMA config.");
780  }
781 }
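/*
 * Sketch of the resulting ntservice.ini setting; the buffer counts are
 * hypothetical and should be replaced with the per-node values logged above:
 *
 *     HostBuffersRx=[4, 16, 0],[4, 16, 1]
 */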
782 
783 /**
784  * \brief Main Napatech packet processing loop
785  *
786  * \param tv pointer to the ThreadVars for this thread
787  * \param data Pointer to NapatechThreadVars with data specific to Napatech
788  * \param slot TMSlot where this instance is running.
789  *
790  */
791 TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot)
792 {
793  int32_t status;
794  char error_buffer[100];
795  uint64_t pkt_ts;
796  NtNetBuf_t packet_buffer;
797  NapatechThreadVars *ntv = (NapatechThreadVars *) data;
798  uint64_t hba_pkt_drops = 0;
799  uint64_t hba_byte_drops = 0;
800  uint16_t hba_pkt = 0;
801  int numa_node = -1;
802  int set_cpu_affinity = 0;
803  int closer = 0;
804  int is_autoconfig = 0;
805 
806  /* This just keeps the startup output more orderly. */
807  usleep(200000 * ntv->stream_id);
808 
809 #ifdef NAPATECH_ENABLE_BYPASS
810  NtFlowStream_t flow_stream[MAX_ADAPTERS] = { 0 };
811 
812  /* Get a FlowStream handle for each adapter so we can efficiently find the
813  * correct handle corresponding to the port on which a packet is received.
814  */
815  int adapter = 0;
816  for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
817  flow_stream[adapter] = InitFlowStream(adapter, ntv->stream_id);
818  }
819 #endif
820 
821  if (ConfGetBool("napatech.auto-config", &is_autoconfig) == 0) {
822  is_autoconfig = 0;
823  }
824 
825  if (is_autoconfig) {
826  numa_node = GetNumaNode();
827  switch (numa_node) {
828  case 0:
829  SC_ATOMIC_ADD(numa0_count, 1);
830  break;
831  case 1:
832  SC_ATOMIC_ADD(numa1_count, 1);
833  break;
834  case 2:
835  SC_ATOMIC_ADD(numa2_count, 1);
836  break;
837  case 3:
838  SC_ATOMIC_ADD(numa3_count, 1);
839  break;
840  default:
841  break;
842  }
843 
844  if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) {
845  set_cpu_affinity = 0;
846  }
847 
848  if (set_cpu_affinity) {
849  NapatechSetupNuma(ntv->stream_id, numa_node);
850  }
851 
852  numa_node = GetNumaNode();
853  SC_ATOMIC_ADD(stream_count, 1);
854  if (SC_ATOMIC_GET(stream_count) == NapatechGetNumConfiguredStreams()) {
855 
856 #ifdef NAPATECH_ENABLE_BYPASS
857  if (ConfGetBool("napatech.inline", &is_inline) == 0) {
858  is_inline = 0;
859  }
860 
861  /* Initialize the port map before we setup traffic filters */
862  for (int i = 0; i < MAX_PORTS; ++i) {
863  inline_port_map[i] = -1;
864  }
865 #endif
866  /* The last thread to run sets up and deletes the streams */
867  status = NapatechSetupTraffic(NapatechGetNumFirstStream(),
868  NapatechGetNumLastStream());
869 
870  closer = 1;
871 
872  if (status == 0x20002061) {
874  "Check host buffer configuration in ntservice.ini.");
875  RecommendNUMAConfig(SC_LOG_ERROR);
876  exit(EXIT_FAILURE);
877 
878  } else if (status == 0x20000008) {
880  "Check napatech.ports in the suricata config file.");
881  }
882  RecommendNUMAConfig(SC_LOG_PERF);
883  SCLogNotice("Napatech packet input engine started.");
884  }
885  } // is_autoconfig
886 
887  SCLogInfo(
888  "Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d stream: %3u ",
889  sched_getcpu(), numa_node, ntv->stream_id);
890 
891  if (ntv->hba > 0) {
892  char *s_hbad_pkt = SCCalloc(1, 32);
893  if (unlikely(s_hbad_pkt == NULL)) {
894  FatalError(SC_ERR_FATAL,
895  "Failed to allocate memory for NAPATECH stream counter.");
896  }
897  snprintf(s_hbad_pkt, 32, "nt%d.hba_drop", ntv->stream_id);
898  hba_pkt = StatsRegisterCounter(s_hbad_pkt, tv);
899  StatsSetupPrivate(tv);
900  StatsSetUI64(tv, hba_pkt, 0);
901  }
902  SCLogDebug("Opening NAPATECH Stream: %lu for processing", ntv->stream_id);
903 
904  if ((status = NT_NetRxOpen(&(ntv->rx_stream), "SuricataStream",
905  NT_NET_INTERFACE_PACKET, ntv->stream_id, ntv->hba)) != NT_SUCCESS) {
906 
907  NAPATECH_ERROR(SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED, status);
908  SCFree(ntv);
909  SCReturnInt(TM_ECODE_FAILED);
910  }
911  TmSlot *s = (TmSlot *) slot;
912  ntv->slot = s->slot_next;
913 
914  // Indicate that the thread is actually running its application level code (i.e., it can poll
915  // packets)
916  TmThreadsSetFlag(tv, THV_RUNNING);
917 
918  while (!(suricata_ctl_flags & SURICATA_STOP)) {
919  /* make sure we have at least one packet in the packet pool, to prevent
920  * us from alloc'ing packets at line rate */
921  PacketPoolWait();
922 
923  /* Napatech returns packets 1 at a time */
924  status = NT_NetRxGet(ntv->rx_stream, &packet_buffer, 1000);
925  if (unlikely(
926  status == NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) {
927  if (status == NT_STATUS_TIMEOUT) {
928  TmThreadsCaptureHandleTimeout(tv, NULL);
929  }
930  continue;
931  } else if (unlikely(status != NT_SUCCESS)) {
932  NT_ExplainError(status, error_buffer, sizeof(error_buffer) - 1);
933  SCLogInfo("Failed to read from Napatech Stream %d: %s",
934  ntv->stream_id, error_buffer);
935  break;
936  }
937 
938  Packet *p = PacketGetFromQueueOrAlloc();
939 #ifdef NAPATECH_ENABLE_BYPASS
940  p->ntpv.bypass = 0;
941 #endif
942 
943  p->ntpv.rx_stream = ntv->rx_stream;
944 
945  if (unlikely(p == NULL)) {
946  NT_NetRxRelease(ntv->rx_stream, packet_buffer);
947  SCReturnInt(TM_ECODE_FAILED);
948  }
949 
950  pkt_ts = NT_NET_GET_PKT_TIMESTAMP(packet_buffer);
951 
952  /*
953  * Handle the different timestamp forms that the napatech cards could use
954  * - NT_TIMESTAMP_TYPE_NATIVE is not supported due to having a base
955  * of 0 as opposed to NATIVE_UNIX which has a base of 1/1/1970
956  */
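 /*
 * Worked example for NT_TIMESTAMP_TYPE_NATIVE_UNIX (10 ns units, so
 * 100000000 ticks per second); the sample value is hypothetical:
 *
 *     pkt_ts  = 160000000042;               // 1600 s + 420 ns since 1/1/1970
 *     tv_sec  = pkt_ts / 100000000;         // 1600
 *     tv_usec = (pkt_ts % 100000000) / 100; // 0 (remainder of 42 ticks rounds down)
 */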
957  switch (NT_NET_GET_PKT_TIMESTAMP_TYPE(packet_buffer)) {
958  case NT_TIMESTAMP_TYPE_NATIVE_UNIX:
959  p->ts.tv_sec = pkt_ts / 100000000;
960  p->ts.tv_usec = ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0);
961  break;
962  case NT_TIMESTAMP_TYPE_PCAP:
963  p->ts.tv_sec = pkt_ts >> 32;
964  p->ts.tv_usec = pkt_ts & 0xFFFFFFFF;
965  break;
966  case NT_TIMESTAMP_TYPE_PCAP_NANOTIME:
967  p->ts.tv_sec = pkt_ts >> 32;
968  p->ts.tv_usec = ((pkt_ts & 0xFFFFFFFF) / 1000) + ((pkt_ts % 1000) > 500 ? 1 : 0);
969  break;
970  case NT_TIMESTAMP_TYPE_NATIVE_NDIS:
971  /* number of seconds between 1/1/1601 and 1/1/1970 */
972  p->ts.tv_sec = (pkt_ts / 100000000) - 11644473600;
973  p->ts.tv_usec = ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0);
974  break;
975  default:
976  SCLogError(SC_ERR_NAPATECH_TIMESTAMP_TYPE_NOT_SUPPORTED,
977  "Packet from Napatech Stream: %u does not have a supported timestamp format",
978  ntv->stream_id);
979  NT_NetRxRelease(ntv->rx_stream, packet_buffer);
980  SCReturnInt(TM_ECODE_FAILED);
981  }
982 
983  if (unlikely(ntv->hba > 0)) {
984  NtNetRx_t stat_cmd;
985  stat_cmd.cmd = NT_NETRX_READ_CMD_STREAM_DROP;
986  /* Update drop counter */
987  if (unlikely((status = NT_NetRxRead(ntv->rx_stream, &stat_cmd)) != NT_SUCCESS)) {
989  SCLogInfo("Couldn't retrieve drop statistics from the RX stream: %u",
990  ntv->stream_id);
991  } else {
992  hba_pkt_drops = stat_cmd.u.streamDrop.pktsDropped;
993 
994  StatsSetUI64(tv, hba_pkt, hba_pkt_drops);
995  }
996  StatsSyncCountersIfSignalled(tv);
997  }
998 
999 #ifdef NAPATECH_ENABLE_BYPASS
1000  p->ntpv.dyn3 = _NT_NET_GET_PKT_DESCR_PTR_DYN3(packet_buffer);
1001  p->BypassPacketsFlow = (NapatechIsBypassSupported() ? NapatechBypassCallback : NULL);
1002  NT_NET_SET_PKT_TXPORT(packet_buffer, inline_port_map[p->ntpv.dyn3->rxPort]);
1003  p->ntpv.flow_stream = flow_stream[NapatechGetAdapter(p->ntpv.dyn3->rxPort)];
1004 
1005 #endif
1006 
1007  p->ReleasePacket = NapatechReleasePacket;
1008  p->ntpv.nt_packet_buf = packet_buffer;
1009  p->ntpv.stream_id = ntv->stream_id;
1010  p->datalink = LINKTYPE_ETHERNET;
1011 
1012  if (unlikely(PacketSetData(p, (uint8_t *)NT_NET_GET_PKT_L2_PTR(packet_buffer), NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) {
1013  TmqhOutputPacketpool(ntv->tv, p);
1014  SCReturnInt(TM_ECODE_FAILED);
1015  }
1016 
1017  if (unlikely(TmThreadsSlotProcessPkt(ntv->tv, ntv->slot, p) != TM_ECODE_OK)) {
1018  SCReturnInt(TM_ECODE_FAILED);
1019  }
1020 
1021  /*
1022  * At this point the packet and the Napatech Packet Buffer have been returned
1023  * to the system in the NapatechReleasePacket() Callback.
1024  */
1025 
1026  StatsSyncCountersIfSignalled(tv);
1027  } // while
1028 
1029  if (closer) {
1030  NapatechDeleteFilters();
1031  }
1032 
1033  if (unlikely(ntv->hba > 0)) {
1034  SCLogInfo("Host Buffer Allowance Drops - pkts: %ld, bytes: %ld", hba_pkt_drops, hba_byte_drops);
1035  }
1036 
1037  SCReturnInt(TM_ECODE_OK);
1038 }
1039 
1040 /**
1041  * \brief Print some stats to the log at program exit.
1042  *
1043  * \param tv Pointer to ThreadVars.
1044  * \param data Pointer to the NapatechThreadVars for this thread.
1045  */
1046 void NapatechStreamThreadExitStats(ThreadVars *tv, void *data)
1047 {
1048  NapatechThreadVars *ntv = (NapatechThreadVars *) data;
1049  NapatechCurrentStats stat = NapatechGetCurrentStats(ntv->stream_id);
1050 
1051  double percent = 0;
1052  if (stat.current_drop_packets > 0)
1053  percent = (((double) stat.current_drop_packets)
1054  / (stat.current_packets + stat.current_drop_packets)) * 100;
1055 
1056  SCLogInfo("nt%lu - pkts: %lu; drop: %lu (%5.2f%%); bytes: %lu",
1057  (uint64_t) ntv->stream_id, stat.current_packets,
1058  stat.current_drop_packets, percent, stat.current_bytes);
1059 
1060  SC_ATOMIC_ADD(total_packets, stat.current_packets);
1061  SC_ATOMIC_ADD(total_drops, stat.current_drop_packets);
1062  SC_ATOMIC_ADD(total_tallied, 1);
1063 
1064  if (SC_ATOMIC_GET(total_tallied) == NapatechGetNumConfiguredStreams()) {
1065  if (SC_ATOMIC_GET(total_drops) > 0)
1066  percent = (((double) SC_ATOMIC_GET(total_drops)) / (SC_ATOMIC_GET(total_packets)
1067  + SC_ATOMIC_GET(total_drops))) * 100;
1068 
1069  SCLogInfo(" ");
1070  SCLogInfo("--- Total Packets: %ld Total Dropped: %ld (%5.2f%%)",
1071  SC_ATOMIC_GET(total_packets), SC_ATOMIC_GET(total_drops), percent);
1072 
1073 #ifdef NAPATECH_ENABLE_BYPASS
1074  SCLogInfo("--- BypassCB - Total: %ld, UDP: %ld, TCP: %ld, Unhandled: %ld",
1075  SC_ATOMIC_GET(flow_callback_cnt),
1076  SC_ATOMIC_GET(flow_callback_udp_pkts),
1077  SC_ATOMIC_GET(flow_callback_tcp_pkts),
1078  SC_ATOMIC_GET(flow_callback_unhandled_pkts));
1079 #endif
1080  }
1081 }
1082 
1083 /**
1084  * \brief Deinitializes the NAPATECH card.
1085  * \param tv pointer to ThreadVars
1086  * \param data pointer that gets cast into NapatechThreadVars
1087  */
1088 TmEcode NapatechStreamThreadDeinit(ThreadVars *tv, void *data)
1089 {
1090  SCEnter();
1091  NapatechThreadVars *ntv = (NapatechThreadVars *) data;
1092 
1093  SCLogDebug("Closing Napatech Stream: %d", ntv->stream_id);
1094  NT_NetRxClose(ntv->rx_stream);
1095 
1096  SCReturnInt(TM_ECODE_OK);
1097 }
1098 
1099 /**
1100  * \brief This function passes off to link type decoders.
1101  *
1102  * NapatechDecode decodes packets from Napatech and passes
1103  * them off to the proper link type decoder.
1104  *
1105  * \param tv pointer to ThreadVars
1106  * \param p pointer to the current packet
1107  * \param data pointer that gets cast into DecodeThreadVars
1108  */
1109 TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data)
1110 {
1111  SCEnter();
1112 
1113  DecodeThreadVars *dtv = (DecodeThreadVars *) data;
1114 
1115  BUG_ON(PKT_IS_PSEUDOPKT(p));
1116 
1117  // update counters
1118  DecodeUpdatePacketCounters(tv, dtv, p);
1119 
1120  switch (p->datalink) {
1121  case LINKTYPE_ETHERNET:
1122  DecodeEthernet(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p));
1123  break;
1124  default:
1126  "Datalink type %" PRId32 " not yet supported in module NapatechDecode",
1127  p->datalink);
1128  break;
1129  }
1130 
1131  PacketDecodeFinalize(tv, dtv, p);
1132  SCReturnInt(TM_ECODE_OK);
1133 }
1134 
1135 /**
1136  * \brief Initialization of Napatech Thread.
1137  *
1138  * \param tv pointer to ThreadVars
1139  * \param initdata - unused.
1140  * \param data pointer that gets cast into DecoderThreadVars
1141  */
1142 TmEcode NapatechDecodeThreadInit(ThreadVars *tv, const void *initdata, void **data)
1143 {
1144  SCEnter();
1145  DecodeThreadVars *dtv = NULL;
1146  dtv = DecodeThreadVarsAlloc(tv);
1147  if (dtv == NULL) {
1148  SCReturnInt(TM_ECODE_FAILED);
1149  }
1150 
1151  DecodeRegisterPerfCounters(dtv, tv);
1152  *data = (void *) dtv;
1153  SCReturnInt(TM_ECODE_OK);
1154 }
1155 
1156 /**
1157  * \brief Deinitialization of Napatech Thread.
1158  *
1159  * \param tv pointer to ThreadVars
1160  * \param data pointer that gets cast into DecoderThreadVars
1161  */
1162 TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data)
1163 {
1164  if (data != NULL) {
1165  DecodeThreadVarsFree(tv, data);
1166  }
1167  SCReturnInt(TM_ECODE_OK);
1168 }
1169 
1170 #endif /* HAVE_NAPATECH */