source-napatech.c
1 /* Copyright (C) 2012-2020 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 
18 /**
19  * \file
20  *
21  * \author nPulse Technologies, LLC.
22  * \author Matt Keeler <mk@npulsetech.com>
23  *
24  * Support for NAPATECH adapter with the 3GD Driver/API.
25  * Requires libntapi from Napatech A/S.
26  *
27  */
28 #include "suricata-common.h"
29 #include "decode.h"
30 #include "packet.h"
31 #include "suricata.h"
32 #include "threadvars.h"
33 #include "util-datalink.h"
34 #include "util-optimize.h"
35 #include "tm-queuehandlers.h"
36 #include "tm-threads.h"
37 #include "tm-modules.h"
38 #include "util-privs.h"
39 #include "tmqh-packetpool.h"
40 #include "util-napatech.h"
41 #include "source-napatech.h"
42 
43 #ifndef HAVE_NAPATECH
44 
45 TmEcode NoNapatechSupportExit(ThreadVars*, const void*, void**);
46 
46 
47 void TmModuleNapatechStreamRegister(void)
48 {
49  tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream";
50  tmm_modules[TMM_RECEIVENAPATECH].ThreadInit = NoNapatechSupportExit;
55 }
56 
57 void TmModuleNapatechDecodeRegister(void)
58 {
59  tmm_modules[TMM_DECODENAPATECH].name = "NapatechDecode";
60  tmm_modules[TMM_DECODENAPATECH].ThreadInit = NoNapatechSupportExit;
66 }
67 
68 TmEcode NoNapatechSupportExit(ThreadVars *tv, const void *initdata, void **data)
69 {
70  SCLogError("Error creating thread %s: you do not have support for Napatech adapter "
71  "enabled please recompile with --enable-napatech",
72  tv->name);
73  exit(EXIT_FAILURE);
74 }
75 
76 #else /* Implied we do have NAPATECH support */
77 
78 
79 #include <numa.h>
80 #include <nt.h>
81 
82 extern int max_pending_packets;
83 
84 typedef struct NapatechThreadVars_
85 {
86  ThreadVars *tv;
87  NtNetStreamRx_t rx_stream;
88  uint16_t stream_id;
89  int hba;
90  TmSlot *slot;
91 } NapatechThreadVars;
92 
93 #ifdef NAPATECH_ENABLE_BYPASS
94 static int NapatechBypassCallback(Packet *p);
95 #endif
96 
97 TmEcode NapatechStreamThreadInit(ThreadVars *, const void *, void **);
99 TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot);
100 
101 TmEcode NapatechDecodeThreadInit(ThreadVars *, const void *, void **);
104 
105 /* These are used as the threads are exiting to get a comprehensive count of
106  * all the packets received and dropped.
107  */
108 SC_ATOMIC_DECLARE(uint64_t, total_packets);
109 SC_ATOMIC_DECLARE(uint64_t, total_drops);
110 SC_ATOMIC_DECLARE(uint16_t, total_tallied);
111 
112 /* Streams are counted as they are instantiated in order to know when all threads
113  * are running*/
114 SC_ATOMIC_DECLARE(uint16_t, stream_count);
115 
116 SC_ATOMIC_DECLARE(uint16_t, numa0_count);
117 SC_ATOMIC_DECLARE(uint16_t, numa1_count);
118 SC_ATOMIC_DECLARE(uint16_t, numa2_count);
119 SC_ATOMIC_DECLARE(uint16_t, numa3_count);
120 
121 SC_ATOMIC_DECLARE(uint64_t, flow_callback_cnt);
122 SC_ATOMIC_DECLARE(uint64_t, flow_callback_handled_pkts);
123 SC_ATOMIC_DECLARE(uint64_t, flow_callback_udp_pkts);
124 SC_ATOMIC_DECLARE(uint64_t, flow_callback_tcp_pkts);
125 SC_ATOMIC_DECLARE(uint64_t, flow_callback_unhandled_pkts);
126 
127 /**
128  * \brief Register the Napatech receiver (reader) module.
129  */
130 void TmModuleNapatechStreamRegister(void)
131 {
132  tmm_modules[TMM_RECEIVENAPATECH].name = "NapatechStream";
141 
142  SC_ATOMIC_INIT(total_packets);
143  SC_ATOMIC_INIT(total_drops);
144  SC_ATOMIC_INIT(total_tallied);
145  SC_ATOMIC_INIT(stream_count);
146 
147  SC_ATOMIC_INIT(numa0_count);
148  SC_ATOMIC_INIT(numa1_count);
149  SC_ATOMIC_INIT(numa2_count);
150  SC_ATOMIC_INIT(numa3_count);
151 
152  SC_ATOMIC_INIT(flow_callback_cnt);
153  SC_ATOMIC_INIT(flow_callback_handled_pkts);
154  SC_ATOMIC_INIT(flow_callback_udp_pkts);
155  SC_ATOMIC_INIT(flow_callback_tcp_pkts);
156  SC_ATOMIC_INIT(flow_callback_unhandled_pkts);
157 }
158 
159 /**
160  * \brief Register the Napatech decoder module.
161  */
162 void TmModuleNapatechDecodeRegister(void)
163 {
164  tmm_modules[TMM_DECODENAPATECH].name = "NapatechDecode";
171 }
172 
173 #ifdef NAPATECH_ENABLE_BYPASS
174 /**
175  * \brief template of IPv4 header
176  */
177 struct ipv4_hdr
178 {
179  uint8_t version_ihl; /**< version and header length */
180  uint8_t type_of_service; /**< type of service */
181  uint16_t total_length; /**< length of packet */
182  uint16_t packet_id; /**< packet ID */
183  uint16_t fragment_offset; /**< fragmentation offset */
184  uint8_t time_to_live; /**< time to live */
185  uint8_t next_proto_id; /**< protocol ID */
186  uint16_t hdr_checksum; /**< header checksum */
187  uint32_t src_addr; /**< source address */
188  uint32_t dst_addr; /**< destination address */
189 } __attribute__ ((__packed__));
190 
191 /**
192  * \brief template of IPv6 header
193  */
194 struct ipv6_hdr
195 {
196  uint32_t vtc_flow; /**< IP version, traffic class & flow label. */
197  uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */
198  uint8_t proto; /**< Protocol, next header. */
199  uint8_t hop_limits; /**< Hop limits. */
200  uint8_t src_addr[16]; /**< IP address of source host. */
201  uint8_t dst_addr[16]; /**< IP address of destination host(s). */
202 } __attribute__ ((__packed__));
203 
204 /**
205  * \brief template of UDP header
206  */
207 struct udp_hdr
208 {
209  uint16_t src_port; /**< UDP source port. */
210  uint16_t dst_port; /**< UDP destination port. */
211  uint16_t dgram_len; /**< UDP datagram length */
212  uint16_t dgram_cksum; /**< UDP datagram checksum */
213 } __attribute__ ((__packed__));
214 
215 /**
216  * \brief template of TCP header
217  */
218 struct tcp_hdr
219 {
220  uint16_t src_port; /**< TCP source port. */
221  uint16_t dst_port; /**< TCP destination port. */
222  uint32_t sent_seq; /**< TX data sequence number. */
223  uint32_t recv_ack; /**< RX data acknowledgement sequence number. */
224  uint8_t data_off; /**< Data offset. */
225  uint8_t tcp_flags; /**< TCP flags */
226  uint16_t rx_win; /**< RX flow control window. */
227  uint16_t cksum; /**< TCP checksum. */
228  uint16_t tcp_urp; /**< TCP urgent pointer, if any. */
229 } __attribute__ ((__packed__));
230 
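/* Illustrative sketch, not part of the original source: the packed header
 * templates above are overlaid directly on the packet bytes. The layer-3
 * offset comes from the DYN3 packet descriptor, exactly as ProgramFlow()
 * does further below. The helper name is hypothetical and only makes sense
 * for packets the hardware has colored as IPv4. */
static inline struct ipv4_hdr *NapatechExampleIPv4Hdr(const Packet *p)
{
    const NapatechPacketVars *ntpv = &(p->ntpv);
    uint8_t *pkt = (uint8_t *)ntpv->dyn3 + ntpv->dyn3->descrLength;
    return (struct ipv4_hdr *)(pkt + ntpv->dyn3->offset0);
}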
231 
232 /* The hardware will assign a "color" value indicating what filters are matched
233  * by a given packet. These constants indicate what bits are set in the color
234  * field for different protocols
235  *
236  */
237 #define RTE_PTYPE_L2_ETHER 0x10000000
238 #define RTE_PTYPE_L3_IPV4 0x01000000
239 #define RTE_PTYPE_L3_IPV6 0x04000000
240 #define RTE_PTYPE_L4_TCP 0x00100000
241 #define RTE_PTYPE_L4_UDP 0x00200000
242 
243 /* These masks are used to extract layer 3 and layer 4 protocol
244  * values from the color field in the packet descriptor.
245  */
246 #define RTE_PTYPE_L3_MASK 0x0f000000
247 #define RTE_PTYPE_L4_MASK 0x00f00000
248 
249 #define COLOR_IS_SPAN 0x00001000
250 
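/* Illustrative sketch, not part of the original source: once packet_type has
 * been assembled from the descriptor's color_hi/color_lo fields (see
 * ProgramFlow() below), the masks above classify the packet, e.g.: */
static inline int NapatechExampleIsIPv4Tcp(uint32_t packet_type)
{
    return ((packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) &&
           ((packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP);
}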
251 static int is_inline = 0;
252 static int inline_port_map[MAX_PORTS] = { -1 };
253 
254 /**
255  * \brief Binds two ports together for inline operation.
256  *
257  * Records the pairing so that each port can later be mapped to its peer.
 258  *
 259  * \param port one of the ports in a pairing.
 260  * \param peer the other port in a pairing.
 261  * \return 1 on success, 0 if the pairing was already configured.
262  *
263  */
264 int NapatechSetPortmap(int port, int peer)
265 {
266  if ((inline_port_map[port] == -1) && (inline_port_map[peer] == -1)) {
267  inline_port_map[port] = peer;
268  inline_port_map[peer] = port;
269  } else {
270  SCLogError("Port pairing is already configured.");
271  return 0;
272  }
273  return 1;
274 }
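/* Illustrative usage sketch, not part of the original source: the port
 * numbers below are hypothetical; in practice the pairings come from the
 * napatech inline configuration. */
static inline void NapatechExamplePairPorts(void)
{
    if (NapatechSetPortmap(0, 1) == 0) {
        SCLogError("Ports 0 and 1 could not be paired.");
    }
}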
275 
276 /**
277  * \brief Returns the ID of the adapter
278  *
279  * Get the ID of an adapter on which a given port resides.
280  *
281  * \param port for which adapter ID is requested.
282  * \return ID of the adapter.
283  *
284  */
285 int NapatechGetAdapter(uint8_t port)
286 {
287  static int port_adapter_map[MAX_PORTS] = { -1 };
288  int status;
289  NtInfo_t h_info; /* Info handle */
290  NtInfoStream_t h_info_stream; /* Info stream handle */
291 
292  if (unlikely(port_adapter_map[port] == -1)) {
293  if ((status = NT_InfoOpen(&h_info_stream, "ExampleInfo")) != NT_SUCCESS) {
294  NAPATECH_ERROR(status);
295  return -1;
296  }
297  /* Read the system info */
298  h_info.cmd = NT_INFO_CMD_READ_PORT_V9;
299  h_info.u.port_v9.portNo = (uint8_t) port;
300  if ((status = NT_InfoRead(h_info_stream, &h_info)) != NT_SUCCESS) {
301  /* Get the status code as text */
302  NAPATECH_ERROR(status);
303  NT_InfoClose(h_info_stream);
304  return -1;
305  }
306  port_adapter_map[port] = h_info.u.port_v9.data.adapterNo;
307  }
308  return port_adapter_map[port];
309 }
310 
311 /**
312  * \brief IPv4 4-tuple convenience structure
313  */
314 struct IPv4Tuple4
315 {
316  uint32_t sa; /*!< Source address */
317  uint32_t da; /*!< Destination address */
318  uint16_t sp; /*!< Source port */
319  uint16_t dp; /*!< Destination port */
320 };
321 
322 /**
323  * \brief IPv6 4-tuple convenience structure
324  */
325 struct IPv6Tuple4
326 {
327  uint8_t sa[16]; /*!< Source address */
328  uint8_t da[16]; /*!< Destination address */
329  uint16_t sp; /*!< Source port */
330  uint16_t dp; /*!< Destination port */
331 };
332 
333 
334 /**
335  * \brief Compares the byte order value of two IPv6 addresses.
336  *
337  *
338  * \param addr_a The first address to compare
339  * \param addr_b The second address to compare
340  *
341  * \return -1 if addr_a < addr_b
342  * 1 if addr_a > addr_b
343  * 0 if addr_a == addr_b
344  */
345 static int CompareIPv6Addr(uint8_t addr_a[16], uint8_t addr_b[16]) {
346  uint16_t pos;
347  for (pos = 0; pos < 16; ++pos) {
348  if (addr_a[pos] < addr_b[pos]) {
349  return -1;
350  } else if (addr_a[pos] > addr_b[pos]) {
351  return 1;
352  } /* else they are equal - check next position*/
353  }
354 
355  /* if we get here the addresses are equal */
356  return 0;
357 }
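/* Illustrative usage sketch, not part of the original source: deciding
 * whether source/destination must be swapped before programming a span-port
 * flow, mirroring the check made in ProgramFlow() below. */
static inline int NapatechExampleNeedSwap(struct ipv6_hdr *ip6)
{
    return CompareIPv6Addr(ip6->src_addr, ip6->dst_addr) > 0;
}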
358 
359 /**
360  * \brief Initializes the FlowStreams used to program flow data.
361  *
362  * Opens a FlowStream on the adapter associated with the rx port. This
363  * FlowStream is subsequently used to program the adapter with
364  * flows to bypass.
365  *
366  * \return the flow stream handle, NULL if failure.
367  */
368 static NtFlowStream_t InitFlowStream(int adapter, int stream_id)
369 {
370  int status;
371  NtFlowStream_t hFlowStream;
372 
373  NtFlowAttr_t attr;
374  char flow_name[80];
375 
376  NT_FlowOpenAttrInit(&attr);
377  NT_FlowOpenAttrSetAdapterNo(&attr, adapter);
378 
379  snprintf(flow_name, sizeof(flow_name), "Flow_stream_%d", stream_id );
380  SCLogDebug("Opening flow programming stream: %s", flow_name);
381  if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) {
382  SCLogWarning("Napatech bypass functionality not supported by the FPGA version on adapter "
383  "%d - disabling support.",
384  adapter);
385  return NULL;
386  }
387  return hFlowStream;
388 }
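/* Illustrative usage sketch, not part of the original source: the packet loop
 * below opens one flow-programming stream per adapter so the handle matching
 * a packet's rx port can be looked up via NapatechGetAdapter(). A NULL handle
 * means the FPGA image on that adapter does not support bypass. The helper
 * name is hypothetical; streams must point at MAX_ADAPTERS entries. */
static inline void NapatechExampleOpenFlowStreams(NtFlowStream_t *streams, uint16_t stream_id)
{
    for (int adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
        streams[adapter] = InitFlowStream(adapter, stream_id);
    }
}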
389 
390 /**
391  * \brief Programs a flow into the Napatech adapter for hardware bypass.
 392  *
 393  * Sets up the Flow tables on the Napatech card so that subsequent packets
 394  * from this flow are bypassed (or dropped) on the hardware.
395  *
396  * \param p packet containing information about the flow to be bypassed
397  * \param is_inline indicates if Suricata is being run in inline mode.
398  *
399  * \return Error code indicating success (1) or failure (0).
400  *
401  */
402 static int ProgramFlow(Packet *p, int is_inline)
403 {
404  NtFlow_t flow_match;
405  memset(&flow_match, 0, sizeof(flow_match));
406 
407  NapatechPacketVars *ntpv = &(p->ntpv);
408 
409  /*
410  * The hardware decoder will "color" the packets according to the protocols
411  * in the packet and the port the packet arrived on. packet_type gets
412  * these bits and we mask out layer3, layer4, and is_span to determine
413  * the protocols and if the packet is coming in from a SPAN port.
414  */
415  uint32_t packet_type = ((ntpv->dyn3->color_hi << 14) & 0xFFFFC000) | ntpv->dyn3->color_lo;
416  uint8_t *packet = (uint8_t *) ntpv->dyn3 + ntpv->dyn3->descrLength;
417 
418  uint32_t layer3 = packet_type & RTE_PTYPE_L3_MASK;
419  uint32_t layer4 = packet_type & RTE_PTYPE_L4_MASK;
420  uint32_t is_span = packet_type & COLOR_IS_SPAN;
421 
422  /*
423  * When we're programming the flows to arrive on a span port,
424  * where upstream and downstream packets arrive on the same port,
425  * the hardware is configured to swap the source and dest
426  * fields if the src addr > dest addr. We need to program the
427  * flow tables to match. We'll compare addresses and set
428  * do_swap accordingly.
429  */
430 
431  uint32_t do_swap = 0;
432 
433  SC_ATOMIC_ADD(flow_callback_cnt, 1);
434 
435  /* Only bypass TCP and UDP */
436  if (PKT_IS_TCP(p)) {
437  SC_ATOMIC_ADD(flow_callback_tcp_pkts, 1);
438  } else if PKT_IS_UDP(p) {
439  SC_ATOMIC_ADD(flow_callback_udp_pkts, 1);
440  } else {
441  SC_ATOMIC_ADD(flow_callback_unhandled_pkts, 1);
442  }
443 
444  struct IPv4Tuple4 v4Tuple;
445  struct IPv6Tuple4 v6Tuple;
446  struct ipv4_hdr *pIPv4_hdr = NULL;
447  struct ipv6_hdr *pIPv6_hdr = NULL;
448 
449  switch (layer3) {
450  case RTE_PTYPE_L3_IPV4:
451  {
452  pIPv4_hdr = (struct ipv4_hdr *) (packet + ntpv->dyn3->offset0);
453  if (!is_span) {
454  v4Tuple.sa = pIPv4_hdr->src_addr;
455  v4Tuple.da = pIPv4_hdr->dst_addr;
456  } else {
457  do_swap = (htonl(pIPv4_hdr->src_addr) > htonl(pIPv4_hdr->dst_addr));
458  if (!do_swap) {
459  /* already in order */
460  v4Tuple.sa = pIPv4_hdr->src_addr;
461  v4Tuple.da = pIPv4_hdr->dst_addr;
462  } else { /* swap */
463  v4Tuple.sa = pIPv4_hdr->dst_addr;
464  v4Tuple.da = pIPv4_hdr->src_addr;
465  }
466  }
467  break;
468  }
469  case RTE_PTYPE_L3_IPV6:
470  {
471  pIPv6_hdr = (struct ipv6_hdr *) (packet + ntpv->dyn3->offset0);
472  do_swap = (CompareIPv6Addr(pIPv6_hdr->src_addr, pIPv6_hdr->dst_addr) > 0);
473 
474  if (!is_span) {
475  memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
476  memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
477  } else {
478  /* sort src/dest address before programming */
479  if (!do_swap) {
480  /* already in order */
481  memcpy(&(v6Tuple.sa), pIPv6_hdr->src_addr, 16);
482  memcpy(&(v6Tuple.da), pIPv6_hdr->dst_addr, 16);
483  } else { /* swap the addresses */
484  memcpy(&(v6Tuple.sa), pIPv6_hdr->dst_addr, 16);
485  memcpy(&(v6Tuple.da), pIPv6_hdr->src_addr, 16);
486  }
487  }
488  break;
489  }
490  default:
491  {
492  return 0;
493  }
494  }
495 
496  switch (layer4) {
497  case RTE_PTYPE_L4_TCP:
498  {
499  struct tcp_hdr *tcp_hdr = (struct tcp_hdr *) (packet + ntpv->dyn3->offset1);
500  if (layer3 == RTE_PTYPE_L3_IPV4) {
501  if (!is_span) {
502  v4Tuple.dp = tcp_hdr->dst_port;
503  v4Tuple.sp = tcp_hdr->src_port;
504  flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
505  } else {
506  if (!do_swap) {
507  v4Tuple.sp = tcp_hdr->src_port;
508  v4Tuple.dp = tcp_hdr->dst_port;
509  } else {
510  v4Tuple.sp = tcp_hdr->dst_port;
511  v4Tuple.dp = tcp_hdr->src_port;
512  }
513  flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
514  }
515  memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
516  } else {
517  if (!is_span) {
518  v6Tuple.dp = tcp_hdr->dst_port;
519  v6Tuple.sp = tcp_hdr->src_port;
520  flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
521  } else {
522  if (!do_swap) {
523  v6Tuple.sp = tcp_hdr->src_port;
524  v6Tuple.dp = tcp_hdr->dst_port;
525  } else {
526  v6Tuple.dp = tcp_hdr->src_port;
527  v6Tuple.sp = tcp_hdr->dst_port;
528  }
529  flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
530  }
531  memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
532  }
533  flow_match.ipProtocolField = 6;
534  break;
535  }
536  case RTE_PTYPE_L4_UDP:
537  {
538  struct udp_hdr *udp_hdr = (struct udp_hdr *) (packet + ntpv->dyn3->offset1);
539  if (layer3 == RTE_PTYPE_L3_IPV4) {
540  if (!is_span) {
541  v4Tuple.dp = udp_hdr->dst_port;
542  v4Tuple.sp = udp_hdr->src_port;
543  flow_match.keyId = NAPATECH_KEYTYPE_IPV4;
544  } else {
545  if (!do_swap) {
546  v4Tuple.sp = udp_hdr->src_port;
547  v4Tuple.dp = udp_hdr->dst_port;
548  } else {
549  v4Tuple.dp = udp_hdr->src_port;
550  v4Tuple.sp = udp_hdr->dst_port;
551  }
552  flow_match.keyId = NAPATECH_KEYTYPE_IPV4_SPAN;
553  }
554  memcpy(&(flow_match.keyData), &v4Tuple, sizeof(v4Tuple));
555  } else { /* layer3 is IPV6 */
556  if (!is_span) {
557  v6Tuple.dp = udp_hdr->dst_port;
558  v6Tuple.sp = udp_hdr->src_port;
559  flow_match.keyId = NAPATECH_KEYTYPE_IPV6;
560  } else {
561  if (!do_swap) {
562  v6Tuple.sp = udp_hdr->src_port;
563  v6Tuple.dp = udp_hdr->dst_port;
564  } else {
565  v6Tuple.dp = udp_hdr->src_port;
566  v6Tuple.sp = udp_hdr->dst_port;
567  }
568  flow_match.keyId = NAPATECH_KEYTYPE_IPV6_SPAN;
569  }
570  memcpy(&(flow_match.keyData), &v6Tuple, sizeof(v6Tuple));
571  }
572  flow_match.ipProtocolField = 17;
573  break;
574  }
575  default:
576  {
577  return 0;
578  }
579  }
580 
581  flow_match.op = 1; /* program flow */
582  flow_match.gfi = 1; /* Generate FlowInfo records */
583  flow_match.tau = 1; /* tcp automatic unlearn */
584 
585  if (PacketCheckAction(p, ACTION_DROP)) {
586  flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
587  } else {
588  if (is_inline) {
589  flow_match.keySetId = NAPATECH_FLOWTYPE_PASS;
590  } else {
591  flow_match.keySetId = NAPATECH_FLOWTYPE_DROP;
592  }
593  }
594 
595  if (NT_FlowWrite(ntpv->flow_stream, &flow_match, -1) != NT_SUCCESS) {
596  if (!(suricata_ctl_flags & SURICATA_STOP)) {
597  SCLogError("NT_FlowWrite failed!.");
598  exit(EXIT_FAILURE);
599  }
600  }
601 
602  return 1;
603 }
604 
605 /**
606  * \brief Callback from Suricata when a flow that should be bypassed
607  * is identified.
608  */
609 
610 static int NapatechBypassCallback(Packet *p)
611 {
612  NapatechPacketVars *ntpv = &(p->ntpv);
613 
614  /*
615  * Since, at this point, we don't know what action to take,
616  * simply mark this packet as one that should be
617  * bypassed when the packet is returned by suricata with a
618  * pass/drop verdict.
619  */
620  ntpv->bypass = 1;
621 
622  return 1;
623 }
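/* Illustrative sketch, not part of the original source: the engine reaches
 * this callback through the Packet's BypassPacketsFlow pointer, which the
 * packet loop below sets to NapatechBypassCallback() when hardware bypass is
 * supported, roughly:
 *
 *   if (p->BypassPacketsFlow != NULL)
 *       p->BypassPacketsFlow(p);
 *
 * The actual flow programming is deferred to NapatechReleasePacket(), once
 * the verdict for the packet is known. */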
624 
625 #endif
626 
627 /**
628  * \brief Initialize the Napatech receiver thread, generate a single
629  * NapatechThreadVar structure for each thread, this will
630  * contain a NtNetStreamRx_t stream handle which is used when the
631  * thread executes to acquire the packets.
632  *
633  * \param tv Thread variable to ThreadVars
634  * \param initdata Initial data to the adapter passed from the user,
635  * this is processed by the user.
636  *
637  * For now, we assume that we have only a single name for the NAPATECH
638  * adapter.
639  *
640  * \param data output pointer that gets populated with the NapatechThreadVars for this thread
641  *
642  */
643 TmEcode NapatechStreamThreadInit(ThreadVars *tv, const void *initdata, void **data)
644 {
645  SCEnter();
646  struct NapatechStreamDevConf *conf = (struct NapatechStreamDevConf *) initdata;
647  uint16_t stream_id = conf->stream_id;
648  *data = NULL;
649 
650  NapatechThreadVars *ntv = SCCalloc(1, sizeof (NapatechThreadVars));
651  if (unlikely(ntv == NULL)) {
652  FatalError("Failed to allocate memory for NAPATECH thread vars.");
653  }
654 
655  memset(ntv, 0, sizeof (NapatechThreadVars));
656  ntv->stream_id = stream_id;
657  ntv->tv = tv;
658  ntv->hba = conf->hba;
659 
660  DatalinkSetGlobalType(LINKTYPE_ETHERNET);
661 
662  SCLogDebug("Started processing packets from NAPATECH Stream: %lu", ntv->stream_id);
663 
664  *data = (void *) ntv;
665  SCReturnInt(TM_ECODE_OK);
666 }
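/* Illustrative sketch, not part of the original source: initdata is a
 * NapatechStreamDevConf (see source-napatech.h). A hypothetical caller would
 * fill it along these lines before spawning the receive thread; the values
 * shown are hypothetical. */
static inline struct NapatechStreamDevConf NapatechExampleDevConf(void)
{
    struct NapatechStreamDevConf example_conf = {
        .stream_id = 0, /* hardware stream to read from (hypothetical) */
        .hba = -1,      /* host buffer allowance (hypothetical value) */
    };
    return example_conf;
}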
667 
668 /**
669  * \brief Callback to indicate that the packet buffer can be returned to the hardware.
670  *
671  * Called when Suricata is done processing the packet. Before the Packet is returned,
 672  * this checks the verdict to see if the packet should be dropped, programs the
 673  * flow hardware if the flow is to be bypassed, and then releases the Napatech packet buffer.
674  *
675  *
676  * \param p Packet to return to the system.
677  *
678  */
679 static void NapatechReleasePacket(struct Packet_ *p)
680 {
681  /*
682  * If the packet is to be dropped we need to set the wirelength
683  * before releasing the Napatech buffer back to NTService.
684  */
685 #ifdef NAPATECH_ENABLE_BYPASS
686  if (is_inline && PacketCheckAction(p, ACTION_DROP)) {
687  p->ntpv.dyn3->wireLength = 0;
688  }
689 
690  /*
691  * If this flow is to be programmed for hardware bypass we do it now. This is done
692  * here because the action is not available in the packet structure at the time of the
693  * bypass callback and it needs to be done before we release the packet structure.
694  */
695  if (p->ntpv.bypass == 1) {
696  ProgramFlow(p, is_inline);
697  }
698 #endif
699 
700  NT_NetRxRelease(p->ntpv.rx_stream, p->ntpv.nt_packet_buf);
701  PacketFreeOrRelease(p);
702 }
703 
704 /**
705  * \brief Returns the NUMA node associated with the currently running thread.
706  *
707  * \return ID of the NUMA node.
708  *
709  */
710 static int GetNumaNode(void)
711 {
712  int cpu = 0;
713  int node = 0;
714 
715 #if defined(__linux__)
716  cpu = sched_getcpu();
717  node = numa_node_of_cpu(cpu);
718 #else
719  SCLogWarning("Auto configuration of NUMA node is not supported on this OS.");
720 #endif
721 
722  return node;
723 }
724 
725 /**
726  * \brief Outputs hints on the optimal host-buffer configuration to aid tuning.
727  *
728  * \param log_level log level at which the recommendations are output.
729  *
730  */
731 static void RecommendNUMAConfig(SCLogLevel log_level)
732 {
733  char string0[16];
734  char string1[16];
735  char string2[16];
736  char string3[16];
737  int set_cpu_affinity = 0;
738 
739  if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) {
740  set_cpu_affinity = 0;
741  }
742 
743  if (set_cpu_affinity) {
744  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
745  "Minimum host buffers that should be defined in ntservice.ini:");
746 
747  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__, " NUMA Node 0: %d",
748  (SC_ATOMIC_GET(numa0_count)));
749 
750  if (numa_max_node() >= 1)
751  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
752  " NUMA Node 1: %d ", (SC_ATOMIC_GET(numa1_count)));
753 
754  if (numa_max_node() >= 2)
755  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
756  " NUMA Node 2: %d ", (SC_ATOMIC_GET(numa2_count)));
757 
758  if (numa_max_node() >= 3)
759  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
760  " NUMA Node 3: %d ", (SC_ATOMIC_GET(numa3_count)));
761 
762  snprintf(string0, 16, "[%d, 16, 0]", SC_ATOMIC_GET(numa0_count));
763  snprintf(string1, 16, (numa_max_node() >= 1 ? ",[%d, 16, 1]" : ""),
764  SC_ATOMIC_GET(numa1_count));
765  snprintf(string2, 16, (numa_max_node() >= 2 ? ",[%d, 16, 2]" : ""),
766  SC_ATOMIC_GET(numa2_count));
767  snprintf(string3, 16, (numa_max_node() >= 3 ? ",[%d, 16, 3]" : ""),
768  SC_ATOMIC_GET(numa3_count));
769 
770  SCLog(log_level, __FILE__, __FUNCTION__, __LINE__,
771  "E.g.: HostBuffersRx=%s%s%s%s", string0, string1, string2,
772  string3);
773  } else if (log_level == SC_LOG_ERROR) {
774  SCLogError("Or, try running /opt/napatech3/bin/ntpl -e \"delete=all\" to clean-up stream "
775  "NUMA config.");
776  }
777 }
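/* Illustrative example, not part of the original source: with, say, four
 * streams pinned to NUMA node 0 and four to NUMA node 1, the recommendation
 * printed above corresponds to an ntservice.ini entry along the lines of
 *
 *   HostBuffersRx=[4, 16, 0],[4, 16, 1]
 *
 * where the first value is the host-buffer count for the node and the last
 * value is the NUMA node, matching the "[%d, 16, n]" format used in the
 * snprintf() calls above. The counts here are hypothetical. */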
778 
779 /**
780  * \brief Main Napatech packet processing loop
781  *
782  * \param tv Thread variable to ThreadVars
783  * \param data Pointer to NapatechThreadVars with data specific to Napatech
784  * \param slot TMSlot where this instance is running.
785  *
786  */
787 TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot)
788 {
789  int32_t status;
790  char error_buffer[100];
791  uint64_t pkt_ts;
792  NtNetBuf_t packet_buffer;
793  NapatechThreadVars *ntv = (NapatechThreadVars *) data;
794  uint64_t hba_pkt_drops = 0;
795  uint64_t hba_byte_drops = 0;
796  uint16_t hba_pkt = 0;
797  int numa_node = -1;
798  int set_cpu_affinity = 0;
799  int closer = 0;
800  int is_autoconfig = 0;
801 
802  /* This just keeps the startup output more orderly. */
803  usleep(200000 * ntv->stream_id);
804 
805 #ifdef NAPATECH_ENABLE_BYPASS
806  NtFlowStream_t flow_stream[MAX_ADAPTERS] = { 0 };
807 
808  /* Get a FlowStream handle for each adapter so we can efficiently find the
809  * correct handle corresponding to the port on which a packet is received.
810  */
811  int adapter = 0;
812  for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
813  flow_stream[adapter] = InitFlowStream(adapter, ntv->stream_id);
814  }
815 #endif
816 
817  if (ConfGetBool("napatech.auto-config", &is_autoconfig) == 0) {
818  is_autoconfig = 0;
819  }
820 
821  if (is_autoconfig) {
822  numa_node = GetNumaNode();
823  switch (numa_node) {
824  case 0:
825  SC_ATOMIC_ADD(numa0_count, 1);
826  break;
827  case 1:
828  SC_ATOMIC_ADD(numa1_count, 1);
829  break;
830  case 2:
831  SC_ATOMIC_ADD(numa2_count, 1);
832  break;
833  case 3:
834  SC_ATOMIC_ADD(numa3_count, 1);
835  break;
836  default:
837  break;
838  }
839 
840  if (ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity) != 1) {
841  set_cpu_affinity = 0;
842  }
843 
844  if (set_cpu_affinity) {
845  NapatechSetupNuma(ntv->stream_id, numa_node);
846  }
847 
848  numa_node = GetNumaNode();
849  SC_ATOMIC_ADD(stream_count, 1);
850  if (SC_ATOMIC_GET(stream_count) == NapatechGetNumConfiguredStreams()) {
851 
852 #ifdef NAPATECH_ENABLE_BYPASS
853  if (ConfGetBool("napatech.inline", &is_inline) == 0) {
854  is_inline = 0;
855  }
856 
857  /* Initialize the port map before we setup traffic filters */
858  for (int i = 0; i < MAX_PORTS; ++i) {
859  inline_port_map[i] = -1;
860  }
861 #endif
862  /* The last thread to run sets up and deletes the streams */
863  status = NapatechSetupTraffic(NapatechGetNumFirstStream(),
864  NapatechGetNumLastStream());
865 
866  closer = 1;
867 
868  if (status == 0x20002061) {
869  SCLogError("Check host buffer configuration in ntservice.ini.");
870  RecommendNUMAConfig(SC_LOG_ERROR);
871  exit(EXIT_FAILURE);
872 
873  } else if (status == 0x20000008) {
874  FatalError("Check napatech.ports in the suricata config file.");
875  }
876  RecommendNUMAConfig(SC_LOG_PERF);
877  SCLogNotice("Napatech packet input engine started.");
878  }
879  } // is_autoconfig
880 
881  SCLogInfo(
882  "Napatech Packet Loop Started - cpu: %3d, cpu_numa: %3d stream: %3u ",
883  sched_getcpu(), numa_node, ntv->stream_id);
884 
885  if (ntv->hba > 0) {
886  char *s_hbad_pkt = SCCalloc(1, 32);
887  if (unlikely(s_hbad_pkt == NULL)) {
888  FatalError("Failed to allocate memory for NAPATECH stream counter.");
889  }
890  snprintf(s_hbad_pkt, 32, "nt%d.hba_drop", ntv->stream_id);
891  hba_pkt = StatsRegisterCounter(s_hbad_pkt, tv);
893  StatsSetUI64(tv, hba_pkt, 0);
894  }
895  SCLogDebug("Opening NAPATECH Stream: %lu for processing", ntv->stream_id);
896 
897  if ((status = NT_NetRxOpen(&(ntv->rx_stream), "SuricataStream",
898  NT_NET_INTERFACE_PACKET, ntv->stream_id, ntv->hba)) != NT_SUCCESS) {
899 
900  NAPATECH_ERROR(status);
901  SCFree(ntv);
902  SCReturnInt(TM_ECODE_FAILED);
903  }
904  TmSlot *s = (TmSlot *) slot;
905  ntv->slot = s->slot_next;
906 
907  // Indicate that the thread is actually running its application level code (i.e., it can poll
908  // packets)
909  TmThreadsSetFlag(tv, THV_RUNNING);
910 
911  while (!(suricata_ctl_flags & SURICATA_STOP)) {
912  /* make sure we have at least one packet in the packet pool, to prevent
913  * us from alloc'ing packets at line rate */
914  PacketPoolWait();
915 
916  /* Napatech returns packets 1 at a time */
917  status = NT_NetRxGet(ntv->rx_stream, &packet_buffer, 1000);
918  if (unlikely(
919  status == NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) {
920  if (status == NT_STATUS_TIMEOUT) {
921  TmThreadsCaptureHandleTimeout(tv, NULL);
922  }
923  continue;
924  } else if (unlikely(status != NT_SUCCESS)) {
925  NAPATECH_ERROR(status);
926  SCLogInfo("Failed to read from Napatech Stream %d: %s",
927  ntv->stream_id, error_buffer);
928  break;
929  }
930 
931  Packet *p = PacketGetFromQueueOrAlloc();
932 #ifdef NAPATECH_ENABLE_BYPASS
933  p->ntpv.bypass = 0;
934 #endif
935 
936  p->ntpv.rx_stream = ntv->rx_stream;
937 
938  if (unlikely(p == NULL)) {
939  NT_NetRxRelease(ntv->rx_stream, packet_buffer);
940  SCReturnInt(TM_ECODE_FAILED);
941  }
942 
943  pkt_ts = NT_NET_GET_PKT_TIMESTAMP(packet_buffer);
944 
945  /*
946  * Handle the different timestamp forms that the napatech cards could use
947  * - NT_TIMESTAMP_TYPE_NATIVE is not supported due to having a base
948  * of 0 as opposed to NATIVE_UNIX which has a base of 1/1/1970
949  */
950  switch (NT_NET_GET_PKT_TIMESTAMP_TYPE(packet_buffer)) {
951  case NT_TIMESTAMP_TYPE_NATIVE_UNIX:
952  p->ts = SCTIME_FROM_SECS(pkt_ts / 100000000);
953  p->ts += SCTIME_FROM_USECS(
954  ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0));
955  break;
956  case NT_TIMESTAMP_TYPE_PCAP:
957  p->ts = SCTIME_FROM_SECS(pkt_ts >> 32);
958  p->ts += SCTIME_FROM_USECS(pkt_ts & 0xFFFFFFFF);
959  break;
960  case NT_TIMESTAMP_TYPE_PCAP_NANOTIME:
961  p->ts = SCTIME_FROM_SECS(pkt_ts >> 32);
962  p->ts += SCTIME_FROM_USECS(
963  ((pkt_ts & 0xFFFFFFFF) / 1000) + ((pkt_ts % 1000) > 500 ? 1 : 0));
964  break;
965  case NT_TIMESTAMP_TYPE_NATIVE_NDIS:
966  /* number of seconds between 1/1/1601 and 1/1/1970 */
967  p->ts = SCTIME_FROM_SECS((pkt_ts / 100000000) - 11644473600);
968  p->ts += SCTIME_FROM_USECS(
969  ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0));
970  break;
971  default:
972  SCLogError("Packet from Napatech Stream: %u does not have a supported timestamp "
973  "format",
974  ntv->stream_id);
975  NT_NetRxRelease(ntv->rx_stream, packet_buffer);
976  SCReturnInt(TM_ECODE_FAILED);
977  }
978 
979  if (unlikely(ntv->hba > 0)) {
980  NtNetRx_t stat_cmd;
981  stat_cmd.cmd = NT_NETRX_READ_CMD_STREAM_DROP;
982  /* Update drop counter */
983  if (unlikely((status = NT_NetRxRead(ntv->rx_stream, &stat_cmd)) != NT_SUCCESS)) {
984  NAPATECH_ERROR(status);
985  SCLogInfo("Couldn't retrieve drop statistics from the RX stream: %u",
986  ntv->stream_id);
987  } else {
988  hba_pkt_drops = stat_cmd.u.streamDrop.pktsDropped;
989 
990  StatsSetUI64(tv, hba_pkt, hba_pkt_drops);
991  }
993  }
994 
995 #ifdef NAPATECH_ENABLE_BYPASS
996  p->ntpv.dyn3 = _NT_NET_GET_PKT_DESCR_PTR_DYN3(packet_buffer);
997  p->BypassPacketsFlow = (NapatechIsBypassSupported() ? NapatechBypassCallback : NULL);
998  NT_NET_SET_PKT_TXPORT(packet_buffer, inline_port_map[p->ntpv.dyn3->rxPort]);
999  p->ntpv.flow_stream = flow_stream[NapatechGetAdapter(p->ntpv.dyn3->rxPort)];
1000 
1001 #endif
1002 
1003  p->ReleasePacket = NapatechReleasePacket;
1004  p->ntpv.nt_packet_buf = packet_buffer;
1005  p->ntpv.stream_id = ntv->stream_id;
1006  p->datalink = LINKTYPE_ETHERNET;
1007 
1008  if (unlikely(PacketSetData(p, (uint8_t *)NT_NET_GET_PKT_L2_PTR(packet_buffer), NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) {
1009  TmqhOutputPacketpool(ntv->tv, p);
1010  SCReturnInt(TM_ECODE_FAILED);
1011  }
1012 
1013  if (unlikely(TmThreadsSlotProcessPkt(ntv->tv, ntv->slot, p) != TM_ECODE_OK)) {
1014  SCReturnInt(TM_ECODE_FAILED);
1015  }
1016 
1017  /*
1018  * At this point the packet and the Napatech Packet Buffer have been returned
1019  * to the system in the NapatechReleasePacket() Callback.
1020  */
1021 
1022  StatsSyncCountersIfSignalled(tv);
1023  } // while
1024 
1025  if (closer) {
1026  NapatechDeleteFilters();
1027  }
1028 
1029  if (unlikely(ntv->hba > 0)) {
1030  SCLogInfo("Host Buffer Allowance Drops - pkts: %ld, bytes: %ld", hba_pkt_drops, hba_byte_drops);
1031  }
1032 
1033  SCReturnInt(TM_ECODE_OK);
1034 }
1035 
1036 /**
1037  * \brief Print some stats to the log at program exit.
1038  *
1039  * \param tv Pointer to ThreadVars.
1040  * \param data Pointer to data, NapatechThreadVars.
1041  */
1042 void NapatechStreamThreadExitStats(ThreadVars *tv, void *data)
1043 {
1044  NapatechThreadVars *ntv = (NapatechThreadVars *) data;
1045  NapatechCurrentStats stat = NapatechGetCurrentStats(ntv->stream_id);
1046 
1047  double percent = 0;
1048  if (stat.current_drop_packets > 0)
1049  percent = (((double) stat.current_drop_packets)
1050  / (stat.current_packets + stat.current_drop_packets)) * 100;
1051 
1052  SCLogInfo("nt%lu - pkts: %lu; drop: %lu (%5.2f%%); bytes: %lu",
1053  (uint64_t) ntv->stream_id, stat.current_packets,
1054  stat.current_drop_packets, percent, stat.current_bytes);
1055 
1056  SC_ATOMIC_ADD(total_packets, stat.current_packets);
1057  SC_ATOMIC_ADD(total_drops, stat.current_drop_packets);
1058  SC_ATOMIC_ADD(total_tallied, 1);
1059 
1060  if (SC_ATOMIC_GET(total_tallied) == NapatechGetNumConfiguredStreams()) {
1061  if (SC_ATOMIC_GET(total_drops) > 0)
1062  percent = (((double) SC_ATOMIC_GET(total_drops)) / (SC_ATOMIC_GET(total_packets)
1063  + SC_ATOMIC_GET(total_drops))) * 100;
1064 
1065  SCLogInfo(" ");
1066  SCLogInfo("--- Total Packets: %ld Total Dropped: %ld (%5.2f%%)",
1067  SC_ATOMIC_GET(total_packets), SC_ATOMIC_GET(total_drops), percent);
1068 
1069 #ifdef NAPATECH_ENABLE_BYPASS
1070  SCLogInfo("--- BypassCB - Total: %ld, UDP: %ld, TCP: %ld, Unhandled: %ld",
1071  SC_ATOMIC_GET(flow_callback_cnt),
1072  SC_ATOMIC_GET(flow_callback_udp_pkts),
1073  SC_ATOMIC_GET(flow_callback_tcp_pkts),
1074  SC_ATOMIC_GET(flow_callback_unhandled_pkts));
1075 #endif
1076  }
1077 }
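/* Illustrative arithmetic, not part of the original source: with 990,000
 * received packets and 10,000 drops, the expression above gives
 * 10000 / (990000 + 10000) * 100 = 1.00%. */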
1078 
1079 /**
1080  * \brief Deinitializes the NAPATECH stream.
 1081  * \param tv pointer to ThreadVars
 1082  * \param data pointer that gets cast into NapatechThreadVars
1083  */
1084 TmEcode NapatechStreamThreadDeinit(ThreadVars *tv, void *data)
1085 {
1086  SCEnter();
1087  NapatechThreadVars *ntv = (NapatechThreadVars *) data;
1088 
1089  SCLogDebug("Closing Napatech Stream: %d", ntv->stream_id);
1090  NT_NetRxClose(ntv->rx_stream);
1091 
1092  SCReturnInt(TM_ECODE_OK);
1093 }
1094 
1095 /**
1096  * \brief This function passes off to link type decoders.
1097  *
1098  * NapatechDecode decodes packets from Napatech and passes
1099  * them off to the proper link type decoder.
1100  *
1101  * \param t pointer to ThreadVars
1102  * \param p pointer to the current packet
1103  * \param data pointer that gets cast into DecodeThreadVars
1104  */
1105 TmEcode NapatechDecode(ThreadVars *tv, Packet *p, void *data)
1106 {
1107  SCEnter();
1108 
1109  DecodeThreadVars *dtv = (DecodeThreadVars *) data;
1110 
1111  BUG_ON(PKT_IS_PSEUDOPKT(p));
1112 
1113  // update counters
1114  DecodeUpdatePacketCounters(tv, dtv, p);
1115 
1116  switch (p->datalink) {
1117  case LINKTYPE_ETHERNET:
1118  DecodeEthernet(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p));
1119  break;
1120  default:
1121  SCLogError("Datalink type %" PRId32 " not yet supported in module NapatechDecode",
1122  p->datalink);
1123  break;
1124  }
1125 
1126  PacketDecodeFinalize(tv, dtv, p);
1127  SCReturnInt(TM_ECODE_OK);
1128 }
1129 
1130 /**
1131  * \brief Initialization of Napatech Thread.
1132  *
1133  * \param t pointer to ThreadVars
1134  * \param initdata - unused.
1135  * \param data pointer that gets cast into DecodeThreadVars
1136  */
1137 TmEcode NapatechDecodeThreadInit(ThreadVars *tv, const void *initdata, void **data)
1138 {
1139  SCEnter();
1140  DecodeThreadVars *dtv = NULL;
1141  dtv = DecodeThreadVarsAlloc(tv);
1142  if (dtv == NULL) {
1143  SCReturnInt(TM_ECODE_FAILED);
1144  }
1145 
1146  DecodeRegisterPerfCounters(dtv, tv);
1147  *data = (void *) dtv;
1148  SCReturnInt(TM_ECODE_OK);
1149 }
1150 
1151 /**
1152  * \brief Deinitialization of Napatech Thread.
1153  *
1154  * \param tv pointer to ThreadVars
1155  * \param data pointer that gets cast into DecodeThreadVars
1156  */
1157 TmEcode NapatechDecodeThreadDeinit(ThreadVars *tv, void *data)
1158 {
1159  if (data != NULL) {
1160  DecodeThreadVarsFree(tv, data);
1161  }
1162  SCReturnInt(TM_ECODE_OK);
1163 }
1164 
1165 #endif /* HAVE_NAPATECH */