suricata
util-napatech.c
Go to the documentation of this file.
1 /* Copyright (C) 2017 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 /**
18  * \file
19  *
20  * \author Napatech Inc.
21  * \author Phil Young <py@napatech.com>
22  *
23  *
24  */
25 #include "suricata-common.h"
26 
27 #ifdef HAVE_NAPATECH
28 #include "suricata.h"
29 #include "util-device.h"
30 #include "util-cpu.h"
31 #include "util-byte.h"
32 #include "threadvars.h"
33 #include "tm-threads.h"
34 #include "util-napatech.h"
35 #include "source-napatech.h"
36 
37 #ifdef NAPATECH_ENABLE_BYPASS
38 
/*
 * Counters to track the number of flows programmed on the adapter.
 * These fields hold counter IDs registered with the stats subsystem
 * (see StatsRegisterCounter in NapatechStatsLoop), not the counts
 * themselves.
 */
typedef struct FlowStatsCounters_
{
    uint16_t active_bypass_flows; /* flows currently programmed on the adapter */
    uint16_t total_bypass_flows;  /* total flows programmed since start-up */
} FlowStatsCounters;
48 
/* One flow-programming stream handle per adapter; entries are NULL until
 * opened by NapatechInitFlowStreams(). */
static NtFlowStream_t hFlowStream[MAX_ADAPTERS];

/* Set to 1 by NapatechInitFlowStreams() once a flow stream has been
 * opened on every adapter; 0 otherwise. */
static int bypass_supported;

/**
 * \brief Reports whether hardware bypass is supported by the adapters.
 *
 * \return 1 if bypass is supported and enabled, 0 otherwise.
 */
int NapatechIsBypassSupported(void)
{
    return bypass_supported;
}
56 
/**
 * \brief Returns the number of Napatech Adapters in the system.
 *
 * \return count of the Napatech adapters present in the system.
 */
static int GetNumAdapters(void)
{
    NtInfoStream_t hInfo;
    NtInfo_t hInfoSys;
    int status;

    /* NOTE(review): failures below terminate the process; the failing NT
     * status should also be logged here (NT_ExplainError) - confirm
     * against the project history. */
    if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
        exit(EXIT_FAILURE);
    }

    /* Query system-level info to obtain the adapter count. */
    hInfoSys.cmd = NT_INFO_CMD_READ_SYSTEM;
    if ((status = NT_InfoRead(hInfo, &hInfoSys)) != NT_SUCCESS) {
        exit(EXIT_FAILURE);
    }

    int num_adapters = hInfoSys.u.system.data.numAdapters;

    NT_InfoClose(hInfo);
    return num_adapters;
}
84 
85 /**
86  * \brief Initializes the FlowStreams used to program flow data.
87  *
88  * Opens a FlowStream on each adapter present in the system. This
89  * FlowStream is subsequently used to program the adapter with
90  * flows to bypass.
91  *
92  * \return 1 if Bypass functionality is supported; zero otherwise.
93  */
94 int NapatechInitFlowStreams(void)
95 {
96  int status;
97  int adapter = 0;
98  int num_adapters = GetNumAdapters();
99  SCLogInfo("Found %d Napatech adapters.\n", num_adapters);
100  memset(&hFlowStream, 0, sizeof(hFlowStream));
101 
102  if (!NapatechUseHWBypass()) {
103  /* HW Bypass is disabled in the conf file */
104  return 0;
105  }
106 
107  for (adapter = 0; adapter < num_adapters; ++adapter) {
108  NtFlowAttr_t attr;
109  char flow_name[80];
110 
111  NT_FlowOpenAttrInit(&attr);
112  NT_FlowOpenAttrSetAdapterNo(&attr, adapter);
113 
114  snprintf(flow_name, sizeof(flow_name), "Flow stream %d", adapter );
115  SCLogInfo("Opening flow programming stream: %s\n", flow_name);
116  if ((status = NT_FlowOpen_Attr(&hFlowStream[adapter], flow_name, &attr)) != NT_SUCCESS) {
117  SCLogWarning(SC_WARN_COMPATIBILITY, "Napatech bypass functionality not supported by the FPGA version on adapter %d - disabling support.", adapter);
118  bypass_supported = 0;
119  return 0;
120  }
121  }
122 
123  bypass_supported = 1;
124  return bypass_supported;
125 }
126 
/**
 * \brief Returns a pointer to the FlowStream associated with this adapter.
 *
 * \param device adapter number; assumed to be in [0, MAX_ADAPTERS) -
 *               no bounds check is performed here.
 *
 * \return pointer to the FlowStream handle for the given adapter; the
 *         handle it points at is NULL if no stream was opened.
 */
NtFlowStream_t *NapatechGetFlowStreamPtr(int device)
{
    return &hFlowStream[device];
}
136 
137 /**
138  * \brief Closes all open FlowStreams
139  *
140  * \return Success of the operation.
141  */
142 int NapatechCloseFlowStreams(void)
143 {
144  int status = 0;
145  int adapter = 0;
146  int num_adapters = GetNumAdapters();
147 
148  for (adapter = 0; adapter < num_adapters; ++adapter) {
149  if (hFlowStream[adapter]) {
150  SCLogInfo("Closing Napatech Flow Stream on adapter %d.", adapter);
151  if ((status = NT_FlowClose(hFlowStream[adapter])) != NT_SUCCESS) {
153  }
154  hFlowStream[adapter] = NULL;
155  }
156  }
157  return (status == NT_SUCCESS);
158 }
159 
160 
161 /**
162  * \brief Updates statistic counters for Napatech FlowStats
163  *
164  * \param tv Thread variable to ThreadVars
165  * \param hInfo Handle to the Napatech InfoStream.
166  * \param hstat_stream Handle to the Napatech Statistics Stream.
167  * \param flow_counters The flow counters statistics to update.
168  * \param clear_stats Indicates if statistics on the card should be reset to zero.
169  *
170  */
171 static void UpdateFlowStats(
172  ThreadVars *tv,
173  NtInfoStream_t hInfo,
174  NtStatStream_t hstat_stream,
175  FlowStatsCounters flow_counters,
176  int clear_stats
177  )
178 {
179  NtStatistics_t hStat;
180  int status;
181 
182  uint64_t programed = 0;
183  uint64_t removed = 0;
184  int adapter = 0;
185 
186  for (adapter = 0; adapter < GetNumAdapters(); ++adapter) {
187  hStat.cmd = NT_STATISTICS_READ_CMD_FLOW_V0;
188  hStat.u.flowData_v0.clear = clear_stats;
189  hStat.u.flowData_v0.adapterNo = adapter;
190  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
192  exit(1);
193  }
194  programed = hStat.u.flowData_v0.learnDone;
195  removed = hStat.u.flowData_v0.unlearnDone
196  + hStat.u.flowData_v0.automaticUnlearnDone
197  + hStat.u.flowData_v0.timeoutUnlearnDone;
198  }
199 
200  StatsSetUI64(tv, flow_counters.active_bypass_flows, programed - removed);
201  StatsSetUI64(tv, flow_counters.total_bypass_flows, programed);
202 }
203 
204 #endif /* NAPATECH_ENABLE_BYPASS */
205 
206 
207 /*-----------------------------------------------------------------------------
208  *-----------------------------------------------------------------------------
209  * Statistics code
210  *-----------------------------------------------------------------------------
211  */
212 typedef struct PacketCounters_
213 {
214  uint16_t pkts;
215  uint16_t byte;
216  uint16_t drop_pkts;
217  uint16_t drop_byte;
219 
222 
224 {
225 
226  return current_stats[id];
227 }
228 
233 };
234 
235 #define MAX_HOSTBUFFERS 8
236 
237 /**
238  * \brief Test to see if any of the configured streams are active
239  *
240  * \param hInfo Handle to Napatech Info Stream.
241  * \param hStatsStream Handle to Napatech Statistics stream
242  * \param stream_config array of stream configuration structures
243  * \param num_inst
244  *
245  */
246 static uint16_t TestStreamConfig(
247  NtInfoStream_t hInfo,
248  NtStatStream_t hstat_stream,
250  uint16_t num_inst)
251 {
252  uint16_t num_active = 0;
253 
254  for (uint16_t inst = 0; inst < num_inst; ++inst) {
255  int status;
256  NtStatistics_t stat; // Stat handle.
257 
258  /* Check to see if it is an active stream */
259  memset(&stat, 0, sizeof (NtStatistics_t));
260 
261  /* Read usage data for the chosen stream ID */
262  stat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
263  stat.u.usageData_v0.streamid = (uint8_t) stream_config[inst].stream_id;
264 
265  if ((status = NT_StatRead(hstat_stream, &stat)) != NT_SUCCESS) {
267  return 0;
268  }
269 
270  if (stat.u.usageData_v0.data.numHostBufferUsed > 0) {
271  stream_config[inst].is_active = true;
272  num_active++;
273  } else {
274  stream_config[inst].is_active = false;
275  }
276  }
277 
278  return num_active;
279 }
280 
281 /**
282  * \brief Updates Napatech packet counters
283  *
284  * \param tv Pointer to TheardVars structure
285  * \param hInfo Handle to Napatech Info Stream.
286  * \param hstat_stream Handle to Napatech Statistics stream
287  * \param num_streams the number of streams that are currently active
288  * \param stream_config array of stream configuration structures
289  * \param total_counters - cumulative count of all packets received.
290  * \param dispatch_host, - Count of packets that were delivered to the host buffer
291  * \param dispatch_drop - count of packets that were dropped as a result of a rule
292  * \param dispatch_fwd - count of packets forwarded out the egress port as the result of a rule
293  * \param is_inline - are we running in inline mode?
294  * \param enable_stream_stats - are per thread/stream statistics enabled.
295  * \param stream_counters - counters for each thread/stream configured.
296  *
297  * \return The number of active streams that were updated.
298  *
299  */
300 static uint32_t UpdateStreamStats(ThreadVars *tv,
301  NtInfoStream_t hInfo,
302  NtStatStream_t hstat_stream,
303  uint16_t num_streams,
305  PacketCounters total_counters,
306  PacketCounters dispatch_host,
307  PacketCounters dispatch_drop,
308  PacketCounters dispatch_fwd,
309  int is_inline,
310  int enable_stream_stats,
311  PacketCounters stream_counters[]
312  )
313 {
314  static uint64_t rxPktsStart[MAX_STREAMS] = {0};
315  static uint64_t rxByteStart[MAX_STREAMS] = {0};
316  static uint64_t dropPktStart[MAX_STREAMS] = {0};
317  static uint64_t dropByteStart[MAX_STREAMS] = {0};
318 
319  int status;
320  NtInfo_t hStreamInfo;
321  NtStatistics_t hStat; // Stat handle.
322 
323  /* Query the system to get the number of streams currently instantiated */
324  hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
325  if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
327  exit(EXIT_FAILURE);
328  }
329 
330  uint16_t num_active;
331  if ((num_active = TestStreamConfig(hInfo, hstat_stream, stream_config, num_streams)) == 0) {
332  /* None of the configured streams are active */
333  return 0;
334  }
335 
336  /* At least one stream is active so proceed with the stats. */
337  uint16_t inst_id = 0;
338  uint32_t stream_cnt = 0;
339  for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {
340  while (inst_id < num_streams) {
341  if (stream_config[inst_id].is_active) {
342  break;
343  } else {
344  ++inst_id;
345  }
346  }
347  if (inst_id == num_streams)
348  break;
349 
350  /* Read usage data for the chosen stream ID */
351  memset(&hStat, 0, sizeof (NtStatistics_t));
352  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
353  hStat.u.usageData_v0.streamid = (uint8_t) stream_config[inst_id].stream_id;
354 
355  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
357  return 0;
358  }
359 
360  uint16_t stream_id = stream_config[inst_id].stream_id;
361  if (stream_config[inst_id].is_active) {
362  uint64_t rx_pkts_total = 0;
363  uint64_t rx_byte_total = 0;
364  uint64_t drop_pkts_total = 0;
365  uint64_t drop_byte_total = 0;
366 
367  for (uint32_t hbCount = 0; hbCount < hStat.u.usageData_v0.data.numHostBufferUsed; hbCount++) {
368  if (unlikely(stream_config[inst_id].initialized == false)) {
369  rxPktsStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
370  rxByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
371  dropPktStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
372  dropByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
373  stream_config[inst_id].initialized = true;
374  } else {
375  rx_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
376  rx_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
377  drop_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
378  drop_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
379  }
380  }
381 
382  current_stats[stream_id].current_packets = rx_pkts_total - rxPktsStart[stream_id];
383  current_stats[stream_id].current_bytes = rx_byte_total - rxByteStart[stream_id];
384  current_stats[stream_id].current_drop_packets = drop_pkts_total - dropPktStart[stream_id];
385  current_stats[stream_id].current_drop_bytes = drop_byte_total - dropByteStart[stream_id];
386  }
387 
388  if (enable_stream_stats) {
389  StatsSetUI64(tv, stream_counters[inst_id].pkts, current_stats[stream_id].current_packets);
390  StatsSetUI64(tv, stream_counters[inst_id].byte, current_stats[stream_id].current_bytes);
391  StatsSetUI64(tv, stream_counters[inst_id].drop_pkts, current_stats[stream_id].current_drop_packets);
392  StatsSetUI64(tv, stream_counters[inst_id].drop_byte, current_stats[stream_id].current_drop_bytes);
393  }
394 
395  ++inst_id;
396  }
397 
398  uint32_t stream_id;
399  for (stream_id = 0; stream_id < num_streams; ++stream_id) {
400 
401 #ifndef NAPATECH_ENABLE_BYPASS
404 #endif /* NAPATECH_ENABLE_BYPASS */
407  }
408 
409 
410 #ifndef NAPATECH_ENABLE_BYPASS
411  StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
412  StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);
413 #endif /* NAPATECH_ENABLE_BYPASS */
414 
417 
422 
423 
424  /* Read usage data for the chosen stream ID */
425  memset(&hStat, 0, sizeof (NtStatistics_t));
426 
427 #ifdef NAPATECH_ENABLE_BYPASS
428  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
429  hStat.u.query_v3.clear = 0;
430 #else /* NAPATECH_ENABLE_BYPASS */
431  /* Older versions of the API have a different structure. */
432  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
433  hStat.u.query_v2.clear = 0;
434 #endif /* !NAPATECH_ENABLE_BYPASS */
435 
436  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
437  if (status == NT_STATUS_TIMEOUT) {
438  SCLogInfo("Statistics timed out - will retry next time.");
439  return 0;
440  } else {
442  return 0;
443  }
444  }
445 
446 #ifdef NAPATECH_ENABLE_BYPASS
447 
448  int adapter = 0;
449  uint64_t total_dispatch_host_pkts = 0;
450  uint64_t total_dispatch_host_byte = 0;
451  uint64_t total_dispatch_drop_pkts = 0;
452  uint64_t total_dispatch_drop_byte = 0;
453  uint64_t total_dispatch_fwd_pkts = 0;
454  uint64_t total_dispatch_fwd_byte = 0;
455 
456  for (adapter = 0; adapter < GetNumAdapters(); ++adapter) {
457  total_dispatch_host_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts;
458  total_dispatch_host_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets;
459 
460  total_dispatch_drop_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
461  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;
462  total_dispatch_drop_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
463  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].octets;
464 
465  total_dispatch_fwd_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
466  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].pkts;
467  total_dispatch_fwd_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets
468  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].octets;
469 
470  total_stats.current_packets += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts
471  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
472  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
473  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;
474 
475  total_stats.current_bytes = hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets
476  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
477  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets;
478  }
479 
480  StatsSetUI64(tv, dispatch_host.pkts, total_dispatch_host_pkts);
481  StatsSetUI64(tv, dispatch_host.byte, total_dispatch_host_byte);
482 
483  StatsSetUI64(tv, dispatch_drop.pkts, total_dispatch_drop_pkts);
484  StatsSetUI64(tv, dispatch_drop.byte, total_dispatch_drop_byte);
485 
486  if (is_inline) {
487  StatsSetUI64(tv, dispatch_fwd.pkts, total_dispatch_fwd_pkts);
488  StatsSetUI64(tv, dispatch_fwd.byte, total_dispatch_fwd_byte);
489  }
490 
491  StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
492  StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);
493 
494 #endif /* NAPATECH_ENABLE_BYPASS */
495 
496  return num_active;
497 }
498 
499 /**
500  * \brief Statistics processing loop
501  *
502  * Instantiated on the stats thread. Periodically retrieives
503  * statistics from the Napatech card and updates the packet counters
504  *
505  * \param arg Pointer that is caste into a TheardVars structure
506  */
507 static void *NapatechStatsLoop(void *arg)
508 {
509  ThreadVars *tv = (ThreadVars *) arg;
510 
511  int status;
512  NtInfoStream_t hInfo;
513  NtStatStream_t hstat_stream;
514  int is_inline = 0;
515  int enable_stream_stats = 0;
516  PacketCounters stream_counters[MAX_STREAMS];
517 
518  if (ConfGetBool("napatech.inline", &is_inline) == 0) {
519  is_inline = 0;
520  }
521 
522  if (ConfGetBool("napatech.enable-stream-stats", &enable_stream_stats) == 0) {
523  /* default is "no" */
524  enable_stream_stats = 0;
525  }
526 
528  uint16_t stream_cnt = NapatechGetStreamConfig(stream_config);
529 
530  /* Open the info and Statistics */
531  if ((status = NT_InfoOpen(&hInfo, "StatsLoopInfoStream")) != NT_SUCCESS) {
533  return NULL;
534  }
535 
536  if ((status = NT_StatOpen(&hstat_stream, "StatsLoopStatsStream")) != NT_SUCCESS) {
538  return NULL;
539  }
540 
541  NtStatistics_t hStat;
542  memset(&hStat, 0, sizeof (NtStatistics_t));
543 
544 #ifdef NAPATECH_ENABLE_BYPASS
545  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
546  hStat.u.query_v3.clear = 1;
547 #else /* NAPATECH_ENABLE_BYPASS */
548  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
549  hStat.u.query_v2.clear = 1;
550 #endif /* !NAPATECH_ENABLE_BYPASS */
551 
552  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
554  return 0;
555  }
556 
557  PacketCounters total_counters;
558  memset(&total_counters, 0, sizeof(total_counters));
559 
560  PacketCounters dispatch_host;
561  memset(&dispatch_host, 0, sizeof(dispatch_host));
562 
563  PacketCounters dispatch_drop;
564  memset(&dispatch_drop, 0, sizeof(dispatch_drop));
565 
566  PacketCounters dispatch_fwd;
567  memset(&dispatch_fwd, 0, sizeof(dispatch_fwd));
568 
569  total_counters.pkts = StatsRegisterCounter("napa_total.pkts", tv);
570  dispatch_host.pkts = StatsRegisterCounter("napa_dispatch_host.pkts", tv);
571  dispatch_drop.pkts = StatsRegisterCounter("napa_dispatch_drop.pkts", tv);
572  if (is_inline) {
573  dispatch_fwd.pkts = StatsRegisterCounter("napa_dispatch_fwd.pkts", tv);
574  }
575 
576  total_counters.byte = StatsRegisterCounter("napa_total.byte", tv);
577  dispatch_host.byte = StatsRegisterCounter("napa_dispatch_host.byte", tv);
578  dispatch_drop.byte = StatsRegisterCounter("napa_dispatch_drop.byte", tv);
579  if (is_inline) {
580  dispatch_fwd.byte = StatsRegisterCounter("napa_dispatch_fwd.byte", tv);
581  }
582 
583  total_counters.drop_pkts = StatsRegisterCounter("napa_total.overflow_drop_pkts", tv);
584  total_counters.drop_byte = StatsRegisterCounter("napa_total.overflow_drop_byte", tv);
585 
586  if (enable_stream_stats) {
587  for (int i = 0; i < stream_cnt; ++i) {
588  char *pkts_buf = SCCalloc(1, 32);
589  if (unlikely(pkts_buf == NULL)) {
591  "Failed to allocate memory for NAPATECH stream counter.");
592  exit(EXIT_FAILURE);
593  }
594 
595  snprintf(pkts_buf, 32, "napa%d.pkts", stream_config[i].stream_id);
596  stream_counters[i].pkts = StatsRegisterCounter(pkts_buf, tv);
597 
598  char *byte_buf = SCCalloc(1, 32);
599  if (unlikely(byte_buf == NULL)) {
601  "Failed to allocate memory for NAPATECH stream counter.");
602  exit(EXIT_FAILURE);
603  }
604  snprintf(byte_buf, 32, "napa%d.bytes", stream_config[i].stream_id);
605  stream_counters[i].byte = StatsRegisterCounter(byte_buf, tv);
606 
607  char *drop_pkts_buf = SCCalloc(1, 32);
608  if (unlikely(drop_pkts_buf == NULL)) {
610  "Failed to allocate memory for NAPATECH stream counter.");
611  exit(EXIT_FAILURE);
612  }
613  snprintf(drop_pkts_buf, 32, "napa%d.drop_pkts", stream_config[i].stream_id);
614  stream_counters[i].drop_pkts = StatsRegisterCounter(drop_pkts_buf, tv);
615 
616  char *drop_byte_buf = SCCalloc(1, 32);
617  if (unlikely(drop_byte_buf == NULL)) {
619  "Failed to allocate memory for NAPATECH stream counter.");
620  exit(EXIT_FAILURE);
621  }
622  snprintf(drop_byte_buf, 32, "napa%d.drop_byte", stream_config[i].stream_id);
623  stream_counters[i].drop_byte = StatsRegisterCounter(drop_byte_buf, tv);
624  }
625  }
626 
627 #ifdef NAPATECH_ENABLE_BYPASS
628  FlowStatsCounters flow_counters;
629  if (bypass_supported) {
630  flow_counters.active_bypass_flows = StatsRegisterCounter("napa_bypass.active_flows", tv);
631  flow_counters.total_bypass_flows = StatsRegisterCounter("napa_bypass.total_flows", tv);
632  }
633 #endif /* NAPATECH_ENABLE_BYPASS */
634 
636 
637  StatsSetUI64(tv, total_counters.pkts, 0);
638  StatsSetUI64(tv, total_counters.byte, 0);
639  StatsSetUI64(tv, total_counters.drop_pkts, 0);
640  StatsSetUI64(tv, total_counters.drop_byte, 0);
641 
642 #ifdef NAPATECH_ENABLE_BYPASS
643  if (bypass_supported) {
644  StatsSetUI64(tv, dispatch_host.pkts, 0);
645  StatsSetUI64(tv, dispatch_drop.pkts, 0);
646 
647  if (is_inline) {
648  StatsSetUI64(tv, dispatch_fwd.pkts, 0);
649  }
650 
651  StatsSetUI64(tv, dispatch_host.byte, 0);
652  StatsSetUI64(tv, dispatch_drop.byte, 0);
653  if (is_inline) {
654  StatsSetUI64(tv, dispatch_fwd.byte, 0);
655  }
656 
657  if (enable_stream_stats) {
658  for (int i = 0; i < stream_cnt; ++i) {
659  StatsSetUI64(tv, stream_counters[i].pkts, 0);
660  StatsSetUI64(tv, stream_counters[i].byte, 0);
661  StatsSetUI64(tv, stream_counters[i].drop_pkts, 0);
662  StatsSetUI64(tv, stream_counters[i].drop_byte, 0);
663  }
664  }
665 
666  StatsSetUI64(tv, flow_counters.active_bypass_flows, 0);
667  StatsSetUI64(tv, flow_counters.total_bypass_flows, 0);
668  UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 1);
669  }
670 #endif /* NAPATECH_ENABLE_BYPASS */
671 
672  uint32_t num_active = UpdateStreamStats(tv, hInfo, hstat_stream,
673  stream_cnt, stream_config, total_counters,
674  dispatch_host, dispatch_drop, dispatch_fwd,
675  is_inline, enable_stream_stats, stream_counters);
676 
677  if (!NapatechIsAutoConfigEnabled() && (num_active < stream_cnt)) {
678  SCLogInfo("num_active: %d, stream_cnt: %d", num_active, stream_cnt);
680  "Some or all of the configured streams are not created. Proceeding with active streams.");
681  }
682 
684  while (1) {
686  SCLogDebug("NapatechStatsLoop THV_KILL detected");
687  break;
688  }
689 
690  UpdateStreamStats(tv, hInfo, hstat_stream,
691  stream_cnt, stream_config, total_counters,
692  dispatch_host, dispatch_drop, dispatch_fwd,
693  is_inline, enable_stream_stats,
694  stream_counters);
695 
696 #ifdef NAPATECH_ENABLE_BYPASS
697  if (bypass_supported) {
698  UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 0);
699  }
700 #endif /* NAPATECH_ENABLE_BYPASS */
701 
703  usleep(1000000);
704  }
705 
706  /* CLEAN UP NT Resources and Close the info stream */
707  if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
709  return NULL;
710  }
711 
712  /* Close the statistics stream */
713  if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
715  return NULL;
716  }
717 
718  SCLogDebug("Exiting NapatechStatsLoop");
722 
723  return NULL;
724 }
725 
726 #define MAX_HOSTBUFFER 4
727 #define MAX_STREAMS 256
728 #define HB_HIGHWATER 2048 //1982
729 
730 /**
731  * \brief Tests whether a particular stream_id is actively registered
732  *
733  * \param stream_id - ID of the stream to look up
734  * \param num_registered - The total number of registered streams
735  * \param registered_streams - An array containing actively registered streams.
736  *
737  * \return Bool indicating is the specified stream is registered.
738  *
739  */
740 static bool RegisteredStream(uint16_t stream_id, uint16_t num_registered,
741  NapatechStreamConfig registered_streams[])
742 {
743  for (uint16_t reg_id = 0; reg_id < num_registered; ++reg_id) {
744  if (stream_id == registered_streams[reg_id].stream_id) {
745  return true;
746  }
747  }
748  return false;
749 }
750 
751 /**
752  * \brief Count the number of worker threads defined in the conf file.
753  *
754  * \return - The number of worker threads defined by the configuration
755  */
756 static uint32_t CountWorkerThreads(void)
757 {
758  int worker_count = 0;
759 
760  ConfNode *affinity;
761  ConfNode *root = ConfGetNode("threading.cpu-affinity");
762 
763  if (root != NULL) {
764 
765  TAILQ_FOREACH(affinity, &root->head, next)
766  {
767  if (strcmp(affinity->val, "decode-cpu-set") == 0 ||
768  strcmp(affinity->val, "stream-cpu-set") == 0 ||
769  strcmp(affinity->val, "reject-cpu-set") == 0 ||
770  strcmp(affinity->val, "output-cpu-set") == 0) {
771  continue;
772  }
773 
774  if (strcmp(affinity->val, "worker-cpu-set") == 0) {
775  ConfNode *node = ConfNodeLookupChild(affinity->head.tqh_first, "cpu");
776  ConfNode *lnode;
777 
779 
780  TAILQ_FOREACH(lnode, &node->head, next)
781  {
782  uint8_t start, end;
783  if (strncmp(lnode->val, "all", 4) == 0) {
784  /* check that the sting in the config file is correctly specified */
785  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
787  "Only one Napatech port specifier type allowed.");
788  exit(EXIT_FAILURE);
789  }
790  cpu_spec = CONFIG_SPECIFIER_RANGE;
791  worker_count = UtilCpuGetNumProcessorsConfigured();
792  } else if (strchr(lnode->val, '-')) {
793  /* check that the sting in the config file is correctly specified */
794  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
796  "Only one Napatech port specifier type allowed.");
797  exit(EXIT_FAILURE);
798  }
799  cpu_spec = CONFIG_SPECIFIER_RANGE;
800 
801  char copystr[16];
802  strlcpy(copystr, lnode->val, 16);
803 
804  ByteExtractStringUint8(&start, 10, 0, copystr);
805  ByteExtractStringUint8(&end, 10, 0, strchr(copystr, '-') + 1);
806 
807  worker_count = end - start + 1;
808 
809  } else {
810  /* check that the sting in the config file is correctly specified */
811  if (cpu_spec == CONFIG_SPECIFIER_RANGE) {
813  "Napatech port range specifiers cannot be combined with individual stream specifiers.");
814  exit(EXIT_FAILURE);
815  }
816  cpu_spec = CONFIG_SPECIFIER_INDIVIDUAL;
817  ++worker_count;
818  }
819  }
820  break;
821  }
822  }
823  }
824  return worker_count;
825 }
826 
827 /**
828  * \brief Reads and parses the stream configuration defined in the config file.
829  *
830  * \param stream_config - array to be filled in with active stream info.
831  *
832  * \return the number of streams configured or -1 if an error occurred
833  *
834  */
836 {
837  int status;
838  char error_buffer[80]; // Error buffer
839  NtStatStream_t hstat_stream;
840  NtStatistics_t hStat; // Stat handle.
841  NtInfoStream_t info_stream;
842  NtInfo_t info;
843  uint16_t instance_cnt = 0;
844  int use_all_streams = 0;
845  int set_cpu_affinity = 0;
846  ConfNode *ntstreams;
847  uint16_t stream_id = 0;
848  uint16_t start = 0;
849  uint16_t end = 0;
850 
851  for (uint16_t i = 0; i < MAX_STREAMS; ++i) {
852  stream_config[i].stream_id = 0;
853  stream_config[i].is_active = false;
854  stream_config[i].initialized = false;
855  }
856 
857  if (ConfGetBool("napatech.use-all-streams", &use_all_streams) == 0) {
858  /* default is "no" */
859  use_all_streams = 0;
860  }
861 
862  if ((status = NT_InfoOpen(&info_stream, "SuricataStreamInfo")) != NT_SUCCESS) {
864  return -1;
865  }
866 
867  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
869  return -1;
870  }
871 
872  if (use_all_streams) {
873  info.cmd = NT_INFO_CMD_READ_STREAM;
874  if ((status = NT_InfoRead(info_stream, &info)) != NT_SUCCESS) {
876  return -1;
877  }
878 
879  while (instance_cnt < info.u.stream.data.count) {
880 
881  /*
882  * For each stream ID query the number of host-buffers used by
883  * the stream. If zero, then that streamID is not used; skip
884  * over it and continue until we get a streamID with a non-zero
885  * count of the host-buffers.
886  */
887  memset(&hStat, 0, sizeof (NtStatistics_t));
888 
889  /* Read usage data for the chosen stream ID */
890  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
891  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
892 
893  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
894  /* Get the status code as text */
895  NT_ExplainError(status, error_buffer, sizeof (error_buffer));
896  SCLogError(SC_ERR_NAPATECH_INIT_FAILED, "NT_StatRead() failed: %s\n", error_buffer);
897  return -1;
898  }
899 
900  if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
901  ++stream_id;
902  continue;
903  }
904 
905  /* if we get here it is an active stream */
906  stream_config[instance_cnt].stream_id = stream_id++;
907  stream_config[instance_cnt].is_active = true;
908  instance_cnt++;
909  }
910 
911  } else {
912  ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity);
913  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 1)) {
914  start = 0;
915  end = CountWorkerThreads() - 1;
916  } else {
917  /* When not using the default streams we need to
918  * parse the array of streams from the conf */
919  if ((ntstreams = ConfGetNode("napatech.streams")) == NULL) {
920  SCLogError(SC_ERR_RUNMODE, "Failed retrieving napatech.streams from Config");
921  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 0)) {
923  "if set-cpu-affinity: no in conf then napatech.streams must be defined");
924  }
925  exit(EXIT_FAILURE);
926  }
927 
928  /* Loop through all stream numbers in the array and register the devices */
929  ConfNode *stream;
930  enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;
931  instance_cnt = 0;
932 
933  TAILQ_FOREACH(stream, &ntstreams->head, next)
934  {
935 
936  if (stream == NULL) {
937  SCLogError(SC_ERR_NAPATECH_INIT_FAILED, "Couldn't Parse Stream Configuration");
938  return -1;
939  }
940 
941  if (strchr(stream->val, '-')) {
942  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
944  "Only one Napatech stream range specifier allowed.");
945  return -1;
946  }
947  stream_spec = CONFIG_SPECIFIER_RANGE;
948 
949  char copystr[16];
950  strlcpy(copystr, stream->val, 16);
951  ByteExtractStringUint16(&start, 10, 0, copystr);
952  ByteExtractStringUint16(&end, 10, 0, strchr(copystr, '-') + 1);
953  } else {
954  if (stream_spec == CONFIG_SPECIFIER_RANGE) {
956  "Napatech range and individual specifiers cannot be combined.");
957  exit(EXIT_FAILURE);
958  }
959  stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;
960  ByteExtractStringUint16(&stream_config[instance_cnt].stream_id, 10, 0, stream->val);
961  start = stream_config[instance_cnt].stream_id;
962  end = stream_config[instance_cnt].stream_id;
963  }
964  }
965  }
966 
967  for (stream_id = start; stream_id <= end; ++stream_id) {
968  /* if we get here it is configured in the .yaml file */
969  stream_config[instance_cnt].stream_id = stream_id;
970 
971  /* Check to see if it is an active stream */
972  memset(&hStat, 0, sizeof (NtStatistics_t));
973 
974  /* Read usage data for the chosen stream ID */
975  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
976  hStat.u.usageData_v0.streamid =
977  (uint8_t) stream_config[instance_cnt].stream_id;
978 
979  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
981  return -1;
982  }
983 
984  if (hStat.u.usageData_v0.data.numHostBufferUsed > 0) {
985  stream_config[instance_cnt].is_active = true;
986  }
987  instance_cnt++;
988  }
989  }
990 
991  /* Close the statistics stream */
992  if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
994  return -1;
995  }
996 
997  if ((status = NT_InfoClose(info_stream)) != NT_SUCCESS) {
999  return -1;
1000  }
1001 
1002  return instance_cnt;
1003 }
1004 
/**
 * \brief Monitor thread that periodically reports host-buffer (and, when
 *        bypass is not compiled in, on-board SDRAM) fill levels per stream.
 *
 * Polls adapter usage statistics every 200 ms and emits SCLogPerf messages
 * whenever a stream's average fill level crosses a 25% threshold band in
 * either direction. Runs until the thread's THV_KILL flag is set.
 *
 * \param arg  ThreadVars* for this monitor thread (cast from void*).
 * \return NULL on exit.
 *
 * NOTE(review): several single-line calls appear to have been elided from
 * this copy by the extraction process (error-log calls inside the
 * NT_* failure branches, and likely the THV_INIT_DONE / shutdown-handshake
 * TmThreads flag calls before the main loop and before return) — confirm
 * against the upstream file before relying on this text.
 */
static void *NapatechBufMonitorLoop(void *arg)
{
    ThreadVars *tv = (ThreadVars *) arg;

    NtInfo_t hStreamInfo;
    NtStatistics_t hStat; // Stat handle.
    NtInfoStream_t hInfo;
    NtStatStream_t hstat_stream;
    int status; // Status variable

    /* Report granularity: a message is logged each time the average fill
     * level moves a full 25% band up or down. */
    const uint32_t alertInterval = 25;

#ifndef NAPATECH_ENABLE_BYPASS
    /* On-board (SDRAM) buffering levels are only tracked when hardware
     * bypass support is NOT compiled in. */
    uint32_t OB_fill_level[MAX_STREAMS] = {0};
    uint32_t OB_alert_level[MAX_STREAMS] = {0};
    uint32_t ave_OB_fill_level[MAX_STREAMS] = {0};
#endif /* NAPATECH_ENABLE_BYPASS */

    uint32_t HB_fill_level[MAX_STREAMS] = {0};
    uint32_t HB_alert_level[MAX_STREAMS] = {0};
    uint32_t ave_HB_fill_level[MAX_STREAMS] = {0};

    /* Open the info and Statistics */
    if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was elided here in extraction. */
        exit(EXIT_FAILURE);
    }

    if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was elided here in extraction. */
        exit(EXIT_FAILURE);
    }

    /* Read the info on all streams instantiated in the system */
    hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
    if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was elided here in extraction. */
        exit(EXIT_FAILURE);
    }

    /* Only streams that appear in the suricata configuration are reported. */
    NapatechStreamConfig registered_streams[MAX_STREAMS];
    int num_registered = NapatechGetStreamConfig(registered_streams);
    if (num_registered == -1) {
        exit(EXIT_FAILURE);
    }

    while (1) {
        if (TmThreadsCheckFlag(tv, THV_KILL)) {
            SCLogDebug("NapatechBufMonitorLoop THV_KILL detected");
            break;
        }

        usleep(200000);

        /* Read the info on all streams instantiated in the system */
        hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
        if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
            /* NOTE(review): an error-log call was elided here in extraction. */
            exit(EXIT_FAILURE);
        }

        char pktCntStr[4096];
        memset(pktCntStr, 0, sizeof (pktCntStr));

        uint32_t stream_id = 0;
        uint32_t stream_cnt = 0;
        uint32_t num_streams = hStreamInfo.u.stream.data.count;

        /* stream ids need not be contiguous: for each active stream found,
         * skip forward over ids with no host buffers in use. */
        for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {

            do {

                /* Read usage data for the chosen stream ID */
                hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
                hStat.u.usageData_v0.streamid = (uint8_t) stream_id;

                if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
                    /* NOTE(review): an error-log call was elided here in
                     * extraction. */
                    exit(EXIT_FAILURE);
                }

                if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
                    ++stream_id;
                    continue;
                }
            } while (hStat.u.usageData_v0.data.numHostBufferUsed == 0);

            if (RegisteredStream(stream_id, num_registered, registered_streams)) {

#ifndef NAPATECH_ENABLE_BYPASS
                ave_OB_fill_level[stream_id] = 0;
#endif /* NAPATECH_ENABLE_BYPASS */

                ave_HB_fill_level[stream_id] = 0;

                /* Accumulate per-host-buffer fill percentages; averaged over
                 * the buffer count below. */
                for (uint32_t hb_count = 0; hb_count < hStat.u.usageData_v0.data.numHostBufferUsed; hb_count++) {

#ifndef NAPATECH_ENABLE_BYPASS
                    OB_fill_level[hb_count] =
                            ((100 * hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.used) /
                            hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.size);

                    if (OB_fill_level[hb_count] > 100) {
                        OB_fill_level[hb_count] = 100;
                    }
#endif /* NAPATECH_ENABLE_BYPASS */
                    /* Effective buffer size in KB, excluding the reserved
                     * high-water region. */
                    uint32_t bufSize = hStat.u.usageData_v0.data.hb[hb_count].enQueuedAdapter / 1024
                            + hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024
                            + hStat.u.usageData_v0.data.hb[hb_count].enQueued / 1024
                            - HB_HIGHWATER;

                    HB_fill_level[hb_count] = (uint32_t)
                            ((100 * hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024) /
                            bufSize);

#ifndef NAPATECH_ENABLE_BYPASS
                    ave_OB_fill_level[stream_id] += OB_fill_level[hb_count];
#endif /* NAPATECH_ENABLE_BYPASS */

                    ave_HB_fill_level[stream_id] += HB_fill_level[hb_count];
                }

#ifndef NAPATECH_ENABLE_BYPASS
                ave_OB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;
#endif /* NAPATECH_ENABLE_BYPASS */

                ave_HB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;

                /* Host Buffer Fill Level warnings... */
                if (ave_HB_fill_level[stream_id] >= (HB_alert_level[stream_id] + alertInterval)) {

                    while (ave_HB_fill_level[stream_id] >= HB_alert_level[stream_id] + alertInterval) {
                        HB_alert_level[stream_id] += alertInterval;
                    }
                    SCLogPerf("nt%d - Increasing Host Buffer Fill Level : %4d%%",
                            stream_id, ave_HB_fill_level[stream_id] - 1);
                }

                if (HB_alert_level[stream_id] > 0) {
                    if ((ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval))) {
                        SCLogPerf("nt%d - Decreasing Host Buffer Fill Level: %4d%%",
                                stream_id, ave_HB_fill_level[stream_id]);

                        while (ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval)) {
                            if ((HB_alert_level[stream_id]) > 0) {
                                HB_alert_level[stream_id] -= alertInterval;
                            } else break;
                        }
                    }
                }

#ifndef NAPATECH_ENABLE_BYPASS
                /* On Board SDRAM Fill Level warnings... */
                if (ave_OB_fill_level[stream_id] >= (OB_alert_level[stream_id] + alertInterval)) {
                    while (ave_OB_fill_level[stream_id] >= OB_alert_level[stream_id] + alertInterval) {
                        OB_alert_level[stream_id] += alertInterval;

                    }
                    SCLogPerf("nt%d - Increasing Adapter SDRAM Fill Level: %4d%%",
                            stream_id, ave_OB_fill_level[stream_id]);
                }

                if (OB_alert_level[stream_id] > 0) {
                    if ((ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval))) {
                        SCLogPerf("nt%d - Decreasing Adapter SDRAM Fill Level : %4d%%",
                                stream_id, ave_OB_fill_level[stream_id]);

                        while (ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval)) {
                            if ((OB_alert_level[stream_id]) > 0) {
                                OB_alert_level[stream_id] -= alertInterval;
                            } else break;
                        }
                    }
                }
#endif /* NAPATECH_ENABLE_BYPASS */
            }
            ++stream_id;
        }
    }

    if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was elided here in extraction. */
        exit(EXIT_FAILURE);
    }

    /* Close the statistics stream */
    if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was elided here in extraction. */
        exit(EXIT_FAILURE);
    }

    SCLogDebug("Exiting NapatechStatsLoop");
    /* NOTE(review): thread shutdown-handshake flag calls (THV_RUNNING_DONE /
     * THV_DEINIT wait) appear to have been elided here — confirm upstream. */

    return NULL;
}
1204 
1205 
1207 {
1208  /* Creates the Statistic threads */
1209  ThreadVars *stats_tv = TmThreadCreate("NapatechStats",
1210  NULL, NULL,
1211  NULL, NULL,
1212  "custom", NapatechStatsLoop, 0);
1213 
1214  if (stats_tv == NULL) {
1216  "Error creating a thread for NapatechStats - Killing engine.");
1217  exit(EXIT_FAILURE);
1218  }
1219 
1220  if (TmThreadSpawn(stats_tv) != 0) {
1222  "Failed to spawn thread for NapatechStats - Killing engine.");
1223  exit(EXIT_FAILURE);
1224  }
1225 
1226 #ifdef NAPATECH_ENABLE_BYPASS
1227  if (bypass_supported) {
1228  SCLogInfo("Napatech bypass functionality enabled.");
1229  }
1230 #endif /* NAPATECH_ENABLE_BYPASS */
1231 
1232  ThreadVars *buf_monitor_tv = TmThreadCreate("NapatechBufMonitor",
1233  NULL, NULL,
1234  NULL, NULL,
1235  "custom", NapatechBufMonitorLoop, 0);
1236 
1237  if (buf_monitor_tv == NULL) {
1239  "Error creating a thread for NapatechBufMonitor - Killing engine.");
1240  exit(EXIT_FAILURE);
1241  }
1242 
1243  if (TmThreadSpawn(buf_monitor_tv) != 0) {
1245  "Failed to spawn thread for NapatechBufMonitor - Killing engine.");
1246  exit(EXIT_FAILURE);
1247  }
1248 
1249 
1250  return;
1251 }
1252 
1253 bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
1254 {
1255  uint32_t status = 0;
1256  static NtConfigStream_t hconfig;
1257 
1258  char ntpl_cmd[64];
1259  snprintf(ntpl_cmd, 64, "setup[numanode=%d] = streamid == %d", numa, stream);
1260 
1261  NtNtplInfo_t ntpl_info;
1262 
1263  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1265  return false;
1266  }
1267 
1268  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1269  status = ntpl_info.ntplId;
1270 
1271  } else {
1272  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1273  return false;
1274  }
1275 
1276  return status;
1277 }
1278 
1279 static uint32_t NapatechSetHashmode(void)
1280 {
1281  uint32_t status = 0;
1282  const char *hash_mode;
1283  static NtConfigStream_t hconfig;
1284  char ntpl_cmd[64];
1285  NtNtplInfo_t ntpl_info;
1286 
1287  uint32_t filter_id = 0;
1288 
1289  /* Get the hashmode from the conf file. */
1290  ConfGetValue("napatech.hashmode", &hash_mode);
1291 
1292  snprintf(ntpl_cmd, 64, "hashmode = %s", hash_mode);
1293 
1294  /* Issue the NTPL command */
1295  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1297  return false;
1298  }
1299 
1300  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1301  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1302  filter_id = ntpl_info.ntplId;
1303  SCLogConfig("Napatech hashmode: %s ID: %d", hash_mode, status);
1304  } else {
1305  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1306  status = 0;
1307  }
1308 
1309  return filter_id;
1310 }
1311 
1312 static uint32_t GetStreamNUMAs(uint32_t stream_id, int stream_numas[])
1313 {
1314  NtStatistics_t hStat; // Stat handle.
1315  NtStatStream_t hstat_stream;
1316  int status; // Status variable
1317 
1318  for (int i = 0; i < MAX_HOSTBUFFERS; ++i)
1319  stream_numas[i] = -1;
1320 
1321  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
1323  exit(EXIT_FAILURE);
1324  }
1325 
1326  char pktCntStr[4096];
1327  memset(pktCntStr, 0, sizeof (pktCntStr));
1328 
1329 
1330  /* Read usage data for the chosen stream ID */
1331  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
1332  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
1333 
1334  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
1336  exit(EXIT_FAILURE);
1337  }
1338 
1339  for (uint32_t hb_id = 0; hb_id < hStat.u.usageData_v0.data.numHostBufferUsed; ++hb_id) {
1340  stream_numas[hb_id] = hStat.u.usageData_v0.data.hb[hb_id].numaNode;
1341  }
1342 
1343  return hStat.u.usageData_v0.data.numHostBufferUsed;
1344 }
1345 
1346 static int NapatechSetFilter(NtConfigStream_t hconfig, char *ntpl_cmd)
1347 {
1348  int status = 0;
1349  int local_filter_id = 0;
1350 
1351  NtNtplInfo_t ntpl_info;
1352  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1353  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1354  SCLogConfig("NTPL filter assignment \"%s\" returned filter id %4d",
1355  ntpl_cmd, local_filter_id);
1356  } else {
1357  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1358  exit(EXIT_FAILURE);
1359  }
1360 
1361  return local_filter_id;
1362 }
1363 
1365 {
1366  uint32_t status = 0;
1367  static NtConfigStream_t hconfig;
1368  char ntpl_cmd[64];
1369  NtNtplInfo_t ntpl_info;
1370 
1371  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1373  exit(EXIT_FAILURE);
1374  }
1375 
1376  snprintf(ntpl_cmd, 64, "delete = all");
1377  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1378  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1379  status = ntpl_info.ntplId;
1380  } else {
1381  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1382  status = 0;
1383  }
1384 
1385  NT_ConfigClose(hconfig);
1386 
1387  return status;
1388 }
1389 
1390 
/**
 * \brief Programs the adapter's NTPL filters so that traffic from the
 *        configured ports is distributed to streams
 *        first_stream..last_stream, then activates those streams.
 *
 * Parses "napatech.ports" from the configuration (port segments when
 * hardware bypass is in use, "all"/ranges/individual ports otherwise),
 * issues the traffic-assignment NTPL commands, and — when bypass is
 * compiled in and supported — the key/flow-table commands used for
 * hardware flow bypass and inline forwarding.
 *
 * \param first_stream  first stream id to receive traffic.
 * \param last_stream   last stream id to receive traffic.
 * \return the status/id of the final NTPL command issued.
 *
 * NOTE(review): the extraction elided the opening lines of several
 * SCLogError(...) calls below; the orphan string-literal lines are the
 * surviving message arguments of those calls and are preserved as-is —
 * reconcile against upstream before compiling.
 */
uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
{
#define PORTS_SPEC_SIZE 64

    /* Accumulates the parsed port pairs and the port-spec string that is
     * spliced into the NTPL "assign" commands. */
    struct ports_spec_s {
        uint8_t first[MAX_PORTS];
        uint8_t second[MAX_PORTS];
        bool all;
        char str[PORTS_SPEC_SIZE];
    } ports_spec;

    ports_spec.all = false;

    ConfNode *ntports;
    int iteration = 0;
    int status = 0;
    NtConfigStream_t hconfig;
    char ntpl_cmd[512];
    int is_inline = 0;
    int is_span_port[MAX_PORTS] = { 0 };

    char span_ports[128];
    memset(span_ports, 0, sizeof(span_ports));

    if (ConfGetBool("napatech.inline", &is_inline) == 0) {
        is_inline = 0;
    }

    NapatechSetHashmode();

    if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was elided here in extraction. */
        exit(EXIT_FAILURE);
    }

    /* Deactivate the target streams while the filter set is rebuilt. */
    if (first_stream == last_stream) {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=inactive] = StreamId == %d",
                first_stream);
    } else {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=inactive] = StreamId == (%d..%d)",
                first_stream, last_stream);
    }
    NapatechSetFilter(hconfig, ntpl_cmd);

#ifdef NAPATECH_ENABLE_BYPASS
    if (NapatechUseHWBypass()) {
        SCLogInfo("Napatech Hardware Bypass enabled.");
    } else {
        SCLogInfo("Napatech Hardware Bypass available but disabled.");
    }
#else
    if (NapatechUseHWBypass()) {
        SCLogInfo("Napatech Hardware Bypass requested in conf but is not available.");
        exit(EXIT_FAILURE);
    } else {
        SCLogInfo("Napatech Hardware Bypass disabled.");
    }

    if (is_inline) {
        SCLogError(SC_ERR_RUNMODE, "Napatech inline mode not supported. (Only available when Hardware Bypass support is enabled.)");
        exit(EXIT_FAILURE);
    }
#endif

    if (is_inline) {
        SCLogInfo("Napatech configured for inline mode.");
    } else {

        SCLogInfo("Napatech configured for passive (non-inline) mode.");
    }

    /* When not using the default streams we need to parse
     * the array of streams from the conf
     */
    if ((ntports = ConfGetNode("napatech.ports")) == NULL) {
        SCLogError(SC_ERR_RUNMODE, "Failed retrieving napatech.ports from Conf");
        exit(EXIT_FAILURE);
    }

    /* Loop through all ports in the array */
    ConfNode *port;
    enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;

    if (NapatechUseHWBypass()) {
        SCLogInfo("Listening on the following Napatech ports:");
    }
    /* Build the NTPL command using values in the config file. */
    TAILQ_FOREACH(port, &ntports->head, next)
    {
        if (port == NULL) {
            /* NOTE(review): the SCLogError call opener was elided here in
             * extraction; only its message argument survives below. */
                    "Couldn't Parse Port Configuration");
            exit(EXIT_FAILURE);
        }

        if (NapatechUseHWBypass()) {
#ifdef NAPATECH_ENABLE_BYPASS
            if (strchr(port->val, '-')) {
                stream_spec = CONFIG_SPECIFIER_RANGE;

                char copystr[16];
                strlcpy(copystr, port->val, sizeof(copystr));
                ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, copystr);
                ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0, strchr(copystr, '-')+1);

                if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                    if (is_inline) {
                        /* NOTE(review): elided SCLogError opener. */
                                "Error with napatec.ports in conf file. When running in inline mode the two ports specifying a segment must be different.");
                        exit(EXIT_FAILURE);
                    } else {
                        /* SPAN port configuration */
                        is_span_port[ports_spec.first[iteration]] = 1;

                        if (strlen(span_ports) == 0) {
                            snprintf(span_ports, sizeof (span_ports), "%d", ports_spec.first[iteration]);
                        } else {
                            char temp[16];
                            snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
                            strlcat(span_ports, temp, sizeof(span_ports));
                        }

                    }
                }

                /* NOTE(review): this condition compares first[] with first[]
                 * — it can never be true. It almost certainly should compare
                 * NapatechGetAdapter(first[iteration]) with
                 * NapatechGetAdapter(second[iteration]); confirm upstream. */
                if (NapatechGetAdapter(ports_spec.first[iteration]) != NapatechGetAdapter(ports_spec.first[iteration])) {
                    /* NOTE(review): elided SCLogError opener(s). */
                            "Invalid napatech.ports specification in conf file.");
                            "Two ports on a segment must reside on the same adapter. port %d is on adapter %d, port %d is on adapter %d.",
                            ports_spec.first[iteration],
                            NapatechGetAdapter(ports_spec.first[iteration]),
                            ports_spec.second[iteration],
                            NapatechGetAdapter(ports_spec.second[iteration])
                            );
                    exit(EXIT_FAILURE);
                }

                NapatechSetPortmap(ports_spec.first[iteration], ports_spec.second[iteration]);
                if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                    SCLogInfo(" span_port: %d", ports_spec.first[iteration]);
                } else {
                    SCLogInfo(" %s: %d - %d", is_inline ? "inline_ports" : "tap_ports", ports_spec.first[iteration], ports_spec.second[iteration]);
                }

                /* Append this segment to the NTPL port-spec string. */
                if (iteration == 0) {
                    if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                        snprintf(ports_spec.str, sizeof (ports_spec.str), "%d", ports_spec.first[iteration]);
                    } else {
                        snprintf(ports_spec.str, sizeof (ports_spec.str), "%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
                    }
                } else {
                    char temp[16];
                    if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                        snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
                    } else {
                        snprintf(temp, sizeof(temp), ",%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
                    }
                    strlcat(ports_spec.str, temp, sizeof(ports_spec.str));
                }
            } else {
                /* NOTE(review): elided SCLogError opener. */
                        "When using hardware flow bypass ports must be specified as segments. E.g. ports: [0-1, 0-2]");
                exit(EXIT_FAILURE);
            }
#endif
        } else { // !NapatechUseHWBypass()
            if (strncmp(port->val, "all", 3) == 0) {
                /* check that the sting in the config file is correctly specified */
                if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
                    /* NOTE(review): elided SCLogError opener. */
                            "Only one Napatech port specifier type is allowed.");
                    exit(EXIT_FAILURE);
                }
                stream_spec = CONFIG_SPECIFIER_RANGE;

                ports_spec.all = true;
                snprintf(ports_spec.str, sizeof (ports_spec.str), "all");
            } else if (strchr(port->val, '-')) {
                /* check that the sting in the config file is correctly specified */
                if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
                    /* NOTE(review): elided SCLogError opener. */
                            "Only one Napatech port specifier is allowed when hardware bypass is disabled. (E.g. ports: [0-4], NOT ports: [0-1,2-3])");
                    exit(EXIT_FAILURE);
                }
                stream_spec = CONFIG_SPECIFIER_RANGE;

                char copystr[16];
                strlcpy(copystr, port->val, sizeof (copystr));
                ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, copystr);
                ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0, strchr(copystr, '-') + 1);
                snprintf(ports_spec.str, sizeof (ports_spec.str), "(%d..%d)", ports_spec.first[iteration], ports_spec.second[iteration]);
            } else {
                /* check that the sting in the config file is correctly specified */
                if (stream_spec == CONFIG_SPECIFIER_RANGE) {
                    /* NOTE(review): elided SCLogError opener. */
                            "Napatech port range specifiers cannot be combined with individual stream specifiers.");
                    exit(EXIT_FAILURE);
                }
                stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;

                ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val);

                /* Determine the ports to use on the NTPL assign statement*/
                if (iteration == 0) {
                    snprintf(ports_spec.str, sizeof (ports_spec.str), "%s", port->val);
                } else {
                    strlcat(ports_spec.str, ",", sizeof(ports_spec.str));
                    strlcat(ports_spec.str, port->val, sizeof(ports_spec.str));
                }
            }
        } // if !NapatechUseHWBypass()
        ++iteration;
    } /* TAILQ_FOREACH */

#ifdef NAPATECH_ENABLE_BYPASS
    if (bypass_supported) {
        if (is_inline) {
            char inline_setup_cmd[512];
            /* NOTE(review): sizeof (ntpl_cmd) is used to bound writes into
             * inline_setup_cmd; both buffers are 512 bytes so this is benign
             * today, but it is fragile — flagging for cleanup. */
            if (first_stream == last_stream) {
                snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
                        "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == %d",
                        ports_spec.str, first_stream);
            } else {
                snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
                        "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == (%d..%d)",
                        ports_spec.str, first_stream, last_stream);
            }
            NapatechSetFilter(hconfig, inline_setup_cmd);
        }
        /* Build the NTPL command */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=3;streamid=(%d..%d);colormask=0x10000000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);


        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x11000000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
                "]= %s%s and (Layer3Protocol==IPV4)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);


        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x14000000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer3Protocol==IPV6)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x10100000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer4Protocol==TCP)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x10200000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
                "]= %s%s and (Layer4Protocol==UDP)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);

        if (strlen(span_ports) > 0) {
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=2;streamid=(%d..%d);colormask=0x00001000;"
                    "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
                    "]= port==%s",
                    first_stream, last_stream, span_ports);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        /* IPv4 5-tuple key definitions used by the bypass flow tables. */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={sw_32_32,sw_16_16}",
                NAPATECH_KEYTYPE_IPV4);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[12]/32/32,Layer4Header[0]/16/16)",
                NAPATECH_KEYTYPE_IPV4, NAPATECH_KEYTYPE_IPV4);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={32,32,16,16}",
                NAPATECH_KEYTYPE_IPV4_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[12]/32,Layer3Header[16]/32,Layer4Header[0]/16,Layer4Header[2]/16)",
                NAPATECH_KEYTYPE_IPV4_SPAN, NAPATECH_KEYTYPE_IPV4_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);

        /* IPv6 5tuple for inline and tap ports */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={sw_128_128,sw_16_16}",
                NAPATECH_KEYTYPE_IPV6);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[8]/128/128,Layer4Header[0]/16/16)",
                NAPATECH_KEYTYPE_IPV6, NAPATECH_KEYTYPE_IPV6);
        NapatechSetFilter(hconfig, ntpl_cmd);

        /* IPv6 5tuple for SPAN Ports */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={128,128,16,16}",
                NAPATECH_KEYTYPE_IPV6_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[8]/128,Layer3Header[24]/128,Layer4Header[0]/16,Layer4Header[2]/16)",
                NAPATECH_KEYTYPE_IPV6_SPAN, NAPATECH_KEYTYPE_IPV6_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);


        /* Build comma-separated lists of the upstream (a) and downstream (b)
         * non-SPAN ports for the drop/forward assigns below. */
        int pair;
        char ports_ntpl_a[64];
        char ports_ntpl_b[64];
        memset(ports_ntpl_a, 0, sizeof(ports_ntpl_a));
        memset(ports_ntpl_b, 0, sizeof(ports_ntpl_b));

        for (pair = 0; pair < iteration; ++pair) {
            char port_str[8];

            if (!is_span_port[ports_spec.first[pair]]) {
                snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_a) == 0 ? "" : ",", ports_spec.first[pair]);
                strlcat(ports_ntpl_a, port_str, sizeof(ports_ntpl_a));

                snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_b) == 0 ? "" : ",", ports_spec.second[pair]);
                strlcat(ports_ntpl_b, port_str, sizeof(ports_ntpl_b));
            }
        }

        if (strlen(ports_ntpl_a) > 0) {
            /* This is the assign for dropping upstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    ports_ntpl_a,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(ports_ntpl_b) > 0) {
            /* This is the assign for dropping downstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                    ports_ntpl_b, //ports_spec.str,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(span_ports) > 0) {
            /* This is the assign for dropping SPAN Port traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    span_ports,
                    NAPATECH_KEYTYPE_IPV4_SPAN,
                    NAPATECH_KEYTYPE_IPV4_SPAN,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (is_inline) {
            for (pair = 0; pair < iteration; ++pair) {
                /* This is the assignment for forwarding traffic */
                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u)==%u)",
                        ports_spec.second[pair],
                        ports_spec.first[pair],
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);

                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                        ports_spec.first[pair],
                        ports_spec.second[pair],
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);
            }
        }

        if (strlen(ports_ntpl_a) > 0) {
            /* This is the assign for dropping upstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    ports_ntpl_a,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(ports_ntpl_b) > 0) {
            /* This is the assign for dropping downstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                    ports_ntpl_b, //ports_spec.str,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(span_ports) > 0) {
            /* This is the assign for dropping SPAN Port traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    span_ports,
                    NAPATECH_KEYTYPE_IPV6_SPAN,
                    NAPATECH_KEYTYPE_IPV6_SPAN,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (is_inline) {
            for (pair = 0; pair < iteration; ++pair) {
                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u)==%u)",
                        ports_spec.second[pair],
                        ports_spec.first[pair],
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);

                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                        ports_spec.first[pair],
                        ports_spec.second[pair],
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);
            }
        }
    } else {
        if (is_inline) {
            /* NOTE(review): elided SCLogError opener. */
                    "Napatech Inline operation not supported by this FPGA version.");
            exit(EXIT_FAILURE);
        }

        snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d);colormask=0x0] = %s%s",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);
    }
    }

#else /* NAPATECH_ENABLE_BYPASS */
    snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d)] = %s%s",
            first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
    NapatechSetFilter(hconfig, ntpl_cmd);

#endif /* !NAPATECH_ENABLE_BYPASS */

    SCLogConfig("Host-buffer NUMA assignments: ");
    int numa_nodes[MAX_HOSTBUFFERS];
    uint32_t stream_id;
    /* NOTE(review): this loop uses '< last_stream' and so never reports the
     * final stream's NUMA assignment — confirm whether that is intended. */
    for (stream_id = first_stream; stream_id < last_stream; ++stream_id) {
        char temp1[256];
        char temp2[256];

        uint32_t num_host_buffers = GetStreamNUMAs(stream_id, numa_nodes);

        snprintf(temp1, 256, " stream %d: ", stream_id);

        for (uint32_t hb_id = 0; hb_id < num_host_buffers; ++hb_id) {
            snprintf(temp2, 256, "%d ", numa_nodes[hb_id]);
            strlcat(temp1, temp2, sizeof(temp1));
        }

        SCLogConfig("%s", temp1);
    }

    /* Everything is programmed: activate the streams. */
    if (first_stream == last_stream) {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=active] = StreamId == %d",
                first_stream);
    } else {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=active] = StreamId == (%d..%d)",
                first_stream, last_stream);
    }
    NapatechSetFilter(hconfig, ntpl_cmd);

    NT_ConfigClose(hconfig);

    return status;
}
1893 
1894 #endif // HAVE_NAPATECH
util-byte.h
PORTS_SPEC_SIZE
#define PORTS_SPEC_SIZE
tm-threads.h
CONFIG_SPECIFIER_UNDEFINED
@ CONFIG_SPECIFIER_UNDEFINED
Definition: util-napatech.c:230
TmThreadSpawn
TmEcode TmThreadSpawn(ThreadVars *tv)
Spawns a thread associated with the ThreadVars instance tv.
Definition: tm-threads.c:1630
NapatechGetCurrentStats
NapatechCurrentStats NapatechGetCurrentStats(uint16_t id)
Definition: util-napatech.c:223
CONFIG_SPECIFIER_RANGE
@ CONFIG_SPECIFIER_RANGE
Definition: util-napatech.c:231
SC_ERR_NAPATECH_INIT_FAILED
@ SC_ERR_NAPATECH_INIT_FAILED
Definition: util-error.h:251
ConfNode_::val
char * val
Definition: conf.h:34
ConfGetBool
int ConfGetBool(const char *name, int *val)
Retrieve a configuration value as an boolen.
Definition: conf.c:517
NapatechSetPortmap
int NapatechSetPortmap(int port, int peer)
unlikely
#define unlikely(expr)
Definition: util-optimize.h:35
SCLogDebug
#define SCLogDebug(...)
Definition: util-debug.h:298
TmThreadsSetFlag
void TmThreadsSetFlag(ThreadVars *tv, uint32_t flag)
Set a thread flag.
Definition: tm-threads.c:97
TmThreadWaitForFlag
void TmThreadWaitForFlag(ThreadVars *tv, uint32_t flags)
Waits till the specified flag(s) is(are) set. We don't bother if the kill flag has been set or not on...
Definition: tm-threads.c:1730
next
struct HtpBodyChunk_ * next
Definition: app-layer-htp.h:0
THV_DEINIT
#define THV_DEINIT
Definition: threadvars.h:46
ConfGetNode
ConfNode * ConfGetNode(const char *name)
Get a ConfNode by name.
Definition: conf.c:176
MAX_ADAPTERS
#define MAX_ADAPTERS
Definition: util-napatech.h:59
UtilCpuGetNumProcessorsConfigured
uint16_t UtilCpuGetNumProcessorsConfigured(void)
Get the number of cpus configured in the system.
Definition: util-cpu.c:58
StatsSetUI64
void StatsSetUI64(ThreadVars *tv, uint16_t id, uint64_t x)
Sets a value of type double to the local counter.
Definition: counters.c:191
NapatechDeleteFilters
uint32_t NapatechDeleteFilters(void)
Definition: util-napatech.c:1364
TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)
Definition: queue.h:350
StatsSetupPrivate
int StatsSetupPrivate(ThreadVars *tv)
Definition: counters.c:1200
StatsSyncCountersIfSignalled
#define StatsSyncCountersIfSignalled(tv)
Definition: counters.h:137
SC_ERR_THREAD_CREATE
@ SC_ERR_THREAD_CREATE
Definition: util-error.h:78
NapatechIsAutoConfigEnabled
bool NapatechIsAutoConfigEnabled(void)
Definition: runmode-napatech.c:70
PacketCounters_::byte
uint16_t byte
Definition: util-napatech.c:215
SC_WARN_COMPATIBILITY
@ SC_WARN_COMPATIBILITY
Definition: util-error.h:193
SC_ERR_SHUTDOWN
@ SC_ERR_SHUTDOWN
Definition: util-error.h:220
SC_ERR_RUNMODE
@ SC_ERR_RUNMODE
Definition: util-error.h:219
current_stats
NapatechCurrentStats current_stats[MAX_STREAMS]
Definition: util-napatech.c:221
NapatechStreamConfig_
Definition: util-napatech.h:42
SC_ERR_NAPATECH_OPEN_FAILED
@ SC_ERR_NAPATECH_OPEN_FAILED
Definition: util-error.h:246
strlcpy
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition: util-strlcpyu.c:43
THV_RUNNING_DONE
#define THV_RUNNING_DONE
Definition: threadvars.h:47
NapatechGetStreamConfig
int NapatechGetStreamConfig(NapatechStreamConfig stream_config[])
Reads and parses the stream configuration defined in the config file.
Definition: util-napatech.c:835
total_stats
NapatechCurrentStats total_stats
Definition: util-napatech.c:220
util-device.h
NapatechCurrentStats_::current_packets
uint64_t current_packets
Definition: util-napatech.h:50
strlcat
size_t strlcat(char *, const char *src, size_t siz)
Definition: util-strlcatu.c:45
util-cpu.h
SC_ERR_NAPATECH_CONFIG_STREAM
@ SC_ERR_NAPATECH_CONFIG_STREAM
Definition: util-error.h:252
source-napatech.h
ThreadVars_
Per thread variable structure.
Definition: threadvars.h:58
PacketCounters
struct PacketCounters_ PacketCounters
THV_KILL
#define THV_KILL
Definition: threadvars.h:41
CONFIG_SPECIFIER_INDIVIDUAL
@ CONFIG_SPECIFIER_INDIVIDUAL
Definition: util-napatech.c:232
TmThreadCreate
ThreadVars * TmThreadCreate(const char *name, const char *inq_name, const char *inqh_name, const char *outq_name, const char *outqh_name, const char *slots, void *(*fn_p)(void *), int mucond)
Creates and returns the TV instance for a new thread.
Definition: tm-threads.c:911
NAPATECH_NTPL_ERROR
#define NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status)
Definition: util-napatech.h:70
stream_config
TcpStreamCnf stream_config
Definition: stream-tcp.c:119
NapatechGetAdapter
int NapatechGetAdapter(uint8_t port)
NAPATECH_ERROR
#define NAPATECH_ERROR(err_type, status)
Definition: util-napatech.h:64
util-napatech.h
PacketCounters_::pkts
uint16_t pkts
Definition: util-napatech.c:214
PacketCounters_::drop_pkts
uint16_t drop_pkts
Definition: util-napatech.c:216
SC_ERR_NAPATECH_PARSE_CONFIG
@ SC_ERR_NAPATECH_PARSE_CONFIG
Definition: util-error.h:255
NapatechCurrentStats_::current_bytes
uint64_t current_bytes
Definition: util-napatech.h:51
HB_HIGHWATER
#define HB_HIGHWATER
Definition: util-napatech.c:728
PacketCounters_::drop_byte
uint16_t drop_byte
Definition: util-napatech.c:217
SCLogInfo
#define SCLogInfo(...)
Macro used to log INFORMATIONAL messages.
Definition: util-debug.h:217
ConfNodeLookupChild
ConfNode * ConfNodeLookupChild(const ConfNode *node, const char *name)
Lookup a child configuration node by name.
Definition: conf.c:815
THV_INIT_DONE
#define THV_INIT_DONE
Definition: threadvars.h:38
CONFIG_SPECIFIER
CONFIG_SPECIFIER
Definition: util-napatech.c:229
suricata-common.h
SCLogPerf
#define SCLogPerf(...)
Definition: util-debug.h:224
SCLogError
#define SCLogError(err_code,...)
Macro used to log ERROR messages.
Definition: util-debug.h:257
NapatechStartStats
void NapatechStartStats(void)
Definition: util-napatech.c:1206
MAX_HOSTBUFFERS
#define MAX_HOSTBUFFERS
Definition: util-napatech.c:235
tv
ThreadVars * tv
Definition: fuzz_decodepcapfile.c:29
threadvars.h
NapatechUseHWBypass
bool NapatechUseHWBypass(void)
Definition: runmode-napatech.c:75
NapatechCurrentStats_::current_drop_bytes
uint64_t current_drop_bytes
Definition: util-napatech.h:53
SCLogConfig
struct SCLogConfig_ SCLogConfig
Holds the config state used by the logging api.
SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED
@ SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED
Definition: util-error.h:253
ByteExtractStringUint8
int ByteExtractStringUint8(uint8_t *res, int base, uint16_t len, const char *str)
Definition: util-byte.c:279
str
#define str(s)
Definition: suricata-common.h:273
SCLogWarning
#define SCLogWarning(err_code,...)
Macro used to log WARNING messages.
Definition: util-debug.h:244
MAX_PORTS
#define MAX_PORTS
Definition: util-napatech.h:58
ConfNode_
Definition: conf.h:32
SC_ERR_THREAD_SPAWN
@ SC_ERR_THREAD_SPAWN
Definition: util-error.h:76
NapatechCurrentStats_::current_drop_packets
uint64_t current_drop_packets
Definition: util-napatech.h:52
SC_ERR_MEM_ALLOC
@ SC_ERR_MEM_ALLOC
Definition: util-error.h:31
suricata.h
ByteExtractStringUint16
int ByteExtractStringUint16(uint16_t *res, int base, uint16_t len, const char *str)
Definition: util-byte.c:259
ConfGetValue
int ConfGetValue(const char *name, const char **vptr)
Retrieve the value of a configuration node.
Definition: conf.c:360
NapatechSetupTraffic
uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
Definition: util-napatech.c:1391
MAX_STREAMS
#define MAX_STREAMS
Definition: util-napatech.c:727
NapatechCurrentStats_
Definition: util-napatech.h:49
PacketCounters_
Definition: util-napatech.c:213
TmThreadsCheckFlag
int TmThreadsCheckFlag(ThreadVars *tv, uint32_t flag)
Check if a thread flag is set.
Definition: tm-threads.c:89
StatsRegisterCounter
uint16_t StatsRegisterCounter(const char *name, struct ThreadVars_ *tv)
Registers a normal, unqualified counter.
Definition: counters.c:945
THV_CLOSED
#define THV_CLOSED
Definition: threadvars.h:43
SCCalloc
#define SCCalloc(nm, sz)
Definition: util-mem.h:53
NapatechSetupNuma
bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
Definition: util-napatech.c:1253