suricata
util-napatech.c
Go to the documentation of this file.
1 /* Copyright (C) 2017 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 /**
18  * \file
19  *
20  * \author Napatech Inc.
21  * \author Phil Young <py@napatech.com>
22  *
23  *
24  */
25 #include "suricata-common.h"
26 
27 #ifdef HAVE_NAPATECH
28 #include "suricata.h"
29 #include "util-device.h"
30 #include "util-cpu.h"
31 #include "util-byte.h"
32 #include "threadvars.h"
33 #include "tm-threads.h"
34 #include "util-napatech.h"
35 #include "source-napatech.h"
36 
37 #ifdef NAPATECH_ENABLE_BYPASS
38 
39 /*
40  * Suricata stats-counter IDs (as returned by StatsRegisterCounter) used
41  * to report the number of flows programmed on the adapter for bypass.
42  */
43 typedef struct FlowStatsCounters_
44 {
45  uint16_t active_bypass_flows; /* flows currently on the adapter (learned - unlearned) */
46  uint16_t total_bypass_flows;  /* cumulative count of flows programmed */
47 } FlowStatsCounters;
48 
49 
/* Set to 1 by NapatechVerifyBypassSupport() once every adapter has
 * accepted a FlowStream open; 0 (default) otherwise. */
50 static int bypass_supported;
/** \brief Query whether hardware bypass was verified as available.
 *  \return nonzero if bypass is supported, zero otherwise. */
51 int NapatechIsBypassSupported(void)
52 {
53  return bypass_supported;
54 }
55 
56 /**
57  * \brief Returns the number of Napatech Adapters in the system.
58  *
59  * The count is cached in a function-local static so the NTAPI info
60  * stream is only queried on the first call.  Any NTAPI failure is fatal
61  * (the process exits).
62  *
63  * \return count of the Napatech adapters present in the system.
64  */
61 int NapatechGetNumAdapters(void)
62 {
63  NtInfoStream_t hInfo;
64  NtInfo_t hInfoSys;
65  int status;
66  static int num_adapters = -1; /* -1 == not yet queried */
67 
68  if (num_adapters == -1) {
69  if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
 /* NOTE(review): this listing appears to be missing the error-report
  * statement that precedes exit() here — verify against the repo. */
71  exit(EXIT_FAILURE);
72  }
73 
74  hInfoSys.cmd = NT_INFO_CMD_READ_SYSTEM;
75  if ((status = NT_InfoRead(hInfo, &hInfoSys)) != NT_SUCCESS) {
 /* NOTE(review): error-report statement also missing from this listing. */
77  exit(EXIT_FAILURE);
78  }
79 
80  num_adapters = hInfoSys.u.system.data.numAdapters;
81 
82  NT_InfoClose(hInfo);
83  }
84 
85  return num_adapters;
86 }
87 
88 /**
89  * \brief Verifies that the Napatech adapters support bypass.
90  *
91  * Attempts to opens a FlowStream on each adapter present in the system.
92  * If successful then bypass is supported
93  *
94  * \return 1 if Bypass functionality is supported; zero otherwise.
95  */
96 int NapatechVerifyBypassSupport(void)
97 {
98  int status;
99  int adapter = 0;
100  int num_adapters = NapatechGetNumAdapters();
101  SCLogInfo("Found %d Napatech adapters.", num_adapters);
102  NtFlowStream_t hFlowStream;
103 
104  if (!NapatechUseHWBypass()) {
105  /* HW Bypass is disabled in the conf file */
106  return 0;
107  }
108 
109  for (adapter = 0; adapter < num_adapters; ++adapter) {
110  NtFlowAttr_t attr;
111  char flow_name[80];
112 
113  NT_FlowOpenAttrInit(&attr);
114  NT_FlowOpenAttrSetAdapterNo(&attr, adapter);
115 
116  snprintf(flow_name, sizeof(flow_name), "Flow stream %d", adapter );
117  SCLogInfo("Opening flow programming stream: %s\n", flow_name);
118  if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) {
119  SCLogWarning(SC_WARN_COMPATIBILITY, "Napatech bypass functionality not supported by the FPGA version on adapter %d - disabling support.", adapter);
120  bypass_supported = 0;
121  return 0;
122  }
123  NT_FlowClose(hFlowStream);
124  }
125 
126  bypass_supported = 1;
127  return bypass_supported;
128 }
129 
130 
131 /**
132  * \brief Updates statistic counters for Napatech FlowStats
133  *
134  * \param tv Thread variable to ThreadVars
135  * \param hInfo Handle to the Napatech InfoStream.
136  * \param hstat_stream Handle to the Napatech Statistics Stream.
137  * \param flow_counters The flow counters statistics to update.
138  * \param clear_stats Indicates if statistics on the card should be reset to zero.
139  *
140  */
141 static void UpdateFlowStats(
142  ThreadVars *tv,
143  NtInfoStream_t hInfo,
144  NtStatStream_t hstat_stream,
145  FlowStatsCounters flow_counters,
146  int clear_stats
147  )
148 {
149  NtStatistics_t hStat;
150  int status;
151 
152  uint64_t programed = 0;
153  uint64_t removed = 0;
154  int adapter = 0;
155 
156  for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
157  hStat.cmd = NT_STATISTICS_READ_CMD_FLOW_V0;
158  hStat.u.flowData_v0.clear = clear_stats;
159  hStat.u.flowData_v0.adapterNo = adapter;
160  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
 /* NOTE(review): this listing is missing the error-report statement
  * that precedes exit() here — verify against the repo. */
162  exit(1);
163  }
 /* NOTE(review): plain assignment (not +=) means only the LAST
  * adapter's learn/unlearn counts survive the loop; on multi-adapter
  * systems the reported totals would be wrong.  Confirm intent. */
164  programed = hStat.u.flowData_v0.learnDone;
165  removed = hStat.u.flowData_v0.unlearnDone
166  + hStat.u.flowData_v0.automaticUnlearnDone
167  + hStat.u.flowData_v0.timeoutUnlearnDone;
168  }
169 
170  StatsSetUI64(tv, flow_counters.active_bypass_flows, programed - removed);
171  StatsSetUI64(tv, flow_counters.total_bypass_flows, programed);
172 }
173 
174 #endif /* NAPATECH_ENABLE_BYPASS */
175 
176 
177 /*-----------------------------------------------------------------------------
178  *-----------------------------------------------------------------------------
179  * Statistics code
180  *-----------------------------------------------------------------------------
181  */
182 typedef struct PacketCounters_
183 {
184  uint16_t pkts;
185  uint16_t byte;
186  uint16_t drop_pkts;
187  uint16_t drop_byte;
189 
192 
194 {
195 
196  return current_stats[id];
197 }
198 
203 };
204 
205 #define MAX_HOSTBUFFERS 8
206 
207 /**
208  * \brief Test to see if any of the configured streams are active
209  *
210  * Reads per-stream host-buffer usage and marks each entry's is_active
211  * flag accordingly, counting the streams found active.
212  *
213  * \param hInfo Handle to Napatech Info Stream.
214  * \param hstat_stream Handle to Napatech Statistics stream
215  * \param stream_config array of stream configuration structures
216  * \param num_inst number of entries in stream_config
217  *
218  * \return number of active streams found (0 on NT_StatRead failure)
219  */
 /* NOTE(review): this listing is missing the stream_config[] parameter
  * line of the signature (content line 219) — verify against the repo. */
216 static uint16_t TestStreamConfig(
217  NtInfoStream_t hInfo,
218  NtStatStream_t hstat_stream,
220  uint16_t num_inst)
221 {
222  uint16_t num_active = 0;
223 
224  for (uint16_t inst = 0; inst < num_inst; ++inst) {
225  int status;
226  NtStatistics_t stat; // Stat handle.
227 
228  /* Check to see if it is an active stream */
229  memset(&stat, 0, sizeof (NtStatistics_t));
230 
231  /* Read usage data for the chosen stream ID */
232  stat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
233  stat.u.usageData_v0.streamid = (uint8_t) stream_config[inst].stream_id;
234 
235  if ((status = NT_StatRead(hstat_stream, &stat)) != NT_SUCCESS) {
 /* NOTE(review): error-report statement missing from this listing. */
237  return 0;
238  }
239 
 /* A stream with at least one host buffer in use is active. */
240  if (stat.u.usageData_v0.data.numHostBufferUsed > 0) {
241  stream_config[inst].is_active = true;
242  num_active++;
243  } else {
244  stream_config[inst].is_active = false;
245  }
246  }
247 
248  return num_active;
249 }
250 
251 /**
252  * \brief Updates Napatech packet counters
253  *
254  * \param tv Pointer to ThreadVars structure
255  * \param hInfo Handle to Napatech Info Stream.
256  * \param hstat_stream Handle to Napatech Statistics stream
257  * \param num_streams the number of streams that are currently active
258  * \param stream_config array of stream configuration structures
259  * \param total_counters - cumulative count of all packets received.
260  * \param dispatch_host - Count of packets that were delivered to the host buffer
261  * \param dispatch_drop - count of packets that were dropped as a result of a rule
262  * \param dispatch_fwd - count of packets forwarded out the egress port as the result of a rule
263  * \param is_inline - are we running in inline mode?
264  * \param enable_stream_stats - are per thread/stream statistics enabled.
265  * \param stream_counters - counters for each thread/stream configured.
266  *
267  * \return The number of active streams that were updated.
268  *
269  */
 /* NOTE(review): this Doxygen-derived listing has gaps in this function
  * (missing signature line 274 for stream_config[], several error-report
  * statements, and parts of the per-stream drop accounting around lines
  * 369-391).  Verify the complete body against the repository source.
  * It also reads file-scope state (stream_config, current_stats,
  * total_stats) whose declarations are garbled in this listing. */
270 static uint32_t UpdateStreamStats(ThreadVars *tv,
271  NtInfoStream_t hInfo,
272  NtStatStream_t hstat_stream,
273  uint16_t num_streams,
275  PacketCounters total_counters,
276  PacketCounters dispatch_host,
277  PacketCounters dispatch_drop,
278  PacketCounters dispatch_fwd,
279  int is_inline,
280  int enable_stream_stats,
281  PacketCounters stream_counters[]
282  ) {
 /* Baselines captured on first sight of each stream so that reported
  * values start from zero rather than the card's historical totals. */
283  static uint64_t rxPktsStart[MAX_STREAMS] = {0};
284  static uint64_t rxByteStart[MAX_STREAMS] = {0};
285  static uint64_t dropPktStart[MAX_STREAMS] = {0};
286  static uint64_t dropByteStart[MAX_STREAMS] = {0};
287 
288  int status;
289  NtInfo_t hStreamInfo;
290  NtStatistics_t hStat; // Stat handle.
291 
292  /* Query the system to get the number of streams currently instantiated */
293  hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
294  if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
 /* NOTE(review): error-report statement missing from this listing. */
296  exit(EXIT_FAILURE);
297  }
298 
299  uint16_t num_active;
300  if ((num_active = TestStreamConfig(hInfo, hstat_stream, stream_config, num_streams)) == 0) {
301  /* None of the configured streams are active */
302  return 0;
303  }
304 
305  /* At least one stream is active so proceed with the stats. */
306  uint16_t inst_id = 0;
307  uint32_t stream_cnt = 0;
308  for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {
 /* Advance inst_id to the next active stream entry. */
309  while (inst_id < num_streams) {
310  if (stream_config[inst_id].is_active) {
311  break;
312  } else {
313  ++inst_id;
314  }
315  }
316  if (inst_id == num_streams)
317  break;
318 
319  /* Read usage data for the chosen stream ID */
320  memset(&hStat, 0, sizeof (NtStatistics_t));
321  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
322  hStat.u.usageData_v0.streamid = (uint8_t) stream_config[inst_id].stream_id;
323 
324  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
 /* NOTE(review): error-report statement missing from this listing. */
326  return 0;
327  }
328 
329  uint16_t stream_id = stream_config[inst_id].stream_id;
330  if (stream_config[inst_id].is_active) {
331  uint64_t rx_pkts_total = 0;
332  uint64_t rx_byte_total = 0;
333  uint64_t drop_pkts_total = 0;
334  uint64_t drop_byte_total = 0;
335 
 /* Sum over all host buffers serving this stream.  On the very
  * first read the values are recorded as the baseline instead. */
336  for (uint32_t hbCount = 0; hbCount < hStat.u.usageData_v0.data.numHostBufferUsed; hbCount++) {
337  if (unlikely(stream_config[inst_id].initialized == false)) {
338  rxPktsStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
339  rxByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
340  dropPktStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
341  dropByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
342  stream_config[inst_id].initialized = true;
343  } else {
344  rx_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
345  rx_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
346  drop_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
347  drop_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
348  }
349  }
350 
351  current_stats[stream_id].current_packets = rx_pkts_total - rxPktsStart[stream_id];
352  current_stats[stream_id].current_bytes = rx_byte_total - rxByteStart[stream_id];
353  current_stats[stream_id].current_drop_packets = drop_pkts_total - dropPktStart[stream_id];
354  current_stats[stream_id].current_drop_bytes = drop_byte_total - dropByteStart[stream_id];
355  }
356 
357  if (enable_stream_stats) {
358  StatsSetUI64(tv, stream_counters[inst_id].pkts, current_stats[stream_id].current_packets);
359  StatsSetUI64(tv, stream_counters[inst_id].byte, current_stats[stream_id].current_bytes);
360  StatsSetUI64(tv, stream_counters[inst_id].drop_pkts, current_stats[stream_id].current_drop_packets);
361  StatsSetUI64(tv, stream_counters[inst_id].drop_byte, current_stats[stream_id].current_drop_bytes);
362  }
363 
364  ++inst_id;
365  }
366 
 /* NOTE(review): the body of this aggregation loop is missing from the
  * listing (content lines 369-375). */
367  uint32_t stream_id;
368  for (stream_id = 0; stream_id < num_streams; ++stream_id) {
369 
370 #ifndef NAPATECH_ENABLE_BYPASS
373 #endif /* NAPATECH_ENABLE_BYPASS */
376  }
377 
378 
379 #ifndef NAPATECH_ENABLE_BYPASS
380  StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
381  StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);
382 #endif /* NAPATECH_ENABLE_BYPASS */
383 
386 
391 
392  /* Read usage data for the chosen stream ID */
393  memset(&hStat, 0, sizeof (NtStatistics_t));
394 
395 #ifdef NAPATECH_ENABLE_BYPASS
396  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
397  hStat.u.query_v3.clear = 0;
398 #else /* NAPATECH_ENABLE_BYPASS */
399  /* Older versions of the API have a different structure. */
400  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
401  hStat.u.query_v2.clear = 0;
402 #endif /* !NAPATECH_ENABLE_BYPASS */
403 
404  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
405  if (status == NT_STATUS_TIMEOUT) {
406  SCLogInfo("Statistics timed out - will retry next time.");
407  return 0;
408  } else {
 /* NOTE(review): error-report statement missing from this listing. */
410  return 0;
411  }
412  }
413 
414 #ifdef NAPATECH_ENABLE_BYPASS
415 
 /* Color-based accounting: color 0 = delivered to host, colors 1/3 =
  * dropped by rule, colors 2/4 = forwarded out the egress port. */
416  int adapter = 0;
417  uint64_t total_dispatch_host_pkts = 0;
418  uint64_t total_dispatch_host_byte = 0;
419  uint64_t total_dispatch_drop_pkts = 0;
420  uint64_t total_dispatch_drop_byte = 0;
421  uint64_t total_dispatch_fwd_pkts = 0;
422  uint64_t total_dispatch_fwd_byte = 0;
423 
424  for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
425  total_dispatch_host_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts;
426  total_dispatch_host_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets;
427 
428  total_dispatch_drop_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
429  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;
430  total_dispatch_drop_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
431  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].octets;
432 
433  total_dispatch_fwd_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
434  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].pkts;
435  total_dispatch_fwd_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets
436  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].octets;
437 
438  total_stats.current_packets += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts
439  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
440  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
441  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;
442 
 /* NOTE(review): current_bytes uses plain '=' (overwritten per adapter,
  * unlike current_packets' '+=') and sums only colors 0-2 while packets
  * sum colors 0-3 — looks inconsistent; verify against the repo. */
443  total_stats.current_bytes = hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets
444  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
445  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets;
446  }
447 
448  StatsSetUI64(tv, dispatch_host.pkts, total_dispatch_host_pkts);
449  StatsSetUI64(tv, dispatch_host.byte, total_dispatch_host_byte);
450 
451  StatsSetUI64(tv, dispatch_drop.pkts, total_dispatch_drop_pkts);
452  StatsSetUI64(tv, dispatch_drop.byte, total_dispatch_drop_byte);
453 
454  if (is_inline) {
455  StatsSetUI64(tv, dispatch_fwd.pkts, total_dispatch_fwd_pkts);
456  StatsSetUI64(tv, dispatch_fwd.byte, total_dispatch_fwd_byte);
457  }
458 
459  StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
460  StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);
461 
462 #endif /* NAPATECH_ENABLE_BYPASS */
463 
464  return num_active;
465 }
466 
467 /**
468  * \brief Statistics processing loop
469  *
470  * Instantiated on the stats thread. Periodically retrieves
471  * statistics from the Napatech card and updates the packet counters
472  *
473  * \param arg Pointer that is cast into a ThreadVars structure
474  */
 /* NOTE(review): this Doxygen-derived listing has gaps in this function
  * (missing declaration of stream_config at content line 495, several
  * error-report statements, and the THV_KILL check condition at content
  * line 649).  Verify the complete body against the repository source. */
475 static void *NapatechStatsLoop(void *arg)
476 {
477  ThreadVars *tv = (ThreadVars *) arg;
478 
479  int status;
480  NtInfoStream_t hInfo;
481  NtStatStream_t hstat_stream;
482  int is_inline = 0;
483  int enable_stream_stats = 0;
484  PacketCounters stream_counters[MAX_STREAMS];
485 
486  if (ConfGetBool("napatech.inline", &is_inline) == 0) {
487  is_inline = 0;
488  }
489 
490  if (ConfGetBool("napatech.enable-stream-stats", &enable_stream_stats) == 0) {
491  /* default is "no" */
492  enable_stream_stats = 0;
493  }
494 
496  uint16_t stream_cnt = NapatechGetStreamConfig(stream_config);
497 
498  /* Open the info and Statistics */
499  if ((status = NT_InfoOpen(&hInfo, "StatsLoopInfoStream")) != NT_SUCCESS) {
501  return NULL;
502  }
503 
504  if ((status = NT_StatOpen(&hstat_stream, "StatsLoopStatsStream")) != NT_SUCCESS) {
506  return NULL;
507  }
508 
 /* Initial read with clear=1 resets the card's counters so the loop
  * starts counting from zero. */
509  NtStatistics_t hStat;
510  memset(&hStat, 0, sizeof (NtStatistics_t));
511 
512 #ifdef NAPATECH_ENABLE_BYPASS
513  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
514  hStat.u.query_v3.clear = 1;
515 #else /* NAPATECH_ENABLE_BYPASS */
516  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
517  hStat.u.query_v2.clear = 1;
518 #endif /* !NAPATECH_ENABLE_BYPASS */
519 
520  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
 /* NOTE(review): 'return 0' in a void* function returns NULL; the
  * error-report statement is missing from this listing. */
522  return 0;
523  }
524 
525  PacketCounters total_counters;
526  memset(&total_counters, 0, sizeof(total_counters));
527 
528  PacketCounters dispatch_host;
529  memset(&dispatch_host, 0, sizeof(dispatch_host));
530 
531  PacketCounters dispatch_drop;
532  memset(&dispatch_drop, 0, sizeof(dispatch_drop));
533 
534  PacketCounters dispatch_fwd;
535  memset(&dispatch_fwd, 0, sizeof(dispatch_fwd));
536 
 /* Register the global counters; forward counters only matter inline. */
537  total_counters.pkts = StatsRegisterCounter("napa_total.pkts", tv);
538  dispatch_host.pkts = StatsRegisterCounter("napa_dispatch_host.pkts", tv);
539  dispatch_drop.pkts = StatsRegisterCounter("napa_dispatch_drop.pkts", tv);
540  if (is_inline) {
541  dispatch_fwd.pkts = StatsRegisterCounter("napa_dispatch_fwd.pkts", tv);
542  }
543 
544  total_counters.byte = StatsRegisterCounter("napa_total.byte", tv);
545  dispatch_host.byte = StatsRegisterCounter("napa_dispatch_host.byte", tv);
546  dispatch_drop.byte = StatsRegisterCounter("napa_dispatch_drop.byte", tv);
547  if (is_inline) {
548  dispatch_fwd.byte = StatsRegisterCounter("napa_dispatch_fwd.byte", tv);
549  }
550 
551  total_counters.drop_pkts = StatsRegisterCounter("napa_total.overflow_drop_pkts", tv);
552  total_counters.drop_byte = StatsRegisterCounter("napa_total.overflow_drop_byte", tv);
553 
 /* Optionally register one counter set per configured stream.  The
  * 32-byte name buffers are intentionally never freed: counter names
  * must stay valid for the life of the stats module. */
554  if (enable_stream_stats) {
555  for (int i = 0; i < stream_cnt; ++i) {
556  char *pkts_buf = SCCalloc(1, 32);
557  if (unlikely(pkts_buf == NULL)) {
 /* NOTE(review): the FatalError call line is missing from this
  * listing (here and for the three allocations below). */
559  "Failed to allocate memory for NAPATECH stream counter.");
560  }
561 
562  snprintf(pkts_buf, 32, "napa%d.pkts", stream_config[i].stream_id);
563  stream_counters[i].pkts = StatsRegisterCounter(pkts_buf, tv);
564 
565  char *byte_buf = SCCalloc(1, 32);
566  if (unlikely(byte_buf == NULL)) {
568  "Failed to allocate memory for NAPATECH stream counter.");
569  }
570  snprintf(byte_buf, 32, "napa%d.bytes", stream_config[i].stream_id);
571  stream_counters[i].byte = StatsRegisterCounter(byte_buf, tv);
572 
573  char *drop_pkts_buf = SCCalloc(1, 32);
574  if (unlikely(drop_pkts_buf == NULL)) {
576  "Failed to allocate memory for NAPATECH stream counter.");
577  }
578  snprintf(drop_pkts_buf, 32, "napa%d.drop_pkts", stream_config[i].stream_id);
579  stream_counters[i].drop_pkts = StatsRegisterCounter(drop_pkts_buf, tv);
580 
581  char *drop_byte_buf = SCCalloc(1, 32);
582  if (unlikely(drop_byte_buf == NULL)) {
584  "Failed to allocate memory for NAPATECH stream counter.");
585  }
586  snprintf(drop_byte_buf, 32, "napa%d.drop_byte", stream_config[i].stream_id);
587  stream_counters[i].drop_byte = StatsRegisterCounter(drop_byte_buf, tv);
588  }
589  }
590 
591 #ifdef NAPATECH_ENABLE_BYPASS
592  FlowStatsCounters flow_counters;
593  if (bypass_supported) {
594  flow_counters.active_bypass_flows = StatsRegisterCounter("napa_bypass.active_flows", tv);
595  flow_counters.total_bypass_flows = StatsRegisterCounter("napa_bypass.total_flows", tv);
596  }
597 #endif /* NAPATECH_ENABLE_BYPASS */
598 
600 
 /* Zero all counters before entering the polling loop. */
601  StatsSetUI64(tv, total_counters.pkts, 0);
602  StatsSetUI64(tv, total_counters.byte, 0);
603  StatsSetUI64(tv, total_counters.drop_pkts, 0);
604  StatsSetUI64(tv, total_counters.drop_byte, 0);
605 
606 #ifdef NAPATECH_ENABLE_BYPASS
607  if (bypass_supported) {
608  StatsSetUI64(tv, dispatch_host.pkts, 0);
609  StatsSetUI64(tv, dispatch_drop.pkts, 0);
610 
611  if (is_inline) {
612  StatsSetUI64(tv, dispatch_fwd.pkts, 0);
613  }
614 
615  StatsSetUI64(tv, dispatch_host.byte, 0);
616  StatsSetUI64(tv, dispatch_drop.byte, 0);
617  if (is_inline) {
618  StatsSetUI64(tv, dispatch_fwd.byte, 0);
619  }
620 
621  if (enable_stream_stats) {
622  for (int i = 0; i < stream_cnt; ++i) {
623  StatsSetUI64(tv, stream_counters[i].pkts, 0);
624  StatsSetUI64(tv, stream_counters[i].byte, 0);
625  StatsSetUI64(tv, stream_counters[i].drop_pkts, 0);
626  StatsSetUI64(tv, stream_counters[i].drop_byte, 0);
627  }
628  }
629 
630  StatsSetUI64(tv, flow_counters.active_bypass_flows, 0);
631  StatsSetUI64(tv, flow_counters.total_bypass_flows, 0);
 /* clear_stats=1 resets the adapter's flow counters as well. */
632  UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 1);
633  }
634 #endif /* NAPATECH_ENABLE_BYPASS */
635 
636  uint32_t num_active = UpdateStreamStats(tv, hInfo, hstat_stream,
637  stream_cnt, stream_config, total_counters,
638  dispatch_host, dispatch_drop, dispatch_fwd,
639  is_inline, enable_stream_stats, stream_counters);
640 
641  if (!NapatechIsAutoConfigEnabled() && (num_active < stream_cnt)) {
642  SCLogInfo("num_active: %d, stream_cnt: %d", num_active, stream_cnt);
 /* NOTE(review): the warning-call line is missing from this listing. */
644  "Some or all of the configured streams are not created. Proceeding with active streams.");
645  }
646 
 /* Poll once per second until the thread is told to exit. */
648  while (1) {
 /* NOTE(review): the TmThreadsCheckFlag(tv, THV_KILL) condition line
  * is missing from this listing. */
650  SCLogDebug("NapatechStatsLoop THV_KILL detected");
651  break;
652  }
653 
654  UpdateStreamStats(tv, hInfo, hstat_stream,
655  stream_cnt, stream_config, total_counters,
656  dispatch_host, dispatch_drop, dispatch_fwd,
657  is_inline, enable_stream_stats,
658  stream_counters);
659 
660 #ifdef NAPATECH_ENABLE_BYPASS
661  if (bypass_supported) {
662  UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 0);
663  }
664 #endif /* NAPATECH_ENABLE_BYPASS */
665 
667  usleep(1000000);
668  }
669 
670  /* CLEAN UP NT Resources and Close the info stream */
671  if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
673  return NULL;
674  }
675 
676  /* Close the statistics stream */
677  if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
679  return NULL;
680  }
681 
682  SCLogDebug("Exiting NapatechStatsLoop");
686 
687  return NULL;
688 }
689 
/* NOTE(review): MAX_STREAMS is referenced above its definition in this
 * listing; in the repository these constants likely live in a header. */
690 #define MAX_HOSTBUFFER 4
691 #define MAX_STREAMS 256 /* sizes the per-stream counter/config arrays */
692 #define HB_HIGHWATER 2048 //1982
693 
694 /**
695  * \brief Tests whether a particular stream_id is actively registered
696  *
697  * \param stream_id - ID of the stream to look up
698  * \param num_registered - The total number of registered streams
699  * \param registered_streams - An array containing actively registered streams.
700  *
701  * \return Bool indicating is the specified stream is registered.
702  *
703  */
704 static bool RegisteredStream(uint16_t stream_id, uint16_t num_registered,
705  NapatechStreamConfig registered_streams[])
706 {
707  for (uint16_t reg_id = 0; reg_id < num_registered; ++reg_id) {
708  if (stream_id == registered_streams[reg_id].stream_id) {
709  return true;
710  }
711  }
712  return false;
713 }
714 
715 /**
716  * \brief Count the number of worker threads defined in the conf file.
717  *
718  * Inspects threading.cpu-affinity / worker-cpu-set / cpu and supports
719  * "all", a "start-end" range, or individual cpu entries.
720  *
721  * \return - The number of worker threads defined by the configuration
722  */
 /* NOTE(review): this listing is missing the declaration of cpu_spec
  * (content line 742) and several error-report call lines — verify
  * against the repository source. */
720 static uint32_t CountWorkerThreads(void)
721 {
722  int worker_count = 0;
723 
724  ConfNode *affinity;
725  ConfNode *root = ConfGetNode("threading.cpu-affinity");
726 
727  if (root != NULL) {
728 
729  TAILQ_FOREACH(affinity, &root->head, next)
730  {
 /* Only the worker-cpu-set entry matters here; skip the others. */
731  if (strcmp(affinity->val, "decode-cpu-set") == 0 ||
732  strcmp(affinity->val, "stream-cpu-set") == 0 ||
733  strcmp(affinity->val, "reject-cpu-set") == 0 ||
734  strcmp(affinity->val, "output-cpu-set") == 0) {
735  continue;
736  }
737 
738  if (strcmp(affinity->val, "worker-cpu-set") == 0) {
739  ConfNode *node = ConfNodeLookupChild(affinity->head.tqh_first, "cpu");
740  ConfNode *lnode;
741 
743 
744  TAILQ_FOREACH(lnode, &node->head, next)
745  {
746  uint8_t start, end;
747  if (strncmp(lnode->val, "all", 4) == 0) {
748  /* check that the string in the config file is correctly specified */
749  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
751  "Only one Napatech port specifier type allowed.");
752  }
753  cpu_spec = CONFIG_SPECIFIER_RANGE;
754  worker_count = UtilCpuGetNumProcessorsConfigured();
755  } else if (strchr(lnode->val, '-')) {
756  /* check that the string in the config file is correctly specified */
757  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
759  "Only one Napatech port specifier type allowed.");
760  }
761  cpu_spec = CONFIG_SPECIFIER_RANGE;
762 
763  char copystr[16];
764  strlcpy(copystr, lnode->val, 16);
765  if (StringParseUint8(&start, 10, 0, (const char *)copystr) < 0) {
766  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid"
767  " worker range start: '%s'", copystr);
768  }
769  char *end_str = strchr(copystr, '-');
770  if ((end_str == NULL) || (end_str != NULL && StringParseUint8(&end,
771  10, 0, (const char *) (end_str + 1)) < 0)) {
772  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid"
773  " worker range end: '%s'", (end_str != NULL) ? (const char *)(end_str + 1) : "Null");
774  }
775  if (end < start) {
 /* NOTE(review): format uses '%s' but start/end are uint8_t
  * integers — undefined behavior if this fires; should be %d.
  * Verify and fix in the repository source. */
776  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid"
777  " worker range start: '%s' is greater than end: '%s'", start, end);
778  }
779  worker_count = end - start + 1;
780 
781  } else {
782  /* check that the string in the config file is correctly specified */
783  if (cpu_spec == CONFIG_SPECIFIER_RANGE) {
785  "Napatech port range specifiers cannot be combined with individual stream specifiers.");
786  }
787  cpu_spec = CONFIG_SPECIFIER_INDIVIDUAL;
788  ++worker_count;
789  }
790  }
791  break;
792  }
793  }
794  }
795  return worker_count;
796 }
797 
798 /**
799  * \brief Reads and parses the stream configuration defined in the config file.
800  *
801  * Either auto-discovers all active streams (napatech.use-all-streams),
802  * derives a range from the worker-thread count, or parses the explicit
803  * napatech.streams list.  Each discovered entry is checked for activity
804  * via its host-buffer usage.
805  *
806  * \param stream_config - array to be filled in with active stream info.
807  *
808  * \return the number of streams configured or -1 if an error occurred
809  */
 /* NOTE(review): the function signature line (content 806) is missing
  * from this listing, as are several error-report statements — verify
  * against the repository source. */
807 {
808  int status;
809  char error_buffer[80]; // Error buffer
810  NtStatStream_t hstat_stream;
811  NtStatistics_t hStat; // Stat handle.
812  NtInfoStream_t info_stream;
813  NtInfo_t info;
814  uint16_t instance_cnt = 0;
815  int use_all_streams = 0;
816  int set_cpu_affinity = 0;
817  ConfNode *ntstreams;
818  uint16_t stream_id = 0;
819  uint8_t start = 0;
820  uint8_t end = 0;
821 
 /* Reset every slot before (re)discovering the configuration. */
822  for (uint16_t i = 0; i < MAX_STREAMS; ++i) {
823  stream_config[i].stream_id = 0;
824  stream_config[i].is_active = false;
825  stream_config[i].initialized = false;
826  }
827 
828  if (ConfGetBool("napatech.use-all-streams", &use_all_streams) == 0) {
829  /* default is "no" */
830  use_all_streams = 0;
831  }
832 
833  if ((status = NT_InfoOpen(&info_stream, "SuricataStreamInfo")) != NT_SUCCESS) {
835  return -1;
836  }
837 
838  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
840  return -1;
841  }
842 
843  if (use_all_streams) {
844  info.cmd = NT_INFO_CMD_READ_STREAM;
845  if ((status = NT_InfoRead(info_stream, &info)) != NT_SUCCESS) {
847  return -1;
848  }
849 
850  while (instance_cnt < info.u.stream.data.count) {
851 
852  /*
853  * For each stream ID query the number of host-buffers used by
854  * the stream. If zero, then that streamID is not used; skip
855  * over it and continue until we get a streamID with a non-zero
856  * count of the host-buffers.
857  */
858  memset(&hStat, 0, sizeof (NtStatistics_t));
859 
860  /* Read usage data for the chosen stream ID */
861  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
862  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
863 
864  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
865  /* Get the status code as text */
866  NT_ExplainError(status, error_buffer, sizeof (error_buffer));
867  SCLogError(SC_ERR_NAPATECH_INIT_FAILED, "NT_StatRead() failed: %s\n", error_buffer);
868  return -1;
869  }
870 
871  if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
872  ++stream_id;
873  continue;
874  }
875 
876  /* if we get here it is an active stream */
877  stream_config[instance_cnt].stream_id = stream_id++;
878  stream_config[instance_cnt].is_active = true;
879  instance_cnt++;
880  }
881 
882  } else {
883  ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity);
884  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 1)) {
 /* Auto-config with affinity: one stream per worker thread. */
885  start = 0;
886  end = CountWorkerThreads() - 1;
887  } else {
888  /* When not using the default streams we need to
889  * parse the array of streams from the conf */
890  if ((ntstreams = ConfGetNode("napatech.streams")) == NULL) {
891  SCLogError(SC_ERR_RUNMODE, "Failed retrieving napatech.streams from Config");
892  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 0)) {
 /* NOTE(review): the error-call line is missing from this listing. */
894  "if set-cpu-affinity: no in conf then napatech.streams must be defined");
895  }
896  exit(EXIT_FAILURE);
897  }
898 
899  /* Loop through all stream numbers in the array and register the devices */
900  ConfNode *stream;
901  enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;
902  instance_cnt = 0;
903 
904  TAILQ_FOREACH(stream, &ntstreams->head, next)
905  {
906 
907  if (stream == NULL) {
908  SCLogError(SC_ERR_NAPATECH_INIT_FAILED, "Couldn't Parse Stream Configuration");
909  return -1;
910  }
911 
 /* "N-M" range entry vs. a single stream id. */
912  if (strchr(stream->val, '-')) {
913  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
915  "Only one Napatech stream range specifier allowed.");
916  return -1;
917  }
918  stream_spec = CONFIG_SPECIFIER_RANGE;
919 
920  char copystr[16];
921  strlcpy(copystr, stream->val, 16);
922  if (StringParseUint8(&start, 10, 0, (const char *)copystr) < 0) {
923  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid "
924  "stream id start: '%s'", copystr);
925  }
926  char *end_str = strchr(copystr, '-');
927  if ((end_str == NULL) || (end_str != NULL && StringParseUint8(&end,
928  10, 0, (const char *) (end_str + 1)) < 0)) {
929  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid "
930  "stream id end: '%s'", (end_str != NULL) ? (const char *)(end_str + 1) : "Null");
931  }
932  } else {
933  if (stream_spec == CONFIG_SPECIFIER_RANGE) {
935  "Napatech range and individual specifiers cannot be combined.");
936  }
937  stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;
938  if (StringParseUint16(&stream_config[instance_cnt].stream_id,
939  10, 0, (const char *)stream->val) < 0) {
940  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid "
941  "stream id: '%s'", stream->val);
942  }
943  start = stream_config[instance_cnt].stream_id;
944  end = stream_config[instance_cnt].stream_id;
945  }
946  }
947  }
948 
 /* Populate the config entries for the resolved [start, end] range and
  * mark each one active if it has host buffers in use. */
949  for (stream_id = start; stream_id <= end; ++stream_id) {
950  /* if we get here it is configured in the .yaml file */
951  stream_config[instance_cnt].stream_id = stream_id;
952 
953  /* Check to see if it is an active stream */
954  memset(&hStat, 0, sizeof (NtStatistics_t));
955 
956  /* Read usage data for the chosen stream ID */
957  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
958  hStat.u.usageData_v0.streamid =
959  (uint8_t) stream_config[instance_cnt].stream_id;
960 
961  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
963  return -1;
964  }
965 
966  if (hStat.u.usageData_v0.data.numHostBufferUsed > 0) {
967  stream_config[instance_cnt].is_active = true;
968  }
969  instance_cnt++;
970  }
971  }
972 
973  /* Close the statistics stream */
974  if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
976  return -1;
977  }
978 
979  if ((status = NT_InfoClose(info_stream)) != NT_SUCCESS) {
981  return -1;
982  }
983 
984  return instance_cnt;
985 }
986 
/**
 * \brief Thread loop that monitors host-buffer (and on-board SDRAM) fill levels.
 *
 * Polls the NTAPI statistics stream every 200ms and emits SCLogPerf messages
 * whenever the average fill level of a registered stream crosses a 25%
 * threshold band, in either direction.  Runs until THV_KILL is set on the
 * thread.
 *
 * \param arg ThreadVars pointer for this monitor thread.
 * \return NULL on exit.
 */
static void *NapatechBufMonitorLoop(void *arg)
{
    ThreadVars *tv = (ThreadVars *) arg;

    NtInfo_t hStreamInfo;
    NtStatistics_t hStat; // Stat handle.
    NtInfoStream_t hInfo;
    NtStatStream_t hstat_stream;
    int status; // Status variable

    /* warnings are issued in steps of 25% fill level */
    const uint32_t alertInterval = 25;

#ifndef NAPATECH_ENABLE_BYPASS
    /* On-board (adapter SDRAM) buffering is only tracked when bypass
     * support is not compiled in. */
    uint32_t OB_fill_level[MAX_STREAMS] = {0};
    uint32_t OB_alert_level[MAX_STREAMS] = {0};
    uint32_t ave_OB_fill_level[MAX_STREAMS] = {0};
#endif /* NAPATECH_ENABLE_BYPASS */

    uint32_t HB_fill_level[MAX_STREAMS] = {0};
    uint32_t HB_alert_level[MAX_STREAMS] = {0};
    uint32_t ave_HB_fill_level[MAX_STREAMS] = {0};

    /* Open the info and Statistics */
    if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
        /* NOTE(review): the error-report call preceding this exit was lost
         * in this view of the file. */
        exit(EXIT_FAILURE);
    }

    if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
        exit(EXIT_FAILURE);
    }

    /* Read the info on all streams instantiated in the system */
    hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
    if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
        exit(EXIT_FAILURE);
    }

    /* Only streams defined in the Suricata configuration are reported on. */
    NapatechStreamConfig registered_streams[MAX_STREAMS];
    int num_registered = NapatechGetStreamConfig(registered_streams);
    if (num_registered == -1) {
        exit(EXIT_FAILURE);
    }

    while (1) {
        if (TmThreadsCheckFlag(tv, THV_KILL)) {
            SCLogDebug("NapatechBufMonitorLoop THV_KILL detected");
            break;
        }

        usleep(200000);

        /* Read the info on all streams instantiated in the system */
        hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
        if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
            exit(EXIT_FAILURE);
        }

        char pktCntStr[4096];
        memset(pktCntStr, 0, sizeof (pktCntStr));

        uint32_t stream_id = 0;
        uint32_t stream_cnt = 0;
        uint32_t num_streams = hStreamInfo.u.stream.data.count;

        for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {

            /* skip over stream ids with no host buffers in use */
            do {

                /* Read usage data for the chosen stream ID */
                hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
                hStat.u.usageData_v0.streamid = (uint8_t) stream_id;

                if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
                    exit(EXIT_FAILURE);
                }

                if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
                    ++stream_id;
                    continue;
                }
            } while (hStat.u.usageData_v0.data.numHostBufferUsed == 0);

            if (RegisteredStream(stream_id, num_registered, registered_streams)) {

#ifndef NAPATECH_ENABLE_BYPASS
                ave_OB_fill_level[stream_id] = 0;
#endif /* NAPATECH_ENABLE_BYPASS */

                ave_HB_fill_level[stream_id] = 0;

                for (uint32_t hb_count = 0; hb_count < hStat.u.usageData_v0.data.numHostBufferUsed; hb_count++) {

#ifndef NAPATECH_ENABLE_BYPASS
                    /* on-board fill level as a percentage, clamped to 100 */
                    OB_fill_level[hb_count] =
                            ((100 * hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.used) /
                            hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.size);

                    if (OB_fill_level[hb_count] > 100) {
                        OB_fill_level[hb_count] = 100;
                    }
#endif /* NAPATECH_ENABLE_BYPASS */
                    /* NOTE(review): bufSize can be zero (or wrap, it is
                     * unsigned) when the queued byte counts are small
                     * relative to HB_HIGHWATER, which would make the
                     * division below divide by zero — confirm upstream
                     * guarantees on these counters. */
                    uint32_t bufSize = hStat.u.usageData_v0.data.hb[hb_count].enQueuedAdapter / 1024
                            + hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024
                            + hStat.u.usageData_v0.data.hb[hb_count].enQueued / 1024
                            - HB_HIGHWATER;

                    HB_fill_level[hb_count] = (uint32_t)
                            ((100 * hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024) /
                            bufSize);

#ifndef NAPATECH_ENABLE_BYPASS
                    ave_OB_fill_level[stream_id] += OB_fill_level[hb_count];
#endif /* NAPATECH_ENABLE_BYPASS */

                    ave_HB_fill_level[stream_id] += HB_fill_level[hb_count];
                }

#ifndef NAPATECH_ENABLE_BYPASS
                ave_OB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;
#endif /* NAPATECH_ENABLE_BYPASS */

                ave_HB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;

                /* Host Buffer Fill Level warnings... */
                if (ave_HB_fill_level[stream_id] >= (HB_alert_level[stream_id] + alertInterval)) {

                    /* ratchet the alert level up to the band just below the
                     * current average so the warning is only logged once per
                     * band crossing */
                    while (ave_HB_fill_level[stream_id] >= HB_alert_level[stream_id] + alertInterval) {
                        HB_alert_level[stream_id] += alertInterval;
                    }
                    SCLogPerf("nt%d - Increasing Host Buffer Fill Level : %4d%%",
                            stream_id, ave_HB_fill_level[stream_id] - 1);
                }

                if (HB_alert_level[stream_id] > 0) {
                    if ((ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval))) {
                        SCLogPerf("nt%d - Decreasing Host Buffer Fill Level: %4d%%",
                                stream_id, ave_HB_fill_level[stream_id]);

                        while (ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval)) {
                            if ((HB_alert_level[stream_id]) > 0) {
                                HB_alert_level[stream_id] -= alertInterval;
                            } else break;
                        }
                    }
                }

#ifndef NAPATECH_ENABLE_BYPASS
                /* On Board SDRAM Fill Level warnings... */
                if (ave_OB_fill_level[stream_id] >= (OB_alert_level[stream_id] + alertInterval)) {
                    while (ave_OB_fill_level[stream_id] >= OB_alert_level[stream_id] + alertInterval) {
                        OB_alert_level[stream_id] += alertInterval;

                    }
                    SCLogPerf("nt%d - Increasing Adapter SDRAM Fill Level: %4d%%",
                            stream_id, ave_OB_fill_level[stream_id]);
                }

                if (OB_alert_level[stream_id] > 0) {
                    if ((ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval))) {
                        SCLogPerf("nt%d - Decreasing Adapter SDRAM Fill Level : %4d%%",
                                stream_id, ave_OB_fill_level[stream_id]);

                        while (ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval)) {
                            if ((OB_alert_level[stream_id]) > 0) {
                                OB_alert_level[stream_id] -= alertInterval;
                            } else break;
                        }
                    }
                }
#endif /* NAPATECH_ENABLE_BYPASS */
            }
            ++stream_id;
        }
    }

    if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
        exit(EXIT_FAILURE);
    }

    /* Close the statistics stream */
    if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
        exit(EXIT_FAILURE);
    }

    SCLogDebug("Exiting NapatechStatsLoop");

    return NULL;
}
1186 
1187 
/* Starts the Napatech statistics and host-buffer monitor threads.
 * NOTE(review): the function signature line is missing from this view of the
 * file — presumably `void NapatechStartStats(void)`; the orphaned string
 * literals below are the remains of FatalError(...) calls whose opening
 * lines were lost in extraction. */
{
    /* Creates the Statistic threads */
    ThreadVars *stats_tv = TmThreadCreate("NapatechStats",
            NULL, NULL,
            NULL, NULL,
            "custom", NapatechStatsLoop, 0);

    if (stats_tv == NULL) {
                "Error creating a thread for NapatechStats - Killing engine.");
    }

    if (TmThreadSpawn(stats_tv) != 0) {
                "Failed to spawn thread for NapatechStats - Killing engine.");
    }

#ifdef NAPATECH_ENABLE_BYPASS
    if (bypass_supported) {
        SCLogInfo("Napatech bypass functionality enabled.");
    }
#endif /* NAPATECH_ENABLE_BYPASS */

    /* Second thread: watches host-buffer fill levels (see
     * NapatechBufMonitorLoop above). */
    ThreadVars *buf_monitor_tv = TmThreadCreate("NapatechBufMonitor",
            NULL, NULL,
            NULL, NULL,
            "custom", NapatechBufMonitorLoop, 0);

    if (buf_monitor_tv == NULL) {
                "Error creating a thread for NapatechBufMonitor - Killing engine.");
    }

    if (TmThreadSpawn(buf_monitor_tv) != 0) {
                "Failed to spawn thread for NapatechBufMonitor - Killing engine.");
    }


    return;
}
1230 
1231 bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
1232 {
1233  uint32_t status = 0;
1234  static NtConfigStream_t hconfig;
1235 
1236  char ntpl_cmd[64];
1237  snprintf(ntpl_cmd, 64, "setup[numanode=%d] = streamid == %d", numa, stream);
1238 
1239  NtNtplInfo_t ntpl_info;
1240 
1241  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1243  return false;
1244  }
1245 
1246  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1247  status = ntpl_info.ntplId;
1248 
1249  } else {
1250  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1251  return false;
1252  }
1253 
1254  return status;
1255 }
1256 
1257 static uint32_t NapatechSetHashmode(void)
1258 {
1259  uint32_t status = 0;
1260  const char *hash_mode;
1261  static NtConfigStream_t hconfig;
1262  char ntpl_cmd[64];
1263  NtNtplInfo_t ntpl_info;
1264 
1265  uint32_t filter_id = 0;
1266 
1267  /* Get the hashmode from the conf file. */
1268  ConfGetValue("napatech.hashmode", &hash_mode);
1269 
1270  snprintf(ntpl_cmd, 64, "hashmode = %s", hash_mode);
1271 
1272  /* Issue the NTPL command */
1273  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1275  return false;
1276  }
1277 
1278  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1279  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1280  filter_id = ntpl_info.ntplId;
1281  SCLogConfig("Napatech hashmode: %s ID: %d", hash_mode, status);
1282  } else {
1283  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1284  status = 0;
1285  }
1286 
1287  return filter_id;
1288 }
1289 
1290 static uint32_t GetStreamNUMAs(uint32_t stream_id, int stream_numas[])
1291 {
1292  NtStatistics_t hStat; // Stat handle.
1293  NtStatStream_t hstat_stream;
1294  int status; // Status variable
1295 
1296  for (int i = 0; i < MAX_HOSTBUFFERS; ++i)
1297  stream_numas[i] = -1;
1298 
1299  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
1301  exit(EXIT_FAILURE);
1302  }
1303 
1304  char pktCntStr[4096];
1305  memset(pktCntStr, 0, sizeof (pktCntStr));
1306 
1307 
1308  /* Read usage data for the chosen stream ID */
1309  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
1310  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
1311 
1312  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
1314  exit(EXIT_FAILURE);
1315  }
1316 
1317  for (uint32_t hb_id = 0; hb_id < hStat.u.usageData_v0.data.numHostBufferUsed; ++hb_id) {
1318  stream_numas[hb_id] = hStat.u.usageData_v0.data.hb[hb_id].numaNode;
1319  }
1320 
1321  return hStat.u.usageData_v0.data.numHostBufferUsed;
1322 }
1323 
1324 static int NapatechSetFilter(NtConfigStream_t hconfig, char *ntpl_cmd)
1325 {
1326  int status = 0;
1327  int local_filter_id = 0;
1328 
1329  NtNtplInfo_t ntpl_info;
1330  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1331  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1332  SCLogConfig("NTPL filter assignment \"%s\" returned filter id %4d",
1333  ntpl_cmd, local_filter_id);
1334  } else {
1335  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1336  exit(EXIT_FAILURE);
1337  }
1338 
1339  return local_filter_id;
1340 }
1341 
/* Deletes all NTPL filters currently programmed on the adapter
 * ("delete = all").  NOTE(review): the signature line is missing from this
 * view — per the cross-reference index it is `uint32_t NapatechDeleteFilters(void)`.
 * Returns the NTPL id of the delete command, or 0 on failure. */
{
    uint32_t status = 0;
    static NtConfigStream_t hconfig;
    char ntpl_cmd[64];
    NtNtplInfo_t ntpl_info;

    if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
        /* NOTE(review): the error-report call preceding this exit was lost
         * in this view of the file. */
        exit(EXIT_FAILURE);
    }

    snprintf(ntpl_cmd, 64, "delete = all");
    if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
            NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
        /* on success the command's NTPL id is returned */
        status = ntpl_info.ntplId;
    } else {
        NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
        status = 0;
    }

    NT_ConfigClose(hconfig);

    return status;
}
1367 
1368 
/**
 * \brief Builds and applies the NTPL configuration that distributes traffic
 *        to the host-buffer streams used by Suricata.
 *
 * Parses "napatech.ports" from the configuration, optionally programs the
 * hardware-bypass / inline-forwarding filter set, assigns ports to the
 * stream range [first_stream..last_stream], reports host-buffer NUMA
 * placement, and finally activates the streams.
 *
 * NOTE(review): several FatalError/SCLogError opener lines were lost in this
 * view of the file; the orphaned string literals below are their remains.
 *
 * \param first_stream id of the first stream to receive traffic.
 * \param last_stream  id of the last stream to receive traffic.
 * \return value of `status` (set by NT_ConfigOpen, NT_SUCCESS on the success path).
 */
uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
{
#define PORTS_SPEC_SIZE 64

    /* Accumulated port configuration: segment port pairs, the "all ports"
     * flag and the textual spec substituted into NTPL commands. */
    struct ports_spec_s {
        uint8_t first[MAX_PORTS];
        uint8_t second[MAX_PORTS];
        bool all;
        char str[PORTS_SPEC_SIZE];
    } ports_spec;

    ports_spec.all = false;

    ConfNode *ntports;
    int iteration = 0;
    int status = 0;
    NtConfigStream_t hconfig;
    char ntpl_cmd[512];
    int is_inline = 0;
    /* marks ports configured as single-port ("x-x") SPAN segments */
    int is_span_port[MAX_PORTS] = { 0 };

    char span_ports[128];
    memset(span_ports, 0, sizeof(span_ports));

    if (ConfGetBool("napatech.inline", &is_inline) == 0) {
        is_inline = 0;
    }

    NapatechSetHashmode();

    if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
        /* NOTE(review): error-report call lost in this view of the file */
        exit(EXIT_FAILURE);
    }

    /* Deactivate the target streams while filters are (re)programmed. */
    if (first_stream == last_stream) {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=inactive] = StreamId == %d",
                first_stream);
    } else {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=inactive] = StreamId == (%d..%d)",
                first_stream, last_stream);
    }
    NapatechSetFilter(hconfig, ntpl_cmd);

#ifdef NAPATECH_ENABLE_BYPASS
    if (NapatechUseHWBypass()) {
        SCLogInfo("Napatech Hardware Bypass enabled.");
    } else {
        SCLogInfo("Napatech Hardware Bypass available but disabled.");
    }
#else
    if (NapatechUseHWBypass()) {
        SCLogInfo("Napatech Hardware Bypass requested in conf but is not available.");
        exit(EXIT_FAILURE);
    } else {
        SCLogInfo("Napatech Hardware Bypass disabled.");
    }

    /* inline requires hardware bypass support compiled in */
    if (is_inline) {
            "Napatech inline mode not supported. (Only available when Hardware Bypass support is enabled.)");
    }
#endif

    if (is_inline) {
        SCLogInfo("Napatech configured for inline mode.");
    } else {

        SCLogInfo("Napatech configured for passive (non-inline) mode.");
    }

    /* When not using the default streams we need to parse
     * the array of streams from the conf
     */
    if ((ntports = ConfGetNode("napatech.ports")) == NULL) {
        FatalError(SC_ERR_FATAL, "Failed retrieving napatech.ports from Conf");
    }

    /* Loop through all ports in the array */
    ConfNode *port;
    enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;

    if (NapatechUseHWBypass()) {
        SCLogInfo("Listening on the following Napatech ports:");
    }
    /* Build the NTPL command using values in the config file. */
    TAILQ_FOREACH(port, &ntports->head, next)
    {
        if (port == NULL) {
                "Couldn't Parse Port Configuration");
        }

        if (NapatechUseHWBypass()) {
#ifdef NAPATECH_ENABLE_BYPASS
            /* With bypass, ports must be given as segments ("a-b");
             * "a-a" denotes a SPAN port. */
            if (strchr(port->val, '-')) {
                stream_spec = CONFIG_SPECIFIER_RANGE;

                char copystr[16];
                strlcpy(copystr, port->val, sizeof(copystr));
                ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, copystr);
                ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0, strchr(copystr, '-')+1);

                if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                    if (is_inline) {
                        "Error with napatec.ports in conf file. When running in inline mode the two ports specifying a segment must be different.");
                    } else {
                        /* SPAN port configuration */
                        is_span_port[ports_spec.first[iteration]] = 1;

                        /* build a comma-separated list of SPAN ports */
                        if (strlen(span_ports) == 0) {
                            snprintf(span_ports, sizeof (span_ports), "%d", ports_spec.first[iteration]);
                        } else {
                            char temp[16];
                            snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
                            strlcat(span_ports, temp, sizeof(span_ports));
                        }

                    }
                }

                /* NOTE(review): both operands call NapatechGetAdapter on
                 * ports_spec.first[iteration], so this comparison is always
                 * false — the second operand presumably should use
                 * ports_spec.second[iteration].  Confirm and fix upstream. */
                if (NapatechGetAdapter(ports_spec.first[iteration]) != NapatechGetAdapter(ports_spec.first[iteration])) {
                        "Invalid napatech.ports specification in conf file.");
                        "Two ports on a segment must reside on the same adapter.  port %d is on adapter %d, port %d is on adapter %d.",
                    ports_spec.first[iteration],
                    NapatechGetAdapter(ports_spec.first[iteration]),
                    ports_spec.second[iteration],
                    NapatechGetAdapter(ports_spec.second[iteration])
                    );
                    exit(EXIT_FAILURE);
                }

                NapatechSetPortmap(ports_spec.first[iteration], ports_spec.second[iteration]);
                if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                    SCLogInfo("    span_port: %d", ports_spec.first[iteration]);
                } else {
                    SCLogInfo("    %s: %d - %d", is_inline ? "inline_ports" : "tap_ports", ports_spec.first[iteration], ports_spec.second[iteration]);
                }

                /* accumulate the textual port spec used in NTPL commands */
                if (iteration == 0) {
                    if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                        snprintf(ports_spec.str, sizeof (ports_spec.str), "%d", ports_spec.first[iteration]);
                    } else {
                        snprintf(ports_spec.str, sizeof (ports_spec.str), "%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
                    }
                } else {
                    char temp[16];
                    if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                        snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
                    } else {
                        snprintf(temp, sizeof(temp), ",%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
                    }
                    strlcat(ports_spec.str, temp, sizeof(ports_spec.str));
                }
            } else {
                "When using hardware flow bypass ports must be specified as segments. E.g. ports: [0-1, 0-2]");
            }
#endif
        } else { // !NapatechUseHWBypass()
            /* Without bypass, "all", a single range "a-b", or individual
             * ports are accepted — but specifier kinds cannot be mixed. */
            if (strncmp(port->val, "all", 3) == 0) {
                /* check that the sting in the config file is correctly specified */
                if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
                        "Only one Napatech port specifier type is allowed.");
                }
                stream_spec = CONFIG_SPECIFIER_RANGE;

                ports_spec.all = true;
                snprintf(ports_spec.str, sizeof (ports_spec.str), "all");
            } else if (strchr(port->val, '-')) {
                /* check that the sting in the config file is correctly specified */
                if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
                        "Only one Napatech port specifier is allowed when hardware bypass is disabled. (E.g. ports: [0-4], NOT ports: [0-1,2-3])");
                }
                stream_spec = CONFIG_SPECIFIER_RANGE;

                char copystr[16];
                strlcpy(copystr, port->val, sizeof (copystr));
                ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, copystr);
                ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0, strchr(copystr, '-') + 1);
                snprintf(ports_spec.str, sizeof (ports_spec.str), "(%d..%d)", ports_spec.first[iteration], ports_spec.second[iteration]);
            } else {
                /* check that the sting in the config file is correctly specified */
                if (stream_spec == CONFIG_SPECIFIER_RANGE) {
                        "Napatech port range specifiers cannot be combined with individual stream specifiers.");
                }
                stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;

                ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val);

                /* Determine the ports to use on the NTPL assign statement*/
                if (iteration == 0) {
                    snprintf(ports_spec.str, sizeof (ports_spec.str), "%s", port->val);
                } else {
                    strlcat(ports_spec.str, ",", sizeof(ports_spec.str));
                    strlcat(ports_spec.str, port->val, sizeof(ports_spec.str));
                }
            }
        } // if !NapatechUseHWBypass()
        ++iteration;
    } /* TAILQ_FOREACH */

#ifdef NAPATECH_ENABLE_BYPASS
    if (bypass_supported) {
        if (is_inline) {
            char inline_setup_cmd[512];
            /* NOTE(review): sizeof (ntpl_cmd) is used to bound
             * inline_setup_cmd — both are 512 bytes today, but this
             * references the wrong object; confirm and correct upstream. */
            if (first_stream == last_stream) {
                snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
                        "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == %d",
                        ports_spec.str, first_stream);
            } else {
                snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
                        "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == (%d..%d)",
                        ports_spec.str, first_stream, last_stream);
            }
            NapatechSetFilter(hconfig, inline_setup_cmd);
        }
        /* Build the NTPL command */
        /* Base assignment: everything on the configured ports goes to the
         * stream range with a DYN3 descriptor carrying 32 color bits. */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=3;streamid=(%d..%d);colormask=0x10000000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);


        /* Higher-priority assigns refine the color mask per protocol. */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x11000000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
                "]= %s%s and (Layer3Protocol==IPV4)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);


        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x14000000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer3Protocol==IPV6)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x10100000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer4Protocol==TCP)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x10200000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
                "]= %s%s and (Layer4Protocol==UDP)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);

        if (strlen(span_ports) > 0) {
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=2;streamid=(%d..%d);colormask=0x00001000;"
                    "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
                    "]= port==%s",
                    first_stream, last_stream, span_ports);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        /* Flow-match key definitions used by the bypass flow programming:
         * IPv4 5-tuple for segment (inline/tap) ports. */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={sw_32_32,sw_16_16}",
                NAPATECH_KEYTYPE_IPV4);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[12]/32/32,Layer4Header[0]/16/16)",
                NAPATECH_KEYTYPE_IPV4, NAPATECH_KEYTYPE_IPV4);
        NapatechSetFilter(hconfig, ntpl_cmd);

        /* IPv4 5-tuple for SPAN ports (sorted key so both directions match) */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={32,32,16,16}",
                NAPATECH_KEYTYPE_IPV4_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[12]/32,Layer3Header[16]/32,Layer4Header[0]/16,Layer4Header[2]/16)",
                NAPATECH_KEYTYPE_IPV4_SPAN, NAPATECH_KEYTYPE_IPV4_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);

        /* IPv6 5tuple for inline and tap ports */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={sw_128_128,sw_16_16}",
                NAPATECH_KEYTYPE_IPV6);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[8]/128/128,Layer4Header[0]/16/16)",
                NAPATECH_KEYTYPE_IPV6, NAPATECH_KEYTYPE_IPV6);
        NapatechSetFilter(hconfig, ntpl_cmd);

        /* IPv6 5tuple for SPAN Ports */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={128,128,16,16}",
                NAPATECH_KEYTYPE_IPV6_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[8]/128,Layer3Header[24]/128,Layer4Header[0]/16,Layer4Header[2]/16)",
                NAPATECH_KEYTYPE_IPV6_SPAN, NAPATECH_KEYTYPE_IPV6_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);


        /* Split the non-SPAN segment ports into "a-side" and "b-side"
         * lists for the directional drop filters below. */
        int pair;
        char ports_ntpl_a[64];
        char ports_ntpl_b[64];
        memset(ports_ntpl_a, 0, sizeof(ports_ntpl_a));
        memset(ports_ntpl_b, 0, sizeof(ports_ntpl_b));

        for (pair = 0; pair < iteration; ++pair) {
            char port_str[8];

            if (!is_span_port[ports_spec.first[pair]]) {
                snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_a) == 0 ? "" : ",", ports_spec.first[pair]);
                strlcat(ports_ntpl_a, port_str, sizeof(ports_ntpl_a));

                snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_b) == 0 ? "" : ",", ports_spec.second[pair]);
                strlcat(ports_ntpl_b, port_str, sizeof(ports_ntpl_b));
            }
        }

        if (strlen(ports_ntpl_a) > 0) {
            /* This is the assign for dropping upstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    ports_ntpl_a,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(ports_ntpl_b) > 0) {
            /* This is the assign for dropping downstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                    ports_ntpl_b, //ports_spec.str,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(span_ports) > 0) {
            /* This is the assign for dropping SPAN Port traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    span_ports,
                    NAPATECH_KEYTYPE_IPV4_SPAN,
                    NAPATECH_KEYTYPE_IPV4_SPAN,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (is_inline) {
            for (pair = 0; pair < iteration; ++pair) {
                /* This is the assignment for forwarding traffic */
                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u)==%u)",
                        ports_spec.second[pair],
                        ports_spec.first[pair],
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);

                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                        ports_spec.first[pair],
                        ports_spec.second[pair],
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);
            }
        }

        if (strlen(ports_ntpl_a) > 0) {
            /* This is the assign for dropping upstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    ports_ntpl_a,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(ports_ntpl_b) > 0) {
            /* This is the assign for dropping downstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                    ports_ntpl_b, //ports_spec.str,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(span_ports) > 0) {
            /* This is the assign for dropping SPAN Port traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    span_ports,
                    NAPATECH_KEYTYPE_IPV6_SPAN,
                    NAPATECH_KEYTYPE_IPV6_SPAN,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (is_inline) {
            for (pair = 0; pair < iteration; ++pair) {
                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u)==%u)",
                        ports_spec.second[pair],
                        ports_spec.first[pair],
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);

                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                        ports_spec.first[pair],
                        ports_spec.second[pair],
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);
            }
        }
    } else {
        /* bypass not supported by this FPGA: plain assignment only */
        if (is_inline) {
                "Napatech Inline operation not supported by this FPGA version.");
        }

        snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d);colormask=0x0] = %s%s",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);
    }

#else /* NAPATECH_ENABLE_BYPASS */
    snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d)] = %s%s",
            first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
    NapatechSetFilter(hconfig, ntpl_cmd);

#endif /* !NAPATECH_ENABLE_BYPASS */

    SCLogConfig("Host-buffer NUMA assignments: ");
    int numa_nodes[MAX_HOSTBUFFERS];
    uint32_t stream_id;
    /* NOTE(review): '<' excludes last_stream from the NUMA report —
     * confirm whether '<=' was intended. */
    for (stream_id = first_stream; stream_id < last_stream; ++stream_id) {
        char temp1[256];
        char temp2[256];

        uint32_t num_host_buffers = GetStreamNUMAs(stream_id, numa_nodes);

        snprintf(temp1, 256, "    stream %d: ", stream_id);

        for (uint32_t hb_id = 0; hb_id < num_host_buffers; ++hb_id) {
            snprintf(temp2, 256, "%d ", numa_nodes[hb_id]);
            strlcat(temp1, temp2, sizeof(temp1));
        }

        SCLogConfig("%s", temp1);
    }

    /* Filters are in place: activate the streams. */
    if (first_stream == last_stream) {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=active] = StreamId == %d",
                first_stream);
    } else {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=active] = StreamId == (%d..%d)",
                first_stream, last_stream);
    }
    NapatechSetFilter(hconfig, ntpl_cmd);

    NT_ConfigClose(hconfig);

    return status;
}
1863 
1864 #endif // HAVE_NAPATECH
util-byte.h
PORTS_SPEC_SIZE
#define PORTS_SPEC_SIZE
tm-threads.h
CONFIG_SPECIFIER_UNDEFINED
@ CONFIG_SPECIFIER_UNDEFINED
Definition: util-napatech.c:200
TmThreadSpawn
TmEcode TmThreadSpawn(ThreadVars *tv)
Spawns a thread associated with the ThreadVars instance tv.
Definition: tm-threads.c:1708
StringParseUint16
int StringParseUint16(uint16_t *res, int base, uint16_t len, const char *str)
Definition: util-byte.c:336
NapatechGetCurrentStats
NapatechCurrentStats NapatechGetCurrentStats(uint16_t id)
Definition: util-napatech.c:193
SC_ERR_INVALID_VALUE
@ SC_ERR_INVALID_VALUE
Definition: util-error.h:160
CONFIG_SPECIFIER_RANGE
@ CONFIG_SPECIFIER_RANGE
Definition: util-napatech.c:201
SC_ERR_NAPATECH_INIT_FAILED
@ SC_ERR_NAPATECH_INIT_FAILED
Definition: util-error.h:251
ConfNode_::val
char * val
Definition: conf.h:34
ConfGetBool
int ConfGetBool(const char *name, int *val)
Retrieve a configuration value as an boolen.
Definition: conf.c:516
NapatechSetPortmap
int NapatechSetPortmap(int port, int peer)
unlikely
#define unlikely(expr)
Definition: util-optimize.h:35
SCLogDebug
#define SCLogDebug(...)
Definition: util-debug.h:298
TmThreadsSetFlag
void TmThreadsSetFlag(ThreadVars *tv, uint32_t flag)
Set a thread flag.
Definition: tm-threads.c:97
TmThreadWaitForFlag
void TmThreadWaitForFlag(ThreadVars *tv, uint32_t flags)
Waits till the specified flag(s) is(are) set. We don't bother if the kill flag has been set or not on...
Definition: tm-threads.c:1807
next
struct HtpBodyChunk_ * next
Definition: app-layer-htp.h:0
THV_DEINIT
#define THV_DEINIT
Definition: threadvars.h:46
ConfGetNode
ConfNode * ConfGetNode(const char *name)
Get a ConfNode by name.
Definition: conf.c:175
UtilCpuGetNumProcessorsConfigured
uint16_t UtilCpuGetNumProcessorsConfigured(void)
Get the number of cpus configured in the system.
Definition: util-cpu.c:59
StatsSetUI64
void StatsSetUI64(ThreadVars *tv, uint16_t id, uint64_t x)
Sets a value of type double to the local counter.
Definition: counters.c:191
NapatechDeleteFilters
uint32_t NapatechDeleteFilters(void)
Definition: util-napatech.c:1342
TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)
Definition: queue.h:350
StatsSetupPrivate
int StatsSetupPrivate(ThreadVars *tv)
Definition: counters.c:1194
StatsSyncCountersIfSignalled
#define StatsSyncCountersIfSignalled(tv)
Definition: counters.h:137
NapatechIsAutoConfigEnabled
bool NapatechIsAutoConfigEnabled(void)
Definition: runmode-napatech.c:70
PacketCounters_::byte
uint16_t byte
Definition: util-napatech.c:185
SC_WARN_COMPATIBILITY
@ SC_WARN_COMPATIBILITY
Definition: util-error.h:193
stream_config
TcpStreamCnf stream_config
Definition: stream-tcp.c:119
SC_ERR_RUNMODE
@ SC_ERR_RUNMODE
Definition: util-error.h:219
current_stats
NapatechCurrentStats current_stats[MAX_STREAMS]
Definition: util-napatech.c:191
NapatechStreamConfig_
Definition: util-napatech.h:43
SC_ERR_NAPATECH_OPEN_FAILED
@ SC_ERR_NAPATECH_OPEN_FAILED
Definition: util-error.h:246
strlcpy
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition: util-strlcpyu.c:43
THV_RUNNING_DONE
#define THV_RUNNING_DONE
Definition: threadvars.h:47
NapatechGetStreamConfig
int NapatechGetStreamConfig(NapatechStreamConfig stream_config[])
Reads and parses the stream configuration defined in the config file.
Definition: util-napatech.c:806
total_stats
NapatechCurrentStats total_stats
Definition: util-napatech.c:190
util-device.h
NapatechCurrentStats_::current_packets
uint64_t current_packets
Definition: util-napatech.h:51
strlcat
size_t strlcat(char *, const char *src, size_t siz)
Definition: util-strlcatu.c:45
util-cpu.h
SC_ERR_NAPATECH_CONFIG_STREAM
@ SC_ERR_NAPATECH_CONFIG_STREAM
Definition: util-error.h:252
source-napatech.h
ThreadVars_
Per thread variable structure.
Definition: threadvars.h:58
PacketCounters
struct PacketCounters_ PacketCounters
THV_KILL
#define THV_KILL
Definition: threadvars.h:41
CONFIG_SPECIFIER_INDIVIDUAL
@ CONFIG_SPECIFIER_INDIVIDUAL
Definition: util-napatech.c:202
TmThreadCreate
ThreadVars * TmThreadCreate(const char *name, const char *inq_name, const char *inqh_name, const char *outq_name, const char *outqh_name, const char *slots, void *(*fn_p)(void *), int mucond)
Creates and returns the TV instance for a new thread.
Definition: tm-threads.c:935
NAPATECH_NTPL_ERROR
#define NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status)
Definition: util-napatech.h:71
NapatechGetAdapter
int NapatechGetAdapter(uint8_t port)
NAPATECH_ERROR
#define NAPATECH_ERROR(err_type, status)
Definition: util-napatech.h:65
util-napatech.h
PacketCounters_::pkts
uint16_t pkts
Definition: util-napatech.c:184
PacketCounters_::drop_pkts
uint16_t drop_pkts
Definition: util-napatech.c:186
SC_ERR_NAPATECH_PARSE_CONFIG
@ SC_ERR_NAPATECH_PARSE_CONFIG
Definition: util-error.h:255
NapatechCurrentStats_::current_bytes
uint64_t current_bytes
Definition: util-napatech.h:52
HB_HIGHWATER
#define HB_HIGHWATER
Definition: util-napatech.c:692
PacketCounters_::drop_byte
uint16_t drop_byte
Definition: util-napatech.c:187
SCLogInfo
#define SCLogInfo(...)
Macro used to log INFORMATIONAL messages.
Definition: util-debug.h:217
ConfNodeLookupChild
ConfNode * ConfNodeLookupChild(const ConfNode *node, const char *name)
Lookup a child configuration node by name.
Definition: conf.c:814
THV_INIT_DONE
#define THV_INIT_DONE
Definition: threadvars.h:38
CONFIG_SPECIFIER
CONFIG_SPECIFIER
Definition: util-napatech.c:199
suricata-common.h
SCLogPerf
#define SCLogPerf(...)
Definition: util-debug.h:224
SCLogError
#define SCLogError(err_code,...)
Macro used to log ERROR messages.
Definition: util-debug.h:257
NapatechStartStats
void NapatechStartStats(void)
Definition: util-napatech.c:1188
FatalError
#define FatalError(x,...)
Definition: util-debug.h:532
MAX_HOSTBUFFERS
#define MAX_HOSTBUFFERS
Definition: util-napatech.c:205
tv
ThreadVars * tv
Definition: fuzz_decodepcapfile.c:29
threadvars.h
NapatechUseHWBypass
bool NapatechUseHWBypass(void)
Definition: runmode-napatech.c:75
NapatechCurrentStats_::current_drop_bytes
uint64_t current_drop_bytes
Definition: util-napatech.h:54
SCLogConfig
struct SCLogConfig_ SCLogConfig
Holds the config state used by the logging api.
SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED
@ SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED
Definition: util-error.h:253
ByteExtractStringUint8
int ByteExtractStringUint8(uint8_t *res, int base, uint16_t len, const char *str)
Definition: util-byte.c:285
str
#define str(s)
Definition: suricata-common.h:273
SCLogWarning
#define SCLogWarning(err_code,...)
Macro used to log WARNING messages.
Definition: util-debug.h:244
MAX_PORTS
#define MAX_PORTS
Definition: util-napatech.h:59
ConfNode_
Definition: conf.h:32
SC_ERR_FATAL
@ SC_ERR_FATAL
Definition: util-error.h:203
StringParseUint8
int StringParseUint8(uint8_t *res, int base, uint16_t len, const char *str)
Definition: util-byte.c:359
NapatechCurrentStats_::current_drop_packets
uint64_t current_drop_packets
Definition: util-napatech.h:53
suricata.h
ConfGetValue
int ConfGetValue(const char *name, const char **vptr)
Retrieve the value of a configuration node.
Definition: conf.c:359
NapatechSetupTraffic
uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
Definition: util-napatech.c:1369
MAX_STREAMS
#define MAX_STREAMS
Definition: util-napatech.c:691
NapatechCurrentStats_
Definition: util-napatech.h:50
PacketCounters_
Definition: util-napatech.c:183
TmThreadsCheckFlag
int TmThreadsCheckFlag(ThreadVars *tv, uint32_t flag)
Check if a thread flag is set.
Definition: tm-threads.c:89
StatsRegisterCounter
uint16_t StatsRegisterCounter(const char *name, struct ThreadVars_ *tv)
Registers a normal, unqualified counter.
Definition: counters.c:939
THV_CLOSED
#define THV_CLOSED
Definition: threadvars.h:43
SCCalloc
#define SCCalloc(nm, sz)
Definition: util-mem.h:53
NapatechSetupNuma
bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
Definition: util-napatech.c:1231