suricata
util-napatech.c
Go to the documentation of this file.
1 /* Copyright (C) 2017 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 /**
18  * \file
19  *
20  * \author Napatech Inc.
21  * \author Phil Young <py@napatech.com>
22  *
23  *
24  */
25 #include "suricata-common.h"
26 
27 #ifdef HAVE_NAPATECH
28 #include "suricata.h"
29 #include "util-device.h"
30 #include "util-cpu.h"
31 #include "util-byte.h"
32 #include "threadvars.h"
33 #include "tm-threads.h"
34 #include "util-napatech.h"
35 #include "source-napatech.h"
36 
37 #ifdef NAPATECH_ENABLE_BYPASS
38 
39 /*
40  * counters to track the number of flows programmed on
41  * the adapter.
42  */
typedef struct FlowStatsCounters_
{
    uint16_t active_bypass_flows; /* counter id (from StatsRegisterCounter) for flows currently installed */
    uint16_t total_bypass_flows;  /* counter id for cumulative flows programmed since start */
} FlowStatsCounters;
48 
49 
/* Set to 1 by NapatechVerifyBypassSupport() once a FlowStream could be
 * opened on every adapter; remains 0 otherwise. */
static int bypass_supported;

/**
 * \brief Reports whether hardware bypass support has been verified.
 *
 * \return 1 when bypass is supported, 0 otherwise.
 */
int NapatechIsBypassSupported(void)
{
    return bypass_supported ? 1 : 0;
}
55 
56 /**
57  * \brief Returns the number of Napatech Adapters in the system.
58  *
59  * \return count of the Napatech adapters present in the system.
60  */
61 int NapatechGetNumAdapters(void)
62 {
63  NtInfoStream_t hInfo;
64  NtInfo_t hInfoSys;
65  int status;
66  static int num_adapters = -1;
67 
68  if (num_adapters == -1) {
69  if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
71  exit(EXIT_FAILURE);
72  }
73 
74  hInfoSys.cmd = NT_INFO_CMD_READ_SYSTEM;
75  if ((status = NT_InfoRead(hInfo, &hInfoSys)) != NT_SUCCESS) {
77  exit(EXIT_FAILURE);
78  }
79 
80  num_adapters = hInfoSys.u.system.data.numAdapters;
81 
82  NT_InfoClose(hInfo);
83  }
84 
85  return num_adapters;
86 }
87 
88 /**
89  * \brief Verifies that the Napatech adapters support bypass.
90  *
91  * Attempts to opens a FlowStream on each adapter present in the system.
92  * If successful then bypass is supported
93  *
94  * \return 1 if Bypass functionality is supported; zero otherwise.
95  */
96 int NapatechVerifyBypassSupport(void)
97 {
98  int status;
99  int adapter = 0;
100  int num_adapters = NapatechGetNumAdapters();
101  SCLogInfo("Found %d Napatech adapters.", num_adapters);
102  NtFlowStream_t hFlowStream;
103 
104  if (!NapatechUseHWBypass()) {
105  /* HW Bypass is disabled in the conf file */
106  return 0;
107  }
108 
109  for (adapter = 0; adapter < num_adapters; ++adapter) {
110  NtFlowAttr_t attr;
111  char flow_name[80];
112 
113  NT_FlowOpenAttrInit(&attr);
114  NT_FlowOpenAttrSetAdapterNo(&attr, adapter);
115 
116  snprintf(flow_name, sizeof(flow_name), "Flow stream %d", adapter );
117  SCLogInfo("Opening flow programming stream: %s\n", flow_name);
118  if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) {
119  SCLogWarning(SC_WARN_COMPATIBILITY, "Napatech bypass functionality not supported by the FPGA version on adapter %d - disabling support.", adapter);
120  bypass_supported = 0;
121  return 0;
122  }
123  NT_FlowClose(hFlowStream);
124  }
125 
126  bypass_supported = 1;
127  return bypass_supported;
128 }
129 
130 
131 /**
132  * \brief Updates statistic counters for Napatech FlowStats
133  *
134  * \param tv Thread variable to ThreadVars
135  * \param hInfo Handle to the Napatech InfoStream.
136  * \param hstat_stream Handle to the Napatech Statistics Stream.
137  * \param flow_counters The flow counters statistics to update.
138  * \param clear_stats Indicates if statistics on the card should be reset to zero.
139  *
140  */
141 static void UpdateFlowStats(
142  ThreadVars *tv,
143  NtInfoStream_t hInfo,
144  NtStatStream_t hstat_stream,
145  FlowStatsCounters flow_counters,
146  int clear_stats
147  )
148 {
149  NtStatistics_t hStat;
150  int status;
151 
152  uint64_t programed = 0;
153  uint64_t removed = 0;
154  int adapter = 0;
155 
156  for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
157  hStat.cmd = NT_STATISTICS_READ_CMD_FLOW_V0;
158  hStat.u.flowData_v0.clear = clear_stats;
159  hStat.u.flowData_v0.adapterNo = adapter;
160  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
162  exit(1);
163  }
164  programed = hStat.u.flowData_v0.learnDone;
165  removed = hStat.u.flowData_v0.unlearnDone
166  + hStat.u.flowData_v0.automaticUnlearnDone
167  + hStat.u.flowData_v0.timeoutUnlearnDone;
168  }
169 
170  StatsSetUI64(tv, flow_counters.active_bypass_flows, programed - removed);
171  StatsSetUI64(tv, flow_counters.total_bypass_flows, programed);
172 }
173 
174 #endif /* NAPATECH_ENABLE_BYPASS */
175 
176 
177 /*-----------------------------------------------------------------------------
178  *-----------------------------------------------------------------------------
179  * Statistics code
180  *-----------------------------------------------------------------------------
181  */
182 typedef struct PacketCounters_
183 {
184  uint16_t pkts;
185  uint16_t byte;
186  uint16_t drop_pkts;
187  uint16_t drop_byte;
189 
192 
194 {
195 
196  return current_stats[id];
197 }
198 
203 };
204 
205 #define MAX_HOSTBUFFERS 8
206 
207 /**
208  * \brief Test to see if any of the configured streams are active
209  *
210  * \param hInfo Handle to Napatech Info Stream.
211  * \param hStatsStream Handle to Napatech Statistics stream
212  * \param stream_config array of stream configuration structures
213  * \param num_inst
214  *
215  */
216 static uint16_t TestStreamConfig(
217  NtInfoStream_t hInfo,
218  NtStatStream_t hstat_stream,
220  uint16_t num_inst)
221 {
222  uint16_t num_active = 0;
223 
224  for (uint16_t inst = 0; inst < num_inst; ++inst) {
225  int status;
226  NtStatistics_t stat; // Stat handle.
227 
228  /* Check to see if it is an active stream */
229  memset(&stat, 0, sizeof (NtStatistics_t));
230 
231  /* Read usage data for the chosen stream ID */
232  stat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
233  stat.u.usageData_v0.streamid = (uint8_t) stream_config[inst].stream_id;
234 
235  if ((status = NT_StatRead(hstat_stream, &stat)) != NT_SUCCESS) {
237  return 0;
238  }
239 
240  if (stat.u.usageData_v0.data.numHostBufferUsed > 0) {
241  stream_config[inst].is_active = true;
242  num_active++;
243  } else {
244  stream_config[inst].is_active = false;
245  }
246  }
247 
248  return num_active;
249 }
250 
251 /**
252  * \brief Updates Napatech packet counters
253  *
254  * \param tv Pointer to TheardVars structure
255  * \param hInfo Handle to Napatech Info Stream.
256  * \param hstat_stream Handle to Napatech Statistics stream
257  * \param num_streams the number of streams that are currently active
258  * \param stream_config array of stream configuration structures
259  * \param total_counters - cumulative count of all packets received.
260  * \param dispatch_host, - Count of packets that were delivered to the host buffer
261  * \param dispatch_drop - count of packets that were dropped as a result of a rule
262  * \param dispatch_fwd - count of packets forwarded out the egress port as the result of a rule
263  * \param is_inline - are we running in inline mode?
264  * \param enable_stream_stats - are per thread/stream statistics enabled.
265  * \param stream_counters - counters for each thread/stream configured.
266  *
267  * \return The number of active streams that were updated.
268  *
269  */
/* NOTE(review): the parameter list below appears to have lost a
 *   NapatechStreamConfig stream_config[],
 * entry during doc extraction -- the body references stream_config.
 * Confirm against the original source. */
static uint32_t UpdateStreamStats(ThreadVars *tv,
        NtInfoStream_t hInfo,
        NtStatStream_t hstat_stream,
        uint16_t num_streams,
        PacketCounters total_counters,
        PacketCounters dispatch_host,
        PacketCounters dispatch_drop,
        PacketCounters dispatch_fwd,
        int is_inline,
        int enable_stream_stats,
        PacketCounters stream_counters[]
        ) {
    /* Baselines captured on the first sighting of each stream so later
     * readings are reported relative to startup. */
    static uint64_t rxPktsStart[MAX_STREAMS] = {0};
    static uint64_t rxByteStart[MAX_STREAMS] = {0};
    static uint64_t dropPktStart[MAX_STREAMS] = {0};
    static uint64_t dropByteStart[MAX_STREAMS] = {0};

    int status;
    NtInfo_t hStreamInfo;
    NtStatistics_t hStat; // Stat handle.

    /* Query the system to get the number of streams currently instantiated */
    hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
    if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was stripped here by extraction. */
        exit(EXIT_FAILURE);
    }

    uint16_t num_active;
    if ((num_active = TestStreamConfig(hInfo, hstat_stream, stream_config, num_streams)) == 0) {
        /* None of the configured streams are active */
        return 0;
    }

    /* At least one stream is active so proceed with the stats. */
    uint16_t inst_id = 0;
    uint32_t stream_cnt = 0;
    for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {
        /* Advance inst_id to the next active stream entry. */
        while (inst_id < num_streams) {
            if (stream_config[inst_id].is_active) {
                break;
            } else {
                ++inst_id;
            }
        }
        if (inst_id == num_streams)
            break;

        /* Read usage data for the chosen stream ID */
        memset(&hStat, 0, sizeof (NtStatistics_t));
        hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
        hStat.u.usageData_v0.streamid = (uint8_t) stream_config[inst_id].stream_id;

        if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
            /* NOTE(review): an error-log call was stripped here by extraction. */
            return 0;
        }

        uint16_t stream_id = stream_config[inst_id].stream_id;
        if (stream_config[inst_id].is_active) {
            uint64_t rx_pkts_total = 0;
            uint64_t rx_byte_total = 0;
            uint64_t drop_pkts_total = 0;
            uint64_t drop_byte_total = 0;

            /* Sum over every host buffer serving this stream; on the first
             * sighting record the baseline instead of the totals. */
            for (uint32_t hbCount = 0; hbCount < hStat.u.usageData_v0.data.numHostBufferUsed; hbCount++) {
                if (unlikely(stream_config[inst_id].initialized == false)) {
                    rxPktsStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
                    rxByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
                    dropPktStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
                    dropByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
                    stream_config[inst_id].initialized = true;
                } else {
                    rx_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
                    rx_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
                    drop_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
                    drop_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
                }
            }

            /* NOTE(review): current_stats is presumably a file-scope array
             * declared in a section lost to extraction -- confirm. */
            current_stats[stream_id].current_packets = rx_pkts_total - rxPktsStart[stream_id];
            current_stats[stream_id].current_bytes = rx_byte_total - rxByteStart[stream_id];
            current_stats[stream_id].current_drop_packets = drop_pkts_total - dropPktStart[stream_id];
            current_stats[stream_id].current_drop_bytes = drop_byte_total - dropByteStart[stream_id];
        }

        if (enable_stream_stats) {
            StatsSetUI64(tv, stream_counters[inst_id].pkts, current_stats[stream_id].current_packets);
            StatsSetUI64(tv, stream_counters[inst_id].byte, current_stats[stream_id].current_bytes);
            StatsSetUI64(tv, stream_counters[inst_id].drop_pkts, current_stats[stream_id].current_drop_packets);
            StatsSetUI64(tv, stream_counters[inst_id].drop_byte, current_stats[stream_id].current_drop_bytes);
        }

        ++inst_id;
    }

    /* NOTE(review): the body of this loop (presumably the non-bypass
     * accumulation into total_stats) was stripped by extraction. */
    uint32_t stream_id;
    for (stream_id = 0; stream_id < num_streams; ++stream_id) {

#ifndef NAPATECH_ENABLE_BYPASS
#endif /* NAPATECH_ENABLE_BYPASS */
    }


#ifndef NAPATECH_ENABLE_BYPASS
    /* NOTE(review): total_stats is not declared in the visible code --
     * presumably file-scope state lost to extraction. */
    StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
    StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);
#endif /* NAPATECH_ENABLE_BYPASS */

    /* Read usage data for the chosen stream ID */
    memset(&hStat, 0, sizeof (NtStatistics_t));

#ifdef NAPATECH_ENABLE_BYPASS
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
    hStat.u.query_v3.clear = 0;
#else /* NAPATECH_ENABLE_BYPASS */
    /* Older versions of the API have a different structure. */
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
    hStat.u.query_v2.clear = 0;
#endif /* !NAPATECH_ENABLE_BYPASS */

    if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
        if (status == NT_STATUS_TIMEOUT) {
            SCLogInfo("Statistics timed out - will retry next time.");
            return 0;
        } else {
            /* NOTE(review): an error-log call was stripped here by extraction. */
            return 0;
        }
    }

#ifdef NAPATECH_ENABLE_BYPASS

    /* Per-color totals; the sums below imply color 0 = delivered to host,
     * colors 1/3 = dropped, colors 2/4 = forwarded -- confirm against the
     * NTPL color assignment used elsewhere. */
    int adapter = 0;
    uint64_t total_dispatch_host_pkts = 0;
    uint64_t total_dispatch_host_byte = 0;
    uint64_t total_dispatch_drop_pkts = 0;
    uint64_t total_dispatch_drop_byte = 0;
    uint64_t total_dispatch_fwd_pkts = 0;
    uint64_t total_dispatch_fwd_byte = 0;

    for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
        total_dispatch_host_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts;
        total_dispatch_host_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets;

        total_dispatch_drop_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;
        total_dispatch_drop_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].octets;

        total_dispatch_fwd_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].pkts;
        total_dispatch_fwd_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].octets;

        total_stats.current_packets += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;

        /* NOTE(review): '=' (not '+=') keeps only the last adapter's byte
         * count, and color[3].octets is omitted unlike the packet sum
         * above -- looks inconsistent; confirm against upstream. */
        total_stats.current_bytes = hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets;
    }

    StatsSetUI64(tv, dispatch_host.pkts, total_dispatch_host_pkts);
    StatsSetUI64(tv, dispatch_host.byte, total_dispatch_host_byte);

    StatsSetUI64(tv, dispatch_drop.pkts, total_dispatch_drop_pkts);
    StatsSetUI64(tv, dispatch_drop.byte, total_dispatch_drop_byte);

    if (is_inline) {
        StatsSetUI64(tv, dispatch_fwd.pkts, total_dispatch_fwd_pkts);
        StatsSetUI64(tv, dispatch_fwd.byte, total_dispatch_fwd_byte);
    }

    StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
    StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);

#endif /* NAPATECH_ENABLE_BYPASS */

    return num_active;
}
466 
467 /**
468  * \brief Statistics processing loop
469  *
470  * Instantiated on the stats thread. Periodically retrieives
471  * statistics from the Napatech card and updates the packet counters
472  *
473  * \param arg Pointer that is caste into a TheardVars structure
474  */
static void *NapatechStatsLoop(void *arg)
{
    ThreadVars *tv = (ThreadVars *) arg;

    int status;
    NtInfoStream_t hInfo;
    NtStatStream_t hstat_stream;
    int is_inline = 0;           /* napatech.inline from the conf */
    int enable_stream_stats = 0; /* per-stream counters enabled? */
    PacketCounters stream_counters[MAX_STREAMS];

    if (ConfGetBool("napatech.inline", &is_inline) == 0) {
        is_inline = 0;
    }

    if (ConfGetBool("napatech.enable-stream-stats", &enable_stream_stats) == 0) {
        /* default is "no" */
        enable_stream_stats = 0;
    }

    /* NOTE(review): the declaration of stream_config (presumably
     * NapatechStreamConfig stream_config[MAX_STREAMS];) was lost in
     * extraction -- confirm against the original source. */
    uint16_t stream_cnt = NapatechGetStreamConfig(stream_config);

    /* Open the info and Statistics */
    if ((status = NT_InfoOpen(&hInfo, "StatsLoopInfoStream")) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was stripped here by extraction. */
        return NULL;
    }

    if ((status = NT_StatOpen(&hstat_stream, "StatsLoopStatsStream")) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was stripped here by extraction. */
        return NULL;
    }

    NtStatistics_t hStat;
    memset(&hStat, 0, sizeof (NtStatistics_t));

    /* Initial read with clear=1 resets the card's counters so this loop
     * reports deltas from Suricata start-up. */
#ifdef NAPATECH_ENABLE_BYPASS
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
    hStat.u.query_v3.clear = 1;
#else /* NAPATECH_ENABLE_BYPASS */
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
    hStat.u.query_v2.clear = 1;
#endif /* !NAPATECH_ENABLE_BYPASS */

    if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was stripped here by extraction. */
        return 0;
    }

    PacketCounters total_counters;
    memset(&total_counters, 0, sizeof(total_counters));

    PacketCounters dispatch_host;
    memset(&dispatch_host, 0, sizeof(dispatch_host));

    PacketCounters dispatch_drop;
    memset(&dispatch_drop, 0, sizeof(dispatch_drop));

    PacketCounters dispatch_fwd;
    memset(&dispatch_fwd, 0, sizeof(dispatch_fwd));

    /* Register the global counters with the stats subsystem. */
    total_counters.pkts = StatsRegisterCounter("napa_total.pkts", tv);
    dispatch_host.pkts = StatsRegisterCounter("napa_dispatch_host.pkts", tv);
    dispatch_drop.pkts = StatsRegisterCounter("napa_dispatch_drop.pkts", tv);
    if (is_inline) {
        dispatch_fwd.pkts = StatsRegisterCounter("napa_dispatch_fwd.pkts", tv);
    }

    total_counters.byte = StatsRegisterCounter("napa_total.byte", tv);
    dispatch_host.byte = StatsRegisterCounter("napa_dispatch_host.byte", tv);
    dispatch_drop.byte = StatsRegisterCounter("napa_dispatch_drop.byte", tv);
    if (is_inline) {
        dispatch_fwd.byte = StatsRegisterCounter("napa_dispatch_fwd.byte", tv);
    }

    total_counters.drop_pkts = StatsRegisterCounter("napa_total.overflow_drop_pkts", tv);
    total_counters.drop_byte = StatsRegisterCounter("napa_total.overflow_drop_byte", tv);

    if (enable_stream_stats) {
        /* One pkts/bytes/drop_pkts/drop_byte counter quartet per stream. */
        for (int i = 0; i < stream_cnt; ++i) {
            char *pkts_buf = SCCalloc(1, 32);
            if (unlikely(pkts_buf == NULL)) {
                /* NOTE(review): FatalError(...) call truncated by extraction. */
                "Failed to allocate memory for NAPATECH stream counter.");
            }

            snprintf(pkts_buf, 32, "napa%d.pkts", stream_config[i].stream_id);
            stream_counters[i].pkts = StatsRegisterCounter(pkts_buf, tv);

            char *byte_buf = SCCalloc(1, 32);
            if (unlikely(byte_buf == NULL)) {
                /* NOTE(review): FatalError(...) call truncated by extraction. */
                "Failed to allocate memory for NAPATECH stream counter.");
            }
            snprintf(byte_buf, 32, "napa%d.bytes", stream_config[i].stream_id);
            stream_counters[i].byte = StatsRegisterCounter(byte_buf, tv);

            char *drop_pkts_buf = SCCalloc(1, 32);
            if (unlikely(drop_pkts_buf == NULL)) {
                /* NOTE(review): FatalError(...) call truncated by extraction. */
                "Failed to allocate memory for NAPATECH stream counter.");
            }
            snprintf(drop_pkts_buf, 32, "napa%d.drop_pkts", stream_config[i].stream_id);
            stream_counters[i].drop_pkts = StatsRegisterCounter(drop_pkts_buf, tv);

            char *drop_byte_buf = SCCalloc(1, 32);
            if (unlikely(drop_byte_buf == NULL)) {
                /* NOTE(review): FatalError(...) call truncated by extraction. */
                "Failed to allocate memory for NAPATECH stream counter.");
            }
            snprintf(drop_byte_buf, 32, "napa%d.drop_byte", stream_config[i].stream_id);
            stream_counters[i].drop_byte = StatsRegisterCounter(drop_byte_buf, tv);
        }
    }

#ifdef NAPATECH_ENABLE_BYPASS
    FlowStatsCounters flow_counters;
    if (bypass_supported) {
        flow_counters.active_bypass_flows = StatsRegisterCounter("napa_bypass.active_flows", tv);
        flow_counters.total_bypass_flows = StatsRegisterCounter("napa_bypass.total_flows", tv);
    }
#endif /* NAPATECH_ENABLE_BYPASS */

    /* NOTE(review): a line (original line 599) was stripped here by
     * extraction -- possibly a stats-setup call; confirm. */

    /* Publish zeroes so every counter appears from the first stats dump. */
    StatsSetUI64(tv, total_counters.pkts, 0);
    StatsSetUI64(tv, total_counters.byte, 0);
    StatsSetUI64(tv, total_counters.drop_pkts, 0);
    StatsSetUI64(tv, total_counters.drop_byte, 0);

#ifdef NAPATECH_ENABLE_BYPASS
    if (bypass_supported) {
        StatsSetUI64(tv, dispatch_host.pkts, 0);
        StatsSetUI64(tv, dispatch_drop.pkts, 0);

        if (is_inline) {
            StatsSetUI64(tv, dispatch_fwd.pkts, 0);
        }

        StatsSetUI64(tv, dispatch_host.byte, 0);
        StatsSetUI64(tv, dispatch_drop.byte, 0);
        if (is_inline) {
            StatsSetUI64(tv, dispatch_fwd.byte, 0);
        }

        if (enable_stream_stats) {
            for (int i = 0; i < stream_cnt; ++i) {
                StatsSetUI64(tv, stream_counters[i].pkts, 0);
                StatsSetUI64(tv, stream_counters[i].byte, 0);
                StatsSetUI64(tv, stream_counters[i].drop_pkts, 0);
                StatsSetUI64(tv, stream_counters[i].drop_byte, 0);
            }
        }

        StatsSetUI64(tv, flow_counters.active_bypass_flows, 0);
        StatsSetUI64(tv, flow_counters.total_bypass_flows, 0);
        /* clear_stats=1 resets the adapter's flow counters at start-up. */
        UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 1);
    }
#endif /* NAPATECH_ENABLE_BYPASS */

    uint32_t num_active = UpdateStreamStats(tv, hInfo, hstat_stream,
            stream_cnt, stream_config, total_counters,
            dispatch_host, dispatch_drop, dispatch_fwd,
            is_inline, enable_stream_stats, stream_counters);

    if (!NapatechIsAutoConfigEnabled() && (num_active < stream_cnt)) {
        SCLogInfo("num_active: %d, stream_cnt: %d", num_active, stream_cnt);
        /* NOTE(review): a warning-log call was truncated here by extraction. */
        "Some or all of the configured streams are not created. Proceeding with active streams.");
    }

    /* Main polling loop: refresh stats roughly once a second until told
     * to stop. */
    while (1) {
        /* NOTE(review): an `if (TmThreadsCheckFlag(tv, THV_KILL)) {` line
         * was stripped here by extraction; the closing brace after the
         * break pairs with it. */
            SCLogDebug("NapatechStatsLoop THV_KILL detected");
            break;
        }

        UpdateStreamStats(tv, hInfo, hstat_stream,
                stream_cnt, stream_config, total_counters,
                dispatch_host, dispatch_drop, dispatch_fwd,
                is_inline, enable_stream_stats,
                stream_counters);

#ifdef NAPATECH_ENABLE_BYPASS
        if (bypass_supported) {
            UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 0);
        }
#endif /* NAPATECH_ENABLE_BYPASS */

        /* NOTE(review): a line (original line 666, likely a stats-sync
         * call) was stripped here by extraction. */
        usleep(1000000);
    }

    /* CLEAN UP NT Resources and Close the info stream */
    if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was stripped here by extraction. */
        return NULL;
    }

    /* Close the statistics stream */
    if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
        /* NOTE(review): an error-log call was stripped here by extraction. */
        return NULL;
    }

    SCLogDebug("Exiting NapatechStatsLoop");
    /* NOTE(review): thread shutdown handling (original lines 683-685) was
     * stripped here by extraction. */

    return NULL;
}
689 
690 #define MAX_HOSTBUFFER 4
691 #define MAX_STREAMS 256
692 #define HB_HIGHWATER 2048 //1982
693 
694 /**
695  * \brief Tests whether a particular stream_id is actively registered
696  *
697  * \param stream_id - ID of the stream to look up
698  * \param num_registered - The total number of registered streams
699  * \param registered_streams - An array containing actively registered streams.
700  *
701  * \return Bool indicating is the specified stream is registered.
702  *
703  */
704 static bool RegisteredStream(uint16_t stream_id, uint16_t num_registered,
705  NapatechStreamConfig registered_streams[])
706 {
707  for (uint16_t reg_id = 0; reg_id < num_registered; ++reg_id) {
708  if (stream_id == registered_streams[reg_id].stream_id) {
709  return true;
710  }
711  }
712  return false;
713 }
714 
715 /**
716  * \brief Count the number of worker threads defined in the conf file.
717  *
718  * \return - The number of worker threads defined by the configuration
719  */
720 static uint32_t CountWorkerThreads(void)
721 {
722  int worker_count = 0;
723 
724  ConfNode *affinity;
725  ConfNode *root = ConfGetNode("threading.cpu-affinity");
726 
727  if (root != NULL) {
728 
729  TAILQ_FOREACH(affinity, &root->head, next)
730  {
731  if (strcmp(affinity->val, "decode-cpu-set") == 0 ||
732  strcmp(affinity->val, "stream-cpu-set") == 0 ||
733  strcmp(affinity->val, "reject-cpu-set") == 0 ||
734  strcmp(affinity->val, "output-cpu-set") == 0) {
735  continue;
736  }
737 
738  if (strcmp(affinity->val, "worker-cpu-set") == 0) {
739  ConfNode *node = ConfNodeLookupChild(affinity->head.tqh_first, "cpu");
740  ConfNode *lnode;
741 
743 
744  TAILQ_FOREACH(lnode, &node->head, next)
745  {
746  uint8_t start, end;
747  char *end_str;
748  if (strncmp(lnode->val, "all", 4) == 0) {
749  /* check that the sting in the config file is correctly specified */
750  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
752  "Only one Napatech port specifier type allowed.");
753  }
754  cpu_spec = CONFIG_SPECIFIER_RANGE;
755  worker_count = UtilCpuGetNumProcessorsConfigured();
756  } else if ((end_str = strchr(lnode->val, '-'))) {
757  /* check that the sting in the config file is correctly specified */
758  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
760  "Only one Napatech port specifier type allowed.");
761  }
762  cpu_spec = CONFIG_SPECIFIER_RANGE;
763 
764 
765  if (StringParseUint8(&start, 10, end_str - lnode->val, (const char *)lnode->val) < 0) {
766  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid"
767  " worker range start: '%s'", lnode->val);
768  }
769  if (StringParseUint8(&end, 10, 0, (const char *) (end_str + 1)) < 0) {
770  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid"
771  " worker range end: '%s'", (end_str != NULL) ? (const char *)(end_str + 1) : "Null");
772  }
773  if (end < start) {
774  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid"
775  " worker range start: '%d' is greater than end: '%d'", start, end);
776  }
777  worker_count = end - start + 1;
778 
779  } else {
780  /* check that the sting in the config file is correctly specified */
781  if (cpu_spec == CONFIG_SPECIFIER_RANGE) {
783  "Napatech port range specifiers cannot be combined with individual stream specifiers.");
784  }
785  cpu_spec = CONFIG_SPECIFIER_INDIVIDUAL;
786  ++worker_count;
787  }
788  }
789  break;
790  }
791  }
792  }
793  return worker_count;
794 }
795 
796 /**
797  * \brief Reads and parses the stream configuration defined in the config file.
798  *
799  * \param stream_config - array to be filled in with active stream info.
800  *
801  * \return the number of streams configured or -1 if an error occurred
802  *
803  */
805 {
806  int status;
807  char error_buffer[80]; // Error buffer
808  NtStatStream_t hstat_stream;
809  NtStatistics_t hStat; // Stat handle.
810  NtInfoStream_t info_stream;
811  NtInfo_t info;
812  uint16_t instance_cnt = 0;
813  int use_all_streams = 0;
814  int set_cpu_affinity = 0;
815  ConfNode *ntstreams;
816  uint16_t stream_id = 0;
817  uint8_t start = 0;
818  uint8_t end = 0;
819 
820  for (uint16_t i = 0; i < MAX_STREAMS; ++i) {
821  stream_config[i].stream_id = 0;
822  stream_config[i].is_active = false;
823  stream_config[i].initialized = false;
824  }
825 
826  if (ConfGetBool("napatech.use-all-streams", &use_all_streams) == 0) {
827  /* default is "no" */
828  use_all_streams = 0;
829  }
830 
831  if ((status = NT_InfoOpen(&info_stream, "SuricataStreamInfo")) != NT_SUCCESS) {
833  return -1;
834  }
835 
836  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
838  return -1;
839  }
840 
841  if (use_all_streams) {
842  info.cmd = NT_INFO_CMD_READ_STREAM;
843  if ((status = NT_InfoRead(info_stream, &info)) != NT_SUCCESS) {
845  return -1;
846  }
847 
848  while (instance_cnt < info.u.stream.data.count) {
849 
850  /*
851  * For each stream ID query the number of host-buffers used by
852  * the stream. If zero, then that streamID is not used; skip
853  * over it and continue until we get a streamID with a non-zero
854  * count of the host-buffers.
855  */
856  memset(&hStat, 0, sizeof (NtStatistics_t));
857 
858  /* Read usage data for the chosen stream ID */
859  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
860  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
861 
862  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
863  /* Get the status code as text */
864  NT_ExplainError(status, error_buffer, sizeof (error_buffer));
865  SCLogError(SC_ERR_NAPATECH_INIT_FAILED, "NT_StatRead() failed: %s\n", error_buffer);
866  return -1;
867  }
868 
869  if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
870  ++stream_id;
871  continue;
872  }
873 
874  /* if we get here it is an active stream */
875  stream_config[instance_cnt].stream_id = stream_id++;
876  stream_config[instance_cnt].is_active = true;
877  instance_cnt++;
878  }
879 
880  } else {
881  ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity);
882  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 1)) {
883  start = 0;
884  end = CountWorkerThreads() - 1;
885  } else {
886  /* When not using the default streams we need to
887  * parse the array of streams from the conf */
888  if ((ntstreams = ConfGetNode("napatech.streams")) == NULL) {
889  SCLogError(SC_ERR_RUNMODE, "Failed retrieving napatech.streams from Config");
890  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 0)) {
892  "if set-cpu-affinity: no in conf then napatech.streams must be defined");
893  }
894  exit(EXIT_FAILURE);
895  }
896 
897  /* Loop through all stream numbers in the array and register the devices */
898  ConfNode *stream;
899  enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;
900  instance_cnt = 0;
901 
902  TAILQ_FOREACH(stream, &ntstreams->head, next)
903  {
904 
905  if (stream == NULL) {
906  SCLogError(SC_ERR_NAPATECH_INIT_FAILED, "Couldn't Parse Stream Configuration");
907  return -1;
908  }
909 
910  char *end_str = strchr(stream->val, '-');
911  if (end_str) {
912  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
914  "Only one Napatech stream range specifier allowed.");
915  return -1;
916  }
917  stream_spec = CONFIG_SPECIFIER_RANGE;
918 
919  if (StringParseUint8(&start, 10, end_str - stream->val,
920  (const char *)stream->val) < 0) {
921  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid "
922  "stream id start: '%s'", stream->val);
923  }
924  if (StringParseUint8(&end, 10, 0, (const char *) (end_str + 1)) < 0) {
925  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid "
926  "stream id end: '%s'", (end_str != NULL) ? (const char *)(end_str + 1) : "Null");
927  }
928  } else {
929  if (stream_spec == CONFIG_SPECIFIER_RANGE) {
931  "Napatech range and individual specifiers cannot be combined.");
932  }
933  stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;
934  if (StringParseUint8(&stream_config[instance_cnt].stream_id,
935  10, 0, (const char *)stream->val) < 0) {
936  FatalError(SC_ERR_INVALID_VALUE, "Napatech invalid "
937  "stream id: '%s'", stream->val);
938  }
939  start = stream_config[instance_cnt].stream_id;
940  end = stream_config[instance_cnt].stream_id;
941  }
942  }
943  }
944 
945  for (stream_id = start; stream_id <= end; ++stream_id) {
946  /* if we get here it is configured in the .yaml file */
947  stream_config[instance_cnt].stream_id = stream_id;
948 
949  /* Check to see if it is an active stream */
950  memset(&hStat, 0, sizeof (NtStatistics_t));
951 
952  /* Read usage data for the chosen stream ID */
953  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
954  hStat.u.usageData_v0.streamid =
955  (uint8_t) stream_config[instance_cnt].stream_id;
956 
957  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
959  return -1;
960  }
961 
962  if (hStat.u.usageData_v0.data.numHostBufferUsed > 0) {
963  stream_config[instance_cnt].is_active = true;
964  }
965  instance_cnt++;
966  }
967  }
968 
969  /* Close the statistics stream */
970  if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
972  return -1;
973  }
974 
975  if ((status = NT_InfoClose(info_stream)) != NT_SUCCESS) {
977  return -1;
978  }
979 
980  return instance_cnt;
981 }
982 
/**
 * \brief Monitor thread: periodically samples the host-buffer (and, when
 *        built without bypass support, on-board SDRAM) fill levels of every
 *        registered Napatech stream and logs a perf message each time an
 *        average level crosses a 25% step up or down.
 *
 * \param arg ThreadVars for this monitor thread (used for the kill flag).
 * \return NULL on thread exit.
 */
static void *NapatechBufMonitorLoop(void *arg)
{
    ThreadVars *tv = (ThreadVars *) arg;

    NtInfo_t hStreamInfo;
    NtStatistics_t hStat; // Stat handle.
    NtInfoStream_t hInfo;
    NtStatStream_t hstat_stream;
    int status; // Status variable

    /* Log again each time the average fill level moves by this many percent. */
    const uint32_t alertInterval = 25;

#ifndef NAPATECH_ENABLE_BYPASS
    /* On-board (adapter SDRAM) levels are only tracked without bypass. */
    uint32_t OB_fill_level[MAX_STREAMS] = {0};
    uint32_t OB_alert_level[MAX_STREAMS] = {0};
    uint32_t ave_OB_fill_level[MAX_STREAMS] = {0};
#endif /* NAPATECH_ENABLE_BYPASS */

    uint32_t HB_fill_level[MAX_STREAMS] = {0};
    uint32_t HB_alert_level[MAX_STREAMS] = {0};
    uint32_t ave_HB_fill_level[MAX_STREAMS] = {0};

    /* Open the info and Statistics.
     * NOTE(review): the error-reporting statements inside these failure
     * branches appear to have been dropped by the extraction; only the
     * exit() calls remain. */
    if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
        exit(EXIT_FAILURE);
    }

    if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
        exit(EXIT_FAILURE);
    }

    /* Read the info on all streams instantiated in the system */
    hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
    if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
        exit(EXIT_FAILURE);
    }

    /* Only streams registered in the suricata config are reported on. */
    NapatechStreamConfig registered_streams[MAX_STREAMS];
    int num_registered = NapatechGetStreamConfig(registered_streams);
    if (num_registered == -1) {
        exit(EXIT_FAILURE);
    }

    while (1) {
        if (TmThreadsCheckFlag(tv, THV_KILL)) {
            SCLogDebug("NapatechBufMonitorLoop THV_KILL detected");
            break;
        }

        /* Sample five times per second. */
        usleep(200000);

        /* Read the info on all streams instantiated in the system */
        hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
        if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
            exit(EXIT_FAILURE);
        }

        char pktCntStr[4096];
        memset(pktCntStr, 0, sizeof (pktCntStr));

        uint32_t stream_id = 0;
        uint32_t stream_cnt = 0;
        uint32_t num_streams = hStreamInfo.u.stream.data.count;

        for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {

            /* Advance stream_id to the next stream that actually has host
             * buffers in use.  NOTE(review): there is no upper bound on
             * stream_id in this scan; if fewer than num_streams streams have
             * buffers, stream_id could run past MAX_STREAMS and index the
             * level arrays out of bounds — confirm against the driver's
             * guarantees. */
            do {

                /* Read usage data for the chosen stream ID */
                hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
                hStat.u.usageData_v0.streamid = (uint8_t) stream_id;

                if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
                    exit(EXIT_FAILURE);
                }

                if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
                    ++stream_id;
                    continue;
                }
            } while (hStat.u.usageData_v0.data.numHostBufferUsed == 0);

            if (RegisteredStream(stream_id, num_registered, registered_streams)) {

#ifndef NAPATECH_ENABLE_BYPASS
                ave_OB_fill_level[stream_id] = 0;
#endif /* NAPATECH_ENABLE_BYPASS */

                ave_HB_fill_level[stream_id] = 0;

                for (uint32_t hb_count = 0; hb_count < hStat.u.usageData_v0.data.numHostBufferUsed; hb_count++) {

#ifndef NAPATECH_ENABLE_BYPASS
                    /* On-board buffer usage as a percentage, clamped to 100. */
                    OB_fill_level[hb_count] =
                            ((100 * hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.used) /
                            hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.size);

                    if (OB_fill_level[hb_count] > 100) {
                        OB_fill_level[hb_count] = 100;
                    }
#endif /* NAPATECH_ENABLE_BYPASS */
                    /* Effective host-buffer capacity in KB, net of the
                     * high-water reserve.  NOTE(review): if the KB sum equals
                     * HB_HIGHWATER this is zero and the division below would
                     * fault — confirm this cannot occur in practice. */
                    uint32_t bufSize = hStat.u.usageData_v0.data.hb[hb_count].enQueuedAdapter / 1024
                            + hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024
                            + hStat.u.usageData_v0.data.hb[hb_count].enQueued / 1024
                            - HB_HIGHWATER;

                    HB_fill_level[hb_count] = (uint32_t)
                            ((100 * hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024) /
                            bufSize);

#ifndef NAPATECH_ENABLE_BYPASS
                    ave_OB_fill_level[stream_id] += OB_fill_level[hb_count];
#endif /* NAPATECH_ENABLE_BYPASS */

                    ave_HB_fill_level[stream_id] += HB_fill_level[hb_count];
                }

                /* Average across the host buffers backing this stream. */
#ifndef NAPATECH_ENABLE_BYPASS
                ave_OB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;
#endif /* NAPATECH_ENABLE_BYPASS */

                ave_HB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;

                /* Host Buffer Fill Level warnings... raise the alert
                 * threshold in 25% steps until it covers the current level,
                 * then log once. */
                if (ave_HB_fill_level[stream_id] >= (HB_alert_level[stream_id] + alertInterval)) {

                    while (ave_HB_fill_level[stream_id] >= HB_alert_level[stream_id] + alertInterval) {
                        HB_alert_level[stream_id] += alertInterval;
                    }
                    SCLogPerf("nt%d - Increasing Host Buffer Fill Level : %4d%%",
                            stream_id, ave_HB_fill_level[stream_id] - 1);
                }

                /* Symmetric step-down when the level recedes. */
                if (HB_alert_level[stream_id] > 0) {
                    if ((ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval))) {
                        SCLogPerf("nt%d - Decreasing Host Buffer Fill Level: %4d%%",
                                stream_id, ave_HB_fill_level[stream_id]);

                        while (ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval)) {
                            if ((HB_alert_level[stream_id]) > 0) {
                                HB_alert_level[stream_id] -= alertInterval;
                            } else break;
                        }
                    }
                }

#ifndef NAPATECH_ENABLE_BYPASS
                /* On Board SDRAM Fill Level warnings... same step scheme as
                 * the host-buffer alerts above. */
                if (ave_OB_fill_level[stream_id] >= (OB_alert_level[stream_id] + alertInterval)) {
                    while (ave_OB_fill_level[stream_id] >= OB_alert_level[stream_id] + alertInterval) {
                        OB_alert_level[stream_id] += alertInterval;

                    }
                    SCLogPerf("nt%d - Increasing Adapter SDRAM Fill Level: %4d%%",
                            stream_id, ave_OB_fill_level[stream_id]);
                }

                if (OB_alert_level[stream_id] > 0) {
                    if ((ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval))) {
                        SCLogPerf("nt%d - Decreasing Adapter SDRAM Fill Level : %4d%%",
                                stream_id, ave_OB_fill_level[stream_id]);

                        while (ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval)) {
                            if ((OB_alert_level[stream_id]) > 0) {
                                OB_alert_level[stream_id] -= alertInterval;
                            } else break;
                        }
                    }
                }
#endif /* NAPATECH_ENABLE_BYPASS */
            }
            ++stream_id;
        }
    }

    if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
        exit(EXIT_FAILURE);
    }

    /* Close the statistics stream */
    if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
        exit(EXIT_FAILURE);
    }

    /* NOTE(review): message names "NapatechStatsLoop" but this is
     * NapatechBufMonitorLoop — likely copy/paste; runtime string left
     * unchanged here. */
    SCLogDebug("Exiting NapatechStatsLoop");

    return NULL;
}
1182 
1183 
/* NapatechStartStats — body only: the signature line (presumably
 * "void NapatechStartStats(void)") was lost in extraction; confirm against
 * the repository.  Creates and spawns the NapatechStats and
 * NapatechBufMonitor management threads. */
{
    /* Creates the Statistic threads */
    ThreadVars *stats_tv = TmThreadCreate("NapatechStats",
            NULL, NULL,
            NULL, NULL,
            "custom", NapatechStatsLoop, 0);

    if (stats_tv == NULL) {
        /* NOTE(review): the FatalError(...) opener of this statement was lost
         * in extraction; only its message argument remains. */
        "Error creating a thread for NapatechStats - Killing engine.");
    }

    if (TmThreadSpawn(stats_tv) != 0) {
        /* NOTE(review): FatalError(...) opener lost in extraction. */
        "Failed to spawn thread for NapatechStats - Killing engine.");
    }

#ifdef NAPATECH_ENABLE_BYPASS
    if (bypass_supported) {
        SCLogInfo("Napatech bypass functionality enabled.");
    }
#endif /* NAPATECH_ENABLE_BYPASS */

    ThreadVars *buf_monitor_tv = TmThreadCreate("NapatechBufMonitor",
            NULL, NULL,
            NULL, NULL,
            "custom", NapatechBufMonitorLoop, 0);

    if (buf_monitor_tv == NULL) {
        /* NOTE(review): FatalError(...) opener lost in extraction. */
        "Error creating a thread for NapatechBufMonitor - Killing engine.");
    }

    if (TmThreadSpawn(buf_monitor_tv) != 0) {
        /* NOTE(review): FatalError(...) opener lost in extraction. */
        "Failed to spawn thread for NapatechBufMonitor - Killing engine.");
    }


    return;
}
1226 
1227 bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
1228 {
1229  uint32_t status = 0;
1230  static NtConfigStream_t hconfig;
1231 
1232  char ntpl_cmd[64];
1233  snprintf(ntpl_cmd, 64, "setup[numanode=%d] = streamid == %d", numa, stream);
1234 
1235  NtNtplInfo_t ntpl_info;
1236 
1237  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1239  return false;
1240  }
1241 
1242  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1243  status = ntpl_info.ntplId;
1244 
1245  } else {
1246  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1247  return false;
1248  }
1249 
1250  return status;
1251 }
1252 
1253 static uint32_t NapatechSetHashmode(void)
1254 {
1255  uint32_t status = 0;
1256  const char *hash_mode;
1257  static NtConfigStream_t hconfig;
1258  char ntpl_cmd[64];
1259  NtNtplInfo_t ntpl_info;
1260 
1261  uint32_t filter_id = 0;
1262 
1263  /* Get the hashmode from the conf file. */
1264  ConfGetValue("napatech.hashmode", &hash_mode);
1265 
1266  snprintf(ntpl_cmd, 64, "hashmode = %s", hash_mode);
1267 
1268  /* Issue the NTPL command */
1269  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1271  return false;
1272  }
1273 
1274  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1275  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1276  filter_id = ntpl_info.ntplId;
1277  SCLogConfig("Napatech hashmode: %s ID: %d", hash_mode, status);
1278  } else {
1279  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1280  status = 0;
1281  }
1282 
1283  return filter_id;
1284 }
1285 
1286 static uint32_t GetStreamNUMAs(uint32_t stream_id, int stream_numas[])
1287 {
1288  NtStatistics_t hStat; // Stat handle.
1289  NtStatStream_t hstat_stream;
1290  int status; // Status variable
1291 
1292  for (int i = 0; i < MAX_HOSTBUFFERS; ++i)
1293  stream_numas[i] = -1;
1294 
1295  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
1297  exit(EXIT_FAILURE);
1298  }
1299 
1300  char pktCntStr[4096];
1301  memset(pktCntStr, 0, sizeof (pktCntStr));
1302 
1303 
1304  /* Read usage data for the chosen stream ID */
1305  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
1306  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
1307 
1308  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
1310  exit(EXIT_FAILURE);
1311  }
1312 
1313  for (uint32_t hb_id = 0; hb_id < hStat.u.usageData_v0.data.numHostBufferUsed; ++hb_id) {
1314  stream_numas[hb_id] = hStat.u.usageData_v0.data.hb[hb_id].numaNode;
1315  }
1316 
1317  return hStat.u.usageData_v0.data.numHostBufferUsed;
1318 }
1319 
1320 static int NapatechSetFilter(NtConfigStream_t hconfig, char *ntpl_cmd)
1321 {
1322  int status = 0;
1323  int local_filter_id = 0;
1324 
1325  NtNtplInfo_t ntpl_info;
1326  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1327  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1328  SCLogConfig("NTPL filter assignment \"%s\" returned filter id %4d",
1329  ntpl_cmd, local_filter_id);
1330  } else {
1331  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1332  exit(EXIT_FAILURE);
1333  }
1334 
1335  return local_filter_id;
1336 }
1337 
/* NapatechDeleteFilters — body only: the signature line (presumably
 * "uint32_t NapatechDeleteFilters(void)") was lost in extraction; confirm
 * against the repository.  Issues "delete = all" to remove every NTPL
 * filter on the adapter; returns the NTPL id of the delete command, or 0
 * on failure. */
{
    uint32_t status = 0;
    static NtConfigStream_t hconfig;
    char ntpl_cmd[64];
    NtNtplInfo_t ntpl_info;

    if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
        /* NOTE(review): error-report statement lost in extraction. */
        exit(EXIT_FAILURE);
    }

    snprintf(ntpl_cmd, 64, "delete = all");
    if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
            NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
        /* Return the id the driver assigned to the delete command. */
        status = ntpl_info.ntplId;
    } else {
        NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
        status = 0;
    }

    NT_ConfigClose(hconfig);

    return status;
}
1363 
1364 
/**
 * \brief Program the Napatech adapter(s): hash mode, port-to-stream
 *        assignment and — when built with NAPATECH_ENABLE_BYPASS — the full
 *        NTPL key/filter set used for hardware flow bypass.
 *
 * \param first_stream  first host-buffer stream id to configure.
 * \param last_stream   last host-buffer stream id to configure (inclusive).
 * \return last NTPL status captured in the local `status` variable.
 *         Exits or fatals on configuration errors.
 */
uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
{
#define PORTS_SPEC_SIZE 64

    /* Parsed form of the napatech.ports config: per-pair port numbers, plus
     * the accumulated NTPL port-spec string. */
    struct ports_spec_s {
        uint8_t first[MAX_PORTS];
        uint8_t second[MAX_PORTS];
        bool all;
        char str[PORTS_SPEC_SIZE];
    } ports_spec;

    ports_spec.all = false;

    ConfNode *ntports;
    int iteration = 0;
    int status = 0;
    NtConfigStream_t hconfig;
    char ntpl_cmd[512];
    int is_inline = 0;
#ifdef NAPATECH_ENABLE_BYPASS
    int is_span_port[MAX_PORTS] = { 0 };
#endif

    /* Comma-separated list of SPAN ports, built while parsing. */
    char span_ports[128];
    memset(span_ports, 0, sizeof(span_ports));

    if (ConfGetBool("napatech.inline", &is_inline) == 0) {
        is_inline = 0;
    }

    NapatechSetHashmode();

    if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
        /* NOTE(review): error-report statement lost in extraction. */
        exit(EXIT_FAILURE);
    }

    /* Deactivate the streams while (re)programming the filters. */
    if (first_stream == last_stream) {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=inactive] = StreamId == %d",
                first_stream);
    } else {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=inactive] = StreamId == (%d..%d)",
                first_stream, last_stream);
    }
    NapatechSetFilter(hconfig, ntpl_cmd);

#ifdef NAPATECH_ENABLE_BYPASS
    if (NapatechUseHWBypass()) {
        SCLogInfo("Napatech Hardware Bypass enabled.");
    } else {
        SCLogInfo("Napatech Hardware Bypass available but disabled.");
    }
#else
    /* Without bypass support compiled in, requesting it (or inline mode,
     * which depends on it) is fatal. */
    if (NapatechUseHWBypass()) {
        SCLogInfo("Napatech Hardware Bypass requested in conf but is not available.");
        exit(EXIT_FAILURE);
    } else {
        SCLogInfo("Napatech Hardware Bypass disabled.");
    }

    if (is_inline) {
        /* NOTE(review): FatalError(...) opener lost in extraction. */
        "Napatech inline mode not supported. (Only available when Hardware Bypass support is enabled.)");
    }
#endif

    if (is_inline) {
        SCLogInfo("Napatech configured for inline mode.");
    } else {

        SCLogInfo("Napatech configured for passive (non-inline) mode.");
    }

    /* When not using the default streams we need to parse
     * the array of streams from the conf
     */
    if ((ntports = ConfGetNode("napatech.ports")) == NULL) {
        FatalError(SC_ERR_FATAL, "Failed retrieving napatech.ports from Conf");
    }

    /* Loop through all ports in the array */
    ConfNode *port;
    enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;

    if (NapatechUseHWBypass()) {
        SCLogInfo("Listening on the following Napatech ports:");
    }
    /* Build the NTPL command using values in the config file. */
    TAILQ_FOREACH(port, &ntports->head, next)
    {
        if (port == NULL) {
            /* NOTE(review): FatalError(...) opener lost in extraction. */
            "Couldn't Parse Port Configuration");
        }

        if (NapatechUseHWBypass()) {
#ifdef NAPATECH_ENABLE_BYPASS
            /* With bypass, ports must be given as "a-b" segments. */
            if (strchr(port->val, '-')) {
                stream_spec = CONFIG_SPECIFIER_RANGE;

                ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val);
                ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0, strchr(port->val, '-')+1);

                if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                    if (is_inline) {
                        /* NOTE(review): FatalError(...) opener lost in
                         * extraction. */
                        "Error with napatec.ports in conf file. When running in inline mode the two ports specifying a segment must be different.");
                    } else {
                        /* SPAN port configuration */
                        is_span_port[ports_spec.first[iteration]] = 1;

                        if (strlen(span_ports) == 0) {
                            snprintf(span_ports, sizeof (span_ports), "%d", ports_spec.first[iteration]);
                        } else {
                            char temp[16];
                            snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
                            strlcat(span_ports, temp, sizeof(span_ports));
                        }

                    }
                }

                /* NOTE(review): BUG — this compares the adapter of first[]
                 * with itself, so the condition is always false and the
                 * same-adapter check never fires; the message below says it
                 * should compare first[] against second[].  Also the
                 * SCLogError(...) openers for the two message strings were
                 * lost in extraction. */
                if (NapatechGetAdapter(ports_spec.first[iteration]) != NapatechGetAdapter(ports_spec.first[iteration])) {
                    "Invalid napatech.ports specification in conf file.");
                    "Two ports on a segment must reside on the same adapter. port %d is on adapter %d, port %d is on adapter %d.",
                            ports_spec.first[iteration],
                            NapatechGetAdapter(ports_spec.first[iteration]),
                            ports_spec.second[iteration],
                            NapatechGetAdapter(ports_spec.second[iteration])
                            );
                    exit(EXIT_FAILURE);
                }

                NapatechSetPortmap(ports_spec.first[iteration], ports_spec.second[iteration]);
                if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                    SCLogInfo("    span_port: %d", ports_spec.first[iteration]);
                } else {
                    SCLogInfo("    %s: %d - %d", is_inline ? "inline_ports" : "tap_ports", ports_spec.first[iteration], ports_spec.second[iteration]);
                }

                /* Append this pair to the accumulated NTPL port list. */
                if (iteration == 0) {
                    if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                        snprintf(ports_spec.str, sizeof (ports_spec.str), "%d", ports_spec.first[iteration]);
                    } else {
                        snprintf(ports_spec.str, sizeof (ports_spec.str), "%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
                    }
                } else {
                    char temp[16];
                    if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
                        snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
                    } else {
                        snprintf(temp, sizeof(temp), ",%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
                    }
                    strlcat(ports_spec.str, temp, sizeof(ports_spec.str));
                }
            } else {
                /* NOTE(review): FatalError(...) opener lost in extraction. */
                "When using hardware flow bypass ports must be specified as segments. E.g. ports: [0-1, 0-2]");
            }
#endif
        } else { // !NapatechUseHWBypass()
            /* Without bypass, ports may be "all", a single range "a-b",
             * or individual port numbers — but the styles cannot be mixed. */
            if (strncmp(port->val, "all", 3) == 0) {
                /* check that the sting in the config file is correctly specified */
                if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
                    /* NOTE(review): FatalError(...) opener lost in
                     * extraction. */
                    "Only one Napatech port specifier type is allowed.");
                }
                stream_spec = CONFIG_SPECIFIER_RANGE;

                ports_spec.all = true;
                snprintf(ports_spec.str, sizeof (ports_spec.str), "all");
            } else if (strchr(port->val, '-')) {
                /* check that the sting in the config file is correctly specified */
                if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
                    /* NOTE(review): FatalError(...) opener lost in
                     * extraction. */
                    "Only one Napatech port specifier is allowed when hardware bypass is disabled. (E.g. ports: [0-4], NOT ports: [0-1,2-3])");
                }
                stream_spec = CONFIG_SPECIFIER_RANGE;

                ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val);
                ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0, strchr(port->val, '-') + 1);
                snprintf(ports_spec.str, sizeof (ports_spec.str), "(%d..%d)", ports_spec.first[iteration], ports_spec.second[iteration]);
            } else {
                /* check that the sting in the config file is correctly specified */
                if (stream_spec == CONFIG_SPECIFIER_RANGE) {
                    /* NOTE(review): FatalError(...) opener lost in
                     * extraction. */
                    "Napatech port range specifiers cannot be combined with individual stream specifiers.");
                }
                stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;

                ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val);

                /* Determine the ports to use on the NTPL assign statement*/
                if (iteration == 0) {
                    snprintf(ports_spec.str, sizeof (ports_spec.str), "%s", port->val);
                } else {
                    strlcat(ports_spec.str, ",", sizeof(ports_spec.str));
                    strlcat(ports_spec.str, port->val, sizeof(ports_spec.str));
                }
            }
        } // if !NapatechUseHWBypass()
        ++iteration;
    } /* TAILQ_FOREACH */

#ifdef NAPATECH_ENABLE_BYPASS
    if (bypass_supported) {
        if (is_inline) {
            char inline_setup_cmd[512];
            /* NOTE(review): sizeof(ntpl_cmd) is used to bound writes into
             * inline_setup_cmd; both happen to be 512 bytes, but this should
             * be sizeof(inline_setup_cmd). */
            if (first_stream == last_stream) {
                snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
                        "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == %d",
                        ports_spec.str, first_stream);
            } else {
                snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
                        "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == (%d..%d)",
                        ports_spec.str, first_stream, last_stream);
            }
            NapatechSetFilter(hconfig, inline_setup_cmd);
        }
        /* Build the NTPL command: base assignment plus per-protocol color
         * bits used by the packet source to classify traffic. */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=3;streamid=(%d..%d);colormask=0x10000000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);


        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x11000000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
                "]= %s%s and (Layer3Protocol==IPV4)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);


        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x14000000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer3Protocol==IPV6)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x10100000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer4Protocol==TCP)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "assign[priority=2;streamid=(%d..%d);colormask=0x10200000;"
                "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
                "]= %s%s and (Layer4Protocol==UDP)",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);

        if (strlen(span_ports) > 0) {
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=2;streamid=(%d..%d);colormask=0x00001000;"
                    "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
                    "]= port==%s",
                    first_stream, last_stream, span_ports);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        /* IPv4 5-tuple key for inline/tap segments. */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={sw_32_32,sw_16_16}",
                NAPATECH_KEYTYPE_IPV4);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[12]/32/32,Layer4Header[0]/16/16)",
                NAPATECH_KEYTYPE_IPV4, NAPATECH_KEYTYPE_IPV4);
        NapatechSetFilter(hconfig, ntpl_cmd);

        /* IPv4 5-tuple (sorted) key for SPAN ports, where direction is
         * not known. */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={32,32,16,16}",
                NAPATECH_KEYTYPE_IPV4_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[12]/32,Layer3Header[16]/32,Layer4Header[0]/16,Layer4Header[2]/16)",
                NAPATECH_KEYTYPE_IPV4_SPAN, NAPATECH_KEYTYPE_IPV4_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);

        /* IPv6 5tuple for inline and tap ports */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={sw_128_128,sw_16_16}",
                NAPATECH_KEYTYPE_IPV6);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[8]/128/128,Layer4Header[0]/16/16)",
                NAPATECH_KEYTYPE_IPV6, NAPATECH_KEYTYPE_IPV6);
        NapatechSetFilter(hconfig, ntpl_cmd);

        /* IPv6 5tuple for SPAN Ports */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyType[name=KT%u]={128,128,16,16}",
                NAPATECH_KEYTYPE_IPV6_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);

        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[8]/128,Layer3Header[24]/128,Layer4Header[0]/16,Layer4Header[2]/16)",
                NAPATECH_KEYTYPE_IPV6_SPAN, NAPATECH_KEYTYPE_IPV6_SPAN);
        NapatechSetFilter(hconfig, ntpl_cmd);


        /* Split the configured pairs into upstream (a) and downstream (b)
         * port lists, excluding SPAN ports which are handled separately. */
        int pair;
        char ports_ntpl_a[64];
        char ports_ntpl_b[64];
        memset(ports_ntpl_a, 0, sizeof(ports_ntpl_a));
        memset(ports_ntpl_b, 0, sizeof(ports_ntpl_b));

        for (pair = 0; pair < iteration; ++pair) {
            char port_str[8];

            if (!is_span_port[ports_spec.first[pair]]) {
                snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_a) == 0 ? "" : ",", ports_spec.first[pair]);
                strlcat(ports_ntpl_a, port_str, sizeof(ports_ntpl_a));

                snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_b) == 0 ? "" : ",", ports_spec.second[pair]);
                strlcat(ports_ntpl_b, port_str, sizeof(ports_ntpl_b));
            }
        }

        if (strlen(ports_ntpl_a) > 0) {
            /* This is the assign for dropping upstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    ports_ntpl_a,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(ports_ntpl_b) > 0) {
            /* This is the assign for dropping downstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                    ports_ntpl_b, //ports_spec.str,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_KEYTYPE_IPV4,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(span_ports) > 0) {
            /* This is the assign for dropping SPAN Port traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    span_ports,
                    NAPATECH_KEYTYPE_IPV4_SPAN,
                    NAPATECH_KEYTYPE_IPV4_SPAN,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (is_inline) {
            for (pair = 0; pair < iteration; ++pair) {
                /* This is the assignment for forwarding traffic */
                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u)==%u)",
                        ports_spec.second[pair],
                        ports_spec.first[pair],
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);

                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                        ports_spec.first[pair],
                        ports_spec.second[pair],
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_KEYTYPE_IPV4,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);
            }
        }

        if (strlen(ports_ntpl_a) > 0) {
            /* This is the assign for dropping upstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    ports_ntpl_a,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(ports_ntpl_b) > 0) {
            /* This is the assign for dropping downstream traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                    ports_ntpl_b, //ports_spec.str,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_KEYTYPE_IPV6,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (strlen(span_ports) > 0) {
            /* This is the assign for dropping SPAN Port traffic */
            snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                    "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
                    span_ports,
                    NAPATECH_KEYTYPE_IPV6_SPAN,
                    NAPATECH_KEYTYPE_IPV6_SPAN,
                    NAPATECH_FLOWTYPE_DROP);
            NapatechSetFilter(hconfig, ntpl_cmd);
        }

        if (is_inline) {
            for (pair = 0; pair < iteration; ++pair) {
                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u)==%u)",
                        ports_spec.second[pair],
                        ports_spec.first[pair],
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);

                snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                        "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
                        ports_spec.first[pair],
                        ports_spec.second[pair],
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_KEYTYPE_IPV6,
                        NAPATECH_FLOWTYPE_PASS);
                NapatechSetFilter(hconfig, ntpl_cmd);
            }
        }
    } else {
        if (is_inline) {
            /* NOTE(review): FatalError(...) opener lost in extraction. */
            "Napatech Inline operation not supported by this FPGA version.");
        }

        /* Bypass compiled in but not supported by this FPGA: plain
         * assignment with an empty colormask. */
        snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d);colormask=0x0] = %s%s",
                first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
        NapatechSetFilter(hconfig, ntpl_cmd);
    }
    }

#else /* NAPATECH_ENABLE_BYPASS */
    /* Non-bypass build: a single plain port-to-stream assignment. */
    snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d)] = %s%s",
            first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
    NapatechSetFilter(hconfig, ntpl_cmd);

#endif /* !NAPATECH_ENABLE_BYPASS */

    SCLogConfig("Host-buffer NUMA assignments: ");
    int numa_nodes[MAX_HOSTBUFFERS];
    uint32_t stream_id;
    /* NOTE(review): "< last_stream" skips the last stream in the report;
     * likely should be "<=" — confirm intent. */
    for (stream_id = first_stream; stream_id < last_stream; ++stream_id) {
        char temp1[256];
        char temp2[256];

        uint32_t num_host_buffers = GetStreamNUMAs(stream_id, numa_nodes);

        snprintf(temp1, 256, "    stream %d: ", stream_id);

        for (uint32_t hb_id = 0; hb_id < num_host_buffers; ++hb_id) {
            snprintf(temp2, 256, "%d ", numa_nodes[hb_id]);
            strlcat(temp1, temp2, sizeof(temp1));
        }

        SCLogConfig("%s", temp1);
    }

    /* Re-activate the streams now that all filters are installed. */
    if (first_stream == last_stream) {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=active] = StreamId == %d",
                first_stream);
    } else {
        snprintf(ntpl_cmd, sizeof (ntpl_cmd),
                "Setup[state=active] = StreamId == (%d..%d)",
                first_stream, last_stream);
    }
    NapatechSetFilter(hconfig, ntpl_cmd);

    NT_ConfigClose(hconfig);

    return status;
}
1857 
1858 #endif // HAVE_NAPATECH
util-byte.h
PORTS_SPEC_SIZE
#define PORTS_SPEC_SIZE
tm-threads.h
CONFIG_SPECIFIER_UNDEFINED
@ CONFIG_SPECIFIER_UNDEFINED
Definition: util-napatech.c:200
TmThreadSpawn
TmEcode TmThreadSpawn(ThreadVars *tv)
Spawns a thread associated with the ThreadVars instance tv.
Definition: tm-threads.c:1708
NapatechGetCurrentStats
NapatechCurrentStats NapatechGetCurrentStats(uint16_t id)
Definition: util-napatech.c:193
SC_ERR_INVALID_VALUE
@ SC_ERR_INVALID_VALUE
Definition: util-error.h:160
CONFIG_SPECIFIER_RANGE
@ CONFIG_SPECIFIER_RANGE
Definition: util-napatech.c:201
SC_ERR_NAPATECH_INIT_FAILED
@ SC_ERR_NAPATECH_INIT_FAILED
Definition: util-error.h:251
ConfNode_::val
char * val
Definition: conf.h:34
ConfGetBool
int ConfGetBool(const char *name, int *val)
Retrieve a configuration value as an boolen.
Definition: conf.c:516
NapatechSetPortmap
int NapatechSetPortmap(int port, int peer)
unlikely
#define unlikely(expr)
Definition: util-optimize.h:35
SCLogDebug
#define SCLogDebug(...)
Definition: util-debug.h:298
TmThreadsSetFlag
void TmThreadsSetFlag(ThreadVars *tv, uint32_t flag)
Set a thread flag.
Definition: tm-threads.c:97
TmThreadWaitForFlag
void TmThreadWaitForFlag(ThreadVars *tv, uint32_t flags)
Waits till the specified flag(s) is(are) set. We don't bother if the kill flag has been set or not on...
Definition: tm-threads.c:1807
next
struct HtpBodyChunk_ * next
Definition: app-layer-htp.h:0
THV_DEINIT
#define THV_DEINIT
Definition: threadvars.h:46
ConfGetNode
ConfNode * ConfGetNode(const char *name)
Get a ConfNode by name.
Definition: conf.c:175
UtilCpuGetNumProcessorsConfigured
uint16_t UtilCpuGetNumProcessorsConfigured(void)
Get the number of cpus configured in the system.
Definition: util-cpu.c:59
StatsSetUI64
void StatsSetUI64(ThreadVars *tv, uint16_t id, uint64_t x)
Sets a value of type double to the local counter.
Definition: counters.c:191
NapatechDeleteFilters
uint32_t NapatechDeleteFilters(void)
Definition: util-napatech.c:1338
TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)
Definition: queue.h:350
StatsSetupPrivate
int StatsSetupPrivate(ThreadVars *tv)
Definition: counters.c:1194
StatsSyncCountersIfSignalled
#define StatsSyncCountersIfSignalled(tv)
Definition: counters.h:137
NapatechIsAutoConfigEnabled
bool NapatechIsAutoConfigEnabled(void)
Definition: runmode-napatech.c:70
PacketCounters_::byte
uint16_t byte
Definition: util-napatech.c:185
SC_WARN_COMPATIBILITY
@ SC_WARN_COMPATIBILITY
Definition: util-error.h:193
stream_config
TcpStreamCnf stream_config
Definition: stream-tcp.c:119
SC_ERR_RUNMODE
@ SC_ERR_RUNMODE
Definition: util-error.h:219
current_stats
NapatechCurrentStats current_stats[MAX_STREAMS]
Definition: util-napatech.c:191
NapatechStreamConfig_
Definition: util-napatech.h:43
SC_ERR_NAPATECH_OPEN_FAILED
@ SC_ERR_NAPATECH_OPEN_FAILED
Definition: util-error.h:246
THV_RUNNING_DONE
#define THV_RUNNING_DONE
Definition: threadvars.h:47
NapatechGetStreamConfig
int NapatechGetStreamConfig(NapatechStreamConfig stream_config[])
Reads and parses the stream configuration defined in the config file.
Definition: util-napatech.c:804
total_stats
NapatechCurrentStats total_stats
Definition: util-napatech.c:190
util-device.h
NapatechCurrentStats_::current_packets
uint64_t current_packets
Definition: util-napatech.h:51
strlcat
size_t strlcat(char *, const char *src, size_t siz)
Definition: util-strlcatu.c:45
util-cpu.h
SC_ERR_NAPATECH_CONFIG_STREAM
@ SC_ERR_NAPATECH_CONFIG_STREAM
Definition: util-error.h:252
source-napatech.h
ThreadVars_
Per thread variable structure.
Definition: threadvars.h:58
PacketCounters
struct PacketCounters_ PacketCounters
THV_KILL
#define THV_KILL
Definition: threadvars.h:41
CONFIG_SPECIFIER_INDIVIDUAL
@ CONFIG_SPECIFIER_INDIVIDUAL
Definition: util-napatech.c:202
TmThreadCreate
ThreadVars * TmThreadCreate(const char *name, const char *inq_name, const char *inqh_name, const char *outq_name, const char *outqh_name, const char *slots, void *(*fn_p)(void *), int mucond)
Creates and returns the TV instance for a new thread.
Definition: tm-threads.c:935
NAPATECH_NTPL_ERROR
#define NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status)
Definition: util-napatech.h:71
NapatechGetAdapter
int NapatechGetAdapter(uint8_t port)
NAPATECH_ERROR
#define NAPATECH_ERROR(err_type, status)
Definition: util-napatech.h:65
util-napatech.h
PacketCounters_::pkts
uint16_t pkts
Definition: util-napatech.c:184
PacketCounters_::drop_pkts
uint16_t drop_pkts
Definition: util-napatech.c:186
SC_ERR_NAPATECH_PARSE_CONFIG
@ SC_ERR_NAPATECH_PARSE_CONFIG
Definition: util-error.h:255
NapatechCurrentStats_::current_bytes
uint64_t current_bytes
Definition: util-napatech.h:52
HB_HIGHWATER
#define HB_HIGHWATER
Definition: util-napatech.c:692
PacketCounters_::drop_byte
uint16_t drop_byte
Definition: util-napatech.c:187
SCLogInfo
#define SCLogInfo(...)
Macro used to log INFORMATIONAL messages.
Definition: util-debug.h:217
ConfNodeLookupChild
ConfNode * ConfNodeLookupChild(const ConfNode *node, const char *name)
Lookup a child configuration node by name.
Definition: conf.c:814
THV_INIT_DONE
#define THV_INIT_DONE
Definition: threadvars.h:38
CONFIG_SPECIFIER
CONFIG_SPECIFIER
Definition: util-napatech.c:199
suricata-common.h
SCLogPerf
#define SCLogPerf(...)
Definition: util-debug.h:224
SCLogError
#define SCLogError(err_code,...)
Macro used to log ERROR messages.
Definition: util-debug.h:257
NapatechStartStats
void NapatechStartStats(void)
Definition: util-napatech.c:1184
FatalError
#define FatalError(x,...)
Definition: util-debug.h:532
MAX_HOSTBUFFERS
#define MAX_HOSTBUFFERS
Definition: util-napatech.c:205
tv
ThreadVars * tv
Definition: fuzz_decodepcapfile.c:29
threadvars.h
NapatechUseHWBypass
bool NapatechUseHWBypass(void)
Definition: runmode-napatech.c:75
NapatechCurrentStats_::current_drop_bytes
uint64_t current_drop_bytes
Definition: util-napatech.h:54
SCLogConfig
struct SCLogConfig_ SCLogConfig
Holds the config state used by the logging api.
SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED
@ SC_ERR_NAPATECH_STREAMS_REGISTER_FAILED
Definition: util-error.h:253
ByteExtractStringUint8
int ByteExtractStringUint8(uint8_t *res, int base, uint16_t len, const char *str)
Definition: util-byte.c:285
str
#define str(s)
Definition: suricata-common.h:273
SCLogWarning
#define SCLogWarning(err_code,...)
Macro used to log WARNING messages.
Definition: util-debug.h:244
MAX_PORTS
#define MAX_PORTS
Definition: util-napatech.h:59
ConfNode_
Definition: conf.h:32
SC_ERR_FATAL
@ SC_ERR_FATAL
Definition: util-error.h:203
StringParseUint8
int StringParseUint8(uint8_t *res, int base, uint16_t len, const char *str)
Definition: util-byte.c:359
NapatechCurrentStats_::current_drop_packets
uint64_t current_drop_packets
Definition: util-napatech.h:53
suricata.h
ConfGetValue
int ConfGetValue(const char *name, const char **vptr)
Retrieve the value of a configuration node.
Definition: conf.c:359
NapatechSetupTraffic
uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
Definition: util-napatech.c:1365
MAX_STREAMS
#define MAX_STREAMS
Definition: util-napatech.c:691
NapatechCurrentStats_
Definition: util-napatech.h:50
PacketCounters_
Definition: util-napatech.c:183
TmThreadsCheckFlag
int TmThreadsCheckFlag(ThreadVars *tv, uint32_t flag)
Check if a thread flag is set.
Definition: tm-threads.c:89
StatsRegisterCounter
uint16_t StatsRegisterCounter(const char *name, struct ThreadVars_ *tv)
Registers a normal, unqualified counter.
Definition: counters.c:939
THV_CLOSED
#define THV_CLOSED
Definition: threadvars.h:43
SCCalloc
#define SCCalloc(nm, sz)
Definition: util-mem.h:53
NapatechSetupNuma
bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
Definition: util-napatech.c:1227