suricata
util-napatech.c
Go to the documentation of this file.
1 /* Copyright (C) 2017-2021 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 /**
18  * \file
19  *
20  * \author Napatech Inc.
21  * \author Phil Young <py@napatech.com>
22  *
23  *
24  */
25 #include "suricata-common.h"
26 
27 #ifdef HAVE_NAPATECH
28 #include "suricata.h"
29 #include "util-device.h"
30 #include "util-cpu.h"
31 #include "util-byte.h"
32 #include "threadvars.h"
33 #include "tm-threads.h"
34 #include "util-napatech.h"
35 #include "source-napatech.h"
36 #include "runmode-napatech.h"
37 
38 #ifdef NAPATECH_ENABLE_BYPASS
39 
/*
 * counters to track the number of flows programmed on
 * the adapter.
 *
 * NOTE: the fields hold Stats-module counter IDs (as returned by
 * StatsRegisterCounter()), not the counts themselves.
 */
typedef struct FlowStatsCounters_
{
    uint16_t active_bypass_flows; /* counter id: flows currently programmed on hardware */
    uint16_t total_bypass_flows;  /* counter id: cumulative flows programmed since start */
} FlowStatsCounters;
49 
50 
/* Set non-zero by NapatechVerifyBypassSupport() once every adapter has
 * accepted a FlowStream open; zero otherwise. */
static int bypass_supported;

/**
 * \brief Reports whether hardware bypass support has been verified.
 *
 * \return non-zero if bypass is supported (as determined by
 *         NapatechVerifyBypassSupport()); zero otherwise.
 */
int NapatechIsBypassSupported(void)
{
    return bypass_supported;
}
56 
57 /**
58  * \brief Returns the number of Napatech Adapters in the system.
59  *
60  * \return count of the Napatech adapters present in the system.
61  */
62 int NapatechGetNumAdapters(void)
63 {
64  NtInfoStream_t hInfo;
65  NtInfo_t hInfoSys;
66  int status;
67  static int num_adapters = -1;
68 
69  if (num_adapters == -1) {
70  if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
71  NAPATECH_ERROR(status);
72  exit(EXIT_FAILURE);
73  }
74 
75  hInfoSys.cmd = NT_INFO_CMD_READ_SYSTEM;
76  if ((status = NT_InfoRead(hInfo, &hInfoSys)) != NT_SUCCESS) {
77  NAPATECH_ERROR(status);
78  exit(EXIT_FAILURE);
79  }
80 
81  num_adapters = hInfoSys.u.system.data.numAdapters;
82 
83  NT_InfoClose(hInfo);
84  }
85 
86  return num_adapters;
87 }
88 
89 /**
90  * \brief Verifies that the Napatech adapters support bypass.
91  *
92  * Attempts to opens a FlowStream on each adapter present in the system.
93  * If successful then bypass is supported
94  *
95  * \return 1 if Bypass functionality is supported; zero otherwise.
96  */
97 int NapatechVerifyBypassSupport(void)
98 {
99  int status;
100  int adapter = 0;
101  int num_adapters = NapatechGetNumAdapters();
102  SCLogInfo("Found %d Napatech adapters.", num_adapters);
103  NtFlowStream_t hFlowStream;
104 
105  if (!NapatechUseHWBypass()) {
106  /* HW Bypass is disabled in the conf file */
107  return 0;
108  }
109 
110  for (adapter = 0; adapter < num_adapters; ++adapter) {
111  NtFlowAttr_t attr;
112  char flow_name[80];
113 
114  NT_FlowOpenAttrInit(&attr);
115  NT_FlowOpenAttrSetAdapterNo(&attr, adapter);
116 
117  snprintf(flow_name, sizeof(flow_name), "Flow stream %d", adapter );
118  SCLogInfo("Opening flow programming stream: %s\n", flow_name);
119  if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) {
120  SCLogWarning("Napatech bypass functionality not supported by the FPGA version on "
121  "adapter %d - disabling support.",
122  adapter);
123  bypass_supported = 0;
124  return 0;
125  }
126  NT_FlowClose(hFlowStream);
127  }
128 
129  bypass_supported = 1;
130  return bypass_supported;
131 }
132 
133 
134 /**
135  * \brief Updates statistic counters for Napatech FlowStats
136  *
137  * \param tv Thread variable to ThreadVars
138  * \param hInfo Handle to the Napatech InfoStream.
139  * \param hstat_stream Handle to the Napatech Statistics Stream.
140  * \param flow_counters The flow counters statistics to update.
141  * \param clear_stats Indicates if statistics on the card should be reset to zero.
142  *
143  */
144 static void UpdateFlowStats(
145  ThreadVars *tv,
146  NtInfoStream_t hInfo,
147  NtStatStream_t hstat_stream,
148  FlowStatsCounters flow_counters,
149  int clear_stats
150  )
151 {
152  NtStatistics_t hStat;
153  int status;
154 
155  uint64_t programed = 0;
156  uint64_t removed = 0;
157  int adapter = 0;
158 
159  for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
160  hStat.cmd = NT_STATISTICS_READ_CMD_FLOW_V0;
161  hStat.u.flowData_v0.clear = clear_stats;
162  hStat.u.flowData_v0.adapterNo = adapter;
163  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
164  NAPATECH_ERROR(status);
165  exit(1);
166  }
167  programed = hStat.u.flowData_v0.learnDone;
168  removed = hStat.u.flowData_v0.unlearnDone
169  + hStat.u.flowData_v0.automaticUnlearnDone
170  + hStat.u.flowData_v0.timeoutUnlearnDone;
171  }
172 
173  StatsSetUI64(tv, flow_counters.active_bypass_flows, programed - removed);
174  StatsSetUI64(tv, flow_counters.total_bypass_flows, programed);
175 }
176 
177 #endif /* NAPATECH_ENABLE_BYPASS */
178 
179 
180 /*-----------------------------------------------------------------------------
181  *-----------------------------------------------------------------------------
182  * Statistics code
183  *-----------------------------------------------------------------------------
184  */
185 typedef struct PacketCounters_
186 {
187  uint16_t pkts;
188  uint16_t byte;
189  uint16_t drop_pkts;
190  uint16_t drop_byte;
192 
195 
197 {
198 
199  return current_stats[id];
200 }
201 
206 };
207 
208 #define MAX_HOSTBUFFERS 8
209 
210 /**
211  * \brief Test to see if any of the configured streams are active
212  *
213  * \param hInfo Handle to Napatech Info Stream.
214  * \param hStatsStream Handle to Napatech Statistics stream
215  * \param stream_config array of stream configuration structures
216  * \param num_inst
217  *
218  */
219 static uint16_t TestStreamConfig(
220  NtInfoStream_t hInfo,
221  NtStatStream_t hstat_stream,
223  uint16_t num_inst)
224 {
225  uint16_t num_active = 0;
226 
227  for (uint16_t inst = 0; inst < num_inst; ++inst) {
228  int status;
229  NtStatistics_t stat; // Stat handle.
230 
231  /* Check to see if it is an active stream */
232  memset(&stat, 0, sizeof (NtStatistics_t));
233 
234  /* Read usage data for the chosen stream ID */
235  stat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
236  stat.u.usageData_v0.streamid = (uint8_t) stream_config[inst].stream_id;
237 
238  if ((status = NT_StatRead(hstat_stream, &stat)) != NT_SUCCESS) {
239  NAPATECH_ERROR(status);
240  return 0;
241  }
242 
243  if (stat.u.usageData_v0.data.numHostBufferUsed > 0) {
244  stream_config[inst].is_active = true;
245  num_active++;
246  } else {
247  stream_config[inst].is_active = false;
248  }
249  }
250 
251  return num_active;
252 }
253 
254 /**
255  * \brief Updates Napatech packet counters
256  *
257  * \param tv Pointer to ThreadVars structure
258  * \param hInfo Handle to Napatech Info Stream.
259  * \param hstat_stream Handle to Napatech Statistics stream
260  * \param num_streams the number of streams that are currently active
261  * \param stream_config array of stream configuration structures
262  * \param total_counters - cumulative count of all packets received.
 * \param dispatch_host - Count of packets that were delivered to the host buffer
264  * \param dispatch_drop - count of packets that were dropped as a result of a rule
265  * \param dispatch_fwd - count of packets forwarded out the egress port as the result of a rule
266  * \param is_inline - are we running in inline mode?
267  * \param enable_stream_stats - are per thread/stream statistics enabled.
268  * \param stream_counters - counters for each thread/stream configured.
269  *
270  * \return The number of active streams that were updated.
271  *
272  */
273 static uint32_t UpdateStreamStats(ThreadVars *tv,
274  NtInfoStream_t hInfo,
275  NtStatStream_t hstat_stream,
276  uint16_t num_streams,
278  PacketCounters total_counters,
279  PacketCounters dispatch_host,
280  PacketCounters dispatch_drop,
281  PacketCounters dispatch_fwd,
282  int is_inline,
283  int enable_stream_stats,
284  PacketCounters stream_counters[]
285  ) {
286  static uint64_t rxPktsStart[MAX_STREAMS] = {0};
287  static uint64_t rxByteStart[MAX_STREAMS] = {0};
288  static uint64_t dropPktStart[MAX_STREAMS] = {0};
289  static uint64_t dropByteStart[MAX_STREAMS] = {0};
290 
291  int status;
292  NtInfo_t hStreamInfo;
293  NtStatistics_t hStat; // Stat handle.
294 
295  /* Query the system to get the number of streams currently instantiated */
296  hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
297  if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
298  NAPATECH_ERROR(status);
299  exit(EXIT_FAILURE);
300  }
301 
302  uint16_t num_active;
303  if ((num_active = TestStreamConfig(hInfo, hstat_stream, stream_config, num_streams)) == 0) {
304  /* None of the configured streams are active */
305  return 0;
306  }
307 
308  /* At least one stream is active so proceed with the stats. */
309  uint16_t inst_id = 0;
310  uint32_t stream_cnt = 0;
311  for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {
312  while (inst_id < num_streams) {
313  if (stream_config[inst_id].is_active) {
314  break;
315  } else {
316  ++inst_id;
317  }
318  }
319  if (inst_id == num_streams)
320  break;
321 
322  /* Read usage data for the chosen stream ID */
323  memset(&hStat, 0, sizeof (NtStatistics_t));
324  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
325  hStat.u.usageData_v0.streamid = (uint8_t) stream_config[inst_id].stream_id;
326 
327  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
328  NAPATECH_ERROR(status);
329  return 0;
330  }
331 
332  uint16_t stream_id = stream_config[inst_id].stream_id;
333  if (stream_config[inst_id].is_active) {
334  uint64_t rx_pkts_total = 0;
335  uint64_t rx_byte_total = 0;
336  uint64_t drop_pkts_total = 0;
337  uint64_t drop_byte_total = 0;
338 
339  for (uint32_t hbCount = 0; hbCount < hStat.u.usageData_v0.data.numHostBufferUsed; hbCount++) {
340  if (unlikely(stream_config[inst_id].initialized == false)) {
341  rxPktsStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
342  rxByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
343  dropPktStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
344  dropByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
345  stream_config[inst_id].initialized = true;
346  } else {
347  rx_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
348  rx_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
349  drop_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
350  drop_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
351  }
352  }
353 
354  current_stats[stream_id].current_packets = rx_pkts_total - rxPktsStart[stream_id];
355  current_stats[stream_id].current_bytes = rx_byte_total - rxByteStart[stream_id];
356  current_stats[stream_id].current_drop_packets = drop_pkts_total - dropPktStart[stream_id];
357  current_stats[stream_id].current_drop_bytes = drop_byte_total - dropByteStart[stream_id];
358  }
359 
360  if (enable_stream_stats) {
361  StatsSetUI64(tv, stream_counters[inst_id].pkts, current_stats[stream_id].current_packets);
362  StatsSetUI64(tv, stream_counters[inst_id].byte, current_stats[stream_id].current_bytes);
363  StatsSetUI64(tv, stream_counters[inst_id].drop_pkts, current_stats[stream_id].current_drop_packets);
364  StatsSetUI64(tv, stream_counters[inst_id].drop_byte, current_stats[stream_id].current_drop_bytes);
365  }
366 
367  ++inst_id;
368  }
369 
370  uint32_t stream_id;
371  for (stream_id = 0; stream_id < num_streams; ++stream_id) {
372 
373 #ifndef NAPATECH_ENABLE_BYPASS
376 #endif /* NAPATECH_ENABLE_BYPASS */
379  }
380 
381 
382 #ifndef NAPATECH_ENABLE_BYPASS
383  StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
384  StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);
385 #endif /* NAPATECH_ENABLE_BYPASS */
386 
389 
394 
395  /* Read usage data for the chosen stream ID */
396  memset(&hStat, 0, sizeof (NtStatistics_t));
397 
398 #ifdef NAPATECH_ENABLE_BYPASS
399  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
400  hStat.u.query_v3.clear = 0;
401 #else /* NAPATECH_ENABLE_BYPASS */
402  /* Older versions of the API have a different structure. */
403  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
404  hStat.u.query_v2.clear = 0;
405 #endif /* !NAPATECH_ENABLE_BYPASS */
406 
407  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
408  if (status == NT_STATUS_TIMEOUT) {
409  SCLogInfo("Statistics timed out - will retry next time.");
410  return 0;
411  } else {
412  NAPATECH_ERROR(status);
413  return 0;
414  }
415  }
416 
417 #ifdef NAPATECH_ENABLE_BYPASS
418 
419  int adapter = 0;
420  uint64_t total_dispatch_host_pkts = 0;
421  uint64_t total_dispatch_host_byte = 0;
422  uint64_t total_dispatch_drop_pkts = 0;
423  uint64_t total_dispatch_drop_byte = 0;
424  uint64_t total_dispatch_fwd_pkts = 0;
425  uint64_t total_dispatch_fwd_byte = 0;
426 
427  for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
428  total_dispatch_host_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts;
429  total_dispatch_host_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets;
430 
431  total_dispatch_drop_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
432  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;
433  total_dispatch_drop_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
434  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].octets;
435 
436  total_dispatch_fwd_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
437  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].pkts;
438  total_dispatch_fwd_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets
439  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].octets;
440 
441  total_stats.current_packets += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts
442  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
443  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
444  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;
445 
446  total_stats.current_bytes = hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets
447  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
448  + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets;
449  }
450 
451  StatsSetUI64(tv, dispatch_host.pkts, total_dispatch_host_pkts);
452  StatsSetUI64(tv, dispatch_host.byte, total_dispatch_host_byte);
453 
454  StatsSetUI64(tv, dispatch_drop.pkts, total_dispatch_drop_pkts);
455  StatsSetUI64(tv, dispatch_drop.byte, total_dispatch_drop_byte);
456 
457  if (is_inline) {
458  StatsSetUI64(tv, dispatch_fwd.pkts, total_dispatch_fwd_pkts);
459  StatsSetUI64(tv, dispatch_fwd.byte, total_dispatch_fwd_byte);
460  }
461 
462  StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
463  StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);
464 
465 #endif /* NAPATECH_ENABLE_BYPASS */
466 
467  return num_active;
468 }
469 
470 /**
471  * \brief Statistics processing loop
472  *
473  * Instantiated on the stats thread. Periodically retrieves
474  * statistics from the Napatech card and updates the packet counters
475  *
476  * \param arg Pointer that is cast into a ThreadVars structure
477  */
478 static void *NapatechStatsLoop(void *arg)
479 {
480  ThreadVars *tv = (ThreadVars *) arg;
481 
482  int status;
483  NtInfoStream_t hInfo;
484  NtStatStream_t hstat_stream;
485  int is_inline = 0;
486  int enable_stream_stats = 0;
487  PacketCounters stream_counters[MAX_STREAMS];
488 
489  if (ConfGetBool("napatech.inline", &is_inline) == 0) {
490  is_inline = 0;
491  }
492 
493  if (ConfGetBool("napatech.enable-stream-stats", &enable_stream_stats) == 0) {
494  /* default is "no" */
495  enable_stream_stats = 0;
496  }
497 
499  uint16_t stream_cnt = NapatechGetStreamConfig(stream_config);
500 
501  /* Open the info and Statistics */
502  if ((status = NT_InfoOpen(&hInfo, "StatsLoopInfoStream")) != NT_SUCCESS) {
503  NAPATECH_ERROR(status);
504  return NULL;
505  }
506 
507  if ((status = NT_StatOpen(&hstat_stream, "StatsLoopStatsStream")) != NT_SUCCESS) {
508  NAPATECH_ERROR(status);
509  return NULL;
510  }
511 
512  NtStatistics_t hStat;
513  memset(&hStat, 0, sizeof (NtStatistics_t));
514 
515 #ifdef NAPATECH_ENABLE_BYPASS
516  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
517  hStat.u.query_v3.clear = 1;
518 #else /* NAPATECH_ENABLE_BYPASS */
519  hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
520  hStat.u.query_v2.clear = 1;
521 #endif /* !NAPATECH_ENABLE_BYPASS */
522 
523  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
524  NAPATECH_ERROR(status);
525  return 0;
526  }
527 
528  PacketCounters total_counters;
529  memset(&total_counters, 0, sizeof(total_counters));
530 
531  PacketCounters dispatch_host;
532  memset(&dispatch_host, 0, sizeof(dispatch_host));
533 
534  PacketCounters dispatch_drop;
535  memset(&dispatch_drop, 0, sizeof(dispatch_drop));
536 
537  PacketCounters dispatch_fwd;
538  memset(&dispatch_fwd, 0, sizeof(dispatch_fwd));
539 
540  total_counters.pkts = StatsRegisterCounter("napa_total.pkts", tv);
541  dispatch_host.pkts = StatsRegisterCounter("napa_dispatch_host.pkts", tv);
542  dispatch_drop.pkts = StatsRegisterCounter("napa_dispatch_drop.pkts", tv);
543  if (is_inline) {
544  dispatch_fwd.pkts = StatsRegisterCounter("napa_dispatch_fwd.pkts", tv);
545  }
546 
547  total_counters.byte = StatsRegisterCounter("napa_total.byte", tv);
548  dispatch_host.byte = StatsRegisterCounter("napa_dispatch_host.byte", tv);
549  dispatch_drop.byte = StatsRegisterCounter("napa_dispatch_drop.byte", tv);
550  if (is_inline) {
551  dispatch_fwd.byte = StatsRegisterCounter("napa_dispatch_fwd.byte", tv);
552  }
553 
554  total_counters.drop_pkts = StatsRegisterCounter("napa_total.overflow_drop_pkts", tv);
555  total_counters.drop_byte = StatsRegisterCounter("napa_total.overflow_drop_byte", tv);
556 
557  if (enable_stream_stats) {
558  for (int i = 0; i < stream_cnt; ++i) {
559  char *pkts_buf = SCCalloc(1, 32);
560  if (unlikely(pkts_buf == NULL)) {
561  FatalError("Failed to allocate memory for NAPATECH stream counter.");
562  }
563 
564  snprintf(pkts_buf, 32, "napa%d.pkts", stream_config[i].stream_id);
565  stream_counters[i].pkts = StatsRegisterCounter(pkts_buf, tv);
566 
567  char *byte_buf = SCCalloc(1, 32);
568  if (unlikely(byte_buf == NULL)) {
569  FatalError("Failed to allocate memory for NAPATECH stream counter.");
570  }
571  snprintf(byte_buf, 32, "napa%d.bytes", stream_config[i].stream_id);
572  stream_counters[i].byte = StatsRegisterCounter(byte_buf, tv);
573 
574  char *drop_pkts_buf = SCCalloc(1, 32);
575  if (unlikely(drop_pkts_buf == NULL)) {
576  FatalError("Failed to allocate memory for NAPATECH stream counter.");
577  }
578  snprintf(drop_pkts_buf, 32, "napa%d.drop_pkts", stream_config[i].stream_id);
579  stream_counters[i].drop_pkts = StatsRegisterCounter(drop_pkts_buf, tv);
580 
581  char *drop_byte_buf = SCCalloc(1, 32);
582  if (unlikely(drop_byte_buf == NULL)) {
583  FatalError("Failed to allocate memory for NAPATECH stream counter.");
584  }
585  snprintf(drop_byte_buf, 32, "napa%d.drop_byte", stream_config[i].stream_id);
586  stream_counters[i].drop_byte = StatsRegisterCounter(drop_byte_buf, tv);
587  }
588  }
589 
590 #ifdef NAPATECH_ENABLE_BYPASS
591  FlowStatsCounters flow_counters;
592  if (bypass_supported) {
593  flow_counters.active_bypass_flows = StatsRegisterCounter("napa_bypass.active_flows", tv);
594  flow_counters.total_bypass_flows = StatsRegisterCounter("napa_bypass.total_flows", tv);
595  }
596 #endif /* NAPATECH_ENABLE_BYPASS */
597 
599 
600  StatsSetUI64(tv, total_counters.pkts, 0);
601  StatsSetUI64(tv, total_counters.byte, 0);
602  StatsSetUI64(tv, total_counters.drop_pkts, 0);
603  StatsSetUI64(tv, total_counters.drop_byte, 0);
604 
605 #ifdef NAPATECH_ENABLE_BYPASS
606  if (bypass_supported) {
607  StatsSetUI64(tv, dispatch_host.pkts, 0);
608  StatsSetUI64(tv, dispatch_drop.pkts, 0);
609 
610  if (is_inline) {
611  StatsSetUI64(tv, dispatch_fwd.pkts, 0);
612  }
613 
614  StatsSetUI64(tv, dispatch_host.byte, 0);
615  StatsSetUI64(tv, dispatch_drop.byte, 0);
616  if (is_inline) {
617  StatsSetUI64(tv, dispatch_fwd.byte, 0);
618  }
619 
620  if (enable_stream_stats) {
621  for (int i = 0; i < stream_cnt; ++i) {
622  StatsSetUI64(tv, stream_counters[i].pkts, 0);
623  StatsSetUI64(tv, stream_counters[i].byte, 0);
624  StatsSetUI64(tv, stream_counters[i].drop_pkts, 0);
625  StatsSetUI64(tv, stream_counters[i].drop_byte, 0);
626  }
627  }
628 
629  StatsSetUI64(tv, flow_counters.active_bypass_flows, 0);
630  StatsSetUI64(tv, flow_counters.total_bypass_flows, 0);
631  UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 1);
632  }
633 #endif /* NAPATECH_ENABLE_BYPASS */
634 
635  uint32_t num_active = UpdateStreamStats(tv, hInfo, hstat_stream,
636  stream_cnt, stream_config, total_counters,
637  dispatch_host, dispatch_drop, dispatch_fwd,
638  is_inline, enable_stream_stats, stream_counters);
639 
640  if (!NapatechIsAutoConfigEnabled() && (num_active < stream_cnt)) {
641  SCLogInfo("num_active: %d, stream_cnt: %d", num_active, stream_cnt);
642  SCLogWarning("Some or all of the configured streams are not created. Proceeding with "
643  "active streams.");
644  }
645 
647  while (1) {
649  SCLogDebug("NapatechStatsLoop THV_KILL detected");
650  break;
651  }
652 
653  UpdateStreamStats(tv, hInfo, hstat_stream,
654  stream_cnt, stream_config, total_counters,
655  dispatch_host, dispatch_drop, dispatch_fwd,
656  is_inline, enable_stream_stats,
657  stream_counters);
658 
659 #ifdef NAPATECH_ENABLE_BYPASS
660  if (bypass_supported) {
661  UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 0);
662  }
663 #endif /* NAPATECH_ENABLE_BYPASS */
664 
666  usleep(1000000);
667  }
668 
669  /* CLEAN UP NT Resources and Close the info stream */
670  if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
671  NAPATECH_ERROR(status);
672  return NULL;
673  }
674 
675  /* Close the statistics stream */
676  if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
677  NAPATECH_ERROR(status);
678  return NULL;
679  }
680 
681  SCLogDebug("Exiting NapatechStatsLoop");
685 
686  return NULL;
687 }
688 
689 #define MAX_HOSTBUFFER 4
690 #define MAX_STREAMS 256
691 #define HB_HIGHWATER 2048 //1982
692 
693 /**
694  * \brief Tests whether a particular stream_id is actively registered
695  *
696  * \param stream_id - ID of the stream to look up
697  * \param num_registered - The total number of registered streams
698  * \param registered_streams - An array containing actively registered streams.
699  *
700  * \return Bool indicating is the specified stream is registered.
701  *
702  */
703 static bool RegisteredStream(uint16_t stream_id, uint16_t num_registered,
704  NapatechStreamConfig registered_streams[])
705 {
706  for (uint16_t reg_id = 0; reg_id < num_registered; ++reg_id) {
707  if (stream_id == registered_streams[reg_id].stream_id) {
708  return true;
709  }
710  }
711  return false;
712 }
713 
714 /**
715  * \brief Count the number of worker threads defined in the conf file.
716  *
717  * \return - The number of worker threads defined by the configuration
718  */
719 static uint32_t CountWorkerThreads(void)
720 {
721  int worker_count = 0;
722 
723  ConfNode *affinity;
724  ConfNode *root = ConfGetNode("threading.cpu-affinity");
725 
726  if (root != NULL) {
727 
728  TAILQ_FOREACH(affinity, &root->head, next)
729  {
730  if (strcmp(affinity->val, "decode-cpu-set") == 0 ||
731  strcmp(affinity->val, "stream-cpu-set") == 0 ||
732  strcmp(affinity->val, "reject-cpu-set") == 0 ||
733  strcmp(affinity->val, "output-cpu-set") == 0) {
734  continue;
735  }
736 
737  if (strcmp(affinity->val, "worker-cpu-set") == 0) {
738  ConfNode *node = ConfNodeLookupChild(affinity->head.tqh_first, "cpu");
739  ConfNode *lnode;
740 
742 
743  TAILQ_FOREACH(lnode, &node->head, next)
744  {
745  uint8_t start, end;
746  char *end_str;
747  if (strncmp(lnode->val, "all", 4) == 0) {
748  /* check that the sting in the config file is correctly specified */
749  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
750  FatalError("Only one Napatech port specifier type allowed.");
751  }
752  cpu_spec = CONFIG_SPECIFIER_RANGE;
753  worker_count = UtilCpuGetNumProcessorsConfigured();
754  } else if ((end_str = strchr(lnode->val, '-'))) {
755  /* check that the sting in the config file is correctly specified */
756  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
757  FatalError("Only one Napatech port specifier type allowed.");
758  }
759  cpu_spec = CONFIG_SPECIFIER_RANGE;
760 
761 
762  if (StringParseUint8(&start, 10, end_str - lnode->val, (const char *)lnode->val) < 0) {
763  FatalError("Napatech invalid"
764  " worker range start: '%s'",
765  lnode->val);
766  }
767  if (StringParseUint8(&end, 10, 0, (const char *) (end_str + 1)) < 0) {
768  FatalError("Napatech invalid"
769  " worker range end: '%s'",
770  (end_str != NULL) ? (const char *)(end_str + 1) : "Null");
771  }
772  if (end < start) {
773  FatalError("Napatech invalid"
774  " worker range start: '%d' is greater than end: '%d'",
775  start, end);
776  }
777  worker_count = end - start + 1;
778 
779  } else {
780  /* check that the sting in the config file is correctly specified */
781  if (cpu_spec == CONFIG_SPECIFIER_RANGE) {
782  FatalError("Napatech port range specifiers cannot be combined with "
783  "individual stream specifiers.");
784  }
785  cpu_spec = CONFIG_SPECIFIER_INDIVIDUAL;
786  ++worker_count;
787  }
788  }
789  break;
790  }
791  }
792  }
793  return worker_count;
794 }
795 
796 /**
797  * \brief Reads and parses the stream configuration defined in the config file.
798  *
799  * \param stream_config - array to be filled in with active stream info.
800  *
801  * \return the number of streams configured or -1 if an error occurred
802  *
803  */
805 {
806  int status;
807  char error_buffer[80]; // Error buffer
808  NtStatStream_t hstat_stream;
809  NtStatistics_t hStat; // Stat handle.
810  NtInfoStream_t info_stream;
811  NtInfo_t info;
812  uint16_t instance_cnt = 0;
813  int use_all_streams = 0;
814  int set_cpu_affinity = 0;
815  ConfNode *ntstreams;
816  uint16_t stream_id = 0;
817  uint8_t start = 0;
818  uint8_t end = 0;
819 
820  for (uint16_t i = 0; i < MAX_STREAMS; ++i) {
821  stream_config[i].stream_id = 0;
822  stream_config[i].is_active = false;
823  stream_config[i].initialized = false;
824  }
825 
826  if (ConfGetBool("napatech.use-all-streams", &use_all_streams) == 0) {
827  /* default is "no" */
828  use_all_streams = 0;
829  }
830 
831  if ((status = NT_InfoOpen(&info_stream, "SuricataStreamInfo")) != NT_SUCCESS) {
832  NAPATECH_ERROR(status);
833  return -1;
834  }
835 
836  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
837  NAPATECH_ERROR(status);
838  return -1;
839  }
840 
841  if (use_all_streams) {
842  info.cmd = NT_INFO_CMD_READ_STREAM;
843  if ((status = NT_InfoRead(info_stream, &info)) != NT_SUCCESS) {
844  NAPATECH_ERROR(status);
845  return -1;
846  }
847 
848  while (instance_cnt < info.u.stream.data.count) {
849 
850  /*
851  * For each stream ID query the number of host-buffers used by
852  * the stream. If zero, then that streamID is not used; skip
853  * over it and continue until we get a streamID with a non-zero
854  * count of the host-buffers.
855  */
856  memset(&hStat, 0, sizeof (NtStatistics_t));
857 
858  /* Read usage data for the chosen stream ID */
859  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
860  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
861 
862  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
863  /* Get the status code as text */
864  NT_ExplainError(status, error_buffer, sizeof (error_buffer));
865  SCLogError("NT_StatRead() failed: %s\n", error_buffer);
866  return -1;
867  }
868 
869  if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
870  ++stream_id;
871  continue;
872  }
873 
874  /* if we get here it is an active stream */
875  stream_config[instance_cnt].stream_id = stream_id++;
876  stream_config[instance_cnt].is_active = true;
877  instance_cnt++;
878  }
879 
880  } else {
881  (void)ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity);
882  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 1)) {
883  start = 0;
884  end = CountWorkerThreads() - 1;
885  } else {
886  /* When not using the default streams we need to
887  * parse the array of streams from the conf */
888  if ((ntstreams = ConfGetNode("napatech.streams")) == NULL) {
889  SCLogError("Failed retrieving napatech.streams from Config");
890  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 0)) {
891  SCLogError("if set-cpu-affinity: no in conf then napatech.streams must be "
892  "defined");
893  }
894  exit(EXIT_FAILURE);
895  }
896 
897  /* Loop through all stream numbers in the array and register the devices */
898  ConfNode *stream;
899  enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;
900  instance_cnt = 0;
901 
902  TAILQ_FOREACH(stream, &ntstreams->head, next)
903  {
904 
905  if (stream == NULL) {
906  SCLogError("Couldn't Parse Stream Configuration");
907  return -1;
908  }
909 
910  char *end_str = strchr(stream->val, '-');
911  if (end_str) {
912  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
913  SCLogError("Only one Napatech stream range specifier allowed.");
914  return -1;
915  }
916  stream_spec = CONFIG_SPECIFIER_RANGE;
917 
918  if (StringParseUint8(&start, 10, end_str - stream->val,
919  (const char *)stream->val) < 0) {
920  FatalError("Napatech invalid "
921  "stream id start: '%s'",
922  stream->val);
923  }
924  if (StringParseUint8(&end, 10, 0, (const char *) (end_str + 1)) < 0) {
925  FatalError("Napatech invalid "
926  "stream id end: '%s'",
927  (end_str != NULL) ? (const char *)(end_str + 1) : "Null");
928  }
929  } else {
930  if (stream_spec == CONFIG_SPECIFIER_RANGE) {
931  FatalError("Napatech range and individual specifiers cannot be combined.");
932  }
933  stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;
934  if (StringParseUint8(&stream_config[instance_cnt].stream_id,
935  10, 0, (const char *)stream->val) < 0) {
936  FatalError("Napatech invalid "
937  "stream id: '%s'",
938  stream->val);
939  }
940  start = stream_config[instance_cnt].stream_id;
941  end = stream_config[instance_cnt].stream_id;
942  }
943  }
944  }
945 
946  for (stream_id = start; stream_id <= end; ++stream_id) {
947  /* if we get here it is configured in the .yaml file */
948  stream_config[instance_cnt].stream_id = stream_id;
949 
950  /* Check to see if it is an active stream */
951  memset(&hStat, 0, sizeof (NtStatistics_t));
952 
953  /* Read usage data for the chosen stream ID */
954  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
955  hStat.u.usageData_v0.streamid =
956  (uint8_t) stream_config[instance_cnt].stream_id;
957 
958  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
959  NAPATECH_ERROR(status);
960  return -1;
961  }
962 
963  if (hStat.u.usageData_v0.data.numHostBufferUsed > 0) {
964  stream_config[instance_cnt].is_active = true;
965  }
966  instance_cnt++;
967  }
968  }
969 
970  /* Close the statistics stream */
971  if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
972  NAPATECH_ERROR(status);
973  return -1;
974  }
975 
976  if ((status = NT_InfoClose(info_stream)) != NT_SUCCESS) {
977  NAPATECH_ERROR(status);
978  return -1;
979  }
980 
981  return instance_cnt;
982 }
983 
/**
 * \brief Monitor thread that reports host-buffer (and on-board SDRAM) fill levels.
 *
 * Polls the usage statistics of every active Napatech stream every 200 ms and
 * emits SCLogPerf messages whenever a stream's average fill level crosses
 * another 25% alert step, in either direction.  Runs until THV_KILL is set on
 * the thread.  Any NT API failure terminates the engine via exit().
 *
 * \param arg  ThreadVars* for this monitor thread (cast from void*).
 * \return NULL when the kill flag is seen.
 */
static void *NapatechBufMonitorLoop(void *arg)
{
    ThreadVars *tv = (ThreadVars *) arg;

    NtInfo_t hStreamInfo;
    NtStatistics_t hStat; // Stat handle.
    NtInfoStream_t hInfo;
    NtStatStream_t hstat_stream;
    int status; // Status variable

    /* fill-level alerts are stepped in increments of 25% */
    const uint32_t alertInterval = 25;

#ifndef NAPATECH_ENABLE_BYPASS
    /* on-board (adapter SDRAM) buffering levels; only tracked when the
     * bypass feature set is not compiled in */
    uint32_t OB_fill_level[MAX_STREAMS] = {0};
    uint32_t OB_alert_level[MAX_STREAMS] = {0};
    uint32_t ave_OB_fill_level[MAX_STREAMS] = {0};
#endif /* NAPATECH_ENABLE_BYPASS */

    /* host-buffer levels, indexed per stream for the averages/alerts.
     * NOTE(review): HB_fill_level/OB_fill_level are indexed by host-buffer
     * count inside the loop below but are sized MAX_STREAMS — assumes the
     * number of host buffers per stream never exceeds MAX_STREAMS; confirm. */
    uint32_t HB_fill_level[MAX_STREAMS] = {0};
    uint32_t HB_alert_level[MAX_STREAMS] = {0};
    uint32_t ave_HB_fill_level[MAX_STREAMS] = {0};

    /* Open the info and Statistics */
    if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    /* Read the info on all streams instantiated in the system */
    hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
    if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    /* streams configured in suricata.yaml; only those are reported on */
    NapatechStreamConfig registered_streams[MAX_STREAMS];
    int num_registered = NapatechGetStreamConfig(registered_streams);
    if (num_registered == -1) {
        exit(EXIT_FAILURE);
    }

    /* NOTE(review): one source line is elided from this view here (original
     * line 1030) — likely thread-state flag setup; confirm against the full file. */
    while (1) {
        if (TmThreadsCheckFlag(tv, THV_KILL)) {
            SCLogDebug("NapatechBufMonitorLoop THV_KILL detected");
            break;
        }

        /* poll interval: 200 ms */
        usleep(200000);

        /* Read the info on all streams instantiated in the system */
        hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
        if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
            NAPATECH_ERROR(status);
            exit(EXIT_FAILURE);
        }

        /* cleared each pass but not otherwise read in this loop */
        char pktCntStr[4096];
        memset(pktCntStr, 0, sizeof (pktCntStr));

        uint32_t stream_id = 0;
        uint32_t stream_cnt = 0;
        uint32_t num_streams = hStreamInfo.u.stream.data.count;

        for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {

            /* advance stream_id until a stream with at least one host buffer
             * in use is found.  NOTE(review): 'continue' in a do/while jumps
             * to the condition, so this keeps probing increasing ids; it
             * assumes enough active streams exist for every iteration of the
             * outer loop — confirm there is no unbounded probe when streams
             * go inactive mid-run. */
            do {

                /* Read usage data for the chosen stream ID */
                hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
                hStat.u.usageData_v0.streamid = (uint8_t) stream_id;

                if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
                    NAPATECH_ERROR(status);
                    exit(EXIT_FAILURE);
                }

                if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
                    ++stream_id;
                    continue;
                }
            } while (hStat.u.usageData_v0.data.numHostBufferUsed == 0);

            /* only report on streams that are configured for this instance */
            if (RegisteredStream(stream_id, num_registered, registered_streams)) {

#ifndef NAPATECH_ENABLE_BYPASS
                ave_OB_fill_level[stream_id] = 0;
#endif /* NAPATECH_ENABLE_BYPASS */

                ave_HB_fill_level[stream_id] = 0;

                for (uint32_t hb_count = 0; hb_count < hStat.u.usageData_v0.data.numHostBufferUsed; hb_count++) {

#ifndef NAPATECH_ENABLE_BYPASS
                    /* on-board SDRAM usage as a percentage, clamped to 100 */
                    OB_fill_level[hb_count] =
                            ((100 * hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.used) /
                            hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.size);

                    if (OB_fill_level[hb_count] > 100) {
                        OB_fill_level[hb_count] = 100;
                    }
#endif /* NAPATECH_ENABLE_BYPASS */
                    /* effective buffer size in KB, less the high-water reserve.
                     * NOTE(review): if the queued totals equal HB_HIGHWATER
                     * this is 0 and the division below traps — confirm the
                     * driver guarantees a non-zero remainder. */
                    uint32_t bufSize = hStat.u.usageData_v0.data.hb[hb_count].enQueuedAdapter / 1024
                            + hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024
                            + hStat.u.usageData_v0.data.hb[hb_count].enQueued / 1024
                            - HB_HIGHWATER;

                    HB_fill_level[hb_count] = (uint32_t)
                            ((100 * hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024) /
                            bufSize);

#ifndef NAPATECH_ENABLE_BYPASS
                    ave_OB_fill_level[stream_id] += OB_fill_level[hb_count];
#endif /* NAPATECH_ENABLE_BYPASS */

                    ave_HB_fill_level[stream_id] += HB_fill_level[hb_count];
                }

                /* average over the host buffers in use for this stream */
#ifndef NAPATECH_ENABLE_BYPASS
                ave_OB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;
#endif /* NAPATECH_ENABLE_BYPASS */

                ave_HB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;

                /* Host Buffer Fill Level warnings... */
                if (ave_HB_fill_level[stream_id] >= (HB_alert_level[stream_id] + alertInterval)) {

                    /* ratchet the alert threshold up to just below the average */
                    while (ave_HB_fill_level[stream_id] >= HB_alert_level[stream_id] + alertInterval) {
                        HB_alert_level[stream_id] += alertInterval;
                    }
                    SCLogPerf("nt%d - Increasing Host Buffer Fill Level : %4d%%",
                            stream_id, ave_HB_fill_level[stream_id] - 1);
                }

                if (HB_alert_level[stream_id] > 0) {
                    if ((ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval))) {
                        SCLogPerf("nt%d - Decreasing Host Buffer Fill Level: %4d%%",
                                stream_id, ave_HB_fill_level[stream_id]);

                        /* ratchet the alert threshold back down */
                        while (ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval)) {
                            if ((HB_alert_level[stream_id]) > 0) {
                                HB_alert_level[stream_id] -= alertInterval;
                            } else break;
                        }
                    }
                }

#ifndef NAPATECH_ENABLE_BYPASS
                /* On Board SDRAM Fill Level warnings... */
                if (ave_OB_fill_level[stream_id] >= (OB_alert_level[stream_id] + alertInterval)) {
                    while (ave_OB_fill_level[stream_id] >= OB_alert_level[stream_id] + alertInterval) {
                        OB_alert_level[stream_id] += alertInterval;

                    }
                    SCLogPerf("nt%d - Increasing Adapter SDRAM Fill Level: %4d%%",
                            stream_id, ave_OB_fill_level[stream_id]);
                }

                if (OB_alert_level[stream_id] > 0) {
                    if ((ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval))) {
                        SCLogPerf("nt%d - Decreasing Adapter SDRAM Fill Level : %4d%%",
                                stream_id, ave_OB_fill_level[stream_id]);

                        while (ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval)) {
                            if ((OB_alert_level[stream_id]) > 0) {
                                OB_alert_level[stream_id] -= alertInterval;
                            } else break;
                        }
                    }
                }
#endif /* NAPATECH_ENABLE_BYPASS */
            }
            ++stream_id;
        }
    }

    if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    /* Close the statistics stream */
    if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    SCLogDebug("Exiting NapatechStatsLoop");
    /* NOTE(review): original lines 1177-1179 are elided from this view here
     * (thread shutdown flag handling) — confirm against the full file. */

    return NULL;
}
1183 
1184 
1186 {
1187  /* Creates the Statistic threads */
1188  ThreadVars *stats_tv = TmThreadCreate("NapatechStats",
1189  NULL, NULL,
1190  NULL, NULL,
1191  "custom", NapatechStatsLoop, 0);
1192 
1193  if (stats_tv == NULL) {
1194  FatalError("Error creating a thread for NapatechStats - Killing engine.");
1195  }
1196 
1197  if (TmThreadSpawn(stats_tv) != 0) {
1198  FatalError("Failed to spawn thread for NapatechStats - Killing engine.");
1199  }
1200 
1201 #ifdef NAPATECH_ENABLE_BYPASS
1202  if (bypass_supported) {
1203  SCLogInfo("Napatech bypass functionality enabled.");
1204  }
1205 #endif /* NAPATECH_ENABLE_BYPASS */
1206 
1207  ThreadVars *buf_monitor_tv = TmThreadCreate("NapatechBufMonitor",
1208  NULL, NULL,
1209  NULL, NULL,
1210  "custom", NapatechBufMonitorLoop, 0);
1211 
1212  if (buf_monitor_tv == NULL) {
1213  FatalError("Error creating a thread for NapatechBufMonitor - Killing engine.");
1214  }
1215 
1216  if (TmThreadSpawn(buf_monitor_tv) != 0) {
1217  FatalError("Failed to spawn thread for NapatechBufMonitor - Killing engine.");
1218  }
1219 }
1220 
1221 bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
1222 {
1223  uint32_t status = 0;
1224  static NtConfigStream_t hconfig;
1225 
1226  char ntpl_cmd[64];
1227  snprintf(ntpl_cmd, 64, "setup[numanode=%d] = streamid == %d", numa, stream);
1228 
1229  NtNtplInfo_t ntpl_info;
1230 
1231  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1232  NAPATECH_ERROR(status);
1233  return false;
1234  }
1235 
1236  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1237  status = ntpl_info.ntplId;
1238 
1239  } else {
1240  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1241  return false;
1242  }
1243 
1244  return status;
1245 }
1246 
1247 static uint32_t NapatechSetHashmode(void)
1248 {
1249  uint32_t status = 0;
1250  const char *hash_mode;
1251  static NtConfigStream_t hconfig;
1252  char ntpl_cmd[64];
1253  NtNtplInfo_t ntpl_info;
1254 
1255  uint32_t filter_id = 0;
1256 
1257  /* Get the hashmode from the conf file. */
1258  ConfGet("napatech.hashmode", &hash_mode);
1259 
1260  snprintf(ntpl_cmd, 64, "hashmode = %s", hash_mode);
1261 
1262  /* Issue the NTPL command */
1263  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1264  NAPATECH_ERROR(status);
1265  return false;
1266  }
1267 
1268  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1269  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1270  filter_id = ntpl_info.ntplId;
1271  SCLogConfig("Napatech hashmode: %s ID: %d", hash_mode, status);
1272  } else {
1273  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1274  status = 0;
1275  }
1276 
1277  return filter_id;
1278 }
1279 
1280 static uint32_t GetStreamNUMAs(uint32_t stream_id, int stream_numas[])
1281 {
1282  NtStatistics_t hStat; // Stat handle.
1283  NtStatStream_t hstat_stream;
1284  int status; // Status variable
1285 
1286  for (int i = 0; i < MAX_HOSTBUFFERS; ++i)
1287  stream_numas[i] = -1;
1288 
1289  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
1290  NAPATECH_ERROR(status);
1291  exit(EXIT_FAILURE);
1292  }
1293 
1294  char pktCntStr[4096];
1295  memset(pktCntStr, 0, sizeof (pktCntStr));
1296 
1297 
1298  /* Read usage data for the chosen stream ID */
1299  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
1300  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
1301 
1302  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
1303  NAPATECH_ERROR(status);
1304  exit(EXIT_FAILURE);
1305  }
1306 
1307  for (uint32_t hb_id = 0; hb_id < hStat.u.usageData_v0.data.numHostBufferUsed; ++hb_id) {
1308  stream_numas[hb_id] = hStat.u.usageData_v0.data.hb[hb_id].numaNode;
1309  }
1310 
1311  return hStat.u.usageData_v0.data.numHostBufferUsed;
1312 }
1313 
1314 static int NapatechSetFilter(NtConfigStream_t hconfig, char *ntpl_cmd)
1315 {
1316  int status = 0;
1317  int local_filter_id = 0;
1318 
1319  NtNtplInfo_t ntpl_info;
1320  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1321  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1322  SCLogConfig("NTPL filter assignment \"%s\" returned filter id %4d",
1323  ntpl_cmd, local_filter_id);
1324  } else {
1325  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1326  exit(EXIT_FAILURE);
1327  }
1328 
1329  return local_filter_id;
1330 }
1331 
1333 {
1334  uint32_t status = 0;
1335  static NtConfigStream_t hconfig;
1336  char ntpl_cmd[64];
1337  NtNtplInfo_t ntpl_info;
1338 
1339  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1340  NAPATECH_ERROR(status);
1341  exit(EXIT_FAILURE);
1342  }
1343 
1344  snprintf(ntpl_cmd, 64, "delete = all");
1345  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1346  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1347  status = ntpl_info.ntplId;
1348  } else {
1349  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1350  status = 0;
1351  }
1352 
1353  NT_ConfigClose(hconfig);
1354 
1355  return status;
1356 }
1357 
1358 
1359 uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
1360 {
1361 #define PORTS_SPEC_SIZE 64
1362 
1363  struct ports_spec_s {
1364  uint8_t first[MAX_PORTS];
1365  uint8_t second[MAX_PORTS];
1366  bool all;
1367  char str[PORTS_SPEC_SIZE];
1368  } ports_spec;
1369 
1370  ports_spec.all = false;
1371 
1372  ConfNode *ntports;
1373  int iteration = 0;
1374  int status = 0;
1375  NtConfigStream_t hconfig;
1376  char ntpl_cmd[512];
1377  int is_inline = 0;
1378 #ifdef NAPATECH_ENABLE_BYPASS
1379  int is_span_port[MAX_PORTS] = { 0 };
1380 #endif
1381 
1382  char span_ports[128];
1383  memset(span_ports, 0, sizeof(span_ports));
1384 
1385  if (ConfGetBool("napatech.inline", &is_inline) == 0) {
1386  is_inline = 0;
1387  }
1388 
1389  NapatechSetHashmode();
1390 
1391  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1392  NAPATECH_ERROR(status);
1393  exit(EXIT_FAILURE);
1394  }
1395 
1396  if (first_stream == last_stream) {
1397  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1398  "Setup[state=inactive] = StreamId == %d",
1399  first_stream);
1400  } else {
1401  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1402  "Setup[state=inactive] = StreamId == (%d..%d)",
1403  first_stream, last_stream);
1404  }
1405  NapatechSetFilter(hconfig, ntpl_cmd);
1406 
1407 #ifdef NAPATECH_ENABLE_BYPASS
1408  if (NapatechUseHWBypass()) {
1409  SCLogInfo("Napatech Hardware Bypass enabled.");
1410  }
1411 #else
1412  if (NapatechUseHWBypass()) {
1413  SCLogInfo("Napatech Hardware Bypass requested in conf but is not available.");
1414  exit(EXIT_FAILURE);
1415  } else {
1416  SCLogInfo("Napatech Hardware Bypass disabled.");
1417  }
1418 #endif
1419 
1420  if (is_inline) {
1421  SCLogInfo("Napatech configured for inline mode.");
1422  } else {
1423 
1424  SCLogInfo("Napatech configured for passive (non-inline) mode.");
1425  }
1426 
1427  /* When not using the default streams we need to parse
1428  * the array of streams from the conf
1429  */
1430  if ((ntports = ConfGetNode("napatech.ports")) == NULL) {
1431  FatalError("Failed retrieving napatech.ports from Conf");
1432  }
1433 
1434  /* Loop through all ports in the array */
1435  ConfNode *port;
1436  enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;
1437 
1438  if (NapatechUseHWBypass()) {
1439  SCLogInfo("Listening on the following Napatech ports:");
1440  }
1441  /* Build the NTPL command using values in the config file. */
1442  TAILQ_FOREACH(port, &ntports->head, next)
1443  {
1444  if (port == NULL) {
1445  FatalError("Couldn't Parse Port Configuration");
1446  }
1447 
1448  if (NapatechUseHWBypass()) {
1449 #ifdef NAPATECH_ENABLE_BYPASS
1450  if (strchr(port->val, '-')) {
1451  stream_spec = CONFIG_SPECIFIER_RANGE;
1452 
1453  if (ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val) == -1) {
1454  FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
1455  port->val);
1456  }
1457 
1458  if (ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0,
1459  strchr(port->val, '-') + 1) == -1) {
1460  FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
1461  port->val);
1462  }
1463 
1464  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1465  if (is_inline) {
1466  FatalError(
1467  "Error with napatech.ports in conf file. When running in inline "
1468  "mode the two ports specifying a segment must be different.");
1469  } else {
1470  /* SPAN port configuration */
1471  is_span_port[ports_spec.first[iteration]] = 1;
1472 
1473  if (strlen(span_ports) == 0) {
1474  snprintf(span_ports, sizeof (span_ports), "%d", ports_spec.first[iteration]);
1475  } else {
1476  char temp[16];
1477  snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
1478  strlcat(span_ports, temp, sizeof(span_ports));
1479  }
1480 
1481  }
1482  }
1483 
1484  if (NapatechGetAdapter(ports_spec.first[iteration]) != NapatechGetAdapter(ports_spec.first[iteration])) {
1485  SCLogError("Invalid napatech.ports specification in conf file.");
1486  SCLogError("Two ports on a segment must reside on the same adapter. port %d "
1487  "is on adapter %d, port %d is on adapter %d.",
1488  ports_spec.first[iteration],
1489  NapatechGetAdapter(ports_spec.first[iteration]),
1490  ports_spec.second[iteration],
1491  NapatechGetAdapter(ports_spec.second[iteration]));
1492  exit(EXIT_FAILURE);
1493  }
1494 
1495  NapatechSetPortmap(ports_spec.first[iteration], ports_spec.second[iteration]);
1496  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1497  SCLogInfo(" span_port: %d", ports_spec.first[iteration]);
1498  } else {
1499  SCLogInfo(" %s: %d - %d", is_inline ? "inline_ports" : "tap_ports", ports_spec.first[iteration], ports_spec.second[iteration]);
1500  }
1501 
1502  if (iteration == 0) {
1503  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1504  snprintf(ports_spec.str, sizeof (ports_spec.str), "%d", ports_spec.first[iteration]);
1505  } else {
1506  snprintf(ports_spec.str, sizeof (ports_spec.str), "%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
1507  }
1508  } else {
1509  char temp[16];
1510  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1511  snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
1512  } else {
1513  snprintf(temp, sizeof(temp), ",%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
1514  }
1515  strlcat(ports_spec.str, temp, sizeof(ports_spec.str));
1516  }
1517  } else {
1518  FatalError("When using hardware flow bypass ports must be specified as segments. "
1519  "E.g. ports: [0-1, 0-2]");
1520  }
1521 #endif
1522  } else { // !NapatechUseHWBypass()
1523  if (strncmp(port->val, "all", 3) == 0) {
1524  /* check that the sting in the config file is correctly specified */
1525  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
1526  FatalError("Only one Napatech port specifier type is allowed.");
1527  }
1528  stream_spec = CONFIG_SPECIFIER_RANGE;
1529 
1530  ports_spec.all = true;
1531  snprintf(ports_spec.str, sizeof (ports_spec.str), "all");
1532  } else if (strchr(port->val, '-')) {
1533  /* check that the sting in the config file is correctly specified */
1534  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
1535  FatalError("Only one Napatech port specifier is allowed when hardware bypass "
1536  "is disabled. (E.g. ports: [0-4], NOT ports: [0-1,2-3])");
1537  }
1538  stream_spec = CONFIG_SPECIFIER_RANGE;
1539 
1540  if (ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val) == -1) {
1541  FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
1542  port->val);
1543  }
1544 
1545  if (ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0,
1546  strchr(port->val, '-') + 1) == -1) {
1547  FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
1548  port->val);
1549  }
1550 
1551  snprintf(ports_spec.str, sizeof (ports_spec.str), "(%d..%d)", ports_spec.first[iteration], ports_spec.second[iteration]);
1552  } else {
1553  /* check that the sting in the config file is correctly specified */
1554  if (stream_spec == CONFIG_SPECIFIER_RANGE) {
1555  FatalError("Napatech port range specifiers cannot be combined with individual "
1556  "stream specifiers.");
1557  }
1558  stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;
1559 
1560  if (ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val) == -1) {
1561  FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
1562  port->val);
1563  }
1564 
1565  /* Determine the ports to use on the NTPL assign statement*/
1566  if (iteration == 0) {
1567  snprintf(ports_spec.str, sizeof (ports_spec.str), "%s", port->val);
1568  } else {
1569  strlcat(ports_spec.str, ",", sizeof(ports_spec.str));
1570  strlcat(ports_spec.str, port->val, sizeof(ports_spec.str));
1571  }
1572  }
1573  } // if !NapatechUseHWBypass()
1574  ++iteration;
1575  } /* TAILQ_FOREACH */
1576 
1577 #ifdef NAPATECH_ENABLE_BYPASS
1578  if (bypass_supported) {
1579  if (is_inline) {
1580  char inline_setup_cmd[512];
1581  if (first_stream == last_stream) {
1582  snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
1583  "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == %d",
1584  ports_spec.str, first_stream);
1585  } else {
1586  snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
1587  "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == (%d..%d)",
1588  ports_spec.str, first_stream, last_stream);
1589  }
1590  NapatechSetFilter(hconfig, inline_setup_cmd);
1591  }
1592  /* Build the NTPL command */
1593  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1594  "assign[priority=3;streamid=(%d..%d);colormask=0x10000000;"
1595  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s",
1596  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1597  NapatechSetFilter(hconfig, ntpl_cmd);
1598 
1599 
1600  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1601  "assign[priority=2;streamid=(%d..%d);colormask=0x11000000;"
1602  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
1603  "]= %s%s and (Layer3Protocol==IPV4)",
1604  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1605  NapatechSetFilter(hconfig, ntpl_cmd);
1606 
1607 
1608  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1609  "assign[priority=2;streamid=(%d..%d);colormask=0x14000000;"
1610  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer3Protocol==IPV6)",
1611  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1612  NapatechSetFilter(hconfig, ntpl_cmd);
1613 
1614  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1615  "assign[priority=2;streamid=(%d..%d);colormask=0x10100000;"
1616  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer4Protocol==TCP)",
1617  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1618  NapatechSetFilter(hconfig, ntpl_cmd);
1619 
1620  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1621  "assign[priority=2;streamid=(%d..%d);colormask=0x10200000;"
1622  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
1623  "]= %s%s and (Layer4Protocol==UDP)",
1624  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1625  NapatechSetFilter(hconfig, ntpl_cmd);
1626 
1627  if (strlen(span_ports) > 0) {
1628  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1629  "assign[priority=2;streamid=(%d..%d);colormask=0x00001000;"
1630  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
1631  "]= port==%s",
1632  first_stream, last_stream, span_ports);
1633  NapatechSetFilter(hconfig, ntpl_cmd);
1634  }
1635 
1636  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1637  "KeyType[name=KT%u]={sw_32_32,sw_16_16}",
1638  NAPATECH_KEYTYPE_IPV4);
1639  NapatechSetFilter(hconfig, ntpl_cmd);
1640 
1641  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1642  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[12]/32/32,Layer4Header[0]/16/16)",
1643  NAPATECH_KEYTYPE_IPV4, NAPATECH_KEYTYPE_IPV4);
1644  NapatechSetFilter(hconfig, ntpl_cmd);
1645 
1646  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1647  "KeyType[name=KT%u]={32,32,16,16}",
1648  NAPATECH_KEYTYPE_IPV4_SPAN);
1649  NapatechSetFilter(hconfig, ntpl_cmd);
1650 
1651  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1652  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[12]/32,Layer3Header[16]/32,Layer4Header[0]/16,Layer4Header[2]/16)",
1653  NAPATECH_KEYTYPE_IPV4_SPAN, NAPATECH_KEYTYPE_IPV4_SPAN);
1654  NapatechSetFilter(hconfig, ntpl_cmd);
1655 
1656  /* IPv6 5tuple for inline and tap ports */
1657  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1658  "KeyType[name=KT%u]={sw_128_128,sw_16_16}",
1659  NAPATECH_KEYTYPE_IPV6);
1660  NapatechSetFilter(hconfig, ntpl_cmd);
1661 
1662  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1663  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[8]/128/128,Layer4Header[0]/16/16)",
1664  NAPATECH_KEYTYPE_IPV6, NAPATECH_KEYTYPE_IPV6);
1665  NapatechSetFilter(hconfig, ntpl_cmd);
1666 
1667  /* IPv6 5tuple for SPAN Ports */
1668  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1669  "KeyType[name=KT%u]={128,128,16,16}",
1670  NAPATECH_KEYTYPE_IPV6_SPAN);
1671  NapatechSetFilter(hconfig, ntpl_cmd);
1672 
1673  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1674  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[8]/128,Layer3Header[24]/128,Layer4Header[0]/16,Layer4Header[2]/16)",
1675  NAPATECH_KEYTYPE_IPV6_SPAN, NAPATECH_KEYTYPE_IPV6_SPAN);
1676  NapatechSetFilter(hconfig, ntpl_cmd);
1677 
1678 
1679  int pair;
1680  char ports_ntpl_a[64];
1681  char ports_ntpl_b[64];
1682  memset(ports_ntpl_a, 0, sizeof(ports_ntpl_a));
1683  memset(ports_ntpl_b, 0, sizeof(ports_ntpl_b));
1684 
1685  for (pair = 0; pair < iteration; ++pair) {
1686  char port_str[8];
1687 
1688  if (!is_span_port[ports_spec.first[pair]]) {
1689  snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_a) == 0 ? "" : ",", ports_spec.first[pair]);
1690  strlcat(ports_ntpl_a, port_str, sizeof(ports_ntpl_a));
1691 
1692  snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_b) == 0 ? "" : ",", ports_spec.second[pair]);
1693  strlcat(ports_ntpl_b, port_str, sizeof(ports_ntpl_b));
1694  }
1695  }
1696 
1697  if (strlen(ports_ntpl_a) > 0) {
1698  /* This is the assign for dropping upstream traffic */
1699  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1700  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1701  ports_ntpl_a,
1702  NAPATECH_KEYTYPE_IPV4,
1703  NAPATECH_KEYTYPE_IPV4,
1704  NAPATECH_FLOWTYPE_DROP);
1705  NapatechSetFilter(hconfig, ntpl_cmd);
1706  }
1707 
1708  if (strlen(ports_ntpl_b) > 0) {
1709  /* This is the assign for dropping downstream traffic */
1710  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1711  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1712  ports_ntpl_b, //ports_spec.str,
1713  NAPATECH_KEYTYPE_IPV4,
1714  NAPATECH_KEYTYPE_IPV4,
1715  NAPATECH_FLOWTYPE_DROP);
1716  NapatechSetFilter(hconfig, ntpl_cmd);
1717  }
1718 
1719  if (strlen(span_ports) > 0) {
1720  /* This is the assign for dropping SPAN Port traffic */
1721  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1722  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1723  span_ports,
1724  NAPATECH_KEYTYPE_IPV4_SPAN,
1725  NAPATECH_KEYTYPE_IPV4_SPAN,
1726  NAPATECH_FLOWTYPE_DROP);
1727  NapatechSetFilter(hconfig, ntpl_cmd);
1728  }
1729 
1730  if (is_inline) {
1731  for (pair = 0; pair < iteration; ++pair) {
1732  /* This is the assignment for forwarding traffic */
1733  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1734  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u)==%u)",
1735  ports_spec.second[pair],
1736  ports_spec.first[pair],
1737  NAPATECH_KEYTYPE_IPV4,
1738  NAPATECH_KEYTYPE_IPV4,
1739  NAPATECH_FLOWTYPE_PASS);
1740  NapatechSetFilter(hconfig, ntpl_cmd);
1741 
1742  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1743  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1744  ports_spec.first[pair],
1745  ports_spec.second[pair],
1746  NAPATECH_KEYTYPE_IPV4,
1747  NAPATECH_KEYTYPE_IPV4,
1748  NAPATECH_FLOWTYPE_PASS);
1749  NapatechSetFilter(hconfig, ntpl_cmd);
1750  }
1751  }
1752 
1753  if (strlen(ports_ntpl_a) > 0) {
1754  /* This is the assign for dropping upstream traffic */
1755  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1756  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1757  ports_ntpl_a,
1758  NAPATECH_KEYTYPE_IPV6,
1759  NAPATECH_KEYTYPE_IPV6,
1760  NAPATECH_FLOWTYPE_DROP);
1761  NapatechSetFilter(hconfig, ntpl_cmd);
1762  }
1763 
1764  if (strlen(ports_ntpl_b) > 0) {
1765  /* This is the assign for dropping downstream traffic */
1766  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1767  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1768  ports_ntpl_b, //ports_spec.str,
1769  NAPATECH_KEYTYPE_IPV6,
1770  NAPATECH_KEYTYPE_IPV6,
1771  NAPATECH_FLOWTYPE_DROP);
1772  NapatechSetFilter(hconfig, ntpl_cmd);
1773  }
1774 
1775  if (strlen(span_ports) > 0) {
1776  /* This is the assign for dropping SPAN Port traffic */
1777  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1778  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1779  span_ports,
1780  NAPATECH_KEYTYPE_IPV6_SPAN,
1781  NAPATECH_KEYTYPE_IPV6_SPAN,
1782  NAPATECH_FLOWTYPE_DROP);
1783  NapatechSetFilter(hconfig, ntpl_cmd);
1784  }
1785 
1786  if (is_inline) {
1787  for (pair = 0; pair < iteration; ++pair) {
1788  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1789  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u)==%u)",
1790  ports_spec.second[pair],
1791  ports_spec.first[pair],
1792  NAPATECH_KEYTYPE_IPV6,
1793  NAPATECH_KEYTYPE_IPV6,
1794  NAPATECH_FLOWTYPE_PASS);
1795  NapatechSetFilter(hconfig, ntpl_cmd);
1796 
1797  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1798  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1799  ports_spec.first[pair],
1800  ports_spec.second[pair],
1801  NAPATECH_KEYTYPE_IPV6,
1802  NAPATECH_KEYTYPE_IPV6,
1803  NAPATECH_FLOWTYPE_PASS);
1804  NapatechSetFilter(hconfig, ntpl_cmd);
1805  }
1806  }
1807  } else {
1808  if (is_inline) {
1809  FatalError("Napatech Inline operation not supported by this FPGA version.");
1810  }
1811 
1813  snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d);colormask=0x0] = %s%s",
1814  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1815  NapatechSetFilter(hconfig, ntpl_cmd);
1816  }
1817  }
1818 
1819 #else /* NAPATECH_ENABLE_BYPASS */
1820  snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d)] = %s%s",
1821  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1822  NapatechSetFilter(hconfig, ntpl_cmd);
1823 
1824 #endif /* !NAPATECH_ENABLE_BYPASS */
1825 
1826  SCLogConfig("Host-buffer NUMA assignments: ");
1827  int numa_nodes[MAX_HOSTBUFFERS];
1828  uint32_t stream_id;
1829  for (stream_id = first_stream; stream_id < last_stream; ++stream_id) {
1830  char temp1[256];
1831  char temp2[256];
1832 
1833  uint32_t num_host_buffers = GetStreamNUMAs(stream_id, numa_nodes);
1834 
1835  snprintf(temp1, 256, " stream %d: ", stream_id);
1836 
1837  for (uint32_t hb_id = 0; hb_id < num_host_buffers; ++hb_id) {
1838  snprintf(temp2, 256, "%d ", numa_nodes[hb_id]);
1839  strlcat(temp1, temp2, sizeof(temp1));
1840  }
1841 
1842  SCLogConfig("%s", temp1);
1843  }
1844 
1845  if (first_stream == last_stream) {
1846  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1847  "Setup[state=active] = StreamId == %d",
1848  first_stream);
1849  } else {
1850  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1851  "Setup[state=active] = StreamId == (%d..%d)",
1852  first_stream, last_stream);
1853  }
1854  NapatechSetFilter(hconfig, ntpl_cmd);
1855 
1856  NT_ConfigClose(hconfig);
1857 
1858  return status;
1859 }
1860 
1861 #endif // HAVE_NAPATECH
util-byte.h
PORTS_SPEC_SIZE
#define PORTS_SPEC_SIZE
tm-threads.h
CONFIG_SPECIFIER_UNDEFINED
@ CONFIG_SPECIFIER_UNDEFINED
Definition: util-napatech.c:203
TmThreadSpawn
TmEcode TmThreadSpawn(ThreadVars *tv)
Spawns a thread associated with the ThreadVars instance tv.
Definition: tm-threads.c:1650
NapatechGetCurrentStats
NapatechCurrentStats NapatechGetCurrentStats(uint16_t id)
Definition: util-napatech.c:196
CONFIG_SPECIFIER_RANGE
@ CONFIG_SPECIFIER_RANGE
Definition: util-napatech.c:204
ConfNode_::val
char * val
Definition: conf.h:34
ConfGetBool
int ConfGetBool(const char *name, int *val)
Retrieve a configuration value as a boolean.
Definition: conf.c:482
NapatechSetPortmap
int NapatechSetPortmap(int port, int peer)
unlikely
#define unlikely(expr)
Definition: util-optimize.h:35
SCLogDebug
#define SCLogDebug(...)
Definition: util-debug.h:269
TmThreadsSetFlag
void TmThreadsSetFlag(ThreadVars *tv, uint32_t flag)
Set a thread flag.
Definition: tm-threads.c:99
TmThreadWaitForFlag
void TmThreadWaitForFlag(ThreadVars *tv, uint32_t flags)
Waits till the specified flag(s) is(are) set. We don't bother if the kill flag has been set or not on...
Definition: tm-threads.c:1765
next
struct HtpBodyChunk_ * next
Definition: app-layer-htp.h:0
THV_DEINIT
#define THV_DEINIT
Definition: threadvars.h:44
ConfGetNode
ConfNode * ConfGetNode(const char *name)
Get a ConfNode by name.
Definition: conf.c:181
UtilCpuGetNumProcessorsConfigured
uint16_t UtilCpuGetNumProcessorsConfigured(void)
Get the number of cpus configured in the system.
Definition: util-cpu.c:59
StatsSetUI64
void StatsSetUI64(ThreadVars *tv, uint16_t id, uint64_t x)
Sets a value of type double to the local counter.
Definition: counters.c:207
THV_RUNNING
#define THV_RUNNING
Definition: threadvars.h:54
NapatechDeleteFilters
uint32_t NapatechDeleteFilters(void)
Definition: util-napatech.c:1332
TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)
Definition: queue.h:252
StatsSetupPrivate
int StatsSetupPrivate(ThreadVars *tv)
Definition: counters.c:1214
NapatechIsAutoConfigEnabled
bool NapatechIsAutoConfigEnabled(void)
Definition: runmode-napatech.c:70
PacketCounters_::byte
uint16_t byte
Definition: util-napatech.c:188
stream_config
TcpStreamCnf stream_config
Definition: stream-tcp.c:219
current_stats
NapatechCurrentStats current_stats[MAX_STREAMS]
Definition: util-napatech.c:194
NapatechStreamConfig_
Definition: util-napatech.h:43
THV_RUNNING_DONE
#define THV_RUNNING_DONE
Definition: threadvars.h:45
NapatechGetStreamConfig
int NapatechGetStreamConfig(NapatechStreamConfig stream_config[])
Reads and parses the stream configuration defined in the config file.
Definition: util-napatech.c:804
ConfGet
int ConfGet(const char *name, const char **vptr)
Retrieve the value of a configuration node.
Definition: conf.c:335
StringParseUint8
int StringParseUint8(uint8_t *res, int base, size_t len, const char *str)
Definition: util-byte.c:361
total_stats
NapatechCurrentStats total_stats
Definition: util-napatech.c:193
util-device.h
NapatechCurrentStats_::current_packets
uint64_t current_packets
Definition: util-napatech.h:51
strlcat
size_t strlcat(char *, const char *src, size_t siz)
Definition: util-strlcatu.c:45
util-cpu.h
source-napatech.h
ThreadVars_
Per thread variable structure.
Definition: threadvars.h:57
PacketCounters
struct PacketCounters_ PacketCounters
THV_KILL
#define THV_KILL
Definition: threadvars.h:39
CONFIG_SPECIFIER_INDIVIDUAL
@ CONFIG_SPECIFIER_INDIVIDUAL
Definition: util-napatech.c:205
TmThreadCreate
ThreadVars * TmThreadCreate(const char *name, const char *inq_name, const char *inqh_name, const char *outq_name, const char *outqh_name, const char *slots, void *(*fn_p)(void *), int mucond)
Creates and returns the TV instance for a new thread.
Definition: tm-threads.c:904
NAPATECH_NTPL_ERROR
#define NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status)
Definition: util-napatech.h:72
SCLogWarning
#define SCLogWarning(...)
Macro used to log WARNING messages.
Definition: util-debug.h:249
NapatechGetAdapter
int NapatechGetAdapter(uint8_t port)
util-napatech.h
PacketCounters_::pkts
uint16_t pkts
Definition: util-napatech.c:187
PacketCounters_::drop_pkts
uint16_t drop_pkts
Definition: util-napatech.c:189
NapatechCurrentStats_::current_bytes
uint64_t current_bytes
Definition: util-napatech.h:52
HB_HIGHWATER
#define HB_HIGHWATER
Definition: util-napatech.c:691
PacketCounters_::drop_byte
uint16_t drop_byte
Definition: util-napatech.c:190
SCLogInfo
#define SCLogInfo(...)
Macro used to log INFORMATIONAL messages.
Definition: util-debug.h:224
ConfNodeLookupChild
ConfNode * ConfNodeLookupChild(const ConfNode *node, const char *name)
Lookup a child configuration node by name.
Definition: conf.c:781
THV_INIT_DONE
#define THV_INIT_DONE
Definition: threadvars.h:36
CONFIG_SPECIFIER
CONFIG_SPECIFIER
Definition: util-napatech.c:202
ByteExtractStringUint8
int ByteExtractStringUint8(uint8_t *res, int base, size_t len, const char *str)
Definition: util-byte.c:285
suricata-common.h
SCLogPerf
#define SCLogPerf(...)
Definition: util-debug.h:230
NapatechStartStats
void NapatechStartStats(void)
Definition: util-napatech.c:1185
FatalError
#define FatalError(...)
Definition: util-debug.h:502
MAX_HOSTBUFFERS
#define MAX_HOSTBUFFERS
Definition: util-napatech.c:208
tv
ThreadVars * tv
Definition: fuzz_decodepcapfile.c:32
threadvars.h
NapatechUseHWBypass
bool NapatechUseHWBypass(void)
Definition: runmode-napatech.c:75
NapatechCurrentStats_::current_drop_bytes
uint64_t current_drop_bytes
Definition: util-napatech.h:54
SCLogConfig
struct SCLogConfig_ SCLogConfig
Holds the config state used by the logging api.
str
#define str(s)
Definition: suricata-common.h:291
SCLogError
#define SCLogError(...)
Macro used to log ERROR messages.
Definition: util-debug.h:261
MAX_PORTS
#define MAX_PORTS
Definition: util-napatech.h:59
ConfNode_
Definition: conf.h:32
runmode-napatech.h
NapatechCurrentStats_::current_drop_packets
uint64_t current_drop_packets
Definition: util-napatech.h:53
suricata.h
NapatechSetupTraffic
uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
Definition: util-napatech.c:1359
MAX_STREAMS
#define MAX_STREAMS
Definition: util-napatech.c:690
NapatechCurrentStats_
Definition: util-napatech.h:50
StatsSyncCountersIfSignalled
void StatsSyncCountersIfSignalled(ThreadVars *tv)
Definition: counters.c:454
PacketCounters_
Definition: util-napatech.c:186
TmThreadsCheckFlag
int TmThreadsCheckFlag(ThreadVars *tv, uint32_t flag)
Check if a thread flag is set.
Definition: tm-threads.c:91
NAPATECH_ERROR
#define NAPATECH_ERROR(status)
Definition: util-napatech.h:65
StatsRegisterCounter
uint16_t StatsRegisterCounter(const char *name, struct ThreadVars_ *tv)
Registers a normal, unqualified counter.
Definition: counters.c:961
THV_CLOSED
#define THV_CLOSED
Definition: threadvars.h:41
SCCalloc
#define SCCalloc(nm, sz)
Definition: util-mem.h:53
NapatechSetupNuma
bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
Definition: util-napatech.c:1221