/* suricata — util-napatech.c
 * (Doxygen page-header residue from the documentation export.) */
1 /* Copyright (C) 2017-2021 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 /**
18  * \file
19  *
20  * \author Napatech Inc.
21  * \author Phil Young <py@napatech.com>
22  *
23  *
24  */
25 #include "suricata-common.h"
26 
27 #ifdef HAVE_NAPATECH
28 #include "suricata.h"
29 #include "util-device.h"
30 #include "util-cpu.h"
31 #include "util-byte.h"
32 #include "threadvars.h"
33 #include "tm-threads.h"
34 #include "util-napatech.h"
35 #include "source-napatech.h"
36 
37 #ifdef NAPATECH_ENABLE_BYPASS
38 
39 /*
40  * counters to track the number of flows programmed on
41  * the adapter.
42  */
43 typedef struct FlowStatsCounters_
44 {
45  uint16_t active_bypass_flows;
46  uint16_t total_bypass_flows;
47 } FlowStatsCounters;
48 
49 
/* Cached verdict from NapatechVerifyBypassSupport(): non-zero once every
 * adapter has accepted a flow-programming stream. Zero-initialized, so
 * bypass reads as unsupported until verification runs. */
static int bypass_supported;

/**
 * \brief Reports whether hardware bypass support was verified.
 *
 * \return non-zero if bypass is supported; zero otherwise.
 */
int NapatechIsBypassSupported(void)
{
    return bypass_supported;
}
55 
56 /**
57  * \brief Returns the number of Napatech Adapters in the system.
58  *
59  * \return count of the Napatech adapters present in the system.
60  */
61 int NapatechGetNumAdapters(void)
62 {
63  NtInfoStream_t hInfo;
64  NtInfo_t hInfoSys;
65  int status;
66  static int num_adapters = -1;
67 
68  if (num_adapters == -1) {
69  if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
70  NAPATECH_ERROR(status);
71  exit(EXIT_FAILURE);
72  }
73 
74  hInfoSys.cmd = NT_INFO_CMD_READ_SYSTEM;
75  if ((status = NT_InfoRead(hInfo, &hInfoSys)) != NT_SUCCESS) {
76  NAPATECH_ERROR(status);
77  exit(EXIT_FAILURE);
78  }
79 
80  num_adapters = hInfoSys.u.system.data.numAdapters;
81 
82  NT_InfoClose(hInfo);
83  }
84 
85  return num_adapters;
86 }
87 
88 /**
89  * \brief Verifies that the Napatech adapters support bypass.
90  *
 * Attempts to open a FlowStream on each adapter present in the system.
92  * If successful then bypass is supported
93  *
94  * \return 1 if Bypass functionality is supported; zero otherwise.
95  */
96 int NapatechVerifyBypassSupport(void)
97 {
98  int status;
99  int adapter = 0;
100  int num_adapters = NapatechGetNumAdapters();
101  SCLogInfo("Found %d Napatech adapters.", num_adapters);
102  NtFlowStream_t hFlowStream;
103 
104  if (!NapatechUseHWBypass()) {
105  /* HW Bypass is disabled in the conf file */
106  return 0;
107  }
108 
109  for (adapter = 0; adapter < num_adapters; ++adapter) {
110  NtFlowAttr_t attr;
111  char flow_name[80];
112 
113  NT_FlowOpenAttrInit(&attr);
114  NT_FlowOpenAttrSetAdapterNo(&attr, adapter);
115 
116  snprintf(flow_name, sizeof(flow_name), "Flow stream %d", adapter );
117  SCLogInfo("Opening flow programming stream: %s\n", flow_name);
118  if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) {
119  SCLogWarning("Napatech bypass functionality not supported by the FPGA version on "
120  "adapter %d - disabling support.",
121  adapter);
122  bypass_supported = 0;
123  return 0;
124  }
125  NT_FlowClose(hFlowStream);
126  }
127 
128  bypass_supported = 1;
129  return bypass_supported;
130 }
131 
132 
133 /**
134  * \brief Updates statistic counters for Napatech FlowStats
135  *
136  * \param tv Thread variable to ThreadVars
137  * \param hInfo Handle to the Napatech InfoStream.
138  * \param hstat_stream Handle to the Napatech Statistics Stream.
139  * \param flow_counters The flow counters statistics to update.
140  * \param clear_stats Indicates if statistics on the card should be reset to zero.
141  *
142  */
143 static void UpdateFlowStats(
144  ThreadVars *tv,
145  NtInfoStream_t hInfo,
146  NtStatStream_t hstat_stream,
147  FlowStatsCounters flow_counters,
148  int clear_stats
149  )
150 {
151  NtStatistics_t hStat;
152  int status;
153 
154  uint64_t programed = 0;
155  uint64_t removed = 0;
156  int adapter = 0;
157 
158  for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
159  hStat.cmd = NT_STATISTICS_READ_CMD_FLOW_V0;
160  hStat.u.flowData_v0.clear = clear_stats;
161  hStat.u.flowData_v0.adapterNo = adapter;
162  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
163  NAPATECH_ERROR(status);
164  exit(1);
165  }
166  programed = hStat.u.flowData_v0.learnDone;
167  removed = hStat.u.flowData_v0.unlearnDone
168  + hStat.u.flowData_v0.automaticUnlearnDone
169  + hStat.u.flowData_v0.timeoutUnlearnDone;
170  }
171 
172  StatsSetUI64(tv, flow_counters.active_bypass_flows, programed - removed);
173  StatsSetUI64(tv, flow_counters.total_bypass_flows, programed);
174 }
175 
176 #endif /* NAPATECH_ENABLE_BYPASS */
177 
178 
179 /*-----------------------------------------------------------------------------
180  *-----------------------------------------------------------------------------
181  * Statistics code
182  *-----------------------------------------------------------------------------
183  */
184 typedef struct PacketCounters_
185 {
186  uint16_t pkts;
187  uint16_t byte;
188  uint16_t drop_pkts;
189  uint16_t drop_byte;
191 
194 
196 {
197 
198  return current_stats[id];
199 }
200 
205 };
206 
207 #define MAX_HOSTBUFFERS 8
208 
209 /**
210  * \brief Test to see if any of the configured streams are active
211  *
212  * \param hInfo Handle to Napatech Info Stream.
213  * \param hStatsStream Handle to Napatech Statistics stream
214  * \param stream_config array of stream configuration structures
215  * \param num_inst
216  *
217  */
218 static uint16_t TestStreamConfig(
219  NtInfoStream_t hInfo,
220  NtStatStream_t hstat_stream,
222  uint16_t num_inst)
223 {
224  uint16_t num_active = 0;
225 
226  for (uint16_t inst = 0; inst < num_inst; ++inst) {
227  int status;
228  NtStatistics_t stat; // Stat handle.
229 
230  /* Check to see if it is an active stream */
231  memset(&stat, 0, sizeof (NtStatistics_t));
232 
233  /* Read usage data for the chosen stream ID */
234  stat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
235  stat.u.usageData_v0.streamid = (uint8_t) stream_config[inst].stream_id;
236 
237  if ((status = NT_StatRead(hstat_stream, &stat)) != NT_SUCCESS) {
238  NAPATECH_ERROR(status);
239  return 0;
240  }
241 
242  if (stat.u.usageData_v0.data.numHostBufferUsed > 0) {
243  stream_config[inst].is_active = true;
244  num_active++;
245  } else {
246  stream_config[inst].is_active = false;
247  }
248  }
249 
250  return num_active;
251 }
252 
253 /**
254  * \brief Updates Napatech packet counters
255  *
256  * \param tv Pointer to TheardVars structure
257  * \param hInfo Handle to Napatech Info Stream.
258  * \param hstat_stream Handle to Napatech Statistics stream
259  * \param num_streams the number of streams that are currently active
260  * \param stream_config array of stream configuration structures
261  * \param total_counters - cumulative count of all packets received.
262  * \param dispatch_host, - Count of packets that were delivered to the host buffer
263  * \param dispatch_drop - count of packets that were dropped as a result of a rule
264  * \param dispatch_fwd - count of packets forwarded out the egress port as the result of a rule
265  * \param is_inline - are we running in inline mode?
266  * \param enable_stream_stats - are per thread/stream statistics enabled.
267  * \param stream_counters - counters for each thread/stream configured.
268  *
269  * \return The number of active streams that were updated.
270  *
271  */
static uint32_t UpdateStreamStats(ThreadVars *tv,
        NtInfoStream_t hInfo,
        NtStatStream_t hstat_stream,
        uint16_t num_streams,
        /* NOTE(review): a parameter declaration (the stream_config[] array
         * used throughout the body and by both call sites) appears to have
         * been lost from this copy of the file — restore before building. */
        PacketCounters total_counters,
        PacketCounters dispatch_host,
        PacketCounters dispatch_drop,
        PacketCounters dispatch_fwd,
        int is_inline,
        int enable_stream_stats,
        PacketCounters stream_counters[]
        ) {
    /* Baselines captured at the first reading of each stream so that the
     * published values are deltas from thread start-up. */
    static uint64_t rxPktsStart[MAX_STREAMS] = {0};
    static uint64_t rxByteStart[MAX_STREAMS] = {0};
    static uint64_t dropPktStart[MAX_STREAMS] = {0};
    static uint64_t dropByteStart[MAX_STREAMS] = {0};

    int status;
    NtInfo_t hStreamInfo;
    NtStatistics_t hStat; // Stat handle.

    /* Query the system to get the number of streams currently instantiated */
    hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
    if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    uint16_t num_active;
    if ((num_active = TestStreamConfig(hInfo, hstat_stream, stream_config, num_streams)) == 0) {
        /* None of the configured streams are active */
        return 0;
    }

    /* At least one stream is active so proceed with the stats. */
    uint16_t inst_id = 0;
    uint32_t stream_cnt = 0;
    for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {
        /* Advance inst_id past inactive entries to the next active stream. */
        while (inst_id < num_streams) {
            if (stream_config[inst_id].is_active) {
                break;
            } else {
                ++inst_id;
            }
        }
        if (inst_id == num_streams)
            break;

        /* Read usage data for the chosen stream ID */
        memset(&hStat, 0, sizeof (NtStatistics_t));
        hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
        hStat.u.usageData_v0.streamid = (uint8_t) stream_config[inst_id].stream_id;

        if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
            NAPATECH_ERROR(status);
            return 0;
        }

        uint16_t stream_id = stream_config[inst_id].stream_id;
        if (stream_config[inst_id].is_active) {
            uint64_t rx_pkts_total = 0;
            uint64_t rx_byte_total = 0;
            uint64_t drop_pkts_total = 0;
            uint64_t drop_byte_total = 0;

            /* Sum over every host buffer currently used by this stream. */
            for (uint32_t hbCount = 0; hbCount < hStat.u.usageData_v0.data.numHostBufferUsed; hbCount++) {
                if (unlikely(stream_config[inst_id].initialized == false)) {
                    /* First reading for this stream: record the baseline
                     * rather than reporting the raw totals. */
                    rxPktsStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
                    rxByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
                    dropPktStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
                    dropByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
                    stream_config[inst_id].initialized = true;
                } else {
                    rx_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
                    rx_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
                    drop_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
                    drop_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
                }
            }

            /* Publish deltas relative to the recorded start-up baselines. */
            current_stats[stream_id].current_packets = rx_pkts_total - rxPktsStart[stream_id];
            current_stats[stream_id].current_bytes = rx_byte_total - rxByteStart[stream_id];
            current_stats[stream_id].current_drop_packets = drop_pkts_total - dropPktStart[stream_id];
            current_stats[stream_id].current_drop_bytes = drop_byte_total - dropByteStart[stream_id];
        }

        if (enable_stream_stats) {
            StatsSetUI64(tv, stream_counters[inst_id].pkts, current_stats[stream_id].current_packets);
            StatsSetUI64(tv, stream_counters[inst_id].byte, current_stats[stream_id].current_bytes);
            StatsSetUI64(tv, stream_counters[inst_id].drop_pkts, current_stats[stream_id].current_drop_packets);
            StatsSetUI64(tv, stream_counters[inst_id].drop_byte, current_stats[stream_id].current_drop_bytes);
        }

        ++inst_id;
    }

    uint32_t stream_id;
    for (stream_id = 0; stream_id < num_streams; ++stream_id) {

#ifndef NAPATECH_ENABLE_BYPASS
        /* NOTE(review): the loop body (presumably the accumulation of
         * per-stream values into total_stats) appears to be missing from
         * this copy of the file. */
#endif /* NAPATECH_ENABLE_BYPASS */
    }


#ifndef NAPATECH_ENABLE_BYPASS
    StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
    StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);
#endif /* NAPATECH_ENABLE_BYPASS */

    /* NOTE(review): additional statements (likely the drop-counter
     * publication and/or a total_stats reset) appear to be missing from
     * this copy of the file here. */

    /* Read usage data for the chosen stream ID */
    memset(&hStat, 0, sizeof (NtStatistics_t));

#ifdef NAPATECH_ENABLE_BYPASS
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
    hStat.u.query_v3.clear = 0;
#else /* NAPATECH_ENABLE_BYPASS */
    /* Older versions of the API have a different structure. */
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
    hStat.u.query_v2.clear = 0;
#endif /* !NAPATECH_ENABLE_BYPASS */

    if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
        if (status == NT_STATUS_TIMEOUT) {
            /* Transient: skip this cycle instead of failing. */
            SCLogInfo("Statistics timed out - will retry next time.");
            return 0;
        } else {
            NAPATECH_ERROR(status);
            return 0;
        }
    }

#ifdef NAPATECH_ENABLE_BYPASS

    /* Aggregate per-color dispatch counters across all adapters. The
     * variable names suggest color 0 = delivered to host, colors 1/3 =
     * dropped by bypass rules, colors 2/4 = forwarded out the egress port
     * — NOTE(review): confirm against the NTPL color assignments. */
    int adapter = 0;
    uint64_t total_dispatch_host_pkts = 0;
    uint64_t total_dispatch_host_byte = 0;
    uint64_t total_dispatch_drop_pkts = 0;
    uint64_t total_dispatch_drop_byte = 0;
    uint64_t total_dispatch_fwd_pkts = 0;
    uint64_t total_dispatch_fwd_byte = 0;

    for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
        total_dispatch_host_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts;
        total_dispatch_host_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets;

        total_dispatch_drop_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;
        total_dispatch_drop_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].octets;

        total_dispatch_fwd_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].pkts;
        total_dispatch_fwd_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].octets;

        total_stats.current_packets += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;

        /* NOTE(review): plain '=' (not '+=') and only colors 0-2 here —
         * unlike the packet total above, byte totals do not accumulate
         * across adapters. Looks suspicious but is preserved as-is. */
        total_stats.current_bytes = hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets;
    }

    StatsSetUI64(tv, dispatch_host.pkts, total_dispatch_host_pkts);
    StatsSetUI64(tv, dispatch_host.byte, total_dispatch_host_byte);

    StatsSetUI64(tv, dispatch_drop.pkts, total_dispatch_drop_pkts);
    StatsSetUI64(tv, dispatch_drop.byte, total_dispatch_drop_byte);

    /* Forward counters only exist in inline mode. */
    if (is_inline) {
        StatsSetUI64(tv, dispatch_fwd.pkts, total_dispatch_fwd_pkts);
        StatsSetUI64(tv, dispatch_fwd.byte, total_dispatch_fwd_byte);
    }

    StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
    StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);

#endif /* NAPATECH_ENABLE_BYPASS */

    return num_active;
}
468 
469 /**
470  * \brief Statistics processing loop
471  *
472  * Instantiated on the stats thread. Periodically retrieves
473  * statistics from the Napatech card and updates the packet counters
474  *
475  * \param arg Pointer that is cast into a TheardVars structure
476  */
static void *NapatechStatsLoop(void *arg)
{
    ThreadVars *tv = (ThreadVars *) arg;

    int status;
    NtInfoStream_t hInfo;
    NtStatStream_t hstat_stream;
    int is_inline = 0;
    int enable_stream_stats = 0;
    PacketCounters stream_counters[MAX_STREAMS];

    if (ConfGetBool("napatech.inline", &is_inline) == 0) {
        is_inline = 0;
    }

    if (ConfGetBool("napatech.enable-stream-stats", &enable_stream_stats) == 0) {
        /* default is "no" */
        enable_stream_stats = 0;
    }

    /* NOTE(review): the declaration of stream_config (an array of
     * NapatechStreamConfig, cf. NapatechBufMonitorLoop below) appears to
     * be missing from this copy of the file. */
    uint16_t stream_cnt = NapatechGetStreamConfig(stream_config);

    /* Open the info and Statistics */
    if ((status = NT_InfoOpen(&hInfo, "StatsLoopInfoStream")) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        return NULL;
    }

    if ((status = NT_StatOpen(&hstat_stream, "StatsLoopStatsStream")) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        return NULL;
    }

    NtStatistics_t hStat;
    memset(&hStat, 0, sizeof (NtStatistics_t));

#ifdef NAPATECH_ENABLE_BYPASS
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
    hStat.u.query_v3.clear = 1;
#else /* NAPATECH_ENABLE_BYPASS */
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
    hStat.u.query_v2.clear = 1;
#endif /* !NAPATECH_ENABLE_BYPASS */

    /* clear = 1: reset the adapter counters once at thread start so later
     * reads are relative to start-up. */
    if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        return 0; /* NOTE(review): 0 == NULL here, but inconsistent with the
                   * explicit 'return NULL' used on the other error paths. */
    }

    PacketCounters total_counters;
    memset(&total_counters, 0, sizeof(total_counters));

    PacketCounters dispatch_host;
    memset(&dispatch_host, 0, sizeof(dispatch_host));

    PacketCounters dispatch_drop;
    memset(&dispatch_drop, 0, sizeof(dispatch_drop));

    PacketCounters dispatch_fwd;
    memset(&dispatch_fwd, 0, sizeof(dispatch_fwd));

    /* Register the aggregate counters; forward counters only in inline mode. */
    total_counters.pkts = StatsRegisterCounter("napa_total.pkts", tv);
    dispatch_host.pkts = StatsRegisterCounter("napa_dispatch_host.pkts", tv);
    dispatch_drop.pkts = StatsRegisterCounter("napa_dispatch_drop.pkts", tv);
    if (is_inline) {
        dispatch_fwd.pkts = StatsRegisterCounter("napa_dispatch_fwd.pkts", tv);
    }

    total_counters.byte = StatsRegisterCounter("napa_total.byte", tv);
    dispatch_host.byte = StatsRegisterCounter("napa_dispatch_host.byte", tv);
    dispatch_drop.byte = StatsRegisterCounter("napa_dispatch_drop.byte", tv);
    if (is_inline) {
        dispatch_fwd.byte = StatsRegisterCounter("napa_dispatch_fwd.byte", tv);
    }

    total_counters.drop_pkts = StatsRegisterCounter("napa_total.overflow_drop_pkts", tv);
    total_counters.drop_byte = StatsRegisterCounter("napa_total.overflow_drop_byte", tv);

    if (enable_stream_stats) {
        /* Per-stream counter names; the 32-byte name buffers are handed to
         * the stats subsystem and intentionally never freed here. */
        for (int i = 0; i < stream_cnt; ++i) {
            char *pkts_buf = SCCalloc(1, 32);
            if (unlikely(pkts_buf == NULL)) {
                FatalError("Failed to allocate memory for NAPATECH stream counter.");
            }

            snprintf(pkts_buf, 32, "napa%d.pkts", stream_config[i].stream_id);
            stream_counters[i].pkts = StatsRegisterCounter(pkts_buf, tv);

            char *byte_buf = SCCalloc(1, 32);
            if (unlikely(byte_buf == NULL)) {
                FatalError("Failed to allocate memory for NAPATECH stream counter.");
            }
            snprintf(byte_buf, 32, "napa%d.bytes", stream_config[i].stream_id);
            stream_counters[i].byte = StatsRegisterCounter(byte_buf, tv);

            char *drop_pkts_buf = SCCalloc(1, 32);
            if (unlikely(drop_pkts_buf == NULL)) {
                FatalError("Failed to allocate memory for NAPATECH stream counter.");
            }
            snprintf(drop_pkts_buf, 32, "napa%d.drop_pkts", stream_config[i].stream_id);
            stream_counters[i].drop_pkts = StatsRegisterCounter(drop_pkts_buf, tv);

            char *drop_byte_buf = SCCalloc(1, 32);
            if (unlikely(drop_byte_buf == NULL)) {
                FatalError("Failed to allocate memory for NAPATECH stream counter.");
            }
            snprintf(drop_byte_buf, 32, "napa%d.drop_byte", stream_config[i].stream_id);
            stream_counters[i].drop_byte = StatsRegisterCounter(drop_byte_buf, tv);
        }
    }

#ifdef NAPATECH_ENABLE_BYPASS
    FlowStatsCounters flow_counters;
    if (bypass_supported) {
        flow_counters.active_bypass_flows = StatsRegisterCounter("napa_bypass.active_flows", tv);
        flow_counters.total_bypass_flows = StatsRegisterCounter("napa_bypass.total_flows", tv);
    }
#endif /* NAPATECH_ENABLE_BYPASS */

    /* NOTE(review): a statement appears to be missing from this copy here
     * (presumably the call that finalizes counter registration). */

    StatsSetUI64(tv, total_counters.pkts, 0);
    StatsSetUI64(tv, total_counters.byte, 0);
    StatsSetUI64(tv, total_counters.drop_pkts, 0);
    StatsSetUI64(tv, total_counters.drop_byte, 0);

#ifdef NAPATECH_ENABLE_BYPASS
    if (bypass_supported) {
        StatsSetUI64(tv, dispatch_host.pkts, 0);
        StatsSetUI64(tv, dispatch_drop.pkts, 0);

        if (is_inline) {
            StatsSetUI64(tv, dispatch_fwd.pkts, 0);
        }

        StatsSetUI64(tv, dispatch_host.byte, 0);
        StatsSetUI64(tv, dispatch_drop.byte, 0);
        if (is_inline) {
            StatsSetUI64(tv, dispatch_fwd.byte, 0);
        }

        if (enable_stream_stats) {
            for (int i = 0; i < stream_cnt; ++i) {
                StatsSetUI64(tv, stream_counters[i].pkts, 0);
                StatsSetUI64(tv, stream_counters[i].byte, 0);
                StatsSetUI64(tv, stream_counters[i].drop_pkts, 0);
                StatsSetUI64(tv, stream_counters[i].drop_byte, 0);
            }
        }

        StatsSetUI64(tv, flow_counters.active_bypass_flows, 0);
        StatsSetUI64(tv, flow_counters.total_bypass_flows, 0);
        /* clear_stats = 1: reset the adapter flow counters at start-up. */
        UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 1);
    }
#endif /* NAPATECH_ENABLE_BYPASS */

    uint32_t num_active = UpdateStreamStats(tv, hInfo, hstat_stream,
            stream_cnt, stream_config, total_counters,
            dispatch_host, dispatch_drop, dispatch_fwd,
            is_inline, enable_stream_stats, stream_counters);

    if (!NapatechIsAutoConfigEnabled() && (num_active < stream_cnt)) {
        SCLogInfo("num_active: %d, stream_cnt: %d", num_active, stream_cnt);
        SCLogWarning("Some or all of the configured streams are not created. Proceeding with "
                     "active streams.");
    }

    /* NOTE(review): a statement appears to be missing before this loop
     * (likely marking the thread as initialized/running). */
    while (1) {
        /* NOTE(review): the opening 'if (TmThreadsCheckFlag(tv, THV_KILL)) {'
         * appears to be missing from this copy of the file; the closing
         * brace below belongs to it. */
            SCLogDebug("NapatechStatsLoop THV_KILL detected");
            break;
        }

        UpdateStreamStats(tv, hInfo, hstat_stream,
                stream_cnt, stream_config, total_counters,
                dispatch_host, dispatch_drop, dispatch_fwd,
                is_inline, enable_stream_stats,
                stream_counters);

#ifdef NAPATECH_ENABLE_BYPASS
        if (bypass_supported) {
            UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 0);
        }
#endif /* NAPATECH_ENABLE_BYPASS */

        /* NOTE(review): a statement appears to be missing here (likely a
         * stats synchronization call) before the 1 s sleep. */
        usleep(1000000);
    }

    /* CLEAN UP NT Resources and Close the info stream */
    if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        return NULL;
    }

    /* Close the statistics stream */
    if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        return NULL;
    }

    SCLogDebug("Exiting NapatechStatsLoop");
    /* NOTE(review): the thread shutdown handshake statements appear to be
     * missing from this copy (cf. the TmThreads flag protocol). */

    return NULL;
}
687 
688 #define MAX_HOSTBUFFER 4
689 #define MAX_STREAMS 256
690 #define HB_HIGHWATER 2048 //1982
691 
692 /**
693  * \brief Tests whether a particular stream_id is actively registered
694  *
695  * \param stream_id - ID of the stream to look up
696  * \param num_registered - The total number of registered streams
697  * \param registered_streams - An array containing actively registered streams.
698  *
699  * \return Bool indicating is the specified stream is registered.
700  *
701  */
702 static bool RegisteredStream(uint16_t stream_id, uint16_t num_registered,
703  NapatechStreamConfig registered_streams[])
704 {
705  for (uint16_t reg_id = 0; reg_id < num_registered; ++reg_id) {
706  if (stream_id == registered_streams[reg_id].stream_id) {
707  return true;
708  }
709  }
710  return false;
711 }
712 
713 /**
714  * \brief Count the number of worker threads defined in the conf file.
715  *
716  * \return - The number of worker threads defined by the configuration
717  */
718 static uint32_t CountWorkerThreads(void)
719 {
720  int worker_count = 0;
721 
722  ConfNode *affinity;
723  ConfNode *root = ConfGetNode("threading.cpu-affinity");
724 
725  if (root != NULL) {
726 
727  TAILQ_FOREACH(affinity, &root->head, next)
728  {
729  if (strcmp(affinity->val, "decode-cpu-set") == 0 ||
730  strcmp(affinity->val, "stream-cpu-set") == 0 ||
731  strcmp(affinity->val, "reject-cpu-set") == 0 ||
732  strcmp(affinity->val, "output-cpu-set") == 0) {
733  continue;
734  }
735 
736  if (strcmp(affinity->val, "worker-cpu-set") == 0) {
737  ConfNode *node = ConfNodeLookupChild(affinity->head.tqh_first, "cpu");
738  ConfNode *lnode;
739 
741 
742  TAILQ_FOREACH(lnode, &node->head, next)
743  {
744  uint8_t start, end;
745  char *end_str;
746  if (strncmp(lnode->val, "all", 4) == 0) {
747  /* check that the sting in the config file is correctly specified */
748  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
749  FatalError("Only one Napatech port specifier type allowed.");
750  }
751  cpu_spec = CONFIG_SPECIFIER_RANGE;
752  worker_count = UtilCpuGetNumProcessorsConfigured();
753  } else if ((end_str = strchr(lnode->val, '-'))) {
754  /* check that the sting in the config file is correctly specified */
755  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
756  FatalError("Only one Napatech port specifier type allowed.");
757  }
758  cpu_spec = CONFIG_SPECIFIER_RANGE;
759 
760 
761  if (StringParseUint8(&start, 10, end_str - lnode->val, (const char *)lnode->val) < 0) {
762  FatalError("Napatech invalid"
763  " worker range start: '%s'",
764  lnode->val);
765  }
766  if (StringParseUint8(&end, 10, 0, (const char *) (end_str + 1)) < 0) {
767  FatalError("Napatech invalid"
768  " worker range end: '%s'",
769  (end_str != NULL) ? (const char *)(end_str + 1) : "Null");
770  }
771  if (end < start) {
772  FatalError("Napatech invalid"
773  " worker range start: '%d' is greater than end: '%d'",
774  start, end);
775  }
776  worker_count = end - start + 1;
777 
778  } else {
779  /* check that the sting in the config file is correctly specified */
780  if (cpu_spec == CONFIG_SPECIFIER_RANGE) {
781  FatalError("Napatech port range specifiers cannot be combined with "
782  "individual stream specifiers.");
783  }
784  cpu_spec = CONFIG_SPECIFIER_INDIVIDUAL;
785  ++worker_count;
786  }
787  }
788  break;
789  }
790  }
791  }
792  return worker_count;
793 }
794 
795 /**
796  * \brief Reads and parses the stream configuration defined in the config file.
797  *
798  * \param stream_config - array to be filled in with active stream info.
799  *
800  * \return the number of streams configured or -1 if an error occurred
801  *
802  */
804 {
805  int status;
806  char error_buffer[80]; // Error buffer
807  NtStatStream_t hstat_stream;
808  NtStatistics_t hStat; // Stat handle.
809  NtInfoStream_t info_stream;
810  NtInfo_t info;
811  uint16_t instance_cnt = 0;
812  int use_all_streams = 0;
813  int set_cpu_affinity = 0;
814  ConfNode *ntstreams;
815  uint16_t stream_id = 0;
816  uint8_t start = 0;
817  uint8_t end = 0;
818 
819  for (uint16_t i = 0; i < MAX_STREAMS; ++i) {
820  stream_config[i].stream_id = 0;
821  stream_config[i].is_active = false;
822  stream_config[i].initialized = false;
823  }
824 
825  if (ConfGetBool("napatech.use-all-streams", &use_all_streams) == 0) {
826  /* default is "no" */
827  use_all_streams = 0;
828  }
829 
830  if ((status = NT_InfoOpen(&info_stream, "SuricataStreamInfo")) != NT_SUCCESS) {
831  NAPATECH_ERROR(status);
832  return -1;
833  }
834 
835  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
836  NAPATECH_ERROR(status);
837  return -1;
838  }
839 
840  if (use_all_streams) {
841  info.cmd = NT_INFO_CMD_READ_STREAM;
842  if ((status = NT_InfoRead(info_stream, &info)) != NT_SUCCESS) {
843  NAPATECH_ERROR(status);
844  return -1;
845  }
846 
847  while (instance_cnt < info.u.stream.data.count) {
848 
849  /*
850  * For each stream ID query the number of host-buffers used by
851  * the stream. If zero, then that streamID is not used; skip
852  * over it and continue until we get a streamID with a non-zero
853  * count of the host-buffers.
854  */
855  memset(&hStat, 0, sizeof (NtStatistics_t));
856 
857  /* Read usage data for the chosen stream ID */
858  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
859  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
860 
861  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
862  /* Get the status code as text */
863  NT_ExplainError(status, error_buffer, sizeof (error_buffer));
864  SCLogError("NT_StatRead() failed: %s\n", error_buffer);
865  return -1;
866  }
867 
868  if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
869  ++stream_id;
870  continue;
871  }
872 
873  /* if we get here it is an active stream */
874  stream_config[instance_cnt].stream_id = stream_id++;
875  stream_config[instance_cnt].is_active = true;
876  instance_cnt++;
877  }
878 
879  } else {
880  (void)ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity);
881  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 1)) {
882  start = 0;
883  end = CountWorkerThreads() - 1;
884  } else {
885  /* When not using the default streams we need to
886  * parse the array of streams from the conf */
887  if ((ntstreams = ConfGetNode("napatech.streams")) == NULL) {
888  SCLogError("Failed retrieving napatech.streams from Config");
889  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 0)) {
890  SCLogError("if set-cpu-affinity: no in conf then napatech.streams must be "
891  "defined");
892  }
893  exit(EXIT_FAILURE);
894  }
895 
896  /* Loop through all stream numbers in the array and register the devices */
897  ConfNode *stream;
898  enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;
899  instance_cnt = 0;
900 
901  TAILQ_FOREACH(stream, &ntstreams->head, next)
902  {
903 
904  if (stream == NULL) {
905  SCLogError("Couldn't Parse Stream Configuration");
906  return -1;
907  }
908 
909  char *end_str = strchr(stream->val, '-');
910  if (end_str) {
911  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
912  SCLogError("Only one Napatech stream range specifier allowed.");
913  return -1;
914  }
915  stream_spec = CONFIG_SPECIFIER_RANGE;
916 
917  if (StringParseUint8(&start, 10, end_str - stream->val,
918  (const char *)stream->val) < 0) {
919  FatalError("Napatech invalid "
920  "stream id start: '%s'",
921  stream->val);
922  }
923  if (StringParseUint8(&end, 10, 0, (const char *) (end_str + 1)) < 0) {
924  FatalError("Napatech invalid "
925  "stream id end: '%s'",
926  (end_str != NULL) ? (const char *)(end_str + 1) : "Null");
927  }
928  } else {
929  if (stream_spec == CONFIG_SPECIFIER_RANGE) {
930  FatalError("Napatech range and individual specifiers cannot be combined.");
931  }
932  stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;
933  if (StringParseUint8(&stream_config[instance_cnt].stream_id,
934  10, 0, (const char *)stream->val) < 0) {
935  FatalError("Napatech invalid "
936  "stream id: '%s'",
937  stream->val);
938  }
939  start = stream_config[instance_cnt].stream_id;
940  end = stream_config[instance_cnt].stream_id;
941  }
942  }
943  }
944 
945  for (stream_id = start; stream_id <= end; ++stream_id) {
946  /* if we get here it is configured in the .yaml file */
947  stream_config[instance_cnt].stream_id = stream_id;
948 
949  /* Check to see if it is an active stream */
950  memset(&hStat, 0, sizeof (NtStatistics_t));
951 
952  /* Read usage data for the chosen stream ID */
953  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
954  hStat.u.usageData_v0.streamid =
955  (uint8_t) stream_config[instance_cnt].stream_id;
956 
957  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
958  NAPATECH_ERROR(status);
959  return -1;
960  }
961 
962  if (hStat.u.usageData_v0.data.numHostBufferUsed > 0) {
963  stream_config[instance_cnt].is_active = true;
964  }
965  instance_cnt++;
966  }
967  }
968 
969  /* Close the statistics stream */
970  if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
971  NAPATECH_ERROR(status);
972  return -1;
973  }
974 
975  if ((status = NT_InfoClose(info_stream)) != NT_SUCCESS) {
976  NAPATECH_ERROR(status);
977  return -1;
978  }
979 
980  return instance_cnt;
981 }
982 
/**
 * \brief Thread loop that monitors Napatech buffer fill levels.
 *
 * Polls the adapter every 200 ms and, for every active stream that is
 * registered in the configuration, computes the average host-buffer (and,
 * when hardware bypass is not compiled in, on-board SDRAM) fill level
 * across the stream's host buffers.  Rising/falling crossings of 25%
 * thresholds (alertInterval) are reported via SCLogPerf.
 *
 * \param arg  ThreadVars* for this monitor thread (cast from void*).
 * \return NULL on thread exit.
 */
static void *NapatechBufMonitorLoop(void *arg)
{
    ThreadVars *tv = (ThreadVars *) arg;

    NtInfo_t hStreamInfo;
    NtStatistics_t hStat; // Stat handle.
    NtInfoStream_t hInfo;
    NtStatStream_t hstat_stream;
    int status; // Status variable

    /* Fill-level thresholds are tracked in 25% steps. */
    const uint32_t alertInterval = 25;

#ifndef NAPATECH_ENABLE_BYPASS
    /* On-board (SDRAM) buffer tracking is only done when bypass is not built in. */
    uint32_t OB_fill_level[MAX_STREAMS] = {0};
    uint32_t OB_alert_level[MAX_STREAMS] = {0};
    uint32_t ave_OB_fill_level[MAX_STREAMS] = {0};
#endif /* NAPATECH_ENABLE_BYPASS */

    /* Per-stream host-buffer fill tracking. */
    uint32_t HB_fill_level[MAX_STREAMS] = {0};
    uint32_t HB_alert_level[MAX_STREAMS] = {0};
    uint32_t ave_HB_fill_level[MAX_STREAMS] = {0};

    /* Open the info and Statistics */
    if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    /* Read the info on all streams instantiated in the system */
    hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
    if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    /* Only streams listed in the yaml are monitored. */
    NapatechStreamConfig registered_streams[MAX_STREAMS];
    int num_registered = NapatechGetStreamConfig(registered_streams);
    if (num_registered == -1) {
        exit(EXIT_FAILURE);
    }

    while (1) {
        if (TmThreadsCheckFlag(tv, THV_KILL)) {
            SCLogDebug("NapatechBufMonitorLoop THV_KILL detected");
            break;
        }

        /* Poll interval: 200 ms. */
        usleep(200000);

        /* Read the info on all streams instantiated in the system */
        hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
        if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
            NAPATECH_ERROR(status);
            exit(EXIT_FAILURE);
        }

        char pktCntStr[4096];
        memset(pktCntStr, 0, sizeof (pktCntStr));

        uint32_t stream_id = 0;
        uint32_t stream_cnt = 0;
        uint32_t num_streams = hStreamInfo.u.stream.data.count;

        for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {

            /* Advance stream_id past inactive ids (no host buffers in use)
             * until the next active stream is found.  The `continue` jumps
             * to the while-condition, which is then true, so the loop
             * re-reads with the incremented id. */
            do {

                /* Read usage data for the chosen stream ID */
                hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
                hStat.u.usageData_v0.streamid = (uint8_t) stream_id;

                if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
                    NAPATECH_ERROR(status);
                    exit(EXIT_FAILURE);
                }

                if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
                    ++stream_id;
                    continue;
                }
            } while (hStat.u.usageData_v0.data.numHostBufferUsed == 0);

            /* Only report on streams that appear in the configuration. */
            if (RegisteredStream(stream_id, num_registered, registered_streams)) {

#ifndef NAPATECH_ENABLE_BYPASS
                ave_OB_fill_level[stream_id] = 0;
#endif /* NAPATECH_ENABLE_BYPASS */

                ave_HB_fill_level[stream_id] = 0;

                for (uint32_t hb_count = 0; hb_count < hStat.u.usageData_v0.data.numHostBufferUsed; hb_count++) {

#ifndef NAPATECH_ENABLE_BYPASS
                    /* SDRAM fill level as a percentage, clamped to 100. */
                    OB_fill_level[hb_count] =
                            ((100 * hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.used) /
                            hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.size);

                    if (OB_fill_level[hb_count] > 100) {
                        OB_fill_level[hb_count] = 100;
                    }
#endif /* NAPATECH_ENABLE_BYPASS */
                    /* Effective buffer size in KB: everything queued minus the
                     * high-water reserve.
                     * NOTE(review): if the queued totals are at or below
                     * HB_HIGHWATER this can become zero (division below) —
                     * presumably the driver guarantees otherwise; confirm. */
                    uint32_t bufSize = hStat.u.usageData_v0.data.hb[hb_count].enQueuedAdapter / 1024
                            + hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024
                            + hStat.u.usageData_v0.data.hb[hb_count].enQueued / 1024
                            - HB_HIGHWATER;

                    HB_fill_level[hb_count] = (uint32_t)
                            ((100 * hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024) /
                            bufSize);

#ifndef NAPATECH_ENABLE_BYPASS
                    ave_OB_fill_level[stream_id] += OB_fill_level[hb_count];
#endif /* NAPATECH_ENABLE_BYPASS */

                    ave_HB_fill_level[stream_id] += HB_fill_level[hb_count];
                }

#ifndef NAPATECH_ENABLE_BYPASS
                ave_OB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;
#endif /* NAPATECH_ENABLE_BYPASS */

                ave_HB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;

                /* Host Buffer Fill Level warnings... */
                if (ave_HB_fill_level[stream_id] >= (HB_alert_level[stream_id] + alertInterval)) {

                    /* Ratchet the alert level up in 25% steps to just below
                     * the current average, so each threshold fires once. */
                    while (ave_HB_fill_level[stream_id] >= HB_alert_level[stream_id] + alertInterval) {
                        HB_alert_level[stream_id] += alertInterval;
                    }
                    SCLogPerf("nt%d - Increasing Host Buffer Fill Level : %4d%%",
                            stream_id, ave_HB_fill_level[stream_id] - 1);
                }

                if (HB_alert_level[stream_id] > 0) {
                    if ((ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval))) {
                        SCLogPerf("nt%d - Decreasing Host Buffer Fill Level: %4d%%",
                                stream_id, ave_HB_fill_level[stream_id]);

                        /* Ratchet the alert level back down, never below 0. */
                        while (ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval)) {
                            if ((HB_alert_level[stream_id]) > 0) {
                                HB_alert_level[stream_id] -= alertInterval;
                            } else break;
                        }
                    }
                }

#ifndef NAPATECH_ENABLE_BYPASS
                /* On Board SDRAM Fill Level warnings... */
                if (ave_OB_fill_level[stream_id] >= (OB_alert_level[stream_id] + alertInterval)) {
                    while (ave_OB_fill_level[stream_id] >= OB_alert_level[stream_id] + alertInterval) {
                        OB_alert_level[stream_id] += alertInterval;

                    }
                    SCLogPerf("nt%d - Increasing Adapter SDRAM Fill Level: %4d%%",
                            stream_id, ave_OB_fill_level[stream_id]);
                }

                if (OB_alert_level[stream_id] > 0) {
                    if ((ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval))) {
                        SCLogPerf("nt%d - Decreasing Adapter SDRAM Fill Level : %4d%%",
                                stream_id, ave_OB_fill_level[stream_id]);

                        while (ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval)) {
                            if ((OB_alert_level[stream_id]) > 0) {
                                OB_alert_level[stream_id] -= alertInterval;
                            } else break;
                        }
                    }
                }
#endif /* NAPATECH_ENABLE_BYPASS */
            }
            ++stream_id;
        }
    }

    if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    /* Close the statistics stream */
    if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    /* NOTE(review): message says "NapatechStatsLoop" but this is the buffer
     * monitor loop — looks copy-pasted from the stats thread; confirm. */
    SCLogDebug("Exiting NapatechStatsLoop");

    return NULL;
}
1182 
1183 
{
    /* Creates the Statistic threads */
    /* Spawn the stats thread; TmThreadCreate returns NULL on failure and
     * TmThreadSpawn returns non-zero on failure — both are fatal here. */
    ThreadVars *stats_tv = TmThreadCreate("NapatechStats",
            NULL, NULL,
            NULL, NULL,
            "custom", NapatechStatsLoop, 0);

    if (stats_tv == NULL) {
        FatalError("Error creating a thread for NapatechStats - Killing engine.");
    }

    if (TmThreadSpawn(stats_tv) != 0) {
        FatalError("Failed to spawn thread for NapatechStats - Killing engine.");
    }

#ifdef NAPATECH_ENABLE_BYPASS
    /* bypass_supported is probed elsewhere at init; just report it here. */
    if (bypass_supported) {
        SCLogInfo("Napatech bypass functionality enabled.");
    }
#endif /* NAPATECH_ENABLE_BYPASS */

    /* Spawn the host-buffer fill-level monitor thread. */
    ThreadVars *buf_monitor_tv = TmThreadCreate("NapatechBufMonitor",
            NULL, NULL,
            NULL, NULL,
            "custom", NapatechBufMonitorLoop, 0);

    if (buf_monitor_tv == NULL) {
        FatalError("Error creating a thread for NapatechBufMonitor - Killing engine.");
    }

    if (TmThreadSpawn(buf_monitor_tv) != 0) {
        FatalError("Failed to spawn thread for NapatechBufMonitor - Killing engine.");
    }


    return;
}
1222 
1223 bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
1224 {
1225  uint32_t status = 0;
1226  static NtConfigStream_t hconfig;
1227 
1228  char ntpl_cmd[64];
1229  snprintf(ntpl_cmd, 64, "setup[numanode=%d] = streamid == %d", numa, stream);
1230 
1231  NtNtplInfo_t ntpl_info;
1232 
1233  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1234  NAPATECH_ERROR(status);
1235  return false;
1236  }
1237 
1238  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1239  status = ntpl_info.ntplId;
1240 
1241  } else {
1242  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1243  return false;
1244  }
1245 
1246  return status;
1247 }
1248 
1249 static uint32_t NapatechSetHashmode(void)
1250 {
1251  uint32_t status = 0;
1252  const char *hash_mode;
1253  static NtConfigStream_t hconfig;
1254  char ntpl_cmd[64];
1255  NtNtplInfo_t ntpl_info;
1256 
1257  uint32_t filter_id = 0;
1258 
1259  /* Get the hashmode from the conf file. */
1260  ConfGet("napatech.hashmode", &hash_mode);
1261 
1262  snprintf(ntpl_cmd, 64, "hashmode = %s", hash_mode);
1263 
1264  /* Issue the NTPL command */
1265  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1266  NAPATECH_ERROR(status);
1267  return false;
1268  }
1269 
1270  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1271  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1272  filter_id = ntpl_info.ntplId;
1273  SCLogConfig("Napatech hashmode: %s ID: %d", hash_mode, status);
1274  } else {
1275  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1276  status = 0;
1277  }
1278 
1279  return filter_id;
1280 }
1281 
1282 static uint32_t GetStreamNUMAs(uint32_t stream_id, int stream_numas[])
1283 {
1284  NtStatistics_t hStat; // Stat handle.
1285  NtStatStream_t hstat_stream;
1286  int status; // Status variable
1287 
1288  for (int i = 0; i < MAX_HOSTBUFFERS; ++i)
1289  stream_numas[i] = -1;
1290 
1291  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
1292  NAPATECH_ERROR(status);
1293  exit(EXIT_FAILURE);
1294  }
1295 
1296  char pktCntStr[4096];
1297  memset(pktCntStr, 0, sizeof (pktCntStr));
1298 
1299 
1300  /* Read usage data for the chosen stream ID */
1301  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
1302  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
1303 
1304  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
1305  NAPATECH_ERROR(status);
1306  exit(EXIT_FAILURE);
1307  }
1308 
1309  for (uint32_t hb_id = 0; hb_id < hStat.u.usageData_v0.data.numHostBufferUsed; ++hb_id) {
1310  stream_numas[hb_id] = hStat.u.usageData_v0.data.hb[hb_id].numaNode;
1311  }
1312 
1313  return hStat.u.usageData_v0.data.numHostBufferUsed;
1314 }
1315 
1316 static int NapatechSetFilter(NtConfigStream_t hconfig, char *ntpl_cmd)
1317 {
1318  int status = 0;
1319  int local_filter_id = 0;
1320 
1321  NtNtplInfo_t ntpl_info;
1322  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1323  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1324  SCLogConfig("NTPL filter assignment \"%s\" returned filter id %4d",
1325  ntpl_cmd, local_filter_id);
1326  } else {
1327  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1328  exit(EXIT_FAILURE);
1329  }
1330 
1331  return local_filter_id;
1332 }
1333 
{
    uint32_t status = 0;
    static NtConfigStream_t hconfig;
    char ntpl_cmd[64];
    NtNtplInfo_t ntpl_info;

    /* A config stream is required to issue NTPL commands; failure to open
     * one is treated as fatal. */
    if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    /* "delete = all" removes every NTPL assignment on the adapter.  On
     * success the returned value is the NTPL id of the delete command;
     * on failure 0 is returned. */
    snprintf(ntpl_cmd, 64, "delete = all");
    if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
            NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
        status = ntpl_info.ntplId;
    } else {
        NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
        status = 0;
    }

    NT_ConfigClose(hconfig);

    return status;
}
1359 
1360 
1361 uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
1362 {
1363 #define PORTS_SPEC_SIZE 64
1364 
1365  struct ports_spec_s {
1366  uint8_t first[MAX_PORTS];
1367  uint8_t second[MAX_PORTS];
1368  bool all;
1369  char str[PORTS_SPEC_SIZE];
1370  } ports_spec;
1371 
1372  ports_spec.all = false;
1373 
1374  ConfNode *ntports;
1375  int iteration = 0;
1376  int status = 0;
1377  NtConfigStream_t hconfig;
1378  char ntpl_cmd[512];
1379  int is_inline = 0;
1380 #ifdef NAPATECH_ENABLE_BYPASS
1381  int is_span_port[MAX_PORTS] = { 0 };
1382 #endif
1383 
1384  char span_ports[128];
1385  memset(span_ports, 0, sizeof(span_ports));
1386 
1387  if (ConfGetBool("napatech.inline", &is_inline) == 0) {
1388  is_inline = 0;
1389  }
1390 
1391  NapatechSetHashmode();
1392 
1393  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1394  NAPATECH_ERROR(status);
1395  exit(EXIT_FAILURE);
1396  }
1397 
1398  if (first_stream == last_stream) {
1399  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1400  "Setup[state=inactive] = StreamId == %d",
1401  first_stream);
1402  } else {
1403  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1404  "Setup[state=inactive] = StreamId == (%d..%d)",
1405  first_stream, last_stream);
1406  }
1407  NapatechSetFilter(hconfig, ntpl_cmd);
1408 
1409 #ifdef NAPATECH_ENABLE_BYPASS
1410  if (NapatechUseHWBypass()) {
1411  SCLogInfo("Napatech Hardware Bypass enabled.");
1412  } else {
1413  SCLogInfo("Napatech Hardware Bypass available but disabled.");
1414  }
1415 #else
1416  if (NapatechUseHWBypass()) {
1417  SCLogInfo("Napatech Hardware Bypass requested in conf but is not available.");
1418  exit(EXIT_FAILURE);
1419  } else {
1420  SCLogInfo("Napatech Hardware Bypass disabled.");
1421  }
1422 #endif
1423 
1424  if (is_inline) {
1425  SCLogInfo("Napatech configured for inline mode.");
1426  } else {
1427 
1428  SCLogInfo("Napatech configured for passive (non-inline) mode.");
1429  }
1430 
1431  /* When not using the default streams we need to parse
1432  * the array of streams from the conf
1433  */
1434  if ((ntports = ConfGetNode("napatech.ports")) == NULL) {
1435  FatalError("Failed retrieving napatech.ports from Conf");
1436  }
1437 
1438  /* Loop through all ports in the array */
1439  ConfNode *port;
1440  enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;
1441 
1442  if (NapatechUseHWBypass()) {
1443  SCLogInfo("Listening on the following Napatech ports:");
1444  }
1445  /* Build the NTPL command using values in the config file. */
1446  TAILQ_FOREACH(port, &ntports->head, next)
1447  {
1448  if (port == NULL) {
1449  FatalError("Couldn't Parse Port Configuration");
1450  }
1451 
1452  if (NapatechUseHWBypass()) {
1453 #ifdef NAPATECH_ENABLE_BYPASS
1454  if (strchr(port->val, '-')) {
1455  stream_spec = CONFIG_SPECIFIER_RANGE;
1456 
1457  ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val);
1458  ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0, strchr(port->val, '-')+1);
1459 
1460  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1461  if (is_inline) {
1462  FatalError("Error with napatec.ports in conf file. When running in inline "
1463  "mode the two ports specifying a segment must be different.");
1464  } else {
1465  /* SPAN port configuration */
1466  is_span_port[ports_spec.first[iteration]] = 1;
1467 
1468  if (strlen(span_ports) == 0) {
1469  snprintf(span_ports, sizeof (span_ports), "%d", ports_spec.first[iteration]);
1470  } else {
1471  char temp[16];
1472  snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
1473  strlcat(span_ports, temp, sizeof(span_ports));
1474  }
1475 
1476  }
1477  }
1478 
1479  if (NapatechGetAdapter(ports_spec.first[iteration]) != NapatechGetAdapter(ports_spec.first[iteration])) {
1480  SCLogError("Invalid napatech.ports specification in conf file.");
1481  SCLogError("Two ports on a segment must reside on the same adapter. port %d "
1482  "is on adapter %d, port %d is on adapter %d.",
1483  ports_spec.first[iteration],
1484  NapatechGetAdapter(ports_spec.first[iteration]),
1485  ports_spec.second[iteration],
1486  NapatechGetAdapter(ports_spec.second[iteration]));
1487  exit(EXIT_FAILURE);
1488  }
1489 
1490  NapatechSetPortmap(ports_spec.first[iteration], ports_spec.second[iteration]);
1491  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1492  SCLogInfo(" span_port: %d", ports_spec.first[iteration]);
1493  } else {
1494  SCLogInfo(" %s: %d - %d", is_inline ? "inline_ports" : "tap_ports", ports_spec.first[iteration], ports_spec.second[iteration]);
1495  }
1496 
1497  if (iteration == 0) {
1498  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1499  snprintf(ports_spec.str, sizeof (ports_spec.str), "%d", ports_spec.first[iteration]);
1500  } else {
1501  snprintf(ports_spec.str, sizeof (ports_spec.str), "%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
1502  }
1503  } else {
1504  char temp[16];
1505  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1506  snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
1507  } else {
1508  snprintf(temp, sizeof(temp), ",%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
1509  }
1510  strlcat(ports_spec.str, temp, sizeof(ports_spec.str));
1511  }
1512  } else {
1513  FatalError("When using hardware flow bypass ports must be specified as segments. "
1514  "E.g. ports: [0-1, 0-2]");
1515  }
1516 #endif
1517  } else { // !NapatechUseHWBypass()
1518  if (strncmp(port->val, "all", 3) == 0) {
1519  /* check that the sting in the config file is correctly specified */
1520  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
1521  FatalError("Only one Napatech port specifier type is allowed.");
1522  }
1523  stream_spec = CONFIG_SPECIFIER_RANGE;
1524 
1525  ports_spec.all = true;
1526  snprintf(ports_spec.str, sizeof (ports_spec.str), "all");
1527  } else if (strchr(port->val, '-')) {
1528  /* check that the sting in the config file is correctly specified */
1529  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
1530  FatalError("Only one Napatech port specifier is allowed when hardware bypass "
1531  "is disabled. (E.g. ports: [0-4], NOT ports: [0-1,2-3])");
1532  }
1533  stream_spec = CONFIG_SPECIFIER_RANGE;
1534 
1535  ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val);
1536  ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0, strchr(port->val, '-') + 1);
1537  snprintf(ports_spec.str, sizeof (ports_spec.str), "(%d..%d)", ports_spec.first[iteration], ports_spec.second[iteration]);
1538  } else {
1539  /* check that the sting in the config file is correctly specified */
1540  if (stream_spec == CONFIG_SPECIFIER_RANGE) {
1541  FatalError("Napatech port range specifiers cannot be combined with individual "
1542  "stream specifiers.");
1543  }
1544  stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;
1545 
1546  ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val);
1547 
1548  /* Determine the ports to use on the NTPL assign statement*/
1549  if (iteration == 0) {
1550  snprintf(ports_spec.str, sizeof (ports_spec.str), "%s", port->val);
1551  } else {
1552  strlcat(ports_spec.str, ",", sizeof(ports_spec.str));
1553  strlcat(ports_spec.str, port->val, sizeof(ports_spec.str));
1554  }
1555  }
1556  } // if !NapatechUseHWBypass()
1557  ++iteration;
1558  } /* TAILQ_FOREACH */
1559 
1560 #ifdef NAPATECH_ENABLE_BYPASS
1561  if (bypass_supported) {
1562  if (is_inline) {
1563  char inline_setup_cmd[512];
1564  if (first_stream == last_stream) {
1565  snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
1566  "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == %d",
1567  ports_spec.str, first_stream);
1568  } else {
1569  snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
1570  "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == (%d..%d)",
1571  ports_spec.str, first_stream, last_stream);
1572  }
1573  NapatechSetFilter(hconfig, inline_setup_cmd);
1574  }
1575  /* Build the NTPL command */
1576  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1577  "assign[priority=3;streamid=(%d..%d);colormask=0x10000000;"
1578  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s",
1579  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1580  NapatechSetFilter(hconfig, ntpl_cmd);
1581 
1582 
1583  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1584  "assign[priority=2;streamid=(%d..%d);colormask=0x11000000;"
1585  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
1586  "]= %s%s and (Layer3Protocol==IPV4)",
1587  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1588  NapatechSetFilter(hconfig, ntpl_cmd);
1589 
1590 
1591  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1592  "assign[priority=2;streamid=(%d..%d);colormask=0x14000000;"
1593  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer3Protocol==IPV6)",
1594  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1595  NapatechSetFilter(hconfig, ntpl_cmd);
1596 
1597  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1598  "assign[priority=2;streamid=(%d..%d);colormask=0x10100000;"
1599  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer4Protocol==TCP)",
1600  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1601  NapatechSetFilter(hconfig, ntpl_cmd);
1602 
1603  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1604  "assign[priority=2;streamid=(%d..%d);colormask=0x10200000;"
1605  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
1606  "]= %s%s and (Layer4Protocol==UDP)",
1607  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1608  NapatechSetFilter(hconfig, ntpl_cmd);
1609 
1610  if (strlen(span_ports) > 0) {
1611  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1612  "assign[priority=2;streamid=(%d..%d);colormask=0x00001000;"
1613  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
1614  "]= port==%s",
1615  first_stream, last_stream, span_ports);
1616  NapatechSetFilter(hconfig, ntpl_cmd);
1617  }
1618 
1619  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1620  "KeyType[name=KT%u]={sw_32_32,sw_16_16}",
1621  NAPATECH_KEYTYPE_IPV4);
1622  NapatechSetFilter(hconfig, ntpl_cmd);
1623 
1624  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1625  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[12]/32/32,Layer4Header[0]/16/16)",
1626  NAPATECH_KEYTYPE_IPV4, NAPATECH_KEYTYPE_IPV4);
1627  NapatechSetFilter(hconfig, ntpl_cmd);
1628 
1629  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1630  "KeyType[name=KT%u]={32,32,16,16}",
1631  NAPATECH_KEYTYPE_IPV4_SPAN);
1632  NapatechSetFilter(hconfig, ntpl_cmd);
1633 
1634  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1635  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[12]/32,Layer3Header[16]/32,Layer4Header[0]/16,Layer4Header[2]/16)",
1636  NAPATECH_KEYTYPE_IPV4_SPAN, NAPATECH_KEYTYPE_IPV4_SPAN);
1637  NapatechSetFilter(hconfig, ntpl_cmd);
1638 
1639  /* IPv6 5tuple for inline and tap ports */
1640  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1641  "KeyType[name=KT%u]={sw_128_128,sw_16_16}",
1642  NAPATECH_KEYTYPE_IPV6);
1643  NapatechSetFilter(hconfig, ntpl_cmd);
1644 
1645  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1646  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[8]/128/128,Layer4Header[0]/16/16)",
1647  NAPATECH_KEYTYPE_IPV6, NAPATECH_KEYTYPE_IPV6);
1648  NapatechSetFilter(hconfig, ntpl_cmd);
1649 
1650  /* IPv6 5tuple for SPAN Ports */
1651  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1652  "KeyType[name=KT%u]={128,128,16,16}",
1653  NAPATECH_KEYTYPE_IPV6_SPAN);
1654  NapatechSetFilter(hconfig, ntpl_cmd);
1655 
1656  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1657  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[8]/128,Layer3Header[24]/128,Layer4Header[0]/16,Layer4Header[2]/16)",
1658  NAPATECH_KEYTYPE_IPV6_SPAN, NAPATECH_KEYTYPE_IPV6_SPAN);
1659  NapatechSetFilter(hconfig, ntpl_cmd);
1660 
1661 
1662  int pair;
1663  char ports_ntpl_a[64];
1664  char ports_ntpl_b[64];
1665  memset(ports_ntpl_a, 0, sizeof(ports_ntpl_a));
1666  memset(ports_ntpl_b, 0, sizeof(ports_ntpl_b));
1667 
1668  for (pair = 0; pair < iteration; ++pair) {
1669  char port_str[8];
1670 
1671  if (!is_span_port[ports_spec.first[pair]]) {
1672  snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_a) == 0 ? "" : ",", ports_spec.first[pair]);
1673  strlcat(ports_ntpl_a, port_str, sizeof(ports_ntpl_a));
1674 
1675  snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_b) == 0 ? "" : ",", ports_spec.second[pair]);
1676  strlcat(ports_ntpl_b, port_str, sizeof(ports_ntpl_b));
1677  }
1678  }
1679 
1680  if (strlen(ports_ntpl_a) > 0) {
1681  /* This is the assign for dropping upstream traffic */
1682  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1683  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1684  ports_ntpl_a,
1685  NAPATECH_KEYTYPE_IPV4,
1686  NAPATECH_KEYTYPE_IPV4,
1687  NAPATECH_FLOWTYPE_DROP);
1688  NapatechSetFilter(hconfig, ntpl_cmd);
1689  }
1690 
1691  if (strlen(ports_ntpl_b) > 0) {
1692  /* This is the assign for dropping downstream traffic */
1693  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1694  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1695  ports_ntpl_b, //ports_spec.str,
1696  NAPATECH_KEYTYPE_IPV4,
1697  NAPATECH_KEYTYPE_IPV4,
1698  NAPATECH_FLOWTYPE_DROP);
1699  NapatechSetFilter(hconfig, ntpl_cmd);
1700  }
1701 
1702  if (strlen(span_ports) > 0) {
1703  /* This is the assign for dropping SPAN Port traffic */
1704  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1705  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1706  span_ports,
1707  NAPATECH_KEYTYPE_IPV4_SPAN,
1708  NAPATECH_KEYTYPE_IPV4_SPAN,
1709  NAPATECH_FLOWTYPE_DROP);
1710  NapatechSetFilter(hconfig, ntpl_cmd);
1711  }
1712 
1713  if (is_inline) {
1714  for (pair = 0; pair < iteration; ++pair) {
1715  /* This is the assignment for forwarding traffic */
1716  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1717  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u)==%u)",
1718  ports_spec.second[pair],
1719  ports_spec.first[pair],
1720  NAPATECH_KEYTYPE_IPV4,
1721  NAPATECH_KEYTYPE_IPV4,
1722  NAPATECH_FLOWTYPE_PASS);
1723  NapatechSetFilter(hconfig, ntpl_cmd);
1724 
1725  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1726  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1727  ports_spec.first[pair],
1728  ports_spec.second[pair],
1729  NAPATECH_KEYTYPE_IPV4,
1730  NAPATECH_KEYTYPE_IPV4,
1731  NAPATECH_FLOWTYPE_PASS);
1732  NapatechSetFilter(hconfig, ntpl_cmd);
1733  }
1734  }
1735 
1736  if (strlen(ports_ntpl_a) > 0) {
1737  /* This is the assign for dropping upstream traffic */
1738  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1739  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1740  ports_ntpl_a,
1741  NAPATECH_KEYTYPE_IPV6,
1742  NAPATECH_KEYTYPE_IPV6,
1743  NAPATECH_FLOWTYPE_DROP);
1744  NapatechSetFilter(hconfig, ntpl_cmd);
1745  }
1746 
1747  if (strlen(ports_ntpl_b) > 0) {
1748  /* This is the assign for dropping downstream traffic */
1749  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1750  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1751  ports_ntpl_b, //ports_spec.str,
1752  NAPATECH_KEYTYPE_IPV6,
1753  NAPATECH_KEYTYPE_IPV6,
1754  NAPATECH_FLOWTYPE_DROP);
1755  NapatechSetFilter(hconfig, ntpl_cmd);
1756  }
1757 
1758  if (strlen(span_ports) > 0) {
1759  /* This is the assign for dropping SPAN Port traffic */
1760  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1761  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1762  span_ports,
1763  NAPATECH_KEYTYPE_IPV6_SPAN,
1764  NAPATECH_KEYTYPE_IPV6_SPAN,
1765  NAPATECH_FLOWTYPE_DROP);
1766  NapatechSetFilter(hconfig, ntpl_cmd);
1767  }
1768 
1769  if (is_inline) {
1770  for (pair = 0; pair < iteration; ++pair) {
1771  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1772  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u)==%u)",
1773  ports_spec.second[pair],
1774  ports_spec.first[pair],
1775  NAPATECH_KEYTYPE_IPV6,
1776  NAPATECH_KEYTYPE_IPV6,
1777  NAPATECH_FLOWTYPE_PASS);
1778  NapatechSetFilter(hconfig, ntpl_cmd);
1779 
1780  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1781  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1782  ports_spec.first[pair],
1783  ports_spec.second[pair],
1784  NAPATECH_KEYTYPE_IPV6,
1785  NAPATECH_KEYTYPE_IPV6,
1786  NAPATECH_FLOWTYPE_PASS);
1787  NapatechSetFilter(hconfig, ntpl_cmd);
1788  }
1789  }
1790  } else {
1791  if (is_inline) {
1792  FatalError("Napatech Inline operation not supported by this FPGA version.");
1793  }
1794 
1796  snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d);colormask=0x0] = %s%s",
1797  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1798  NapatechSetFilter(hconfig, ntpl_cmd);
1799  }
1800  }
1801 
1802 #else /* NAPATECH_ENABLE_BYPASS */
1803  snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d)] = %s%s",
1804  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1805  NapatechSetFilter(hconfig, ntpl_cmd);
1806 
1807 #endif /* !NAPATECH_ENABLE_BYPASS */
1808 
1809  SCLogConfig("Host-buffer NUMA assignments: ");
1810  int numa_nodes[MAX_HOSTBUFFERS];
1811  uint32_t stream_id;
1812  for (stream_id = first_stream; stream_id < last_stream; ++stream_id) {
1813  char temp1[256];
1814  char temp2[256];
1815 
1816  uint32_t num_host_buffers = GetStreamNUMAs(stream_id, numa_nodes);
1817 
1818  snprintf(temp1, 256, " stream %d: ", stream_id);
1819 
1820  for (uint32_t hb_id = 0; hb_id < num_host_buffers; ++hb_id) {
1821  snprintf(temp2, 256, "%d ", numa_nodes[hb_id]);
1822  strlcat(temp1, temp2, sizeof(temp1));
1823  }
1824 
1825  SCLogConfig("%s", temp1);
1826  }
1827 
1828  if (first_stream == last_stream) {
1829  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1830  "Setup[state=active] = StreamId == %d",
1831  first_stream);
1832  } else {
1833  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1834  "Setup[state=active] = StreamId == (%d..%d)",
1835  first_stream, last_stream);
1836  }
1837  NapatechSetFilter(hconfig, ntpl_cmd);
1838 
1839  NT_ConfigClose(hconfig);
1840 
1841  return status;
1842 }
1843 
1844 #endif // HAVE_NAPATECH
util-byte.h
PORTS_SPEC_SIZE
#define PORTS_SPEC_SIZE
tm-threads.h
CONFIG_SPECIFIER_UNDEFINED
@ CONFIG_SPECIFIER_UNDEFINED
Definition: util-napatech.c:202
TmThreadSpawn
TmEcode TmThreadSpawn(ThreadVars *tv)
Spawns a thread associated with the ThreadVars instance tv.
Definition: tm-threads.c:1646
NapatechGetCurrentStats
NapatechCurrentStats NapatechGetCurrentStats(uint16_t id)
Definition: util-napatech.c:195
CONFIG_SPECIFIER_RANGE
@ CONFIG_SPECIFIER_RANGE
Definition: util-napatech.c:203
ConfNode_::val
char * val
Definition: conf.h:34
ConfGetBool
int ConfGetBool(const char *name, int *val)
Retrieve a configuration value as an boolen.
Definition: conf.c:482
NapatechSetPortmap
int NapatechSetPortmap(int port, int peer)
unlikely
#define unlikely(expr)
Definition: util-optimize.h:35
SCLogDebug
#define SCLogDebug(...)
Definition: util-debug.h:269
TmThreadsSetFlag
void TmThreadsSetFlag(ThreadVars *tv, uint32_t flag)
Set a thread flag.
Definition: tm-threads.c:99
TmThreadWaitForFlag
void TmThreadWaitForFlag(ThreadVars *tv, uint32_t flags)
Waits till the specified flag(s) is(are) set. We don't bother if the kill flag has been set or not on...
Definition: tm-threads.c:1765
next
struct HtpBodyChunk_ * next
Definition: app-layer-htp.h:0
THV_DEINIT
#define THV_DEINIT
Definition: threadvars.h:44
ConfGetNode
ConfNode * ConfGetNode(const char *name)
Get a ConfNode by name.
Definition: conf.c:181
UtilCpuGetNumProcessorsConfigured
uint16_t UtilCpuGetNumProcessorsConfigured(void)
Get the number of cpus configured in the system.
Definition: util-cpu.c:59
StatsSetUI64
void StatsSetUI64(ThreadVars *tv, uint16_t id, uint64_t x)
Sets an unsigned 64-bit integer value on the local counter.
Definition: counters.c:210
NapatechDeleteFilters
uint32_t NapatechDeleteFilters(void)
Definition: util-napatech.c:1334
TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)
Definition: queue.h:252
StatsSetupPrivate
int StatsSetupPrivate(ThreadVars *tv)
Definition: counters.c:1210
StatsSyncCountersIfSignalled
#define StatsSyncCountersIfSignalled(tv)
Definition: counters.h:140
NapatechIsAutoConfigEnabled
bool NapatechIsAutoConfigEnabled(void)
Definition: runmode-napatech.c:70
PacketCounters_::byte
uint16_t byte
Definition: util-napatech.c:187
stream_config
TcpStreamCnf stream_config
Definition: stream-tcp.c:115
current_stats
NapatechCurrentStats current_stats[MAX_STREAMS]
Definition: util-napatech.c:193
NapatechStreamConfig_
Definition: util-napatech.h:43
THV_RUNNING_DONE
#define THV_RUNNING_DONE
Definition: threadvars.h:45
NapatechGetStreamConfig
int NapatechGetStreamConfig(NapatechStreamConfig stream_config[])
Reads and parses the stream configuration defined in the config file.
Definition: util-napatech.c:803
ConfGet
int ConfGet(const char *name, const char **vptr)
Retrieve the value of a configuration node.
Definition: conf.c:335
StringParseUint8
int StringParseUint8(uint8_t *res, int base, size_t len, const char *str)
Definition: util-byte.c:361
total_stats
NapatechCurrentStats total_stats
Definition: util-napatech.c:192
util-device.h
NapatechCurrentStats_::current_packets
uint64_t current_packets
Definition: util-napatech.h:51
strlcat
size_t strlcat(char *, const char *src, size_t siz)
Definition: util-strlcatu.c:45
util-cpu.h
source-napatech.h
ThreadVars_
Per thread variable structure.
Definition: threadvars.h:57
PacketCounters
struct PacketCounters_ PacketCounters
THV_KILL
#define THV_KILL
Definition: threadvars.h:39
CONFIG_SPECIFIER_INDIVIDUAL
@ CONFIG_SPECIFIER_INDIVIDUAL
Definition: util-napatech.c:204
TmThreadCreate
ThreadVars * TmThreadCreate(const char *name, const char *inq_name, const char *inqh_name, const char *outq_name, const char *outqh_name, const char *slots, void *(*fn_p)(void *), int mucond)
Creates and returns the TV instance for a new thread.
Definition: tm-threads.c:902
NAPATECH_NTPL_ERROR
#define NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status)
Definition: util-napatech.h:72
SCLogWarning
#define SCLogWarning(...)
Macro used to log WARNING messages.
Definition: util-debug.h:249
NapatechGetAdapter
int NapatechGetAdapter(uint8_t port)
util-napatech.h
PacketCounters_::pkts
uint16_t pkts
Definition: util-napatech.c:186
PacketCounters_::drop_pkts
uint16_t drop_pkts
Definition: util-napatech.c:188
NapatechCurrentStats_::current_bytes
uint64_t current_bytes
Definition: util-napatech.h:52
HB_HIGHWATER
#define HB_HIGHWATER
Definition: util-napatech.c:690
PacketCounters_::drop_byte
uint16_t drop_byte
Definition: util-napatech.c:189
SCLogInfo
#define SCLogInfo(...)
Macro used to log INFORMATIONAL messages.
Definition: util-debug.h:224
ConfNodeLookupChild
ConfNode * ConfNodeLookupChild(const ConfNode *node, const char *name)
Lookup a child configuration node by name.
Definition: conf.c:785
THV_INIT_DONE
#define THV_INIT_DONE
Definition: threadvars.h:36
CONFIG_SPECIFIER
CONFIG_SPECIFIER
Definition: util-napatech.c:201
ByteExtractStringUint8
int ByteExtractStringUint8(uint8_t *res, int base, size_t len, const char *str)
Definition: util-byte.c:285
suricata-common.h
SCLogPerf
#define SCLogPerf(...)
Definition: util-debug.h:230
NapatechStartStats
void NapatechStartStats(void)
Definition: util-napatech.c:1184
FatalError
#define FatalError(...)
Definition: util-debug.h:502
MAX_HOSTBUFFERS
#define MAX_HOSTBUFFERS
Definition: util-napatech.c:207
tv
ThreadVars * tv
Definition: fuzz_decodepcapfile.c:32
threadvars.h
NapatechUseHWBypass
bool NapatechUseHWBypass(void)
Definition: runmode-napatech.c:75
NapatechCurrentStats_::current_drop_bytes
uint64_t current_drop_bytes
Definition: util-napatech.h:54
SCLogConfig
struct SCLogConfig_ SCLogConfig
Holds the config state used by the logging api.
str
#define str(s)
Definition: suricata-common.h:280
SCLogError
#define SCLogError(...)
Macro used to log ERROR messages.
Definition: util-debug.h:261
MAX_PORTS
#define MAX_PORTS
Definition: util-napatech.h:59
ConfNode_
Definition: conf.h:32
NapatechCurrentStats_::current_drop_packets
uint64_t current_drop_packets
Definition: util-napatech.h:53
suricata.h
NapatechSetupTraffic
uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
Definition: util-napatech.c:1361
MAX_STREAMS
#define MAX_STREAMS
Definition: util-napatech.c:689
NapatechCurrentStats_
Definition: util-napatech.h:50
PacketCounters_
Definition: util-napatech.c:185
TmThreadsCheckFlag
int TmThreadsCheckFlag(ThreadVars *tv, uint32_t flag)
Check if a thread flag is set.
Definition: tm-threads.c:91
NAPATECH_ERROR
#define NAPATECH_ERROR(status)
Definition: util-napatech.h:65
StatsRegisterCounter
uint16_t StatsRegisterCounter(const char *name, struct ThreadVars_ *tv)
Registers a normal, unqualified counter.
Definition: counters.c:955
THV_CLOSED
#define THV_CLOSED
Definition: threadvars.h:41
SCCalloc
#define SCCalloc(nm, sz)
Definition: util-mem.h:53
NapatechSetupNuma
bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
Definition: util-napatech.c:1223