suricata
util-napatech.c
Go to the documentation of this file.
1 /* Copyright (C) 2017-2021 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 /**
18  * \file
19  *
20  * \author Napatech Inc.
21  * \author Phil Young <py@napatech.com>
22  *
23  *
24  */
25 #include "suricata-common.h"
26 
27 #ifdef HAVE_NAPATECH
28 #include "suricata.h"
29 #include "util-device.h"
30 #include "util-cpu.h"
31 #include "util-byte.h"
32 #include "threadvars.h"
33 #include "tm-threads.h"
34 #include "util-napatech.h"
35 #include "source-napatech.h"
36 #include "runmode-napatech.h"
37 
38 #ifdef NAPATECH_ENABLE_BYPASS
39 
/*
 * Counter IDs (as returned by StatsRegisterCounter()) used to track the
 * number of flows programmed on the adapter.
 */
typedef struct FlowStatsCounters_
{
    uint16_t active_bypass_flows; /* counter ID: flows currently programmed (learned - unlearned) */
    uint16_t total_bypass_flows;  /* counter ID: cumulative count of flows programmed */
} FlowStatsCounters;
49 
50 
/* Set by NapatechVerifyBypassSupport(): non-zero once every adapter has
 * accepted a FlowStream open, i.e. HW bypass can actually be used. */
static int bypass_supported;

/**
 * \brief Reports whether Napatech hardware bypass is available.
 *
 * \return non-zero if bypass is supported, as determined by a prior call
 *         to NapatechVerifyBypassSupport(); zero otherwise.
 */
int NapatechIsBypassSupported(void)
{
    return bypass_supported;
}
56 
57 /**
58  * \brief Returns the number of Napatech Adapters in the system.
59  *
60  * \return count of the Napatech adapters present in the system.
61  */
62 int NapatechGetNumAdapters(void)
63 {
64  NtInfoStream_t hInfo;
65  NtInfo_t hInfoSys;
66  int status;
67  static int num_adapters = -1;
68 
69  if (num_adapters == -1) {
70  if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
71  NAPATECH_ERROR(status);
72  exit(EXIT_FAILURE);
73  }
74 
75  hInfoSys.cmd = NT_INFO_CMD_READ_SYSTEM;
76  if ((status = NT_InfoRead(hInfo, &hInfoSys)) != NT_SUCCESS) {
77  NAPATECH_ERROR(status);
78  exit(EXIT_FAILURE);
79  }
80 
81  num_adapters = hInfoSys.u.system.data.numAdapters;
82 
83  NT_InfoClose(hInfo);
84  }
85 
86  return num_adapters;
87 }
88 
89 /**
90  * \brief Verifies that the Napatech adapters support bypass.
91  *
92  * Attempts to opens a FlowStream on each adapter present in the system.
93  * If successful then bypass is supported
94  *
95  * \return 1 if Bypass functionality is supported; zero otherwise.
96  */
97 int NapatechVerifyBypassSupport(void)
98 {
99  int status;
100  int adapter = 0;
101  int num_adapters = NapatechGetNumAdapters();
102  SCLogInfo("Found %d Napatech adapters.", num_adapters);
103  NtFlowStream_t hFlowStream;
104 
105  if (!NapatechUseHWBypass()) {
106  /* HW Bypass is disabled in the conf file */
107  return 0;
108  }
109 
110  for (adapter = 0; adapter < num_adapters; ++adapter) {
111  NtFlowAttr_t attr;
112  char flow_name[80];
113 
114  NT_FlowOpenAttrInit(&attr);
115  NT_FlowOpenAttrSetAdapterNo(&attr, adapter);
116 
117  snprintf(flow_name, sizeof(flow_name), "Flow stream %d", adapter );
118  SCLogInfo("Opening flow programming stream: %s\n", flow_name);
119  if ((status = NT_FlowOpen_Attr(&hFlowStream, flow_name, &attr)) != NT_SUCCESS) {
120  SCLogWarning("Napatech bypass functionality not supported by the FPGA version on "
121  "adapter %d - disabling support.",
122  adapter);
123  bypass_supported = 0;
124  return 0;
125  }
126  NT_FlowClose(hFlowStream);
127  }
128 
129  bypass_supported = 1;
130  return bypass_supported;
131 }
132 
133 
134 /**
135  * \brief Updates statistic counters for Napatech FlowStats
136  *
137  * \param tv Thread variable to ThreadVars
138  * \param hInfo Handle to the Napatech InfoStream.
139  * \param hstat_stream Handle to the Napatech Statistics Stream.
140  * \param flow_counters The flow counters statistics to update.
141  * \param clear_stats Indicates if statistics on the card should be reset to zero.
142  *
143  */
144 static void UpdateFlowStats(
145  ThreadVars *tv,
146  NtInfoStream_t hInfo,
147  NtStatStream_t hstat_stream,
148  FlowStatsCounters flow_counters,
149  int clear_stats
150  )
151 {
152  NtStatistics_t hStat;
153  int status;
154 
155  uint64_t programed = 0;
156  uint64_t removed = 0;
157  int adapter = 0;
158 
159  for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
160  hStat.cmd = NT_STATISTICS_READ_CMD_FLOW_V0;
161  hStat.u.flowData_v0.clear = clear_stats;
162  hStat.u.flowData_v0.adapterNo = adapter;
163  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
164  NAPATECH_ERROR(status);
165  exit(1);
166  }
167  programed = hStat.u.flowData_v0.learnDone;
168  removed = hStat.u.flowData_v0.unlearnDone
169  + hStat.u.flowData_v0.automaticUnlearnDone
170  + hStat.u.flowData_v0.timeoutUnlearnDone;
171  }
172 
173  StatsSetUI64(tv, flow_counters.active_bypass_flows, programed - removed);
174  StatsSetUI64(tv, flow_counters.total_bypass_flows, programed);
175 }
176 
177 #endif /* NAPATECH_ENABLE_BYPASS */
178 
179 
180 /*-----------------------------------------------------------------------------
181  *-----------------------------------------------------------------------------
182  * Statistics code
183  *-----------------------------------------------------------------------------
184  */
185 typedef struct PacketCounters_
186 {
187  uint16_t pkts;
188  uint16_t byte;
189  uint16_t drop_pkts;
190  uint16_t drop_byte;
192 
195 
197 {
198 
199  return current_stats[id];
200 }
201 
206 };
207 
208 #define MAX_HOSTBUFFERS 8
209 
210 /**
211  * \brief Test to see if any of the configured streams are active
212  *
213  * \param hInfo Handle to Napatech Info Stream.
214  * \param hStatsStream Handle to Napatech Statistics stream
215  * \param stream_config array of stream configuration structures
216  * \param num_inst
217  *
218  */
219 static uint16_t TestStreamConfig(
220  NtInfoStream_t hInfo,
221  NtStatStream_t hstat_stream,
223  uint16_t num_inst)
224 {
225  uint16_t num_active = 0;
226 
227  for (uint16_t inst = 0; inst < num_inst; ++inst) {
228  int status;
229  NtStatistics_t stat; // Stat handle.
230 
231  /* Check to see if it is an active stream */
232  memset(&stat, 0, sizeof (NtStatistics_t));
233 
234  /* Read usage data for the chosen stream ID */
235  stat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
236  stat.u.usageData_v0.streamid = (uint8_t) stream_config[inst].stream_id;
237 
238  if ((status = NT_StatRead(hstat_stream, &stat)) != NT_SUCCESS) {
239  NAPATECH_ERROR(status);
240  return 0;
241  }
242 
243  if (stat.u.usageData_v0.data.numHostBufferUsed > 0) {
244  stream_config[inst].is_active = true;
245  num_active++;
246  } else {
247  stream_config[inst].is_active = false;
248  }
249  }
250 
251  return num_active;
252 }
253 
/**
 * \brief Updates Napatech packet counters
 *
 * \param tv Pointer to ThreadVars structure
 * \param hInfo Handle to Napatech Info Stream.
 * \param hstat_stream Handle to Napatech Statistics stream
 * \param num_streams the number of streams that are currently active
 * \param stream_config array of stream configuration structures
 * \param total_counters - cumulative count of all packets received.
 * \param dispatch_host, - Count of packets that were delivered to the host buffer
 * \param dispatch_drop - count of packets that were dropped as a result of a rule
 * \param dispatch_fwd - count of packets forwarded out the egress port as the result of a rule
 * \param is_inline - are we running in inline mode?
 * \param enable_stream_stats - are per thread/stream statistics enabled.
 * \param stream_counters - counters for each thread/stream configured.
 *
 * \return The number of active streams that were updated.
 *
 */
static uint32_t UpdateStreamStats(ThreadVars *tv,
        NtInfoStream_t hInfo,
        NtStatStream_t hstat_stream,
        uint16_t num_streams,
        /* NOTE(review): a parameter line is missing from this copy of the
         * file -- "stream_config" is used throughout the body and the call
         * sites pass it between num_streams and total_counters; presumably
         * "NapatechStreamConfig stream_config[],".  Confirm upstream. */
        PacketCounters total_counters,
        PacketCounters dispatch_host,
        PacketCounters dispatch_drop,
        PacketCounters dispatch_fwd,
        int is_inline,
        int enable_stream_stats,
        PacketCounters stream_counters[]
        ) {
    /* Baselines captured the first time each stream is seen, so reported
     * values start from zero rather than the card's historical totals. */
    static uint64_t rxPktsStart[MAX_STREAMS] = {0};
    static uint64_t rxByteStart[MAX_STREAMS] = {0};
    static uint64_t dropPktStart[MAX_STREAMS] = {0};
    static uint64_t dropByteStart[MAX_STREAMS] = {0};

    int status;
    NtInfo_t hStreamInfo;
    NtStatistics_t hStat; // Stat handle.

    /* Query the system to get the number of streams currently instantiated */
    hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
    if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        exit(EXIT_FAILURE);
    }

    uint16_t num_active;
    if ((num_active = TestStreamConfig(hInfo, hstat_stream, stream_config, num_streams)) == 0) {
        /* None of the configured streams are active */
        return 0;
    }

    /* At least one stream is active so proceed with the stats. */
    uint16_t inst_id = 0;
    uint32_t stream_cnt = 0;
    for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {
        /* advance inst_id to the next active entry in stream_config */
        while (inst_id < num_streams) {
            if (stream_config[inst_id].is_active) {
                break;
            } else {
                ++inst_id;
            }
        }
        if (inst_id == num_streams)
            break;

        /* Read usage data for the chosen stream ID */
        memset(&hStat, 0, sizeof (NtStatistics_t));
        hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
        hStat.u.usageData_v0.streamid = (uint8_t) stream_config[inst_id].stream_id;

        if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
            NAPATECH_ERROR(status);
            return 0;
        }

        uint16_t stream_id = stream_config[inst_id].stream_id;
        if (stream_config[inst_id].is_active) {
            uint64_t rx_pkts_total = 0;
            uint64_t rx_byte_total = 0;
            uint64_t drop_pkts_total = 0;
            uint64_t drop_byte_total = 0;

            /* sum the rx/drop counters over every host buffer that serves
             * this stream */
            for (uint32_t hbCount = 0; hbCount < hStat.u.usageData_v0.data.numHostBufferUsed; hbCount++) {
                if (unlikely(stream_config[inst_id].initialized == false)) {
                    /* first read: record the baseline instead of reporting */
                    rxPktsStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
                    rxByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
                    dropPktStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
                    dropByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
                    stream_config[inst_id].initialized = true;
                } else {
                    rx_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
                    rx_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
                    drop_pkts_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.frames;
                    drop_byte_total += hStat.u.usageData_v0.data.hb[hbCount].stat.drop.bytes;
                }
            }

            /* report deltas relative to the recorded baselines */
            current_stats[stream_id].current_packets = rx_pkts_total - rxPktsStart[stream_id];
            current_stats[stream_id].current_bytes = rx_byte_total - rxByteStart[stream_id];
            current_stats[stream_id].current_drop_packets = drop_pkts_total - dropPktStart[stream_id];
            current_stats[stream_id].current_drop_bytes = drop_byte_total - dropByteStart[stream_id];
        }

        if (enable_stream_stats) {
            StatsSetUI64(tv, stream_counters[inst_id].pkts, current_stats[stream_id].current_packets);
            StatsSetUI64(tv, stream_counters[inst_id].byte, current_stats[stream_id].current_bytes);
            StatsSetUI64(tv, stream_counters[inst_id].drop_pkts, current_stats[stream_id].current_drop_packets);
            StatsSetUI64(tv, stream_counters[inst_id].drop_byte, current_stats[stream_id].current_drop_bytes);
        }

        ++inst_id;
    }

    uint32_t stream_id;
    for (stream_id = 0; stream_id < num_streams; ++stream_id) {

#ifndef NAPATECH_ENABLE_BYPASS
        /* NOTE(review): the statements that belong inside this loop are
         * missing from this copy of the file (presumably per-stream
         * accumulation into total_stats).  Confirm against upstream. */
#endif /* NAPATECH_ENABLE_BYPASS */
    }


#ifndef NAPATECH_ENABLE_BYPASS
    StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
    StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);
#endif /* NAPATECH_ENABLE_BYPASS */

    /* NOTE(review): several lines are missing from this copy of the file
     * at this point.  Confirm against upstream. */

    /* Read usage data for the chosen stream ID */
    memset(&hStat, 0, sizeof (NtStatistics_t));

#ifdef NAPATECH_ENABLE_BYPASS
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
    hStat.u.query_v3.clear = 0;
#else /* NAPATECH_ENABLE_BYPASS */
    /* Older versions of the API have a different structure. */
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
    hStat.u.query_v2.clear = 0;
#endif /* !NAPATECH_ENABLE_BYPASS */

    if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
        if (status == NT_STATUS_TIMEOUT) {
            /* non-fatal: the next polling cycle will try again */
            SCLogInfo("Statistics timed out - will retry next time.");
            return 0;
        } else {
            NAPATECH_ERROR(status);
            return 0;
        }
    }

#ifdef NAPATECH_ENABLE_BYPASS

    /* Per the accumulation below: color 0 feeds the "host" totals,
     * colors 1/3 the "drop" totals, colors 2/4 the "forward" totals. */
    int adapter = 0;
    uint64_t total_dispatch_host_pkts = 0;
    uint64_t total_dispatch_host_byte = 0;
    uint64_t total_dispatch_drop_pkts = 0;
    uint64_t total_dispatch_drop_byte = 0;
    uint64_t total_dispatch_fwd_pkts = 0;
    uint64_t total_dispatch_fwd_byte = 0;

    for (adapter = 0; adapter < NapatechGetNumAdapters(); ++adapter) {
        total_dispatch_host_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts;
        total_dispatch_host_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets;

        total_dispatch_drop_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;
        total_dispatch_drop_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].octets;

        total_dispatch_fwd_pkts += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].pkts;
        total_dispatch_fwd_byte += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[4].octets;

        total_stats.current_packets += hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].pkts
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[3].pkts;

        /* NOTE(review): "=" (not "+=") keeps only the last adapter's byte
         * count, unlike current_packets above, and color 3 is omitted here
         * but included in current_packets -- looks like a bug; confirm
         * intent before changing. */
        total_stats.current_bytes = hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[0].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[1].octets
                + hStat.u.query_v3.data.adapter.aAdapters[adapter].color.aColor[2].octets;
    }

    StatsSetUI64(tv, dispatch_host.pkts, total_dispatch_host_pkts);
    StatsSetUI64(tv, dispatch_host.byte, total_dispatch_host_byte);

    StatsSetUI64(tv, dispatch_drop.pkts, total_dispatch_drop_pkts);
    StatsSetUI64(tv, dispatch_drop.byte, total_dispatch_drop_byte);

    if (is_inline) {
        StatsSetUI64(tv, dispatch_fwd.pkts, total_dispatch_fwd_pkts);
        StatsSetUI64(tv, dispatch_fwd.byte, total_dispatch_fwd_byte);
    }

    StatsSetUI64(tv, total_counters.pkts, total_stats.current_packets);
    StatsSetUI64(tv, total_counters.byte, total_stats.current_bytes);

#endif /* NAPATECH_ENABLE_BYPASS */

    return num_active;
}
469 
/**
 * \brief Statistics processing loop
 *
 * Instantiated on the stats thread. Periodically retrieves
 * statistics from the Napatech card and updates the packet counters
 *
 * \param arg Pointer that is cast into a ThreadVars structure
 */
static void *NapatechStatsLoop(void *arg)
{
    ThreadVars *tv = (ThreadVars *) arg;

    int status;
    NtInfoStream_t hInfo;
    NtStatStream_t hstat_stream;
    int is_inline = 0;
    int enable_stream_stats = 0;
    PacketCounters stream_counters[MAX_STREAMS];

    if (ConfGetBool("napatech.inline", &is_inline) == 0) {
        /* default is "no" when the option is absent from the conf */
        is_inline = 0;
    }

    if (ConfGetBool("napatech.enable-stream-stats", &enable_stream_stats) == 0) {
        /* default is "no" */
        enable_stream_stats = 0;
    }

    /* NOTE(review): a line is missing from this copy of the file here --
     * stream_config is used below but never declared; presumably
     * "NapatechStreamConfig stream_config[MAX_STREAMS];".  Confirm
     * against upstream. */
    uint16_t stream_cnt = NapatechGetStreamConfig(stream_config);

    /* Open the info and Statistics */
    if ((status = NT_InfoOpen(&hInfo, "StatsLoopInfoStream")) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        return NULL;
    }

    if ((status = NT_StatOpen(&hstat_stream, "StatsLoopStatsStream")) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        return NULL;
    }

    NtStatistics_t hStat;
    memset(&hStat, 0, sizeof (NtStatistics_t));

#ifdef NAPATECH_ENABLE_BYPASS
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V3;
    hStat.u.query_v3.clear = 1;
#else /* NAPATECH_ENABLE_BYPASS */
    hStat.cmd = NT_STATISTICS_READ_CMD_QUERY_V2;
    hStat.u.query_v2.clear = 1;
#endif /* !NAPATECH_ENABLE_BYPASS */

    /* clear = 1: reset the card's counters once at startup */
    if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        /* NOTE(review): 0 is returned from a pointer-returning function;
         * equivalent to NULL but inconsistent with the returns above. */
        return 0;
    }

    PacketCounters total_counters;
    memset(&total_counters, 0, sizeof(total_counters));

    PacketCounters dispatch_host;
    memset(&dispatch_host, 0, sizeof(dispatch_host));

    PacketCounters dispatch_drop;
    memset(&dispatch_drop, 0, sizeof(dispatch_drop));

    PacketCounters dispatch_fwd;
    memset(&dispatch_fwd, 0, sizeof(dispatch_fwd));

    /* register the global packet/byte counters with the stats subsystem */
    total_counters.pkts = StatsRegisterCounter("napa_total.pkts", tv);
    dispatch_host.pkts = StatsRegisterCounter("napa_dispatch_host.pkts", tv);
    dispatch_drop.pkts = StatsRegisterCounter("napa_dispatch_drop.pkts", tv);
    if (is_inline) {
        dispatch_fwd.pkts = StatsRegisterCounter("napa_dispatch_fwd.pkts", tv);
    }

    total_counters.byte = StatsRegisterCounter("napa_total.byte", tv);
    dispatch_host.byte = StatsRegisterCounter("napa_dispatch_host.byte", tv);
    dispatch_drop.byte = StatsRegisterCounter("napa_dispatch_drop.byte", tv);
    if (is_inline) {
        dispatch_fwd.byte = StatsRegisterCounter("napa_dispatch_fwd.byte", tv);
    }

    total_counters.drop_pkts = StatsRegisterCounter("napa_total.overflow_drop_pkts", tv);
    total_counters.drop_byte = StatsRegisterCounter("napa_total.overflow_drop_byte", tv);

    if (enable_stream_stats) {
        /* one pkts/bytes/drop_pkts/drop_byte counter per configured stream;
         * the 32-byte name buffers are never freed here -- presumably the
         * stats subsystem retains the name pointer for the process
         * lifetime (verify). */
        for (int i = 0; i < stream_cnt; ++i) {
            char *pkts_buf = SCCalloc(1, 32);
            if (unlikely(pkts_buf == NULL)) {
                FatalError("Failed to allocate memory for NAPATECH stream counter.");
            }

            snprintf(pkts_buf, 32, "napa%d.pkts", stream_config[i].stream_id);
            stream_counters[i].pkts = StatsRegisterCounter(pkts_buf, tv);

            char *byte_buf = SCCalloc(1, 32);
            if (unlikely(byte_buf == NULL)) {
                FatalError("Failed to allocate memory for NAPATECH stream counter.");
            }
            snprintf(byte_buf, 32, "napa%d.bytes", stream_config[i].stream_id);
            stream_counters[i].byte = StatsRegisterCounter(byte_buf, tv);

            char *drop_pkts_buf = SCCalloc(1, 32);
            if (unlikely(drop_pkts_buf == NULL)) {
                FatalError("Failed to allocate memory for NAPATECH stream counter.");
            }
            snprintf(drop_pkts_buf, 32, "napa%d.drop_pkts", stream_config[i].stream_id);
            stream_counters[i].drop_pkts = StatsRegisterCounter(drop_pkts_buf, tv);

            char *drop_byte_buf = SCCalloc(1, 32);
            if (unlikely(drop_byte_buf == NULL)) {
                FatalError("Failed to allocate memory for NAPATECH stream counter.");
            }
            snprintf(drop_byte_buf, 32, "napa%d.drop_byte", stream_config[i].stream_id);
            stream_counters[i].drop_byte = StatsRegisterCounter(drop_byte_buf, tv);
        }
    }

#ifdef NAPATECH_ENABLE_BYPASS
    FlowStatsCounters flow_counters;
    if (bypass_supported) {
        flow_counters.active_bypass_flows = StatsRegisterCounter("napa_bypass.active_flows", tv);
        flow_counters.total_bypass_flows = StatsRegisterCounter("napa_bypass.total_flows", tv);
    }
#endif /* NAPATECH_ENABLE_BYPASS */

    /* NOTE(review): a line is missing from this copy of the file here --
     * presumably the init-done notification to the thread manager.
     * Confirm against upstream. */

    StatsSetUI64(tv, total_counters.pkts, 0);
    StatsSetUI64(tv, total_counters.byte, 0);
    StatsSetUI64(tv, total_counters.drop_pkts, 0);
    StatsSetUI64(tv, total_counters.drop_byte, 0);

#ifdef NAPATECH_ENABLE_BYPASS
    if (bypass_supported) {
        StatsSetUI64(tv, dispatch_host.pkts, 0);
        StatsSetUI64(tv, dispatch_drop.pkts, 0);

        if (is_inline) {
            StatsSetUI64(tv, dispatch_fwd.pkts, 0);
        }

        StatsSetUI64(tv, dispatch_host.byte, 0);
        StatsSetUI64(tv, dispatch_drop.byte, 0);
        if (is_inline) {
            StatsSetUI64(tv, dispatch_fwd.byte, 0);
        }

        if (enable_stream_stats) {
            for (int i = 0; i < stream_cnt; ++i) {
                StatsSetUI64(tv, stream_counters[i].pkts, 0);
                StatsSetUI64(tv, stream_counters[i].byte, 0);
                StatsSetUI64(tv, stream_counters[i].drop_pkts, 0);
                StatsSetUI64(tv, stream_counters[i].drop_byte, 0);
            }
        }

        StatsSetUI64(tv, flow_counters.active_bypass_flows, 0);
        StatsSetUI64(tv, flow_counters.total_bypass_flows, 0);
        /* clear_stats = 1: reset the adapter's flow counters at startup */
        UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 1);
    }
#endif /* NAPATECH_ENABLE_BYPASS */

    uint32_t num_active = UpdateStreamStats(tv, hInfo, hstat_stream,
            stream_cnt, stream_config, total_counters,
            dispatch_host, dispatch_drop, dispatch_fwd,
            is_inline, enable_stream_stats, stream_counters);

    if (!NapatechIsAutoConfigEnabled() && (num_active < stream_cnt)) {
        SCLogInfo("num_active: %d, stream_cnt: %d", num_active, stream_cnt);
        SCLogWarning("Some or all of the configured streams are not created. Proceeding with "
                     "active streams.");
    }

    while (1) {
        /* NOTE(review): the opening "if (...) {" of the kill-flag check is
         * missing from this copy of the file -- the SCLogDebug/break below
         * are clearly the body of that check (compare the identical
         * pattern in NapatechBufMonitorLoop).  Confirm against upstream. */
            SCLogDebug("NapatechStatsLoop THV_KILL detected");
            break;
        }

        UpdateStreamStats(tv, hInfo, hstat_stream,
                stream_cnt, stream_config, total_counters,
                dispatch_host, dispatch_drop, dispatch_fwd,
                is_inline, enable_stream_stats,
                stream_counters);

#ifdef NAPATECH_ENABLE_BYPASS
        if (bypass_supported) {
            /* clear_stats = 0: read without resetting */
            UpdateFlowStats(tv, hInfo, hstat_stream, flow_counters, 0);
        }
#endif /* NAPATECH_ENABLE_BYPASS */

        /* NOTE(review): a line is missing from this copy of the file here,
         * just before the 1-second poll sleep.  Confirm against upstream. */
        usleep(1000000);
    }

    /* CLEAN UP NT Resources and Close the info stream */
    if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        return NULL;
    }

    /* Close the statistics stream */
    if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
        NAPATECH_ERROR(status);
        return NULL;
    }

    SCLogDebug("Exiting NapatechStatsLoop");
    /* NOTE(review): a few lines are missing from this copy of the file here
     * -- presumably the thread-exit flag handshake with the thread manager.
     * Confirm against upstream. */

    return NULL;
}
688 
689 #define MAX_HOSTBUFFER 4
690 #define MAX_STREAMS 256
691 #define HB_HIGHWATER 2048 //1982
692 
693 /**
694  * \brief Tests whether a particular stream_id is actively registered
695  *
696  * \param stream_id - ID of the stream to look up
697  * \param num_registered - The total number of registered streams
698  * \param registered_streams - An array containing actively registered streams.
699  *
700  * \return Bool indicating is the specified stream is registered.
701  *
702  */
703 static bool RegisteredStream(uint16_t stream_id, uint16_t num_registered,
704  NapatechStreamConfig registered_streams[])
705 {
706  for (uint16_t reg_id = 0; reg_id < num_registered; ++reg_id) {
707  if (stream_id == registered_streams[reg_id].stream_id) {
708  return true;
709  }
710  }
711  return false;
712 }
713 
714 /**
715  * \brief Count the number of worker threads defined in the conf file.
716  *
717  * \return - The number of worker threads defined by the configuration
718  */
719 static uint32_t CountWorkerThreads(void)
720 {
721  int worker_count = 0;
722 
723  ConfNode *affinity;
724  ConfNode *root = ConfGetNode("threading.cpu-affinity");
725 
726  if (root != NULL) {
727 
728  TAILQ_FOREACH(affinity, &root->head, next)
729  {
730  if (strcmp(affinity->val, "decode-cpu-set") == 0 ||
731  strcmp(affinity->val, "stream-cpu-set") == 0 ||
732  strcmp(affinity->val, "reject-cpu-set") == 0 ||
733  strcmp(affinity->val, "output-cpu-set") == 0) {
734  continue;
735  }
736 
737  if (strcmp(affinity->val, "worker-cpu-set") == 0) {
738  ConfNode *node = ConfNodeLookupChild(affinity->head.tqh_first, "cpu");
739  ConfNode *lnode;
740 
742 
743  TAILQ_FOREACH(lnode, &node->head, next)
744  {
745  uint8_t start, end;
746  char *end_str;
747  if (strncmp(lnode->val, "all", 4) == 0) {
748  /* check that the sting in the config file is correctly specified */
749  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
750  FatalError("Only one Napatech port specifier type allowed.");
751  }
752  cpu_spec = CONFIG_SPECIFIER_RANGE;
753  worker_count = UtilCpuGetNumProcessorsConfigured();
754  } else if ((end_str = strchr(lnode->val, '-'))) {
755  /* check that the sting in the config file is correctly specified */
756  if (cpu_spec != CONFIG_SPECIFIER_UNDEFINED) {
757  FatalError("Only one Napatech port specifier type allowed.");
758  }
759  cpu_spec = CONFIG_SPECIFIER_RANGE;
760 
761 
762  if (StringParseUint8(&start, 10, end_str - lnode->val, (const char *)lnode->val) < 0) {
763  FatalError("Napatech invalid"
764  " worker range start: '%s'",
765  lnode->val);
766  }
767  if (StringParseUint8(&end, 10, 0, (const char *) (end_str + 1)) < 0) {
768  FatalError("Napatech invalid"
769  " worker range end: '%s'",
770  (end_str != NULL) ? (const char *)(end_str + 1) : "Null");
771  }
772  if (end < start) {
773  FatalError("Napatech invalid"
774  " worker range start: '%d' is greater than end: '%d'",
775  start, end);
776  }
777  worker_count = end - start + 1;
778 
779  } else {
780  /* check that the sting in the config file is correctly specified */
781  if (cpu_spec == CONFIG_SPECIFIER_RANGE) {
782  FatalError("Napatech port range specifiers cannot be combined with "
783  "individual stream specifiers.");
784  }
785  cpu_spec = CONFIG_SPECIFIER_INDIVIDUAL;
786  ++worker_count;
787  }
788  }
789  break;
790  }
791  }
792  }
793  return worker_count;
794 }
795 
796 /**
797  * \brief Reads and parses the stream configuration defined in the config file.
798  *
799  * \param stream_config - array to be filled in with active stream info.
800  *
801  * \return the number of streams configured or -1 if an error occurred
802  *
803  */
805 {
806  int status;
807  char error_buffer[80]; // Error buffer
808  NtStatStream_t hstat_stream;
809  NtStatistics_t hStat; // Stat handle.
810  NtInfoStream_t info_stream;
811  NtInfo_t info;
812  uint16_t instance_cnt = 0;
813  int use_all_streams = 0;
814  int set_cpu_affinity = 0;
815  ConfNode *ntstreams;
816  uint16_t stream_id = 0;
817  uint8_t start = 0;
818  uint8_t end = 0;
819 
820  for (uint16_t i = 0; i < MAX_STREAMS; ++i) {
821  stream_config[i].stream_id = 0;
822  stream_config[i].is_active = false;
823  stream_config[i].initialized = false;
824  }
825 
826  if (ConfGetBool("napatech.use-all-streams", &use_all_streams) == 0) {
827  /* default is "no" */
828  use_all_streams = 0;
829  }
830 
831  if ((status = NT_InfoOpen(&info_stream, "SuricataStreamInfo")) != NT_SUCCESS) {
832  NAPATECH_ERROR(status);
833  return -1;
834  }
835 
836  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
837  NAPATECH_ERROR(status);
838  return -1;
839  }
840 
841  if (use_all_streams) {
842  info.cmd = NT_INFO_CMD_READ_STREAM;
843  if ((status = NT_InfoRead(info_stream, &info)) != NT_SUCCESS) {
844  NAPATECH_ERROR(status);
845  return -1;
846  }
847 
848  while (instance_cnt < info.u.stream.data.count) {
849 
850  /*
851  * For each stream ID query the number of host-buffers used by
852  * the stream. If zero, then that streamID is not used; skip
853  * over it and continue until we get a streamID with a non-zero
854  * count of the host-buffers.
855  */
856  memset(&hStat, 0, sizeof (NtStatistics_t));
857 
858  /* Read usage data for the chosen stream ID */
859  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
860  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
861 
862  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
863  /* Get the status code as text */
864  NT_ExplainError(status, error_buffer, sizeof (error_buffer));
865  SCLogError("NT_StatRead() failed: %s\n", error_buffer);
866  return -1;
867  }
868 
869  if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
870  ++stream_id;
871  continue;
872  }
873 
874  /* if we get here it is an active stream */
875  stream_config[instance_cnt].stream_id = stream_id++;
876  stream_config[instance_cnt].is_active = true;
877  instance_cnt++;
878  }
879 
880  } else {
881  (void)ConfGetBool("threading.set-cpu-affinity", &set_cpu_affinity);
882  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 1)) {
883  start = 0;
884  end = CountWorkerThreads() - 1;
885  } else {
886  /* When not using the default streams we need to
887  * parse the array of streams from the conf */
888  if ((ntstreams = ConfGetNode("napatech.streams")) == NULL) {
889  SCLogError("Failed retrieving napatech.streams from Config");
890  if (NapatechIsAutoConfigEnabled() && (set_cpu_affinity == 0)) {
891  SCLogError("if set-cpu-affinity: no in conf then napatech.streams must be "
892  "defined");
893  }
894  exit(EXIT_FAILURE);
895  }
896 
897  /* Loop through all stream numbers in the array and register the devices */
898  ConfNode *stream;
899  enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;
900  instance_cnt = 0;
901 
902  TAILQ_FOREACH(stream, &ntstreams->head, next)
903  {
904 
905  if (stream == NULL) {
906  SCLogError("Couldn't Parse Stream Configuration");
907  return -1;
908  }
909 
910  char *end_str = strchr(stream->val, '-');
911  if (end_str) {
912  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
913  SCLogError("Only one Napatech stream range specifier allowed.");
914  return -1;
915  }
916  stream_spec = CONFIG_SPECIFIER_RANGE;
917 
918  if (StringParseUint8(&start, 10, end_str - stream->val,
919  (const char *)stream->val) < 0) {
920  FatalError("Napatech invalid "
921  "stream id start: '%s'",
922  stream->val);
923  }
924  if (StringParseUint8(&end, 10, 0, (const char *) (end_str + 1)) < 0) {
925  FatalError("Napatech invalid "
926  "stream id end: '%s'",
927  (end_str != NULL) ? (const char *)(end_str + 1) : "Null");
928  }
929  } else {
930  if (stream_spec == CONFIG_SPECIFIER_RANGE) {
931  FatalError("Napatech range and individual specifiers cannot be combined.");
932  }
933  stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;
934  if (StringParseUint8(&stream_config[instance_cnt].stream_id,
935  10, 0, (const char *)stream->val) < 0) {
936  FatalError("Napatech invalid "
937  "stream id: '%s'",
938  stream->val);
939  }
940  start = stream_config[instance_cnt].stream_id;
941  end = stream_config[instance_cnt].stream_id;
942  }
943  }
944  }
945 
946  for (stream_id = start; stream_id <= end; ++stream_id) {
947  /* if we get here it is configured in the .yaml file */
948  stream_config[instance_cnt].stream_id = stream_id;
949 
950  /* Check to see if it is an active stream */
951  memset(&hStat, 0, sizeof (NtStatistics_t));
952 
953  /* Read usage data for the chosen stream ID */
954  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
955  hStat.u.usageData_v0.streamid =
956  (uint8_t) stream_config[instance_cnt].stream_id;
957 
958  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
959  NAPATECH_ERROR(status);
960  return -1;
961  }
962 
963  if (hStat.u.usageData_v0.data.numHostBufferUsed > 0) {
964  stream_config[instance_cnt].is_active = true;
965  }
966  instance_cnt++;
967  }
968  }
969 
970  /* Close the statistics stream */
971  if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
972  NAPATECH_ERROR(status);
973  return -1;
974  }
975 
976  if ((status = NT_InfoClose(info_stream)) != NT_SUCCESS) {
977  NAPATECH_ERROR(status);
978  return -1;
979  }
980 
981  return instance_cnt;
982 }
983 
984 static void *NapatechBufMonitorLoop(void *arg)
985 {
986  ThreadVars *tv = (ThreadVars *) arg;
987 
988  NtInfo_t hStreamInfo;
989  NtStatistics_t hStat; // Stat handle.
990  NtInfoStream_t hInfo;
991  NtStatStream_t hstat_stream;
992  int status; // Status variable
993 
994  const uint32_t alertInterval = 25;
995 
996 #ifndef NAPATECH_ENABLE_BYPASS
997  uint32_t OB_fill_level[MAX_STREAMS] = {0};
998  uint32_t OB_alert_level[MAX_STREAMS] = {0};
999  uint32_t ave_OB_fill_level[MAX_STREAMS] = {0};
1000 #endif /* NAPATECH_ENABLE_BYPASS */
1001 
1002  uint32_t HB_fill_level[MAX_STREAMS] = {0};
1003  uint32_t HB_alert_level[MAX_STREAMS] = {0};
1004  uint32_t ave_HB_fill_level[MAX_STREAMS] = {0};
1005 
1006  /* Open the info and Statistics */
1007  if ((status = NT_InfoOpen(&hInfo, "InfoStream")) != NT_SUCCESS) {
1008  NAPATECH_ERROR(status);
1009  exit(EXIT_FAILURE);
1010  }
1011 
1012  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
1013  NAPATECH_ERROR(status);
1014  exit(EXIT_FAILURE);
1015  }
1016 
1017  /* Read the info on all streams instantiated in the system */
1018  hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
1019  if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
1020  NAPATECH_ERROR(status);
1021  exit(EXIT_FAILURE);
1022  }
1023 
1024  NapatechStreamConfig registered_streams[MAX_STREAMS];
1025  int num_registered = NapatechGetStreamConfig(registered_streams);
1026  if (num_registered == -1) {
1027  exit(EXIT_FAILURE);
1028  }
1029 
1031  while (1) {
1032  if (TmThreadsCheckFlag(tv, THV_KILL)) {
1033  SCLogDebug("NapatechBufMonitorLoop THV_KILL detected");
1034  break;
1035  }
1036 
1037  usleep(200000);
1038 
1039  /* Read the info on all streams instantiated in the system */
1040  hStreamInfo.cmd = NT_INFO_CMD_READ_STREAM;
1041  if ((status = NT_InfoRead(hInfo, &hStreamInfo)) != NT_SUCCESS) {
1042  NAPATECH_ERROR(status);
1043  exit(EXIT_FAILURE);
1044  }
1045 
1046  char pktCntStr[4096];
1047  memset(pktCntStr, 0, sizeof (pktCntStr));
1048 
1049  uint32_t stream_id = 0;
1050  uint32_t stream_cnt = 0;
1051  uint32_t num_streams = hStreamInfo.u.stream.data.count;
1052 
1053  for (stream_cnt = 0; stream_cnt < num_streams; ++stream_cnt) {
1054 
1055  do {
1056 
1057  /* Read usage data for the chosen stream ID */
1058  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
1059  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
1060 
1061  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
1062  NAPATECH_ERROR(status);
1063  exit(EXIT_FAILURE);
1064  }
1065 
1066  if (hStat.u.usageData_v0.data.numHostBufferUsed == 0) {
1067  ++stream_id;
1068  continue;
1069  }
1070  } while (hStat.u.usageData_v0.data.numHostBufferUsed == 0);
1071 
1072  if (RegisteredStream(stream_id, num_registered, registered_streams)) {
1073 
1074 #ifndef NAPATECH_ENABLE_BYPASS
1075  ave_OB_fill_level[stream_id] = 0;
1076 #endif /* NAPATECH_ENABLE_BYPASS */
1077 
1078  ave_HB_fill_level[stream_id] = 0;
1079 
1080  for (uint32_t hb_count = 0; hb_count < hStat.u.usageData_v0.data.numHostBufferUsed; hb_count++) {
1081 
1082 #ifndef NAPATECH_ENABLE_BYPASS
1083  OB_fill_level[hb_count] =
1084  ((100 * hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.used) /
1085  hStat.u.usageData_v0.data.hb[hb_count].onboardBuffering.size);
1086 
1087  if (OB_fill_level[hb_count] > 100) {
1088  OB_fill_level[hb_count] = 100;
1089  }
1090 #endif /* NAPATECH_ENABLE_BYPASS */
1091  uint32_t bufSize = hStat.u.usageData_v0.data.hb[hb_count].enQueuedAdapter / 1024
1092  + hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024
1093  + hStat.u.usageData_v0.data.hb[hb_count].enQueued / 1024
1094  - HB_HIGHWATER;
1095 
1096  HB_fill_level[hb_count] = (uint32_t)
1097  ((100 * hStat.u.usageData_v0.data.hb[hb_count].deQueued / 1024) /
1098  bufSize);
1099 
1100 #ifndef NAPATECH_ENABLE_BYPASS
1101  ave_OB_fill_level[stream_id] += OB_fill_level[hb_count];
1102 #endif /* NAPATECH_ENABLE_BYPASS */
1103 
1104  ave_HB_fill_level[stream_id] += HB_fill_level[hb_count];
1105  }
1106 
1107 #ifndef NAPATECH_ENABLE_BYPASS
1108  ave_OB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;
1109 #endif /* NAPATECH_ENABLE_BYPASS */
1110 
1111  ave_HB_fill_level[stream_id] /= hStat.u.usageData_v0.data.numHostBufferUsed;
1112 
1113  /* Host Buffer Fill Level warnings... */
1114  if (ave_HB_fill_level[stream_id] >= (HB_alert_level[stream_id] + alertInterval)) {
1115 
1116  while (ave_HB_fill_level[stream_id] >= HB_alert_level[stream_id] + alertInterval) {
1117  HB_alert_level[stream_id] += alertInterval;
1118  }
1119  SCLogPerf("nt%d - Increasing Host Buffer Fill Level : %4d%%",
1120  stream_id, ave_HB_fill_level[stream_id] - 1);
1121  }
1122 
1123  if (HB_alert_level[stream_id] > 0) {
1124  if ((ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval))) {
1125  SCLogPerf("nt%d - Decreasing Host Buffer Fill Level: %4d%%",
1126  stream_id, ave_HB_fill_level[stream_id]);
1127 
1128  while (ave_HB_fill_level[stream_id] <= (HB_alert_level[stream_id] - alertInterval)) {
1129  if ((HB_alert_level[stream_id]) > 0) {
1130  HB_alert_level[stream_id] -= alertInterval;
1131  } else break;
1132  }
1133  }
1134  }
1135 
1136 #ifndef NAPATECH_ENABLE_BYPASS
1137  /* On Board SDRAM Fill Level warnings... */
1138  if (ave_OB_fill_level[stream_id] >= (OB_alert_level[stream_id] + alertInterval)) {
1139  while (ave_OB_fill_level[stream_id] >= OB_alert_level[stream_id] + alertInterval) {
1140  OB_alert_level[stream_id] += alertInterval;
1141 
1142  }
1143  SCLogPerf("nt%d - Increasing Adapter SDRAM Fill Level: %4d%%",
1144  stream_id, ave_OB_fill_level[stream_id]);
1145  }
1146 
1147  if (OB_alert_level[stream_id] > 0) {
1148  if ((ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval))) {
1149  SCLogPerf("nt%d - Decreasing Adapter SDRAM Fill Level : %4d%%",
1150  stream_id, ave_OB_fill_level[stream_id]);
1151 
1152  while (ave_OB_fill_level[stream_id] <= (OB_alert_level[stream_id] - alertInterval)) {
1153  if ((OB_alert_level[stream_id]) > 0) {
1154  OB_alert_level[stream_id] -= alertInterval;
1155  } else break;
1156  }
1157  }
1158  }
1159 #endif /* NAPATECH_ENABLE_BYPASS */
1160  }
1161  ++stream_id;
1162  }
1163  }
1164 
1165  if ((status = NT_InfoClose(hInfo)) != NT_SUCCESS) {
1166  NAPATECH_ERROR(status);
1167  exit(EXIT_FAILURE);
1168  }
1169 
1170  /* Close the statistics stream */
1171  if ((status = NT_StatClose(hstat_stream)) != NT_SUCCESS) {
1172  NAPATECH_ERROR(status);
1173  exit(EXIT_FAILURE);
1174  }
1175 
1176  SCLogDebug("Exiting NapatechStatsLoop");
1180 
1181  return NULL;
1182 }
1183 
1184 
1186 {
1187  /* Creates the Statistic threads */
1188  ThreadVars *stats_tv = TmThreadCreate("NapatechStats",
1189  NULL, NULL,
1190  NULL, NULL,
1191  "custom", NapatechStatsLoop, 0);
1192 
1193  if (stats_tv == NULL) {
1194  FatalError("Error creating a thread for NapatechStats - Killing engine.");
1195  }
1196 
1197  if (TmThreadSpawn(stats_tv) != 0) {
1198  FatalError("Failed to spawn thread for NapatechStats - Killing engine.");
1199  }
1200 
1201 #ifdef NAPATECH_ENABLE_BYPASS
1202  if (bypass_supported) {
1203  SCLogInfo("Napatech bypass functionality enabled.");
1204  }
1205 #endif /* NAPATECH_ENABLE_BYPASS */
1206 
1207  ThreadVars *buf_monitor_tv = TmThreadCreate("NapatechBufMonitor",
1208  NULL, NULL,
1209  NULL, NULL,
1210  "custom", NapatechBufMonitorLoop, 0);
1211 
1212  if (buf_monitor_tv == NULL) {
1213  FatalError("Error creating a thread for NapatechBufMonitor - Killing engine.");
1214  }
1215 
1216  if (TmThreadSpawn(buf_monitor_tv) != 0) {
1217  FatalError("Failed to spawn thread for NapatechBufMonitor - Killing engine.");
1218  }
1219 
1220  return;
1221 }
1222 
1223 bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
1224 {
1225  uint32_t status = 0;
1226  static NtConfigStream_t hconfig;
1227 
1228  char ntpl_cmd[64];
1229  snprintf(ntpl_cmd, 64, "setup[numanode=%d] = streamid == %d", numa, stream);
1230 
1231  NtNtplInfo_t ntpl_info;
1232 
1233  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1234  NAPATECH_ERROR(status);
1235  return false;
1236  }
1237 
1238  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info, NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1239  status = ntpl_info.ntplId;
1240 
1241  } else {
1242  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1243  return false;
1244  }
1245 
1246  return status;
1247 }
1248 
1249 static uint32_t NapatechSetHashmode(void)
1250 {
1251  uint32_t status = 0;
1252  const char *hash_mode;
1253  static NtConfigStream_t hconfig;
1254  char ntpl_cmd[64];
1255  NtNtplInfo_t ntpl_info;
1256 
1257  uint32_t filter_id = 0;
1258 
1259  /* Get the hashmode from the conf file. */
1260  ConfGet("napatech.hashmode", &hash_mode);
1261 
1262  snprintf(ntpl_cmd, 64, "hashmode = %s", hash_mode);
1263 
1264  /* Issue the NTPL command */
1265  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1266  NAPATECH_ERROR(status);
1267  return false;
1268  }
1269 
1270  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1271  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1272  filter_id = ntpl_info.ntplId;
1273  SCLogConfig("Napatech hashmode: %s ID: %d", hash_mode, status);
1274  } else {
1275  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1276  status = 0;
1277  }
1278 
1279  return filter_id;
1280 }
1281 
1282 static uint32_t GetStreamNUMAs(uint32_t stream_id, int stream_numas[])
1283 {
1284  NtStatistics_t hStat; // Stat handle.
1285  NtStatStream_t hstat_stream;
1286  int status; // Status variable
1287 
1288  for (int i = 0; i < MAX_HOSTBUFFERS; ++i)
1289  stream_numas[i] = -1;
1290 
1291  if ((status = NT_StatOpen(&hstat_stream, "StatsStream")) != NT_SUCCESS) {
1292  NAPATECH_ERROR(status);
1293  exit(EXIT_FAILURE);
1294  }
1295 
1296  char pktCntStr[4096];
1297  memset(pktCntStr, 0, sizeof (pktCntStr));
1298 
1299 
1300  /* Read usage data for the chosen stream ID */
1301  hStat.cmd = NT_STATISTICS_READ_CMD_USAGE_DATA_V0;
1302  hStat.u.usageData_v0.streamid = (uint8_t) stream_id;
1303 
1304  if ((status = NT_StatRead(hstat_stream, &hStat)) != NT_SUCCESS) {
1305  NAPATECH_ERROR(status);
1306  exit(EXIT_FAILURE);
1307  }
1308 
1309  for (uint32_t hb_id = 0; hb_id < hStat.u.usageData_v0.data.numHostBufferUsed; ++hb_id) {
1310  stream_numas[hb_id] = hStat.u.usageData_v0.data.hb[hb_id].numaNode;
1311  }
1312 
1313  return hStat.u.usageData_v0.data.numHostBufferUsed;
1314 }
1315 
1316 static int NapatechSetFilter(NtConfigStream_t hconfig, char *ntpl_cmd)
1317 {
1318  int status = 0;
1319  int local_filter_id = 0;
1320 
1321  NtNtplInfo_t ntpl_info;
1322  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1323  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1324  SCLogConfig("NTPL filter assignment \"%s\" returned filter id %4d",
1325  ntpl_cmd, local_filter_id);
1326  } else {
1327  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1328  exit(EXIT_FAILURE);
1329  }
1330 
1331  return local_filter_id;
1332 }
1333 
1335 {
1336  uint32_t status = 0;
1337  static NtConfigStream_t hconfig;
1338  char ntpl_cmd[64];
1339  NtNtplInfo_t ntpl_info;
1340 
1341  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1342  NAPATECH_ERROR(status);
1343  exit(EXIT_FAILURE);
1344  }
1345 
1346  snprintf(ntpl_cmd, 64, "delete = all");
1347  if ((status = NT_NTPL(hconfig, ntpl_cmd, &ntpl_info,
1348  NT_NTPL_PARSER_VALIDATE_NORMAL)) == NT_SUCCESS) {
1349  status = ntpl_info.ntplId;
1350  } else {
1351  NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status);
1352  status = 0;
1353  }
1354 
1355  NT_ConfigClose(hconfig);
1356 
1357  return status;
1358 }
1359 
1360 
1361 uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
1362 {
1363 #define PORTS_SPEC_SIZE 64
1364 
1365  struct ports_spec_s {
1366  uint8_t first[MAX_PORTS];
1367  uint8_t second[MAX_PORTS];
1368  bool all;
1369  char str[PORTS_SPEC_SIZE];
1370  } ports_spec;
1371 
1372  ports_spec.all = false;
1373 
1374  ConfNode *ntports;
1375  int iteration = 0;
1376  int status = 0;
1377  NtConfigStream_t hconfig;
1378  char ntpl_cmd[512];
1379  int is_inline = 0;
1380 #ifdef NAPATECH_ENABLE_BYPASS
1381  int is_span_port[MAX_PORTS] = { 0 };
1382 #endif
1383 
1384  char span_ports[128];
1385  memset(span_ports, 0, sizeof(span_ports));
1386 
1387  if (ConfGetBool("napatech.inline", &is_inline) == 0) {
1388  is_inline = 0;
1389  }
1390 
1391  NapatechSetHashmode();
1392 
1393  if ((status = NT_ConfigOpen(&hconfig, "ConfigStream")) != NT_SUCCESS) {
1394  NAPATECH_ERROR(status);
1395  exit(EXIT_FAILURE);
1396  }
1397 
1398  if (first_stream == last_stream) {
1399  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1400  "Setup[state=inactive] = StreamId == %d",
1401  first_stream);
1402  } else {
1403  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1404  "Setup[state=inactive] = StreamId == (%d..%d)",
1405  first_stream, last_stream);
1406  }
1407  NapatechSetFilter(hconfig, ntpl_cmd);
1408 
1409 #ifdef NAPATECH_ENABLE_BYPASS
1410  if (NapatechUseHWBypass()) {
1411  SCLogInfo("Napatech Hardware Bypass enabled.");
1412  }
1413 #else
1414  if (NapatechUseHWBypass()) {
1415  SCLogInfo("Napatech Hardware Bypass requested in conf but is not available.");
1416  exit(EXIT_FAILURE);
1417  } else {
1418  SCLogInfo("Napatech Hardware Bypass disabled.");
1419  }
1420 #endif
1421 
1422  if (is_inline) {
1423  SCLogInfo("Napatech configured for inline mode.");
1424  } else {
1425 
1426  SCLogInfo("Napatech configured for passive (non-inline) mode.");
1427  }
1428 
1429  /* When not using the default streams we need to parse
1430  * the array of streams from the conf
1431  */
1432  if ((ntports = ConfGetNode("napatech.ports")) == NULL) {
1433  FatalError("Failed retrieving napatech.ports from Conf");
1434  }
1435 
1436  /* Loop through all ports in the array */
1437  ConfNode *port;
1438  enum CONFIG_SPECIFIER stream_spec = CONFIG_SPECIFIER_UNDEFINED;
1439 
1440  if (NapatechUseHWBypass()) {
1441  SCLogInfo("Listening on the following Napatech ports:");
1442  }
1443  /* Build the NTPL command using values in the config file. */
1444  TAILQ_FOREACH(port, &ntports->head, next)
1445  {
1446  if (port == NULL) {
1447  FatalError("Couldn't Parse Port Configuration");
1448  }
1449 
1450  if (NapatechUseHWBypass()) {
1451 #ifdef NAPATECH_ENABLE_BYPASS
1452  if (strchr(port->val, '-')) {
1453  stream_spec = CONFIG_SPECIFIER_RANGE;
1454 
1455  if (ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val) == -1) {
1456  FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
1457  port->val);
1458  }
1459 
1460  if (ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0,
1461  strchr(port->val, '-') + 1) == -1) {
1462  FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
1463  port->val);
1464  }
1465 
1466  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1467  if (is_inline) {
1468  FatalError(
1469  "Error with napatech.ports in conf file. When running in inline "
1470  "mode the two ports specifying a segment must be different.");
1471  } else {
1472  /* SPAN port configuration */
1473  is_span_port[ports_spec.first[iteration]] = 1;
1474 
1475  if (strlen(span_ports) == 0) {
1476  snprintf(span_ports, sizeof (span_ports), "%d", ports_spec.first[iteration]);
1477  } else {
1478  char temp[16];
1479  snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
1480  strlcat(span_ports, temp, sizeof(span_ports));
1481  }
1482 
1483  }
1484  }
1485 
1486  if (NapatechGetAdapter(ports_spec.first[iteration]) != NapatechGetAdapter(ports_spec.first[iteration])) {
1487  SCLogError("Invalid napatech.ports specification in conf file.");
1488  SCLogError("Two ports on a segment must reside on the same adapter. port %d "
1489  "is on adapter %d, port %d is on adapter %d.",
1490  ports_spec.first[iteration],
1491  NapatechGetAdapter(ports_spec.first[iteration]),
1492  ports_spec.second[iteration],
1493  NapatechGetAdapter(ports_spec.second[iteration]));
1494  exit(EXIT_FAILURE);
1495  }
1496 
1497  NapatechSetPortmap(ports_spec.first[iteration], ports_spec.second[iteration]);
1498  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1499  SCLogInfo(" span_port: %d", ports_spec.first[iteration]);
1500  } else {
1501  SCLogInfo(" %s: %d - %d", is_inline ? "inline_ports" : "tap_ports", ports_spec.first[iteration], ports_spec.second[iteration]);
1502  }
1503 
1504  if (iteration == 0) {
1505  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1506  snprintf(ports_spec.str, sizeof (ports_spec.str), "%d", ports_spec.first[iteration]);
1507  } else {
1508  snprintf(ports_spec.str, sizeof (ports_spec.str), "%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
1509  }
1510  } else {
1511  char temp[16];
1512  if (ports_spec.first[iteration] == ports_spec.second[iteration]) {
1513  snprintf(temp, sizeof(temp), ",%d", ports_spec.first[iteration]);
1514  } else {
1515  snprintf(temp, sizeof(temp), ",%d,%d", ports_spec.first[iteration], ports_spec.second[iteration]);
1516  }
1517  strlcat(ports_spec.str, temp, sizeof(ports_spec.str));
1518  }
1519  } else {
1520  FatalError("When using hardware flow bypass ports must be specified as segments. "
1521  "E.g. ports: [0-1, 0-2]");
1522  }
1523 #endif
1524  } else { // !NapatechUseHWBypass()
1525  if (strncmp(port->val, "all", 3) == 0) {
1526  /* check that the sting in the config file is correctly specified */
1527  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
1528  FatalError("Only one Napatech port specifier type is allowed.");
1529  }
1530  stream_spec = CONFIG_SPECIFIER_RANGE;
1531 
1532  ports_spec.all = true;
1533  snprintf(ports_spec.str, sizeof (ports_spec.str), "all");
1534  } else if (strchr(port->val, '-')) {
1535  /* check that the sting in the config file is correctly specified */
1536  if (stream_spec != CONFIG_SPECIFIER_UNDEFINED) {
1537  FatalError("Only one Napatech port specifier is allowed when hardware bypass "
1538  "is disabled. (E.g. ports: [0-4], NOT ports: [0-1,2-3])");
1539  }
1540  stream_spec = CONFIG_SPECIFIER_RANGE;
1541 
1542  if (ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val) == -1) {
1543  FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
1544  port->val);
1545  }
1546 
1547  if (ByteExtractStringUint8(&ports_spec.second[iteration], 10, 0,
1548  strchr(port->val, '-') + 1) == -1) {
1549  FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
1550  port->val);
1551  }
1552 
1553  snprintf(ports_spec.str, sizeof (ports_spec.str), "(%d..%d)", ports_spec.first[iteration], ports_spec.second[iteration]);
1554  } else {
1555  /* check that the sting in the config file is correctly specified */
1556  if (stream_spec == CONFIG_SPECIFIER_RANGE) {
1557  FatalError("Napatech port range specifiers cannot be combined with individual "
1558  "stream specifiers.");
1559  }
1560  stream_spec = CONFIG_SPECIFIER_INDIVIDUAL;
1561 
1562  if (ByteExtractStringUint8(&ports_spec.first[iteration], 10, 0, port->val) == -1) {
1563  FatalError("Invalid value '%s' in napatech.ports specification in conf file.",
1564  port->val);
1565  }
1566 
1567  /* Determine the ports to use on the NTPL assign statement*/
1568  if (iteration == 0) {
1569  snprintf(ports_spec.str, sizeof (ports_spec.str), "%s", port->val);
1570  } else {
1571  strlcat(ports_spec.str, ",", sizeof(ports_spec.str));
1572  strlcat(ports_spec.str, port->val, sizeof(ports_spec.str));
1573  }
1574  }
1575  } // if !NapatechUseHWBypass()
1576  ++iteration;
1577  } /* TAILQ_FOREACH */
1578 
1579 #ifdef NAPATECH_ENABLE_BYPASS
1580  if (bypass_supported) {
1581  if (is_inline) {
1582  char inline_setup_cmd[512];
1583  if (first_stream == last_stream) {
1584  snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
1585  "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == %d",
1586  ports_spec.str, first_stream);
1587  } else {
1588  snprintf(inline_setup_cmd, sizeof (ntpl_cmd),
1589  "Setup[TxDescriptor=Dyn;TxPorts=%s;RxCRC=False;TxPortPos=112;UseWL=True] = StreamId == (%d..%d)",
1590  ports_spec.str, first_stream, last_stream);
1591  }
1592  NapatechSetFilter(hconfig, inline_setup_cmd);
1593  }
1594  /* Build the NTPL command */
1595  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1596  "assign[priority=3;streamid=(%d..%d);colormask=0x10000000;"
1597  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s",
1598  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1599  NapatechSetFilter(hconfig, ntpl_cmd);
1600 
1601 
1602  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1603  "assign[priority=2;streamid=(%d..%d);colormask=0x11000000;"
1604  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
1605  "]= %s%s and (Layer3Protocol==IPV4)",
1606  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1607  NapatechSetFilter(hconfig, ntpl_cmd);
1608 
1609 
1610  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1611  "assign[priority=2;streamid=(%d..%d);colormask=0x14000000;"
1612  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer3Protocol==IPV6)",
1613  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1614  NapatechSetFilter(hconfig, ntpl_cmd);
1615 
1616  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1617  "assign[priority=2;streamid=(%d..%d);colormask=0x10100000;"
1618  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]]= %s%s and (Layer4Protocol==TCP)",
1619  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1620  NapatechSetFilter(hconfig, ntpl_cmd);
1621 
1622  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1623  "assign[priority=2;streamid=(%d..%d);colormask=0x10200000;"
1624  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
1625  "]= %s%s and (Layer4Protocol==UDP)",
1626  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1627  NapatechSetFilter(hconfig, ntpl_cmd);
1628 
1629  if (strlen(span_ports) > 0) {
1630  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1631  "assign[priority=2;streamid=(%d..%d);colormask=0x00001000;"
1632  "Descriptor=DYN3,length=24,colorbits=32,Offset0=Layer3Header[0],Offset1=Layer4Header[0]"
1633  "]= port==%s",
1634  first_stream, last_stream, span_ports);
1635  NapatechSetFilter(hconfig, ntpl_cmd);
1636  }
1637 
1638  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1639  "KeyType[name=KT%u]={sw_32_32,sw_16_16}",
1640  NAPATECH_KEYTYPE_IPV4);
1641  NapatechSetFilter(hconfig, ntpl_cmd);
1642 
1643  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1644  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[12]/32/32,Layer4Header[0]/16/16)",
1645  NAPATECH_KEYTYPE_IPV4, NAPATECH_KEYTYPE_IPV4);
1646  NapatechSetFilter(hconfig, ntpl_cmd);
1647 
1648  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1649  "KeyType[name=KT%u]={32,32,16,16}",
1650  NAPATECH_KEYTYPE_IPV4_SPAN);
1651  NapatechSetFilter(hconfig, ntpl_cmd);
1652 
1653  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1654  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[12]/32,Layer3Header[16]/32,Layer4Header[0]/16,Layer4Header[2]/16)",
1655  NAPATECH_KEYTYPE_IPV4_SPAN, NAPATECH_KEYTYPE_IPV4_SPAN);
1656  NapatechSetFilter(hconfig, ntpl_cmd);
1657 
1658  /* IPv6 5tuple for inline and tap ports */
1659  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1660  "KeyType[name=KT%u]={sw_128_128,sw_16_16}",
1661  NAPATECH_KEYTYPE_IPV6);
1662  NapatechSetFilter(hconfig, ntpl_cmd);
1663 
1664  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1665  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER]=(Layer3Header[8]/128/128,Layer4Header[0]/16/16)",
1666  NAPATECH_KEYTYPE_IPV6, NAPATECH_KEYTYPE_IPV6);
1667  NapatechSetFilter(hconfig, ntpl_cmd);
1668 
1669  /* IPv6 5tuple for SPAN Ports */
1670  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1671  "KeyType[name=KT%u]={128,128,16,16}",
1672  NAPATECH_KEYTYPE_IPV6_SPAN);
1673  NapatechSetFilter(hconfig, ntpl_cmd);
1674 
1675  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1676  "KeyDef[name=KDEF%u;KeyType=KT%u;ipprotocolfield=OUTER;keysort=sorted]=(Layer3Header[8]/128,Layer3Header[24]/128,Layer4Header[0]/16,Layer4Header[2]/16)",
1677  NAPATECH_KEYTYPE_IPV6_SPAN, NAPATECH_KEYTYPE_IPV6_SPAN);
1678  NapatechSetFilter(hconfig, ntpl_cmd);
1679 
1680 
1681  int pair;
1682  char ports_ntpl_a[64];
1683  char ports_ntpl_b[64];
1684  memset(ports_ntpl_a, 0, sizeof(ports_ntpl_a));
1685  memset(ports_ntpl_b, 0, sizeof(ports_ntpl_b));
1686 
1687  for (pair = 0; pair < iteration; ++pair) {
1688  char port_str[8];
1689 
1690  if (!is_span_port[ports_spec.first[pair]]) {
1691  snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_a) == 0 ? "" : ",", ports_spec.first[pair]);
1692  strlcat(ports_ntpl_a, port_str, sizeof(ports_ntpl_a));
1693 
1694  snprintf(port_str, sizeof(port_str), "%s%u ", strlen(ports_ntpl_b) == 0 ? "" : ",", ports_spec.second[pair]);
1695  strlcat(ports_ntpl_b, port_str, sizeof(ports_ntpl_b));
1696  }
1697  }
1698 
1699  if (strlen(ports_ntpl_a) > 0) {
1700  /* This is the assign for dropping upstream traffic */
1701  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1702  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1703  ports_ntpl_a,
1704  NAPATECH_KEYTYPE_IPV4,
1705  NAPATECH_KEYTYPE_IPV4,
1706  NAPATECH_FLOWTYPE_DROP);
1707  NapatechSetFilter(hconfig, ntpl_cmd);
1708  }
1709 
1710  if (strlen(ports_ntpl_b) > 0) {
1711  /* This is the assign for dropping downstream traffic */
1712  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1713  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1714  ports_ntpl_b, //ports_spec.str,
1715  NAPATECH_KEYTYPE_IPV4,
1716  NAPATECH_KEYTYPE_IPV4,
1717  NAPATECH_FLOWTYPE_DROP);
1718  NapatechSetFilter(hconfig, ntpl_cmd);
1719  }
1720 
1721  if (strlen(span_ports) > 0) {
1722  /* This is the assign for dropping SPAN Port traffic */
1723  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1724  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV4)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1725  span_ports,
1726  NAPATECH_KEYTYPE_IPV4_SPAN,
1727  NAPATECH_KEYTYPE_IPV4_SPAN,
1728  NAPATECH_FLOWTYPE_DROP);
1729  NapatechSetFilter(hconfig, ntpl_cmd);
1730  }
1731 
1732  if (is_inline) {
1733  for (pair = 0; pair < iteration; ++pair) {
1734  /* This is the assignment for forwarding traffic */
1735  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1736  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u)==%u)",
1737  ports_spec.second[pair],
1738  ports_spec.first[pair],
1739  NAPATECH_KEYTYPE_IPV4,
1740  NAPATECH_KEYTYPE_IPV4,
1741  NAPATECH_FLOWTYPE_PASS);
1742  NapatechSetFilter(hconfig, ntpl_cmd);
1743 
1744  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1745  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x2]=(Layer3Protocol==IPV4)and(port == %d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1746  ports_spec.first[pair],
1747  ports_spec.second[pair],
1748  NAPATECH_KEYTYPE_IPV4,
1749  NAPATECH_KEYTYPE_IPV4,
1750  NAPATECH_FLOWTYPE_PASS);
1751  NapatechSetFilter(hconfig, ntpl_cmd);
1752  }
1753  }
1754 
1755  if (strlen(ports_ntpl_a) > 0) {
1756  /* This is the assign for dropping upstream traffic */
1757  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1758  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1759  ports_ntpl_a,
1760  NAPATECH_KEYTYPE_IPV6,
1761  NAPATECH_KEYTYPE_IPV6,
1762  NAPATECH_FLOWTYPE_DROP);
1763  NapatechSetFilter(hconfig, ntpl_cmd);
1764  }
1765 
1766  if (strlen(ports_ntpl_b) > 0) {
1767  /* This is the assign for dropping downstream traffic */
1768  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1769  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1770  ports_ntpl_b, //ports_spec.str,
1771  NAPATECH_KEYTYPE_IPV6,
1772  NAPATECH_KEYTYPE_IPV6,
1773  NAPATECH_FLOWTYPE_DROP);
1774  NapatechSetFilter(hconfig, ntpl_cmd);
1775  }
1776 
1777  if (strlen(span_ports) > 0) {
1778  /* This is the assign for dropping SPAN Port traffic */
1779  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1780  "assign[priority=1;streamid=drop;colormask=0x1]=(Layer3Protocol==IPV6)and(port == %s)and(Key(KDEF%u,KeyID=%u)==%u)",
1781  span_ports,
1782  NAPATECH_KEYTYPE_IPV6_SPAN,
1783  NAPATECH_KEYTYPE_IPV6_SPAN,
1784  NAPATECH_FLOWTYPE_DROP);
1785  NapatechSetFilter(hconfig, ntpl_cmd);
1786  }
1787 
1788  if (is_inline) {
1789  for (pair = 0; pair < iteration; ++pair) {
1790  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1791  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u)==%u)",
1792  ports_spec.second[pair],
1793  ports_spec.first[pair],
1794  NAPATECH_KEYTYPE_IPV6,
1795  NAPATECH_KEYTYPE_IPV6,
1796  NAPATECH_FLOWTYPE_PASS);
1797  NapatechSetFilter(hconfig, ntpl_cmd);
1798 
1799  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1800  "assign[priority=1;streamid=drop;DestinationPort=%d;colormask=0x4]=(Layer3Protocol==IPV6)and(port==%d)and(Key(KDEF%u,KeyID=%u,fieldaction=swap)==%u)",
1801  ports_spec.first[pair],
1802  ports_spec.second[pair],
1803  NAPATECH_KEYTYPE_IPV6,
1804  NAPATECH_KEYTYPE_IPV6,
1805  NAPATECH_FLOWTYPE_PASS);
1806  NapatechSetFilter(hconfig, ntpl_cmd);
1807  }
1808  }
1809  } else {
1810  if (is_inline) {
1811  FatalError("Napatech Inline operation not supported by this FPGA version.");
1812  }
1813 
1815  snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d);colormask=0x0] = %s%s",
1816  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1817  NapatechSetFilter(hconfig, ntpl_cmd);
1818  }
1819  }
1820 
1821 #else /* NAPATECH_ENABLE_BYPASS */
1822  snprintf(ntpl_cmd, sizeof (ntpl_cmd), "assign[streamid=(%d..%d)] = %s%s",
1823  first_stream, last_stream, ports_spec.all ? "" : "port==", ports_spec.str);
1824  NapatechSetFilter(hconfig, ntpl_cmd);
1825 
1826 #endif /* !NAPATECH_ENABLE_BYPASS */
1827 
1828  SCLogConfig("Host-buffer NUMA assignments: ");
1829  int numa_nodes[MAX_HOSTBUFFERS];
1830  uint32_t stream_id;
1831  for (stream_id = first_stream; stream_id < last_stream; ++stream_id) {
1832  char temp1[256];
1833  char temp2[256];
1834 
1835  uint32_t num_host_buffers = GetStreamNUMAs(stream_id, numa_nodes);
1836 
1837  snprintf(temp1, 256, " stream %d: ", stream_id);
1838 
1839  for (uint32_t hb_id = 0; hb_id < num_host_buffers; ++hb_id) {
1840  snprintf(temp2, 256, "%d ", numa_nodes[hb_id]);
1841  strlcat(temp1, temp2, sizeof(temp1));
1842  }
1843 
1844  SCLogConfig("%s", temp1);
1845  }
1846 
1847  if (first_stream == last_stream) {
1848  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1849  "Setup[state=active] = StreamId == %d",
1850  first_stream);
1851  } else {
1852  snprintf(ntpl_cmd, sizeof (ntpl_cmd),
1853  "Setup[state=active] = StreamId == (%d..%d)",
1854  first_stream, last_stream);
1855  }
1856  NapatechSetFilter(hconfig, ntpl_cmd);
1857 
1858  NT_ConfigClose(hconfig);
1859 
1860  return status;
1861 }
1862 
1863 #endif // HAVE_NAPATECH
util-byte.h
PORTS_SPEC_SIZE
#define PORTS_SPEC_SIZE
tm-threads.h
CONFIG_SPECIFIER_UNDEFINED
@ CONFIG_SPECIFIER_UNDEFINED
Definition: util-napatech.c:203
TmThreadSpawn
TmEcode TmThreadSpawn(ThreadVars *tv)
Spawns a thread associated with the ThreadVars instance tv.
Definition: tm-threads.c:1660
NapatechGetCurrentStats
NapatechCurrentStats NapatechGetCurrentStats(uint16_t id)
Definition: util-napatech.c:196
CONFIG_SPECIFIER_RANGE
@ CONFIG_SPECIFIER_RANGE
Definition: util-napatech.c:204
ConfNode_::val
char * val
Definition: conf.h:34
ConfGetBool
int ConfGetBool(const char *name, int *val)
Retrieve a configuration value as a boolean.
Definition: conf.c:483
NapatechSetPortmap
int NapatechSetPortmap(int port, int peer)
unlikely
#define unlikely(expr)
Definition: util-optimize.h:35
SCLogDebug
#define SCLogDebug(...)
Definition: util-debug.h:269
TmThreadsSetFlag
void TmThreadsSetFlag(ThreadVars *tv, uint32_t flag)
Set a thread flag.
Definition: tm-threads.c:99
TmThreadWaitForFlag
void TmThreadWaitForFlag(ThreadVars *tv, uint32_t flags)
Waits till the specified flag(s) is(are) set. We don't bother if the kill flag has been set or not on...
Definition: tm-threads.c:1780
next
struct HtpBodyChunk_ * next
Definition: app-layer-htp.h:0
THV_DEINIT
#define THV_DEINIT
Definition: threadvars.h:44
ConfGetNode
ConfNode * ConfGetNode(const char *name)
Get a ConfNode by name.
Definition: conf.c:181
UtilCpuGetNumProcessorsConfigured
uint16_t UtilCpuGetNumProcessorsConfigured(void)
Get the number of cpus configured in the system.
Definition: util-cpu.c:59
StatsSetUI64
void StatsSetUI64(ThreadVars *tv, uint16_t id, uint64_t x)
Sets a value of type double to the local counter.
Definition: counters.c:210
THV_RUNNING
#define THV_RUNNING
Definition: threadvars.h:54
NapatechDeleteFilters
uint32_t NapatechDeleteFilters(void)
Definition: util-napatech.c:1334
TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)
Definition: queue.h:252
StatsSetupPrivate
int StatsSetupPrivate(ThreadVars *tv)
Definition: counters.c:1224
NapatechIsAutoConfigEnabled
bool NapatechIsAutoConfigEnabled(void)
Definition: runmode-napatech.c:70
PacketCounters_::byte
uint16_t byte
Definition: util-napatech.c:188
stream_config
TcpStreamCnf stream_config
Definition: stream-tcp.c:219
current_stats
NapatechCurrentStats current_stats[MAX_STREAMS]
Definition: util-napatech.c:194
NapatechStreamConfig_
Definition: util-napatech.h:43
THV_RUNNING_DONE
#define THV_RUNNING_DONE
Definition: threadvars.h:45
NapatechGetStreamConfig
int NapatechGetStreamConfig(NapatechStreamConfig stream_config[])
Reads and parses the stream configuration defined in the config file.
Definition: util-napatech.c:804
ConfGet
int ConfGet(const char *name, const char **vptr)
Retrieve the value of a configuration node.
Definition: conf.c:335
StringParseUint8
int StringParseUint8(uint8_t *res, int base, size_t len, const char *str)
Definition: util-byte.c:361
total_stats
NapatechCurrentStats total_stats
Definition: util-napatech.c:193
util-device.h
NapatechCurrentStats_::current_packets
uint64_t current_packets
Definition: util-napatech.h:51
strlcat
size_t strlcat(char *, const char *src, size_t siz)
Definition: util-strlcatu.c:45
util-cpu.h
source-napatech.h
ThreadVars_
Per thread variable structure.
Definition: threadvars.h:57
PacketCounters
struct PacketCounters_ PacketCounters
THV_KILL
#define THV_KILL
Definition: threadvars.h:39
CONFIG_SPECIFIER_INDIVIDUAL
@ CONFIG_SPECIFIER_INDIVIDUAL
Definition: util-napatech.c:205
TmThreadCreate
ThreadVars * TmThreadCreate(const char *name, const char *inq_name, const char *inqh_name, const char *outq_name, const char *outqh_name, const char *slots, void *(*fn_p)(void *), int mucond)
Creates and returns the TV instance for a new thread.
Definition: tm-threads.c:907
NAPATECH_NTPL_ERROR
#define NAPATECH_NTPL_ERROR(ntpl_cmd, ntpl_info, status)
Definition: util-napatech.h:72
SCLogWarning
#define SCLogWarning(...)
Macro used to log WARNING messages.
Definition: util-debug.h:249
NapatechGetAdapter
int NapatechGetAdapter(uint8_t port)
util-napatech.h
PacketCounters_::pkts
uint16_t pkts
Definition: util-napatech.c:187
PacketCounters_::drop_pkts
uint16_t drop_pkts
Definition: util-napatech.c:189
NapatechCurrentStats_::current_bytes
uint64_t current_bytes
Definition: util-napatech.h:52
HB_HIGHWATER
#define HB_HIGHWATER
Definition: util-napatech.c:691
PacketCounters_::drop_byte
uint16_t drop_byte
Definition: util-napatech.c:190
SCLogInfo
#define SCLogInfo(...)
Macro used to log INFORMATIONAL messages.
Definition: util-debug.h:224
ConfNodeLookupChild
ConfNode * ConfNodeLookupChild(const ConfNode *node, const char *name)
Lookup a child configuration node by name.
Definition: conf.c:786
THV_INIT_DONE
#define THV_INIT_DONE
Definition: threadvars.h:36
CONFIG_SPECIFIER
CONFIG_SPECIFIER
Definition: util-napatech.c:202
ByteExtractStringUint8
int ByteExtractStringUint8(uint8_t *res, int base, size_t len, const char *str)
Definition: util-byte.c:285
suricata-common.h
SCLogPerf
#define SCLogPerf(...)
Definition: util-debug.h:230
NapatechStartStats
void NapatechStartStats(void)
Definition: util-napatech.c:1185
FatalError
#define FatalError(...)
Definition: util-debug.h:502
MAX_HOSTBUFFERS
#define MAX_HOSTBUFFERS
Definition: util-napatech.c:208
tv
ThreadVars * tv
Definition: fuzz_decodepcapfile.c:32
threadvars.h
NapatechUseHWBypass
bool NapatechUseHWBypass(void)
Definition: runmode-napatech.c:75
NapatechCurrentStats_::current_drop_bytes
uint64_t current_drop_bytes
Definition: util-napatech.h:54
SCLogConfig
struct SCLogConfig_ SCLogConfig
Holds the config state used by the logging api.
str
#define str(s)
Definition: suricata-common.h:291
SCLogError
#define SCLogError(...)
Macro used to log ERROR messages.
Definition: util-debug.h:261
MAX_PORTS
#define MAX_PORTS
Definition: util-napatech.h:59
ConfNode_
Definition: conf.h:32
runmode-napatech.h
NapatechCurrentStats_::current_drop_packets
uint64_t current_drop_packets
Definition: util-napatech.h:53
suricata.h
NapatechSetupTraffic
uint32_t NapatechSetupTraffic(uint32_t first_stream, uint32_t last_stream)
Definition: util-napatech.c:1361
MAX_STREAMS
#define MAX_STREAMS
Definition: util-napatech.c:690
NapatechCurrentStats_
Definition: util-napatech.h:50
StatsSyncCountersIfSignalled
void StatsSyncCountersIfSignalled(ThreadVars *tv)
Definition: counters.c:461
PacketCounters_
Definition: util-napatech.c:186
TmThreadsCheckFlag
int TmThreadsCheckFlag(ThreadVars *tv, uint32_t flag)
Check if a thread flag is set.
Definition: tm-threads.c:91
NAPATECH_ERROR
#define NAPATECH_ERROR(status)
Definition: util-napatech.h:65
StatsRegisterCounter
uint16_t StatsRegisterCounter(const char *name, struct ThreadVars_ *tv)
Registers a normal, unqualified counter.
Definition: counters.c:971
THV_CLOSED
#define THV_CLOSED
Definition: threadvars.h:41
SCCalloc
#define SCCalloc(nm, sz)
Definition: util-mem.h:53
NapatechSetupNuma
bool NapatechSetupNuma(uint32_t stream, uint32_t numa)
Definition: util-napatech.c:1223