suricata
util-ebpf.c
Go to the documentation of this file.
1 /* Copyright (C) 2018 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 
18 /**
19  * \ingroup afppacket
20  *
21  * @{
22  */
23 
24 /**
25  * \file
26  *
27  * \author Eric Leblond <eric@regit.org>
28  *
29  * eBPF utility
30  *
31  */
32 
33 #define PCAP_DONT_INCLUDE_PCAP_BPF_H 1
34 #define SC_PCAP_DONT_INCLUDE_PCAP_H 1
35 
36 #include "suricata-common.h"
37 #include "flow-bypass.h"
38 
39 #ifdef HAVE_PACKET_EBPF
40 
41 #include <sys/time.h>
42 #include <sys/resource.h>
43 
44 #include "util-ebpf.h"
45 #include "util-cpu.h"
46 #include "util-device.h"
47 
48 #include "device-storage.h"
49 #include "flow-storage.h"
50 
51 #include <bpf/libbpf.h>
52 #include <bpf/bpf.h>
53 #include <net/if.h>
54 #include "config.h"
55 
56 #define BPF_MAP_MAX_COUNT 16
57 
58 #define BYPASSED_FLOW_TIMEOUT 60
59 
60 static int g_livedev_storage_id = -1;
61 static int g_flow_storage_id = -1;
62 
/* Mapping between an eBPF map name and the file descriptor libbpf
 * returned for it when the object file was loaded. */
struct bpf_map_item {
    char * name;  /* map name, heap copy made with SCStrdup() */
    int fd;       /* fd usable with the bpf_map_* syscall wrappers */
};

/* Per-interface eBPF bookkeeping, attached to the LiveDevice via the
 * storage slot g_livedev_storage_id. Holds the name->fd table for the
 * maps of the loaded eBPF object plus flow-hash entry counters. */
struct bpf_maps_info {
    struct bpf_map_item array[BPF_MAP_MAX_COUNT];
    /* number of entries currently in the v4/v6 kernel flow tables,
     * updated by the flow table walkers below */
    SC_ATOMIC_DECLARE(uint64_t, ipv4_hash_count);
    SC_ATOMIC_DECLARE(uint64_t, ipv6_hash_count);
    int last;  /* number of used slots in array */
};

/* Singly linked list of interfaces a bypassed flow has been seen on,
 * stored per Flow via the storage slot g_flow_storage_id. */
typedef struct BypassedIfaceList_ {
    LiveDevice *dev;
    struct BypassedIfaceList_ *next;
} BypassedIfaceList;
79 
80 static void BpfMapsInfoFree(void *bpf)
81 {
82  struct bpf_maps_info *bpfinfo = (struct bpf_maps_info *)bpf;
83  int i;
84  for (i = 0; i < bpfinfo->last; i ++) {
85  if (bpfinfo->array[i].name) {
86  SCFree(bpfinfo->array[i].name);
87  }
88  }
89  SCFree(bpfinfo);
90 }
91 
92 static void BypassedListFree(void *ifl)
93 {
94  BypassedIfaceList *mifl = (BypassedIfaceList *)ifl;
95  BypassedIfaceList *nifl;
96  while (mifl) {
97  nifl = mifl->next;
98  SCFree(mifl);
99  mifl = nifl;
100  }
101 }
102 
/**
 * Delete an entry from an eBPF map.
 *
 * Thin wrapper over bpf_map_delete_elem(); the return value is
 * deliberately ignored as callers have no recovery path.
 *
 * \param fd file descriptor of the map
 * \param key pointer to the key of the entry to remove
 */
static void EBPFDeleteKey(int fd, void *key)
{
    bpf_map_delete_elem(fd, key);
}
107 
108 static struct bpf_maps_info *EBPFGetBpfMap(const char *iface)
109 {
110  LiveDevice *livedev = LiveGetDevice(iface);
111  if (livedev == NULL)
112  return NULL;
113  void *data = LiveDevGetStorageById(livedev, g_livedev_storage_id);
114 
115  return (struct bpf_maps_info *)data;
116 }
117 
118 /**
119  * Get file descriptor of a map in the scope of a interface
120  *
121  * \param iface the interface where the map need to be looked for
122  * \param name the name of the map
123  * \return the file descriptor or -1 in case of error
124  */
125 int EBPFGetMapFDByName(const char *iface, const char *name)
126 {
127  int i;
128 
129  if (iface == NULL || name == NULL)
130  return -1;
131  struct bpf_maps_info *bpf_maps = EBPFGetBpfMap(iface);
132  if (bpf_maps == NULL)
133  return -1;
134 
135  for (i = 0; i < BPF_MAP_MAX_COUNT; i++) {
136  if (!bpf_maps->array[i].name)
137  continue;
138  if (!strcmp(bpf_maps->array[i].name, name)) {
139  SCLogDebug("Got fd %d for eBPF map '%s'", bpf_maps->array[i].fd, name);
140  return bpf_maps->array[i].fd;
141  }
142  }
143  return -1;
144 }
145 
146 /**
147  * Load a section of an eBPF file
148  *
149  * This function loads a section inside an eBPF and return
150  * via the parameter val the file descriptor that will be used to
151  * inject the eBPF code into the kernel via a syscall.
152  *
153  * \param path the path of the eBPF file to load
154  * \param section the section in the eBPF file to load
155  * \param val a pointer to an integer that will be the file desc
156  * \return -1 in case of error and 0 in case of success
157  */
158 int EBPFLoadFile(const char *iface, const char *path, const char * section,
159  int *val, uint8_t flags)
160 {
161  int err, pfd;
162  bool found = false;
163  struct bpf_object *bpfobj = NULL;
164  struct bpf_program *bpfprog = NULL;
165  struct bpf_map *map = NULL;
166 
167  if (iface == NULL)
168  return -1;
169  LiveDevice *livedev = LiveGetDevice(iface);
170  if (livedev == NULL)
171  return -1;
172 
173  if (! path) {
174  SCLogError(SC_ERR_INVALID_VALUE, "No file defined to load eBPF from");
175  return -1;
176  }
177 
178  /* Sending the eBPF code to the kernel requires a large amount of
179  * locked memory so we set it to unlimited to avoid a ENOPERM error */
180  struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
181  if (setrlimit(RLIMIT_MEMLOCK, &r) != 0) {
182  SCLogError(SC_ERR_MEM_ALLOC, "Unable to lock memory: %s (%d)",
183  strerror(errno), errno);
184  return -1;
185  }
186 
187  /* Open the eBPF file and parse it */
188  bpfobj = bpf_object__open(path);
189  long error = libbpf_get_error(bpfobj);
190  if (error) {
191  char err_buf[128];
192  libbpf_strerror(error, err_buf,
193  sizeof(err_buf));
195  "Unable to load eBPF objects in '%s': %s",
196  path, err_buf);
197  return -1;
198  }
199 
200  /* Let's check that our section is here */
201  bpf_object__for_each_program(bpfprog, bpfobj) {
202  const char *title = bpf_program__title(bpfprog, 0);
203  if (!strcmp(title, section)) {
204  if (flags & EBPF_SOCKET_FILTER) {
205  bpf_program__set_socket_filter(bpfprog);
206  } else {
207  bpf_program__set_xdp(bpfprog);
208  }
209  found = true;
210  break;
211  }
212  }
213 
214  if (found == false) {
216  "No section '%s' in '%s' file. Will not be able to use the file",
217  section,
218  path);
219  return -1;
220  }
221 
222  err = bpf_object__load(bpfobj);
223  if (err < 0) {
224  if (err == -EPERM) {
226  "Permission issue when loading eBPF object: "
227  "%s (%d)",
228  strerror(err),
229  err);
230  } else {
231  char buf[129];
232  libbpf_strerror(err, buf, sizeof(buf));
234  "Unable to load eBPF object: %s (%d)",
235  buf,
236  err);
237  }
238  return -1;
239  }
240 
241  /* Kernel and userspace are sharing data via map. Userspace access to the
242  * map via a file descriptor. So we need to store the map to fd info. For
243  * that we use bpf_maps_info:: */
244  struct bpf_maps_info *bpf_map_data = SCCalloc(1, sizeof(*bpf_map_data));
245  if (bpf_map_data == NULL) {
246  SCLogError(SC_ERR_MEM_ALLOC, "Can't allocate bpf map array");
247  return -1;
248  }
249  SC_ATOMIC_INIT(bpf_map_data->ipv4_hash_count);
250  SC_ATOMIC_INIT(bpf_map_data->ipv6_hash_count);
251 
252  /* Store the maps in bpf_maps_info:: */
253  bpf_map__for_each(map, bpfobj) {
254  if (bpf_map_data->last == BPF_MAP_MAX_COUNT) {
255  SCLogError(SC_ERR_NOT_SUPPORTED, "Too many BPF maps in eBPF files");
256  break;
257  }
258  SCLogDebug("Got a map '%s' with fd '%d'", bpf_map__name(map), bpf_map__fd(map));
259  bpf_map_data->array[bpf_map_data->last].fd = bpf_map__fd(map);
260  bpf_map_data->array[bpf_map_data->last].name = SCStrdup(bpf_map__name(map));
261  if (!bpf_map_data->array[bpf_map_data->last].name) {
262  SCLogError(SC_ERR_MEM_ALLOC, "Unable to duplicate map name");
263  BpfMapsInfoFree(bpf_map_data);
264  return -1;
265  }
266  bpf_map_data->last++;
267  }
268 
269  /* Attach the bpf_maps_info to the LiveDevice via the device storage */
270  LiveDevSetStorageById(livedev, g_livedev_storage_id, bpf_map_data);
271 
272  /* Finally we get the file descriptor for our eBPF program. We will use
273  * the fd to attach the program to the socket (eBPF case) or to the device
274  * (XDP case). */
275  pfd = bpf_program__fd(bpfprog);
276  if (pfd == -1) {
278  "Unable to find %s section", section);
279  return -1;
280  }
281 
282  *val = pfd;
283  return 0;
284 }
285 
/**
 * Attach a XDP program identified by its file descriptor to a device
 *
 * \param iface the name of interface
 * \param fd the eBPF/XDP program file descriptor
 * \param flags a flag to pass to attach function mostly used to set XDP mode
 * \return -1 in case of error, 0 if success
 */
int EBPFSetupXDP(const char *iface, int fd, uint8_t flags)
{
#ifdef HAVE_PACKET_XDP
    unsigned int ifindex = if_nametoindex(iface);
    if (ifindex == 0) {
        SCLogError(SC_ERR_INVALID_VALUE,
                   "Unknown interface '%s'", iface);
        return -1;
    }
    int err = bpf_set_link_xdp_fd(ifindex, fd, flags);
    if (err != 0) {
        char buf[129];
        libbpf_strerror(err, buf, sizeof(buf));
        SCLogError(SC_ERR_INVALID_VALUE, "Unable to set XDP on '%s': %s (%d)",
                   iface, buf, err);
        return -1;
    }
#endif
    return 0;
}
314 
315 /**
316  * Decide if an IPV4 flow needs to be timeouted
317  *
318  * The filter is maintaining for each half flow a struct pair:: structure in
319  * the kernel where it does accounting and timestamp update. So by comparing
320  * the current timestamp to the timestamp in the struct pair we can know that
321  * no packet have been seen on a half flow since a certain delay.
322  *
323  * If a per-CPU array map is used, this function has only a per-CPU view so
324  * the flow will be deleted from the table if EBPFBypassedFlowV4Timeout() return
325  * 1 for all CPUs.
326  *
327  * \param fd the file descriptor of the flow table map
328  * \param key the key of the element
329  * \param value the value of the element in the hash
330  * \param curtime the current time
331  * \return 1 if timeouted 0 if not
332  */
333 static int EBPFBypassedFlowV4Timeout(int fd, struct flowv4_keys *key,
334  struct pair *value, struct timespec *curtime)
335 {
336  SCLogDebug("Got curtime %" PRIu64 " and value %" PRIu64 " (sp:%d, dp:%d) %u",
337  curtime->tv_sec, value->time / 1000000000,
338  key->port16[0], key->port16[1], key->ip_proto
339  );
340 
341  if (curtime->tv_sec - value->time / 1000000000 > BYPASSED_FLOW_TIMEOUT) {
342  SCLogDebug("Got no packet for %d -> %d at %" PRIu64,
343  key->port16[0], key->port16[1], value->time);
344  return 1;
345  }
346  return 0;
347 }
348 
349 /**
350  * Decide if an IPV6 flow needs to be timeouted
351  *
352  * The filter is maintaining for each half flow a struct pair:: structure in
353  * the kernel where it does accounting and timestamp update. So by comparing
354  * the current timestamp to the timestamp in the struct pair we can know that
355  * no packet have been seen on a half flow since a certain delay.
356  *
357  * If a per-CPU array map is used, this function has only a per-CPU view so
358  * the flow will be deleted from the table if EBPFBypassedFlowV4Timeout() return
359  * 1 for all CPUs.
360  *
361  * \param fd the file descriptor of the flow table map
362  * \param key the key of the element
363  * \param value the value of the element in the hash
364  * \param curtime the current time
365  * \return 1 if timeouted 0 if not
366  */
367 static int EBPFBypassedFlowV6Timeout(int fd, struct flowv6_keys *key,
368  struct pair *value, struct timespec *curtime)
369 {
370  SCLogDebug("Got curtime %" PRIu64 " and value %" PRIu64 " (sp:%d, dp:%d)",
371  curtime->tv_sec, value->time / 1000000000,
372  key->port16[0], key->port16[1]
373  );
374 
375  if (curtime->tv_sec - value->time / 1000000000 > BYPASSED_FLOW_TIMEOUT) {
376  SCLogDebug("Got no packet for %d -> %d at %" PRIu64,
377  key->port16[0], key->port16[1], value->time);
378  return 1;
379  }
380  return 0;
381 }
382 
/**
 * Bypassed flows cleaning for IPv4
 *
 * This function iterates on all the flows of the IPv4 table
 * looking for timeouted flow to delete from the flow table.
 *
 * \param dev the live device whose flow table is walked
 * \param name name of the eBPF map holding the v4 flow table
 * \param flowstats accumulator for count/packets/bytes of purged flows
 * \param ctime current time used for the timeout comparison
 * \return 1 if at least one flow was purged, 0 otherwise
 */
static int EBPFForEachFlowV4Table(LiveDevice *dev, const char *name,
                                  struct flows_stats *flowstats,
                                  struct timespec *ctime)
{
    int mapfd = EBPFGetMapFDByName(dev->dev, name);
    struct flowv4_keys key = {}, next_key;
    int found = 0;
    unsigned int i;
    unsigned int nr_cpus = UtilCpuGetNumProcessorsConfigured();
    if (nr_cpus == 0) {
        SCLogWarning(SC_ERR_INVALID_VALUE, "Unable to get CPU count");
        return 0;
    }

    uint64_t hash_cnt = 0;
    /* iterate the kernel hash: get_next_key from the zeroed key yields the
     * first entry, then we chase next_key until the map is exhausted */
    while (bpf_map_get_next_key(mapfd, &key, &next_key) == 0) {
        bool purge = true;
        uint64_t pkts_cnt = 0;
        uint64_t bytes_cnt = 0;
        hash_cnt++;
        /* We use a per CPU structure so we will get a array of values.
         * NOTE(review): VLA sized by configured CPU count — assumes the
         * kernel map is per-CPU with that many slots; confirm map type. */
        struct pair values_array[nr_cpus];
        memset(values_array, 0, sizeof(values_array));
        int res = bpf_map_lookup_elem(mapfd, &key, values_array);
        if (res < 0) {
            /* entry vanished between get_next_key and lookup; skip it */
            SCLogDebug("no entry in v4 table for %d -> %d", key.port16[0], key.port16[1]);
            key = next_key;
            continue;
        }
        for (i = 0; i < nr_cpus; i++) {
            int ret = EBPFBypassedFlowV4Timeout(mapfd, &key, &values_array[i], ctime);
            if (ret) {
                /* no packet for the flow on this CPU, let's start accumulating
                   value so we can compute the counters */
                SCLogDebug("%d:%lu: Adding pkts %lu bytes %lu", i, values_array[i].time / 1000000000,
                            values_array[i].packets, values_array[i].bytes);
                pkts_cnt += values_array[i].packets;
                bytes_cnt += values_array[i].bytes;
            } else {
                /* Packet seen on one CPU so we keep the flow */
                purge = false;
                break;
            }
        }
        /* No packet seen, we discard the flow and do accounting */
        if (purge) {
            SCLogDebug("Got no packet for %d -> %d", key.port16[0], key.port16[1]);
            SCLogDebug("Dead with pkts %lu bytes %lu", pkts_cnt, bytes_cnt);
            flowstats->count++;
            flowstats->packets += pkts_cnt;
            flowstats->bytes += bytes_cnt;
            SC_ATOMIC_ADD(dev->bypassed, pkts_cnt);
            found = 1;
            /* delete AFTER we already fetched next_key, so iteration
             * is not invalidated by the removal */
            EBPFDeleteKey(mapfd, &key);
        }
        key = next_key;
    }

    /* publish the number of entries seen for the stats/unix-socket side */
    struct bpf_maps_info *bpfdata = LiveDevGetStorageById(dev, g_livedev_storage_id);
    if (bpfdata) {
        SC_ATOMIC_SET(bpfdata->ipv4_hash_count, hash_cnt);
    }

    return found;
}
454 
/**
 * Bypassed flows cleaning for IPv6
 *
 * This function iterates on all the flows of the IPv6 table
 * looking for timeouted flow to delete from the flow table.
 *
 * Same walk/purge logic as the IPv4 variant.
 *
 * \param dev the live device whose flow table is walked
 * \param name name of the eBPF map holding the v6 flow table
 * \param flowstats accumulator for count/packets/bytes of purged flows
 * \param ctime current time used for the timeout comparison
 * \return 1 if at least one flow was purged, 0 otherwise
 */
static int EBPFForEachFlowV6Table(LiveDevice *dev, const char *name,
                                  struct flows_stats *flowstats,
                                  struct timespec *ctime)
{
    int mapfd = EBPFGetMapFDByName(dev->dev, name);
    struct flowv6_keys key = {}, next_key;
    int found = 0;
    unsigned int i;
    unsigned int nr_cpus = UtilCpuGetNumProcessorsConfigured();
    if (nr_cpus == 0) {
        SCLogWarning(SC_ERR_INVALID_VALUE, "Unable to get CPU count");
        return 0;
    }

    uint64_t hash_cnt = 0;
    while (bpf_map_get_next_key(mapfd, &key, &next_key) == 0) {
        bool purge = true;
        uint64_t pkts_cnt = 0;
        uint64_t bytes_cnt = 0;
        hash_cnt++;
        /* per-CPU values: one struct pair per configured CPU */
        struct pair values_array[nr_cpus];
        memset(values_array, 0, sizeof(values_array));
        int res = bpf_map_lookup_elem(mapfd, &key, values_array);
        if (res < 0) {
            /* entry vanished between get_next_key and lookup; skip it */
            SCLogDebug("no entry in v6 table for %d -> %d", key.port16[0], key.port16[1]);
            key = next_key;
            continue;
        }
        for (i = 0; i < nr_cpus; i++) {
            int ret = EBPFBypassedFlowV6Timeout(mapfd, &key, &values_array[i], ctime);
            if (ret) {
                /* idle on this CPU: accumulate counters for accounting */
                pkts_cnt += values_array[i].packets;
                bytes_cnt += values_array[i].bytes;
            } else {
                /* packet seen on one CPU, keep the flow */
                purge = false;
                break;
            }
        }
        if (purge) {
            flowstats->count++;
            flowstats->packets += pkts_cnt;
            flowstats->bytes += bytes_cnt;
            SC_ATOMIC_ADD(dev->bypassed, pkts_cnt);
            found = 1;
            /* delete after next_key was fetched so iteration continues */
            EBPFDeleteKey(mapfd, &key);
        }
        key = next_key;
    }

    /* publish the number of entries seen for the stats/unix-socket side */
    struct bpf_maps_info *bpfdata = LiveDevGetStorageById(dev, g_livedev_storage_id);
    if (bpfdata) {
        SC_ATOMIC_SET(bpfdata->ipv6_hash_count, hash_cnt);
    }
    return found;
}
516 
/**
 * Flow timeout checking function
 *
 * This function is called by the Flow bypass manager to trigger removal
 * of entries in the kernel/userspace flow table if needed.
 *
 * \param bypassstats output: stats of the flows purged in this pass
 * \param curtime current time used for the timeout comparison
 * \return 1 if at least one flow was purged on any device, 0 otherwise
 */
int EBPFCheckBypassedFlowTimeout(struct flows_stats *bypassstats,
                                 struct timespec *curtime)
{
    struct flows_stats local_bypassstats = { 0, 0, 0};
    int ret = 0;
    int tcount = 0;
    LiveDevice *ldev = NULL, *ndev;

    while(LiveDeviceForEach(&ldev, &ndev)) {
        tcount = EBPFForEachFlowV4Table(ldev, "flow_table_v4",
                                        &local_bypassstats, curtime);
        if (tcount) {
            /* NOTE(review): v4 path assigns (=) while v6 below adds (+=);
             * with several devices, an earlier device's v4 stats get
             * overwritten here — confirm this is intended */
            bypassstats->count = local_bypassstats.count;
            bypassstats->packets = local_bypassstats.packets ;
            bypassstats->bytes = local_bypassstats.bytes;
            ret = 1;
        }
        /* reset the scratch stats before the v6 pass */
        memset(&local_bypassstats, 0, sizeof(local_bypassstats));
        tcount = EBPFForEachFlowV6Table(ldev, "flow_table_v6",
                                        &local_bypassstats, curtime);
        if (tcount) {
            bypassstats->count += local_bypassstats.count;
            bypassstats->packets += local_bypassstats.packets ;
            bypassstats->bytes += local_bypassstats.bytes;
            ret = 1;
        }
    }
    return ret;
}
553 
554 #ifdef BUILD_UNIX_SOCKET
555 TmEcode EBPFGetBypassedStats(json_t *cmd, json_t *answer, void *data)
556 {
557  LiveDevice *ldev = NULL, *ndev;
558 
559  json_t *ifaces = NULL;
560  while(LiveDeviceForEach(&ldev, &ndev)) {
561  struct bpf_maps_info *bpfdata = LiveDevGetStorageById(ldev, g_livedev_storage_id);
562  if (bpfdata) {
563  uint64_t ipv4_hash_count = SC_ATOMIC_GET(bpfdata->ipv4_hash_count);
564  uint64_t ipv6_hash_count = SC_ATOMIC_GET(bpfdata->ipv6_hash_count);
565  json_t *iface = json_object();
566  if (ifaces == NULL) {
567  ifaces = json_object();
568  if (ifaces == NULL) {
569  json_object_set_new(answer, "message",
570  json_string("internal error at json object creation"));
571  return TM_ECODE_FAILED;
572  }
573  }
574  json_object_set_new(iface, "ipv4_count", json_integer(ipv4_hash_count));
575  json_object_set_new(iface, "ipv6_count", json_integer(ipv6_hash_count));
576  json_object_set_new(ifaces, ldev->dev, iface);
577  }
578  }
579  if (ifaces) {
580  json_object_set_new(answer, "message", ifaces);
582  }
583 
584  json_object_set_new(answer, "message",
585  json_string("No interface using eBPF bypass"));
587 }
588 #endif
589 
/**
 * Register the storage slots used by the eBPF extension.
 *
 * Registers a LiveDevice storage slot ("bpfmap") that holds the
 * struct bpf_maps_info for each interface (freed by BpfMapsInfoFree)
 * and a Flow storage slot ("bypassedlist") that holds the
 * BypassedIfaceList (freed by BypassedListFree). The returned ids
 * replace the -1 defaults used elsewhere in this file.
 */
void EBPFRegisterExtension(void)
{
    g_livedev_storage_id = LiveDevStorageRegister("bpfmap", sizeof(void *), NULL, BpfMapsInfoFree);
    g_flow_storage_id = FlowStorageRegister("bypassedlist", sizeof(void *), NULL, BypassedListFree);
}
595 
596 
597 #ifdef HAVE_PACKET_XDP
598 
599 static uint32_t g_redirect_iface_cpu_counter = 0;
600 
601 static int EBPFAddCPUToMap(const char *iface, uint32_t i)
602 {
603  int cpumap = EBPFGetMapFDByName(iface, "cpu_map");
604  uint32_t queue_size = 4096;
605  int ret;
606 
607  if (cpumap < 0) {
608  SCLogError(SC_ERR_AFP_CREATE, "Can't find cpu_map");
609  return -1;
610  }
611  ret = bpf_map_update_elem(cpumap, &i, &queue_size, 0);
612  if (ret) {
613  SCLogError(SC_ERR_AFP_CREATE, "Create CPU entry failed (err:%d)", ret);
614  return -1;
615  }
616  int cpus_available = EBPFGetMapFDByName(iface, "cpus_available");
617  if (cpus_available < 0) {
618  SCLogError(SC_ERR_AFP_CREATE, "Can't find cpus_available map");
619  return -1;
620  }
621 
622  ret = bpf_map_update_elem(cpus_available, &g_redirect_iface_cpu_counter, &i, 0);
623  if (ret) {
624  SCLogError(SC_ERR_AFP_CREATE, "Create CPU entry failed (err:%d)", ret);
625  return -1;
626  }
627  return 0;
628 }
629 
630 static void EBPFRedirectMapAddCPU(int i, void *data)
631 {
632  if (EBPFAddCPUToMap(data, i) < 0) {
634  "Unable to add CPU %d to set", i);
635  } else {
636  g_redirect_iface_cpu_counter++;
637  }
638 }
639 
640 void EBPFBuildCPUSet(ConfNode *node, char *iface)
641 {
642  uint32_t key0 = 0;
643  int mapfd = EBPFGetMapFDByName(iface, "cpus_count");
644  if (mapfd < 0) {
646  "Unable to find 'cpus_count' map");
647  return;
648  }
649  g_redirect_iface_cpu_counter = 0;
650  if (node == NULL) {
651  bpf_map_update_elem(mapfd, &key0, &g_redirect_iface_cpu_counter,
652  BPF_ANY);
653  return;
654  }
655  BuildCpusetWithCallback("xdp-cpu-redirect", node,
656  EBPFRedirectMapAddCPU,
657  iface);
658  bpf_map_update_elem(mapfd, &key0, &g_redirect_iface_cpu_counter,
659  BPF_ANY);
660 }
661 
662 int EBPFSetPeerIface(const char *iface, const char *out_iface)
663 {
664  int mapfd = EBPFGetMapFDByName(iface, "tx_peer");
665  if (mapfd < 0) {
667  "Unable to find 'tx_peer' map");
668  return -1;
669  }
670  int intmapfd = EBPFGetMapFDByName(iface, "tx_peer_int");
671  if (intmapfd < 0) {
673  "Unable to find 'tx_peer_int' map");
674  return -1;
675  }
676 
677  int key0 = 0;
678  unsigned int peer_index = if_nametoindex(out_iface);
679  if (peer_index == 0) {
680  SCLogError(SC_ERR_INVALID_VALUE, "No iface '%s'", out_iface);
681  return -1;
682  }
683  int ret = bpf_map_update_elem(mapfd, &key0, &peer_index, BPF_ANY);
684  if (ret) {
685  SCLogError(SC_ERR_AFP_CREATE, "Create peer entry failed (err:%d)", ret);
686  return -1;
687  }
688  ret = bpf_map_update_elem(intmapfd, &key0, &peer_index, BPF_ANY);
689  if (ret) {
690  SCLogError(SC_ERR_AFP_CREATE, "Create peer entry failed (err:%d)", ret);
691  return -1;
692  }
693  return 0;
694 }
695 
696 int EBPFUpdateFlow(Flow *f, Packet *p)
697 {
698  BypassedIfaceList *ifl = (BypassedIfaceList *)FlowGetStorageById(f, g_flow_storage_id);
699  if (ifl == NULL) {
700  ifl = SCCalloc(1, sizeof(*ifl));
701  if (ifl == NULL) {
702  return 0;
703  }
704  ifl->dev = p->livedev;
705  FlowSetStorageById(f, g_flow_storage_id, ifl);
706  return 1;
707  }
708  /* Look for packet iface in the list */
709  BypassedIfaceList *ldev = ifl;
710  while (ldev) {
711  if (p->livedev == ldev->dev) {
712  return 1;
713  }
714  ldev = ldev->next;
715  }
716  /* Call bypass function if ever not in the list */
717  p->BypassPacketsFlow(p);
718 
719  /* Add iface to the list */
720  BypassedIfaceList *nifl = SCCalloc(1, sizeof(*nifl));
721  if (nifl == NULL) {
722  return 0;
723  }
724  nifl->dev = p->livedev;
725  nifl->next = ifl;
726  FlowSetStorageById(f, g_flow_storage_id, nifl);
727  return 1;
728 }
729 
730 #endif /* HAVE_PACKET_XDP */
731 
732 #endif
uint16_t flags
#define SCLogDebug(...)
Definition: util-debug.h:335
int(* BypassPacketsFlow)(struct Packet_ *)
Definition: decode.h:490
#define SC_ATOMIC_DECLARE(type, name)
wrapper to declare an atomic variable including a (spin) lock to protect it.
Definition: util-atomic.h:57
uint16_t UtilCpuGetNumProcessorsConfigured(void)
Get the number of cpus configured in the system.
Definition: util-cpu.c:58
struct HtpBodyChunk_ * next
LiveDevice * LiveDeviceForEach(LiveDevice **ldev, LiveDevice **ndev)
Definition: util-device.c:420
int LiveDevSetStorageById(LiveDevice *d, int id, void *ptr)
Store a pointer in a given LiveDevice storage.
uint64_t count
Definition: flow-bypass.h:28
#define SC_ATOMIC_ADD(name, val)
add a value to our atomic variable
Definition: util-atomic.h:108
void BuildCpusetWithCallback(const char *name, ConfNode *node, void(*Callback)(int i, void *data), void *data)
Definition: util-affinity.c:98
char * dev
Definition: util-device.h:41
#define SC_ATOMIC_INIT(name)
Initialize the previously declared atomic variable and its lock.
Definition: util-atomic.h:82
#define SCCalloc(nm, a)
Definition: util-mem.h:205
#define SCLogError(err_code,...)
Macro used to log ERROR messages.
Definition: util-debug.h:294
int FlowSetStorageById(Flow *f, int id, void *ptr)
Definition: flow-storage.c:44
#define SCReturnInt(x)
Definition: util-debug.h:341
LiveDevice * LiveGetDevice(const char *name)
Get a pointer to the device at idx.
Definition: util-device.c:248
#define SCLogWarning(err_code,...)
Macro used to log WARNING messages.
Definition: util-debug.h:281
Definition: conf.h:32
void * LiveDevGetStorageById(LiveDevice *d, int id)
Get a value from a given LiveDevice storage.
#define SC_ATOMIC_SET(name, val)
Set the value for the atomic variable.
Definition: util-atomic.h:208
int FlowStorageRegister(const char *name, const unsigned int size, void *(*Alloc)(unsigned int), void(*Free)(void *))
Definition: flow-storage.c:65
#define SCFree(a)
Definition: util-mem.h:236
PoolThreadReserved res
uint64_t bytes
Definition: flow-bypass.h:30
#define SC_ATOMIC_GET(name)
Get the value from the atomic variable.
Definition: util-atomic.h:193
int LiveDevStorageRegister(const char *name, const unsigned int size, void *(*Alloc)(unsigned int), void(*Free)(void *))
Register a LiveDevice storage.
void * FlowGetStorageById(Flow *f, int id)
Definition: flow-storage.c:39
#define SCStrdup(a)
Definition: util-mem.h:220
struct LiveDevice_ * livedev
Definition: decode.h:558
Flow data structure.
Definition: flow.h:327
uint64_t packets
Definition: flow-bypass.h:29