suricata
flow-hash.c
Go to the documentation of this file.
1 /* Copyright (C) 2007-2013 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 
18 /**
19  * \file
20  *
21  * \author Victor Julien <victor@inliniac.net>
22  * \author Pablo Rincon Crespo <pablo.rincon.crespo@gmail.com>
23  *
24  * Flow Hashing functions.
25  */
26 
27 #include "suricata-common.h"
28 #include "threads.h"
29 
30 #include "decode.h"
31 #include "detect-engine-state.h"
32 
33 #include "flow.h"
34 #include "flow-hash.h"
35 #include "flow-util.h"
36 #include "flow-private.h"
37 #include "flow-manager.h"
38 #include "flow-storage.h"
39 #include "app-layer-parser.h"
40 
41 #include "util-time.h"
42 #include "util-debug.h"
43 
44 #include "util-hash-lookup3.h"
45 
46 #include "conf.h"
47 #include "output.h"
48 #include "output-flow.h"
49 
50 #define FLOW_DEFAULT_FLOW_PRUNE 5
51 
52 SC_ATOMIC_EXTERN(unsigned int, flow_prune_idx);
53 SC_ATOMIC_EXTERN(unsigned int, flow_flags);
54 
55 static Flow *FlowGetUsedFlow(ThreadVars *tv, DecodeThreadVars *dtv);
56 
57 /** \brief compare two raw ipv6 addrs
58  *
59  * \note we don't care about the real ipv6 ip's, this is just
60  * to consistently fill the FlowHashKey6 struct, without all
61  * the SCNtohl calls.
62  *
63  * \warning do not use elsewhere unless you know what you're doing.
64  * detect-engine-address-ipv6.c's AddressIPv6GtU32 is likely
65  * what you are looking for.
66  */
static inline int FlowHashRawAddressIPv6GtU32(const uint32_t *a, const uint32_t *b)
{
    /* walk the 4 u32 words; the first word that differs decides the order */
    for (int idx = 0; idx < 4; idx++) {
        if (a[idx] != b[idx])
            return (a[idx] > b[idx]) ? 1 : 0;
    }

    /* all words equal: not greater */
    return 0;
}
80 
/** \brief IPv4 flow hash key.
 *
 *  The anonymous struct members are filled in address/port-sorted order by
 *  FlowGetHash() so both directions of a flow produce the same key. The
 *  union overlays a u32 view so the whole key can be fed to hashword()
 *  as exactly 5 32-bit words. */
typedef struct FlowHashKey4_ {
    union {
        struct {
            uint32_t addrs[2];
            uint16_t ports[2];
            uint16_t proto; /**< u16 so proto and recur add up to u32 */
            uint16_t recur; /**< u16 so proto and recur add up to u32 */
            uint16_t vlan_id[2];
        };
        const uint32_t u32[5];
    };
} FlowHashKey4;
93 
/** \brief IPv6 flow hash key.
 *
 *  Same layout idea as FlowHashKey4, with full 128-bit addresses stored in
 *  src/dst slots ordered by FlowHashRawAddressIPv6GtU32(). The u32 view
 *  covers the key as exactly 11 32-bit words for hashword(). */
typedef struct FlowHashKey6_ {
    union {
        struct {
            uint32_t src[4], dst[4];
            uint16_t ports[2];
            uint16_t proto; /**< u16 so proto and recur add up to u32 */
            uint16_t recur; /**< u16 so proto and recur add up to u32 */
            uint16_t vlan_id[2];
        };
        const uint32_t u32[11];
    };
} FlowHashKey6;
106 
107 /* calculate the hash key for this packet
108  *
109  * we're using:
110  * hash_rand -- set at init time
111  * source port
112  * destination port
113  * source address
114  * destination address
115  * recursion level -- for tunnels, make sure different tunnel layers can
116  * never get mixed up.
117  *
118  * For ICMP we only consider UNREACHABLE errors atm.
119  */
static inline uint32_t FlowGetHash(const Packet *p)
{
    uint32_t hash = 0;

    if (p->ip4h != NULL) {
        if (p->tcph != NULL || p->udph != NULL) {
            /* TCP/UDP over IPv4: key on sorted addrs + sorted ports so both
             * directions of the flow hash to the same bucket */
            FlowHashKey4 fhk;

            /* ai selects slot order: lower address always lands in addrs[0] */
            int ai = (p->src.addr_data32[0] > p->dst.addr_data32[0]);
            fhk.addrs[1-ai] = p->src.addr_data32[0];
            fhk.addrs[ai] = p->dst.addr_data32[0];

            const int pi = (p->sp > p->dp);
            fhk.ports[1-pi] = p->sp;
            fhk.ports[pi] = p->dp;

            fhk.proto = (uint16_t)p->proto;
            fhk.recur = (uint16_t)p->recursion_level;
            fhk.vlan_id[0] = p->vlan_id[0];
            fhk.vlan_id[1] = p->vlan_id[1];

            hash = hashword(fhk.u32, 5, flow_config.hash_rand);

        } else if (ICMPV4_DEST_UNREACH_IS_VALID(p)) {
            /* ICMP unreachable error: hash on the *embedded* packet's tuple so
             * the error ends up in the same flow as the traffic it reports on */
            uint32_t psrc = IPV4_GET_RAW_IPSRC_U32(ICMPV4_GET_EMB_IPV4(p));
            uint32_t pdst = IPV4_GET_RAW_IPDST_U32(ICMPV4_GET_EMB_IPV4(p));
            FlowHashKey4 fhk;

            const int ai = (psrc > pdst);
            fhk.addrs[1-ai] = psrc;
            fhk.addrs[ai] = pdst;

            const int pi = (p->icmpv4vars.emb_sport > p->icmpv4vars.emb_dport);
            fhk.ports[1-pi] = p->icmpv4vars.emb_sport;
            fhk.ports[pi] = p->icmpv4vars.emb_dport;

            /* proto of the embedded packet, not ICMP itself */
            fhk.proto = (uint16_t)ICMPV4_GET_EMB_PROTO(p);
            fhk.recur = (uint16_t)p->recursion_level;
            fhk.vlan_id[0] = p->vlan_id[0];
            fhk.vlan_id[1] = p->vlan_id[1];

            hash = hashword(fhk.u32, 5, flow_config.hash_rand);

        } else {
            /* other IPv4 protocols: no ports, use fixed sentinel port values
             * so the key layout stays the same 5 words */
            FlowHashKey4 fhk;
            const int ai = (p->src.addr_data32[0] > p->dst.addr_data32[0]);
            fhk.addrs[1-ai] = p->src.addr_data32[0];
            fhk.addrs[ai] = p->dst.addr_data32[0];
            fhk.ports[0] = 0xfeed;
            fhk.ports[1] = 0xbeef;
            fhk.proto = (uint16_t)p->proto;
            fhk.recur = (uint16_t)p->recursion_level;
            fhk.vlan_id[0] = p->vlan_id[0];
            fhk.vlan_id[1] = p->vlan_id[1];

            hash = hashword(fhk.u32, 5, flow_config.hash_rand);
        }
    } else if (p->ip6h != NULL) {
        FlowHashKey6 fhk;
        /* order the two 128-bit addresses consistently (greater one in src[]),
         * mirroring the u32-slot sorting done for IPv4 above */
        if (FlowHashRawAddressIPv6GtU32(p->src.addr_data32, p->dst.addr_data32)) {
            fhk.src[0] = p->src.addr_data32[0];
            fhk.src[1] = p->src.addr_data32[1];
            fhk.src[2] = p->src.addr_data32[2];
            fhk.src[3] = p->src.addr_data32[3];
            fhk.dst[0] = p->dst.addr_data32[0];
            fhk.dst[1] = p->dst.addr_data32[1];
            fhk.dst[2] = p->dst.addr_data32[2];
            fhk.dst[3] = p->dst.addr_data32[3];
        } else {
            fhk.src[0] = p->dst.addr_data32[0];
            fhk.src[1] = p->dst.addr_data32[1];
            fhk.src[2] = p->dst.addr_data32[2];
            fhk.src[3] = p->dst.addr_data32[3];
            fhk.dst[0] = p->src.addr_data32[0];
            fhk.dst[1] = p->src.addr_data32[1];
            fhk.dst[2] = p->src.addr_data32[2];
            fhk.dst[3] = p->src.addr_data32[3];
        }

        const int pi = (p->sp > p->dp);
        fhk.ports[1-pi] = p->sp;
        fhk.ports[pi] = p->dp;
        fhk.proto = (uint16_t)p->proto;
        fhk.recur = (uint16_t)p->recursion_level;
        fhk.vlan_id[0] = p->vlan_id[0];
        fhk.vlan_id[1] = p->vlan_id[1];

        hash = hashword(fhk.u32, 11, flow_config.hash_rand);
    }
    /* non-IP packets fall through with hash == 0 */

    return hash;
}
212 
/* Since two or more flows can have the same hash key, we need to compare
 * the flow with the current flow key. */
/* Full 5-tuple compare: matches the packet against the flow in either
 * direction (src/dst and sp/dp swapped), plus proto, tunnel recursion
 * level and both vlan ids. */
#define CMP_FLOW(f1,f2) \
    (((CMP_ADDR(&(f1)->src, &(f2)->src) && \
       CMP_ADDR(&(f1)->dst, &(f2)->dst) && \
       CMP_PORT((f1)->sp, (f2)->sp) && CMP_PORT((f1)->dp, (f2)->dp)) || \
      (CMP_ADDR(&(f1)->src, &(f2)->dst) && \
       CMP_ADDR(&(f1)->dst, &(f2)->src) && \
       CMP_PORT((f1)->sp, (f2)->dp) && CMP_PORT((f1)->dp, (f2)->sp))) && \
     (f1)->proto == (f2)->proto && \
     (f1)->recursion_level == (f2)->recursion_level && \
     (f1)->vlan_id[0] == (f2)->vlan_id[0] && \
     (f1)->vlan_id[1] == (f2)->vlan_id[1])
/* ICMP variant of CMP_FLOW: ICMP has no ports, so the ICMP type fields
 * (icmp_s.type / icmp_d.type) stand in for sp/dp in the comparison. */
#define CMP_FLOW_ICMP(f1,f2) \
    (((CMP_ADDR(&(f1)->src, &(f2)->src) && \
       CMP_ADDR(&(f1)->dst, &(f2)->dst) && \
       CMP_PORT((f1)->icmp_s.type, (f2)->icmp_s.type) && CMP_PORT((f1)->icmp_d.type, (f2)->icmp_d.type)) || \
      (CMP_ADDR(&(f1)->src, &(f2)->dst) && \
       CMP_ADDR(&(f1)->dst, &(f2)->src) && \
       CMP_PORT((f1)->icmp_d.type, (f2)->icmp_s.type) && CMP_PORT((f1)->icmp_s.type, (f2)->icmp_d.type))) && \
     (f1)->proto == (f2)->proto && \
     (f1)->recursion_level == (f2)->recursion_level && \
     (f1)->vlan_id[0] == (f2)->vlan_id[0] && \
     (f1)->vlan_id[1] == (f2)->vlan_id[1])
237 
238 /**
239  * \brief See if a ICMP packet belongs to a flow by comparing the embedded
240  * packet in the ICMP error packet to the flow.
241  *
242  * \param f flow
243  * \param p ICMP packet
244  *
245  * \retval 1 match
246  * \retval 0 no match
247  */
248 static inline int FlowCompareICMPv4(Flow *f, const Packet *p)
249 {
251  /* first check the direction of the flow, in other words, the client ->
252  * server direction as it's most likely the ICMP error will be a
253  * response to the clients traffic */
254  if ((f->src.addr_data32[0] == IPV4_GET_RAW_IPSRC_U32( ICMPV4_GET_EMB_IPV4(p) )) &&
255  (f->dst.addr_data32[0] == IPV4_GET_RAW_IPDST_U32( ICMPV4_GET_EMB_IPV4(p) )) &&
256  f->sp == p->icmpv4vars.emb_sport &&
257  f->dp == p->icmpv4vars.emb_dport &&
258  f->proto == ICMPV4_GET_EMB_PROTO(p) &&
259  f->recursion_level == p->recursion_level &&
260  f->vlan_id[0] == p->vlan_id[0] &&
261  f->vlan_id[1] == p->vlan_id[1])
262  {
263  return 1;
264 
265  /* check the less likely case where the ICMP error was a response to
266  * a packet from the server. */
267  } else if ((f->dst.addr_data32[0] == IPV4_GET_RAW_IPSRC_U32( ICMPV4_GET_EMB_IPV4(p) )) &&
268  (f->src.addr_data32[0] == IPV4_GET_RAW_IPDST_U32( ICMPV4_GET_EMB_IPV4(p) )) &&
269  f->dp == p->icmpv4vars.emb_sport &&
270  f->sp == p->icmpv4vars.emb_dport &&
271  f->proto == ICMPV4_GET_EMB_PROTO(p) &&
272  f->recursion_level == p->recursion_level &&
273  f->vlan_id[0] == p->vlan_id[0] &&
274  f->vlan_id[1] == p->vlan_id[1])
275  {
276  return 1;
277  }
278 
279  /* no match, fall through */
280  } else {
281  /* just treat ICMP as a normal proto for now */
282  return CMP_FLOW_ICMP(f, p);
283  }
284 
285  return 0;
286 }
287 
289 {
290  p->flags |= PKT_WANTS_FLOW;
291  p->flow_hash = FlowGetHash(p);
292 }
293 
294 int TcpSessionPacketSsnReuse(const Packet *p, const Flow *f, void *tcp_ssn);
295 
296 static inline int FlowCompare(Flow *f, const Packet *p)
297 {
298  if (p->proto == IPPROTO_ICMP) {
299  return FlowCompareICMPv4(f, p);
300  } else if (p->proto == IPPROTO_TCP) {
301  if (CMP_FLOW(f, p) == 0)
302  return 0;
303 
304  /* if this session is 'reused', we don't return it anymore,
305  * so return false on the compare */
306  if (f->flags & FLOW_TCP_REUSED)
307  return 0;
308 
309  return 1;
310  } else {
311  return CMP_FLOW(f, p);
312  }
313 }
314 
315 /**
316  * \brief Check if we should create a flow based on a packet
317  *
318  * We use this check to filter out flow creation based on:
319  * - ICMP error messages
320  *
321  * \param p packet
322  * \retval 1 true
323  * \retval 0 false
324  */
325 static inline int FlowCreateCheck(const Packet *p)
326 {
327  if (PKT_IS_ICMPV4(p)) {
328  if (ICMPV4_IS_ERROR_MSG(p)) {
329  return 0;
330  }
331  }
332 
333  return 1;
334 }
335 
/** \internal
 *  \brief bump the per-protocol "new flow" stat for the creating thread
 *
 *  Called from FlowGetNew() once a flow has been handed out. Under
 *  UNITTESTS the tv/dtv pair may be NULL, hence the guard is only
 *  compiled in there; in production builds both are assumed valid.
 */
static inline void FlowUpdateCounter(ThreadVars *tv, DecodeThreadVars *dtv,
        uint8_t proto)
{
#ifdef UNITTESTS
    if (tv && dtv) {
#endif
        switch (proto){
            case IPPROTO_UDP:
                StatsIncr(tv, dtv->counter_flow_udp);
                break;
            case IPPROTO_TCP:
                StatsIncr(tv, dtv->counter_flow_tcp);
                break;
            case IPPROTO_ICMP:
                StatsIncr(tv, dtv->counter_flow_icmp4);
                break;
            case IPPROTO_ICMPV6:
                StatsIncr(tv, dtv->counter_flow_icmp6);
                break;
            /* other protocols: no dedicated counter */
        }
#ifdef UNITTESTS
    }
#endif
}
360 
361 /**
362  * \brief Get a new flow
363  *
364  * Get a new flow. We're checking memcap first and will try to make room
365  * if the memcap is reached.
366  *
367  * \param tv thread vars
368  * \param dtv decode thread vars (for flow log api thread data)
369  *
370  * \retval f *LOCKED* flow on succes, NULL on error.
371  */
372 static Flow *FlowGetNew(ThreadVars *tv, DecodeThreadVars *dtv, const Packet *p)
373 {
374  Flow *f = NULL;
375 
376  if (FlowCreateCheck(p) == 0) {
377  return NULL;
378  }
379 
380  /* get a flow from the spare queue */
382  if (f == NULL) {
383  /* If we reached the max memcap, we get a used flow */
384  if (!(FLOW_CHECK_MEMCAP(sizeof(Flow) + FlowStorageSize()))) {
385  /* declare state of emergency */
386  if (!(SC_ATOMIC_GET(flow_flags) & FLOW_EMERGENCY)) {
387  SC_ATOMIC_OR(flow_flags, FLOW_EMERGENCY);
388 
390 
391  /* under high load, waking up the flow mgr each time leads
392  * to high cpu usage. Flows are not timed out much faster if
393  * we check a 1000 times a second. */
395  }
396 
397  f = FlowGetUsedFlow(tv, dtv);
398  if (f == NULL) {
399  /* max memcap reached, so increments the counter */
400  if (tv != NULL && dtv != NULL) {
401  StatsIncr(tv, dtv->counter_flow_memcap);
402  }
403 
404  /* very rare, but we can fail. Just giving up */
405  return NULL;
406  }
407 
408  /* freed a flow, but it's unlocked */
409  } else {
410  /* now see if we can alloc a new flow */
411  f = FlowAlloc();
412  if (f == NULL) {
413  if (tv != NULL && dtv != NULL) {
414  StatsIncr(tv, dtv->counter_flow_memcap);
415  }
416  return NULL;
417  }
418 
419  /* flow is initialized but *unlocked* */
420  }
421  } else {
422  /* flow has been recycled before it went into the spare queue */
423 
424  /* flow is initialized (recylced) but *unlocked* */
425  }
426 
427  FLOWLOCK_WRLOCK(f);
428  FlowUpdateCounter(tv, dtv, p->proto);
429  return f;
430 }
431 
/** \internal
 *  \brief replace a flow whose TCP session is being reused by a new flow
 *
 *  The old flow is tagged FLOW_TCP_REUSED so FlowCompare() skips it from
 *  now on; a fresh flow for the packet is created and inserted at the head
 *  of the same bucket. The thread_id affinity is carried over.
 *
 *  \param fb    hash bucket, *LOCKED* by the caller and kept locked
 *  \param old_f existing flow, *LOCKED* on entry; unlocked in here
 *  \param hash  precomputed packet flow hash
 *  \param p     packet
 *
 *  \retval f new *LOCKED* flow, or NULL if FlowGetNew() failed
 */
static Flow *TcpReuseReplace(ThreadVars *tv, DecodeThreadVars *dtv,
        FlowBucket *fb, Flow *old_f,
        const uint32_t hash, const Packet *p)
{
    /* tag flow as reused so future lookups won't find it */
    old_f->flags |= FLOW_TCP_REUSED;
    /* get some settings that we move over to the new flow */
    FlowThreadId thread_id = old_f->thread_id;

    /* since fb lock is still held this flow won't be found until we are done */
    FLOWLOCK_UNLOCK(old_f);

    /* Get a new flow. It will be either a locked flow or NULL */
    Flow *f = FlowGetNew(tv, dtv, p);
    if (f == NULL) {
        return NULL;
    }

    /* flow is locked */

    /* put at the start of the list; fb->head is non-NULL here because
     * old_f is still linked into this bucket */
    f->hnext = fb->head;
    fb->head->hprev = f;
    fb->head = f;

    /* initialize and return */
    FlowInit(f, p);
    f->flow_hash = hash;
    f->fb = fb;

    f->thread_id = thread_id;
    return f;
}
465 
466 /** \brief Get Flow for packet
467  *
468  * Hash retrieval function for flows. Looks up the hash bucket containing the
469  * flow pointer. Then compares the packet with the found flow to see if it is
470  * the flow we need. If it isn't, walk the list until the right flow is found.
471  *
472  * If the flow is not found or the bucket was emtpy, a new flow is taken from
473  * the queue. FlowDequeue() will alloc new flows as long as we stay within our
474  * memcap limit.
475  *
476  * The p->flow pointer is updated to point to the flow.
477  *
478  * \param tv thread vars
479  * \param dtv decode thread vars (for flow log api thread data)
480  *
481  * \retval f *LOCKED* flow or NULL
482  */
484 {
485  Flow *f = NULL;
486 
487  /* get our hash bucket and lock it */
488  const uint32_t hash = p->flow_hash;
489  FlowBucket *fb = &flow_hash[hash % flow_config.hash_size];
490  FBLOCK_LOCK(fb);
491 
492  SCLogDebug("fb %p fb->head %p", fb, fb->head);
493 
494  /* see if the bucket already has a flow */
495  if (fb->head == NULL) {
496  f = FlowGetNew(tv, dtv, p);
497  if (f == NULL) {
498  FBLOCK_UNLOCK(fb);
499  return NULL;
500  }
501 
502  /* flow is locked */
503  fb->head = f;
504  fb->tail = f;
505 
506  /* got one, now lock, initialize and return */
507  FlowInit(f, p);
508  f->flow_hash = hash;
509  f->fb = fb;
511 
512  FlowReference(dest, f);
513 
514  FBLOCK_UNLOCK(fb);
515  return f;
516  }
517 
518  /* ok, we have a flow in the bucket. Let's find out if it is our flow */
519  f = fb->head;
520 
521  /* see if this is the flow we are looking for */
522  if (FlowCompare(f, p) == 0) {
523  Flow *pf = NULL; /* previous flow */
524 
525  while (f) {
526  pf = f;
527  f = f->hnext;
528 
529  if (f == NULL) {
530  f = pf->hnext = FlowGetNew(tv, dtv, p);
531  if (f == NULL) {
532  FBLOCK_UNLOCK(fb);
533  return NULL;
534  }
535  fb->tail = f;
536 
537  /* flow is locked */
538 
539  f->hprev = pf;
540 
541  /* initialize and return */
542  FlowInit(f, p);
543  f->flow_hash = hash;
544  f->fb = fb;
546 
547  FlowReference(dest, f);
548 
549  FBLOCK_UNLOCK(fb);
550  return f;
551  }
552 
553  if (FlowCompare(f, p) != 0) {
554  /* we found our flow, lets put it on top of the
555  * hash list -- this rewards active flows */
556  if (f->hnext) {
557  f->hnext->hprev = f->hprev;
558  }
559  if (f->hprev) {
560  f->hprev->hnext = f->hnext;
561  }
562  if (f == fb->tail) {
563  fb->tail = f->hprev;
564  }
565 
566  f->hnext = fb->head;
567  f->hprev = NULL;
568  fb->head->hprev = f;
569  fb->head = f;
570 
571  /* found our flow, lock & return */
572  FLOWLOCK_WRLOCK(f);
573  if (unlikely(TcpSessionPacketSsnReuse(p, f, f->protoctx) == 1)) {
574  f = TcpReuseReplace(tv, dtv, fb, f, hash, p);
575  if (f == NULL) {
576  FBLOCK_UNLOCK(fb);
577  return NULL;
578  }
579  }
580 
581  FlowReference(dest, f);
582 
583  FBLOCK_UNLOCK(fb);
584  return f;
585  }
586  }
587  }
588 
589  /* lock & return */
590  FLOWLOCK_WRLOCK(f);
591  if (unlikely(TcpSessionPacketSsnReuse(p, f, f->protoctx) == 1)) {
592  f = TcpReuseReplace(tv, dtv, fb, f, hash, p);
593  if (f == NULL) {
594  FBLOCK_UNLOCK(fb);
595  return NULL;
596  }
597  }
598 
599  FlowReference(dest, f);
600 
601  FBLOCK_UNLOCK(fb);
602  return f;
603 }
604 
605 /** \internal
606  * \brief Get a flow from the hash directly.
607  *
608  * Called in conditions where the spare queue is empty and memcap is reached.
609  *
610  * Walks the hash until a flow can be freed. Timeouts are disregarded, use_cnt
611  * is adhered to. "flow_prune_idx" atomic int makes sure we don't start at the
612  * top each time since that would clear the top of the hash leading to longer
613  * and longer search times under high pressure (observed).
614  *
615  * \param tv thread vars
616  * \param dtv decode thread vars (for flow log api thread data)
617  *
618  * \retval f flow or NULL
619  */
620 static Flow *FlowGetUsedFlow(ThreadVars *tv, DecodeThreadVars *dtv)
621 {
622  uint32_t idx = SC_ATOMIC_GET(flow_prune_idx) % flow_config.hash_size;
623  uint32_t cnt = flow_config.hash_size;
624 
625  while (cnt--) {
626  if (++idx >= flow_config.hash_size)
627  idx = 0;
628 
629  FlowBucket *fb = &flow_hash[idx];
630 
631  if (FBLOCK_TRYLOCK(fb) != 0)
632  continue;
633 
634  Flow *f = fb->tail;
635  if (f == NULL) {
636  FBLOCK_UNLOCK(fb);
637  continue;
638  }
639 
640  if (FLOWLOCK_TRYWRLOCK(f) != 0) {
641  FBLOCK_UNLOCK(fb);
642  continue;
643  }
644 
645  /** never prune a flow that is used by a packet or stream msg
646  * we are currently processing in one of the threads */
647  if (SC_ATOMIC_GET(f->use_cnt) > 0) {
648  FBLOCK_UNLOCK(fb);
649  FLOWLOCK_UNLOCK(f);
650  continue;
651  }
652 
653  /* remove from the hash */
654  if (f->hprev != NULL)
655  f->hprev->hnext = f->hnext;
656  if (f->hnext != NULL)
657  f->hnext->hprev = f->hprev;
658  if (fb->head == f)
659  fb->head = f->hnext;
660  if (fb->tail == f)
661  fb->tail = f->hprev;
662 
663  f->hnext = NULL;
664  f->hprev = NULL;
665  f->fb = NULL;
666  SC_ATOMIC_SET(fb->next_ts, 0);
667  FBLOCK_UNLOCK(fb);
668 
669  int state = SC_ATOMIC_GET(f->flow_state);
670  if (state == FLOW_STATE_NEW)
672  else if (state == FLOW_STATE_ESTABLISHED)
674  else if (state == FLOW_STATE_CLOSED)
676  else if (state == FLOW_STATE_CAPTURE_BYPASSED)
678  else if (state == FLOW_STATE_LOCAL_BYPASSED)
680 
682 
683  if (SC_ATOMIC_GET(flow_flags) & FLOW_EMERGENCY)
685 
686  /* invoke flow log api */
687  if (dtv && dtv->output_flow_thread_data)
688  (void)OutputFlowLog(tv, dtv->output_flow_thread_data, f);
689 
690  FlowClearMemory(f, f->protomap);
691 
693 
694  FLOWLOCK_UNLOCK(f);
695 
696  (void) SC_ATOMIC_ADD(flow_prune_idx, (flow_config.hash_size - cnt));
697  return f;
698  }
699 
700  return NULL;
701 }
#define SC_ATOMIC_OR(name, val)
Bitwise OR a value from our atomic variable.
Definition: util-atomic.h:154
#define FLOW_TCP_REUSED
Definition: flow.h:50
void FlowSetupPacket(Packet *p)
prepare packet for a life with flow Set PKT_WANTS_FLOW flag to incidate workers should do a flow look...
Definition: flow-hash.c:288
uint32_t hashword(const uint32_t *k, size_t length, uint32_t initval)
#define FLOW_CHECK_MEMCAP(size)
check if a memory alloc would fit in the memcap
Definition: flow-util.h:131
UDPHdr * udph
Definition: decode.h:522
#define SCLogDebug(...)
Definition: util-debug.h:335
#define FBLOCK_UNLOCK(fb)
Definition: flow-hash.h:70
uint16_t emb_sport
struct Flow_ * hnext
Definition: flow.h:446
#define ICMPV4_GET_EMB_PROTO(p)
uint8_t proto
Definition: flow.h:343
#define ICMPV4_DEST_UNREACH_IS_VALID(p)
uint32_t addrs[2]
Definition: flow-hash.c:84
#define FLOWLOCK_UNLOCK(fb)
Definition: flow.h:242
Port sp
Definition: flow.h:330
#define FLOW_END_FLAG_STATE_ESTABLISHED
Definition: flow.h:209
#define unlikely(expr)
Definition: util-optimize.h:35
struct FlowHashKey6_ FlowHashKey6
Port sp
Definition: decode.h:413
uint16_t FlowThreadId
Definition: flow.h:304
Port dp
Definition: decode.h:421
#define SC_ATOMIC_ADD(name, val)
add a value to our atomic variable
Definition: util-atomic.h:107
#define FLOW_EMERGENCY
Definition: flow-private.h:37
Address dst
Definition: decode.h:411
#define FLOW_END_FLAG_FORCED
Definition: flow.h:213
#define IPV4_GET_RAW_IPSRC_U32(ip4h)
Definition: decode-ipv4.h:106
uint16_t src
#define FLOWLOCK_WRLOCK(fb)
Definition: flow.h:239
Flow * FlowGetFlowFromHash(ThreadVars *tv, DecodeThreadVars *dtv, const Packet *p, Flow **dest)
Get Flow for packet.
Definition: flow-hash.c:483
uint16_t ports[2]
Definition: flow-hash.c:85
TCPHdr * tcph
Definition: decode.h:520
#define FLOW_END_FLAG_STATE_BYPASSED
Definition: flow.h:215
#define FlowWakeupFlowManagerThread()
Definition: flow-manager.h:34
void * protoctx
Definition: flow.h:395
uint16_t vlan_id[2]
Definition: decode.h:433
const uint32_t u32[11]
Definition: flow-hash.c:103
FlowConfig flow_config
Definition: flow-private.h:97
struct Flow_ * hprev
Definition: flow.h:447
FlowAddress dst
Definition: flow.h:328
uint16_t counter_flow_udp
Definition: decode.h:683
#define CMP_FLOW_ICMP(f1, f2)
Definition: flow-hash.c:226
uint16_t counter_flow_tcp
Definition: decode.h:682
uint16_t vlan_id[2]
Definition: flow.h:345
SC_ATOMIC_EXTERN(unsigned int, flow_prune_idx)
uint16_t dst
IPV6Hdr * ip6h
Definition: decode.h:500
#define FLOW_END_FLAG_STATE_NEW
Definition: flow.h:208
uint8_t proto
Definition: decode.h:428
uint32_t src[4]
Definition: flow-hash.c:97
uint8_t recursion_level
Definition: decode.h:431
Data structures and function prototypes for keeping state for the detection engine.
#define FLOWLOCK_TRYWRLOCK(fb)
Definition: flow.h:241
int TcpSessionPacketSsnReuse(const Packet *p, const Flow *f, void *tcp_ssn)
uint16_t proto
Definition: flow-hash.c:99
Structure to hold thread specific data for all decode modules.
Definition: decode.h:632
TmEcode OutputFlowLog(ThreadVars *tv, void *thread_data, Flow *f)
Run flow logger(s)
Definition: output-flow.c:90
uint16_t recur
Definition: flow-hash.c:100
void * output_flow_thread_data
Definition: decode.h:691
#define FLOW_END_FLAG_STATE_CLOSED
Definition: flow.h:210
void StatsIncr(ThreadVars *tv, uint16_t id)
Increments the local counter.
Definition: counters.c:163
uint32_t dst[4]
Definition: flow-hash.c:97
#define ICMPV4_GET_EMB_IPV4(p)
uint16_t counter_flow_icmp4
Definition: decode.h:684
uint16_t counter_flow_memcap
Definition: decode.h:680
Flow * FlowAlloc(void)
allocate a flow
Definition: flow-util.c:51
struct FlowBucket_ * fb
Definition: flow.h:448
IPV4Hdr * ip4h
Definition: decode.h:498
uint16_t recur
Definition: flow-hash.c:87
uint8_t flow_end_flags
Definition: flow.h:401
#define SC_ATOMIC_SET(name, val)
Set the value for the atomic variable.
Definition: util-atomic.h:207
Flow * FlowDequeue(FlowQueue *q)
remove a flow from the queue
Definition: flow-queue.c:105
FlowThreadId thread_id
Definition: flow.h:421
FlowBucket * flow_hash
Definition: flow-private.h:96
const uint32_t u32[5]
Definition: flow-hash.c:90
Port dp
Definition: flow.h:337
#define FLOW_END_FLAG_EMERGENCY
Definition: flow.h:211
unsigned int FlowStorageSize(void)
Definition: flow-storage.c:34
#define FBLOCK_TRYLOCK(fb)
Definition: flow-hash.h:69
#define PKT_IS_ICMPV4(p)
Definition: decode.h:253
uint16_t ports[2]
Definition: flow-hash.c:98
void FlowUpdateState(Flow *f, enum FlowState s)
Definition: flow.c:1101
uint16_t vlan_id[2]
Definition: flow-hash.c:101
uint16_t emb_dport
uint16_t counter_flow_icmp6
Definition: decode.h:685
#define SC_ATOMIC_GET(name)
Get the value from the atomic variable.
Definition: util-atomic.h:192
#define IPV4_GET_RAW_IPDST_U32(ip4h)
Definition: decode-ipv4.h:108
#define ICMPV4_IS_ERROR_MSG(p)
uint32_t flow_hash
Definition: flow.h:348
uint8_t recursion_level
Definition: flow.h:344
FlowAddress src
Definition: flow.h:328
uint32_t hash_rand
Definition: flow.h:262
#define PKT_WANTS_FLOW
Definition: decode.h:1113
struct FlowHashKey4_ FlowHashKey4
int FlowClearMemory(Flow *f, uint8_t proto_map)
Function clear the flow memory before queueing it to spare flow queue.
Definition: flow.c:1027
uint32_t flow_hash
Definition: decode.h:447
Per thread variable structure.
Definition: threadvars.h:57
void FlowTimeoutsEmergency(void)
Definition: flow-manager.c:94
uint32_t flags
Definition: decode.h:441
void FlowInit(Flow *f, const Packet *p)
Definition: flow-util.c:147
uint8_t protomap
Definition: flow.h:399
Flow data structure.
Definition: flow.h:324
uint16_t vlan_id[2]
Definition: flow-hash.c:88
uint32_t flags
Definition: flow.h:374
ICMPV4Vars icmpv4vars
Definition: decode.h:513
#define FBLOCK_LOCK(fb)
Definition: flow-hash.h:68
Address src
Definition: decode.h:410
FlowQueue flow_spare_q
Definition: flow-private.h:91
uint16_t proto
Definition: flow-hash.c:86
uint32_t hash_size
Definition: flow.h:263
#define CMP_FLOW(f1, f2)
Definition: flow-hash.c:215