flow-manager.c
1 /* Copyright (C) 2007-2013 Open Information Security Foundation
2  *
3  * You can copy, redistribute or modify this Program under the terms of
4  * the GNU General Public License version 2 as published by the Free
5  * Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * version 2 along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15  * 02110-1301, USA.
16  */
17 
18 /**
19  * \file
20  *
21  * \author Anoop Saldanha <anoopsaldanha@gmail.com>
22  * \author Victor Julien <victor@inliniac.net>
23  */
24 
25 #include "suricata-common.h"
26 #include "suricata.h"
27 #include "decode.h"
28 #include "conf.h"
29 #include "threadvars.h"
30 #include "tm-threads.h"
31 #include "runmodes.h"
32 
33 #include "util-random.h"
34 #include "util-time.h"
35 
36 #include "flow.h"
37 #include "flow-queue.h"
38 #include "flow-hash.h"
39 #include "flow-util.h"
40 #include "flow-var.h"
41 #include "flow-private.h"
42 #include "flow-timeout.h"
43 #include "flow-manager.h"
44 
45 #include "stream-tcp-private.h"
46 #include "stream-tcp-reassemble.h"
47 #include "stream-tcp.h"
48 
49 #include "util-unittest.h"
50 #include "util-unittest-helper.h"
51 #include "util-byte.h"
52 
53 #include "util-debug.h"
54 #include "util-privs.h"
55 #include "util-signal.h"
56 
57 #include "threads.h"
58 #include "detect.h"
59 #include "detect-engine-state.h"
60 #include "stream.h"
61 
62 #include "app-layer-parser.h"
63 
64 #include "host-timeout.h"
65 #include "defrag-timeout.h"
66 #include "ippair-timeout.h"
67 
68 #include "output-flow.h"
69 
70 /* Run mode selected at suricata.c */
71 extern int run_mode;
72 
73 /* multi flow manager support */
74 static uint32_t flowmgr_number = 1;
75 /* atomic counter for flow managers, to assign instance id */
76 SC_ATOMIC_DECLARE(uint32_t, flowmgr_cnt);
77 
78 /* multi flow recycler support */
79 static uint32_t flowrec_number = 1;
80 /* atomic counter for flow recyclers, to assign instance id */
81 SC_ATOMIC_DECLARE(uint32_t, flowrec_cnt);
82 
83 SC_ATOMIC_EXTERN(unsigned int, flow_flags);
84 
85 
86 typedef FlowProtoTimeout *FlowProtoTimeoutPtr;
87 SC_ATOMIC_DECLARE(FlowProtoTimeoutPtr, flow_timeouts);
88 
89 void FlowTimeoutsInit(void)
90 {
91  SC_ATOMIC_SET(flow_timeouts, flow_timeouts_normal);
92 }
93 
94 void FlowTimeoutsEmergency(void)
95 {
96  SC_ATOMIC_SET(flow_timeouts, flow_timeouts_emerg);
97 }
98 
99 /* 1 second */
100 #define FLOW_NORMAL_MODE_UPDATE_DELAY_SEC 1
101 #define FLOW_NORMAL_MODE_UPDATE_DELAY_NSEC 0
102 /* 0.1 seconds */
103 #define FLOW_EMERG_MODE_UPDATE_DELAY_SEC 0
104 #define FLOW_EMERG_MODE_UPDATE_DELAY_NSEC 100000
105 #define NEW_FLOW_COUNT_COND 10
106 
107 typedef struct FlowTimeoutCounters_ {
108  uint32_t new;
109  uint32_t est;
110  uint32_t clo;
111  uint32_t byp;
112  uint32_t tcp_reuse;
113 
114  uint32_t flows_checked;
115  uint32_t flows_notimeout;
116  uint32_t flows_timeout;
117  uint32_t flows_timeout_inuse;
118  uint32_t flows_removed;
119 
120  uint32_t rows_checked;
121  uint32_t rows_skipped;
122  uint32_t rows_empty;
123  uint32_t rows_busy;
124  uint32_t rows_maxlen;
125 } FlowTimeoutCounters;
126 
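/* Note: these counters are accumulated per FlowTimeoutHash() pass and then
 * exported through the flow_mgr.* / flow.* stats counters registered in
 * FlowManagerThreadInit() (see the StatsAddUI64()/StatsSetUI64() calls in
 * FlowManager() below). */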
127 /**
128  * \brief Used to disable flow manager thread(s).
129  *
130  * \todo Kinda hackish since it uses the tv name to identify flow manager
131  * thread. We need an all weather identification scheme.
132  */
133 void FlowDisableFlowManagerThread(void)
134 {
135 #ifdef AFLFUZZ_DISABLE_MGTTHREADS
136  return;
137 #endif
138  ThreadVars *tv = NULL;
139 
140  /* wake up threads */
141  uint32_t u;
142  for (u = 0; u < flowmgr_number; u++)
143  SCCtrlCondSignal(&flow_manager_ctrl_cond);
144 
145  SCMutexLock(&tv_root_lock);
146  /* flow manager thread(s) is/are a part of mgmt threads */
147  tv = tv_root[TVT_MGMT];
148  while (tv != NULL)
149  {
150  if (strncasecmp(tv->name, thread_name_flow_mgr,
151  strlen(thread_name_flow_mgr)) == 0)
152  {
153  TmThreadsSetFlag(tv, THV_KILL);
154  }
155  tv = tv->next;
156  }
157  SCMutexUnlock(&tv_root_lock);
158 
159  struct timeval start_ts;
160  struct timeval cur_ts;
161  gettimeofday(&start_ts, NULL);
162 
163 again:
164  gettimeofday(&cur_ts, NULL);
165  if ((cur_ts.tv_sec - start_ts.tv_sec) > 60) {
166  FatalError(SC_ERR_SHUTDOWN, "unable to get all flow manager "
167  "threads to shutdown in time");
168  }
169 
170  SCMutexLock(&tv_root_lock);
171  tv = tv_root[TVT_MGMT];
172  while (tv != NULL)
173  {
174  if (strncasecmp(tv->name, thread_name_flow_mgr,
175  strlen(thread_name_flow_mgr)) == 0)
176  {
177  if (!TmThreadsCheckFlag(tv, THV_RUNNING_DONE)) {
178  SCMutexUnlock(&tv_root_lock);
179  /* sleep outside lock */
180  SleepMsec(1);
181  goto again;
182  }
183  }
184  tv = tv->next;
185  }
186  SCMutexUnlock(&tv_root_lock);
187 
188  /* wake up threads, another try */
189  for (u = 0; u < flowmgr_number; u++)
190  SCCtrlCondSignal(&flow_manager_ctrl_cond);
191 
192  /* reset count, so we can kill and respawn (unix socket) */
193  SC_ATOMIC_SET(flowmgr_cnt, 0);
194  return;
195 }
196 
197 /** \internal
198  * \brief get timeout for flow
199  *
200  * \param f flow
201  * \param state flow state
202  *
203  * \retval timeout timeout in seconds
204  */
205 static inline uint32_t FlowGetFlowTimeout(const Flow *f, enum FlowState state)
206 {
207  uint32_t timeout;
208  FlowProtoTimeoutPtr flow_timeouts = SC_ATOMIC_GET(flow_timeouts);
209  switch(state) {
210  default:
211  case FLOW_STATE_NEW:
212  timeout = flow_timeouts[f->protomap].new_timeout;
213  break;
214  case FLOW_STATE_ESTABLISHED:
215  timeout = flow_timeouts[f->protomap].est_timeout;
216  break;
217  case FLOW_STATE_CLOSED:
218  timeout = flow_timeouts[f->protomap].closed_timeout;
219  break;
220  case FLOW_STATE_LOCAL_BYPASSED:
221  timeout = FLOW_BYPASSED_TIMEOUT;
222  break;
223  case FLOW_STATE_CAPTURE_BYPASSED:
224  timeout = flow_timeouts[f->protomap].bypassed_timeout;
225  break;
226  }
227  return timeout;
228 }
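/* Example: for an established TCP flow, FlowGetFlowTimeout() returns
 * flow_timeouts[FLOW_PROTO_TCP].est_timeout. Because flow_timeouts is an
 * atomic pointer swapped by FlowTimeoutsInit()/FlowTimeoutsEmergency(), the
 * same lookup transparently yields the shorter emergency values once
 * emergency mode has been entered. */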
229 
230 /** \internal
231  * \brief check if a flow is timed out
232  *
233  * \param f flow
234  * \param ts timestamp
235  *
236  * \retval 0 not timed out
237  * \retval 1 timed out
238  */
239 static int FlowManagerFlowTimeout(const Flow *f, enum FlowState state, struct timeval *ts, int32_t *next_ts)
240 {
241  /* set the timeout value according to the flow operating mode,
242  * flow's state and protocol.*/
243  uint32_t timeout = FlowGetFlowTimeout(f, state);
244 
245  int32_t flow_times_out_at = (int32_t)(f->lastts.tv_sec + timeout);
246  if (*next_ts == 0 || flow_times_out_at < *next_ts)
247  *next_ts = flow_times_out_at;
248 
249  /* do the timeout check */
250  if (flow_times_out_at >= ts->tv_sec) {
251  return 0;
252  }
253 
254  return 1;
255 }
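/* Worked example: with f->lastts.tv_sec == 1000 and a 30 second timeout,
 * flow_times_out_at is 1030; the flow is only reported as timed out once
 * ts->tv_sec passes 1030. *next_ts keeps the earliest expiry seen in the
 * row, which FlowTimeoutHash() stores in fb->next_ts so the whole bucket
 * can be skipped on later passes. */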
256 
257 /** \internal
258  * \brief See if we can really discard this flow. Check use_cnt reference
259  * counter and force reassembly if necessary.
260  *
261  * \param f flow
262  * \param ts timestamp
263  *
264  * \retval 0 not timed out just yet
265  * \retval 1 fully timed out, lets kill it
266  */
267 static int FlowManagerFlowTimedOut(Flow *f, struct timeval *ts)
268 {
269  /* never prune a flow that is used by a packet we
270  * are currently processing in one of the threads */
271  if (SC_ATOMIC_GET(f->use_cnt) > 0) {
272  return 0;
273  }
274 
275  int server = 0, client = 0;
276 
277  if (!(f->flags & FLOW_TIMEOUT_REASSEMBLY_DONE) &&
278  FlowForceReassemblyNeedReassembly(f, &server, &client) == 1) {
279  FlowForceReassemblyForFlow(f, server, client);
280  return 0;
281  }
282 #ifdef DEBUG
283  /* this should not be possible */
284  BUG_ON(SC_ATOMIC_GET(f->use_cnt) > 0);
285 #endif
286 
287  return 1;
288 }
289 
290 /**
291  * \internal
292  *
293  * \brief check all flows in a hash row for timing out
294  *
295  * \param f last flow in the hash row
296  * \param ts timestamp
297  * \param emergency bool indicating emergency mode
298  * \param counters ptr to FlowTimeoutCounters structure
299  *
300  * \retval cnt timed out flows
301  */
302 static uint32_t FlowManagerHashRowTimeout(Flow *f, struct timeval *ts,
303  int emergency, FlowTimeoutCounters *counters, int32_t *next_ts)
304 {
305  uint32_t cnt = 0;
306  uint32_t checked = 0;
307 
308  do {
309  checked++;
310 
311  /* check flow timeout based on lastts and state. Both can be
312  * accessed w/o Flow lock as we do have the hash row lock (so flow
313  * can't disappear) and flow_state is atomic. lastts can only
314  * be modified when we have both the flow and hash row lock */
315 
316  enum FlowState state = SC_ATOMIC_GET(f->flow_state);
317 
318  /* timeout logic goes here */
319  if (FlowManagerFlowTimeout(f, state, ts, next_ts) == 0) {
320 
321  counters->flows_notimeout++;
322 
323  f = f->hprev;
324  continue;
325  }
326 
327  /* before grabbing the flow lock, make sure we have at least
328  * 3 packets in the pool */
329  PacketPoolWaitForN(3);
330 
331  FLOWLOCK_WRLOCK(f);
332 
333  Flow *next_flow = f->hprev;
334 
335  counters->flows_timeout++;
336 
337  /* check if the flow is fully timed out and
338  * ready to be discarded. */
339  if (FlowManagerFlowTimedOut(f, ts) == 1) {
340  /* remove from the hash */
341  if (f->hprev != NULL)
342  f->hprev->hnext = f->hnext;
343  if (f->hnext != NULL)
344  f->hnext->hprev = f->hprev;
345  if (f->fb->head == f)
346  f->fb->head = f->hnext;
347  if (f->fb->tail == f)
348  f->fb->tail = f->hprev;
349 
350  f->hnext = NULL;
351  f->hprev = NULL;
352 
353  if (f->flags & FLOW_TCP_REUSED)
354  counters->tcp_reuse++;
355 
356  if (state == FLOW_STATE_NEW)
357  f->flow_end_flags |= FLOW_END_FLAG_STATE_NEW;
358  else if (state == FLOW_STATE_ESTABLISHED)
359  f->flow_end_flags |= FLOW_END_FLAG_STATE_ESTABLISHED;
360  else if (state == FLOW_STATE_CLOSED)
361  f->flow_end_flags |= FLOW_END_FLAG_STATE_CLOSED;
362  else if (state == FLOW_STATE_LOCAL_BYPASSED)
363  f->flow_end_flags |= FLOW_END_FLAG_STATE_BYPASSED;
364  else if (state == FLOW_STATE_CAPTURE_BYPASSED)
365  f->flow_end_flags |= FLOW_END_FLAG_STATE_BYPASSED;
366 
367  if (emergency)
368  f->flow_end_flags |= FLOW_END_FLAG_EMERGENCY;
369  f->flow_end_flags |= FLOW_END_FLAG_TIMEOUT;
370 
371  /* no one is referring to this flow, use_cnt 0, removed from hash
372  * so we can unlock it and pass it to the flow recycler */
373  FLOWLOCK_UNLOCK(f);
374  FlowEnqueue(&flow_recycle_q, f);
375 
376  cnt++;
377 
378  switch (state) {
379  case FLOW_STATE_NEW:
380  default:
381  counters->new++;
382  break;
383  case FLOW_STATE_ESTABLISHED:
384  counters->est++;
385  break;
386  case FLOW_STATE_CLOSED:
387  counters->clo++;
388  break;
389  case FLOW_STATE_LOCAL_BYPASSED:
390  case FLOW_STATE_CAPTURE_BYPASSED:
391  counters->byp++;
392  break;
393  }
394  counters->flows_removed++;
395  } else {
396  counters->flows_timeout_inuse++;
397  FLOWLOCK_UNLOCK(f);
398  }
399 
400  f = next_flow;
401  } while (f != NULL);
402 
403  counters->flows_checked += checked;
404  if (checked > counters->rows_maxlen)
405  counters->rows_maxlen = checked;
406 
407  return cnt;
408 }
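/* The row walk above starts at the bucket tail and follows f->hprev toward
 * the head while the hash row lock is held, so flows cannot be added to or
 * removed from the row underneath it; an individual flow is only locked once
 * it is a timeout candidate. */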
409 
410 /**
411  * \brief time out flows from the hash
412  *
413  * \param ts timestamp
414  * \param try_cnt maximum number of flows to time out (0 is unlimited)
415  * \param hash_min min hash index to consider
416  * \param hash_max max hash index to consider
417  * \param counters ptr to FlowTimeoutCounters structure
418  *
419  * \retval cnt number of timed out flows
420  */
421 static uint32_t FlowTimeoutHash(struct timeval *ts, uint32_t try_cnt,
422  uint32_t hash_min, uint32_t hash_max,
423  FlowTimeoutCounters *counters)
424 {
425  uint32_t idx = 0;
426  uint32_t cnt = 0;
427  int emergency = 0;
428 
429  if (SC_ATOMIC_GET(flow_flags) & FLOW_EMERGENCY)
430  emergency = 1;
431 
432  for (idx = hash_min; idx < hash_max; idx++) {
433  FlowBucket *fb = &flow_hash[idx];
434 
435  counters->rows_checked++;
436 
437  int32_t check_ts = SC_ATOMIC_GET(fb->next_ts);
438  if (check_ts > (int32_t)ts->tv_sec) {
439  counters->rows_skipped++;
440  continue;
441  }
442 
443  /* before grabbing the row lock, make sure we have at least
444  * 9 packets in the pool */
445  PacketPoolWaitForN(9);
446 
447  if (FBLOCK_TRYLOCK(fb) != 0) {
448  counters->rows_busy++;
449  continue;
450  }
451 
452  /* flow hash bucket is now locked */
453 
454  if (fb->tail == NULL) {
455  SC_ATOMIC_SET(fb->next_ts, INT_MAX);
456  counters->rows_empty++;
457  goto next;
458  }
459 
460  int32_t next_ts = 0;
461 
462  /* we have a flow, or more than one */
463  cnt += FlowManagerHashRowTimeout(fb->tail, ts, emergency, counters, &next_ts);
464 
465  SC_ATOMIC_SET(fb->next_ts, next_ts);
466 
467 next:
468  FBLOCK_UNLOCK(fb);
469 
470  if (try_cnt > 0 && cnt >= try_cnt)
471  break;
472  }
473 
474  return cnt;
475 }
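/* Each bucket caches the earliest expiry of its flows in fb->next_ts, so a
 * pass over the configured hash range normally only locks the rows that can
 * actually contain timed-out flows; everything else is counted as
 * rows_skipped. The hash_min/hash_max arguments let multiple flow manager
 * threads work on disjoint slices of the table. */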
476 
477 /**
478  * \internal
479  *
480  * \brief move all flows out of a hash row
481  *
482  * \param f last flow in the hash row
483  *
484  * \retval cnt number of removed flows
485  */
486 static uint32_t FlowManagerHashRowCleanup(Flow *f)
487 {
488  uint32_t cnt = 0;
489 
490  do {
491  FLOWLOCK_WRLOCK(f);
492 
493  Flow *next_flow = f->hprev;
494 
495  int state = SC_ATOMIC_GET(f->flow_state);
496 
497  /* remove from the hash */
498  if (f->hprev != NULL)
499  f->hprev->hnext = f->hnext;
500  if (f->hnext != NULL)
501  f->hnext->hprev = f->hprev;
502  if (f->fb->head == f)
503  f->fb->head = f->hnext;
504  if (f->fb->tail == f)
505  f->fb->tail = f->hprev;
506 
507  f->hnext = NULL;
508  f->hprev = NULL;
509 
510  if (state == FLOW_STATE_NEW)
511  f->flow_end_flags |= FLOW_END_FLAG_STATE_NEW;
512  else if (state == FLOW_STATE_ESTABLISHED)
513  f->flow_end_flags |= FLOW_END_FLAG_STATE_ESTABLISHED;
514  else if (state == FLOW_STATE_CLOSED)
515  f->flow_end_flags |= FLOW_END_FLAG_STATE_CLOSED;
516 
517  f->flow_end_flags |= FLOW_END_FLAG_SHUTDOWN;
518 
519  /* no one is referring to this flow, use_cnt 0, removed from hash
520  * so we can unlock it and move it to the recycle queue. */
521  FLOWLOCK_UNLOCK(f);
522 
523  FlowEnqueue(&flow_recycle_q, f);
524 
525  cnt++;
526 
527  f = next_flow;
528  } while (f != NULL);
529 
530  return cnt;
531 }
532 
533 /**
534  * \brief remove all flows from the hash
535  *
536  * \retval cnt number of removed flows
537  */
538 static uint32_t FlowCleanupHash(void){
539  uint32_t idx = 0;
540  uint32_t cnt = 0;
541 
542  for (idx = 0; idx < flow_config.hash_size; idx++) {
543  FlowBucket *fb = &flow_hash[idx];
544 
545  FBLOCK_LOCK(fb);
546 
547  if (fb->tail != NULL) {
548  /* we have a flow, or more than one */
549  cnt += FlowManagerHashRowCleanup(fb->tail);
550  }
551 
552  FBLOCK_UNLOCK(fb);
553  }
554 
555  return cnt;
556 }
557 
558 extern int g_detect_disabled;
559 
560 typedef struct FlowManagerThreadData_ {
561  uint32_t instance;
562  uint32_t min;
563  uint32_t max;
564 
565  uint16_t flow_mgr_cnt_clo;
566  uint16_t flow_mgr_cnt_new;
567  uint16_t flow_mgr_cnt_est;
568  uint16_t flow_mgr_cnt_byp;
569  uint16_t flow_mgr_spare;
570  uint16_t flow_emerg_mode_enter;
571  uint16_t flow_emerg_mode_over;
572  uint16_t flow_tcp_reuse;
573 
574  uint16_t flow_mgr_flows_checked;
575  uint16_t flow_mgr_flows_notimeout;
576  uint16_t flow_mgr_flows_timeout;
577  uint16_t flow_mgr_flows_timeout_inuse;
578  uint16_t flow_mgr_flows_removed;
579 
580  uint16_t flow_mgr_rows_checked;
581  uint16_t flow_mgr_rows_skipped;
582  uint16_t flow_mgr_rows_empty;
583  uint16_t flow_mgr_rows_busy;
584  uint16_t flow_mgr_rows_maxlen;
585 
586 } FlowManagerThreadData;
587 
588 static TmEcode FlowManagerThreadInit(ThreadVars *t, const void *initdata, void **data)
589 {
590  FlowManagerThreadData *ftd = SCCalloc(1, sizeof(FlowManagerThreadData));
591  if (ftd == NULL)
592  return TM_ECODE_FAILED;
593 
594  ftd->instance = SC_ATOMIC_ADD(flowmgr_cnt, 1);
595  SCLogDebug("flow manager instance %u", ftd->instance);
596 
597  /* set the min and max value used for hash row walking
598  * each thread has its own section of the flow hash */
599  uint32_t range = flow_config.hash_size / flowmgr_number;
600  if (ftd->instance == 1)
601  ftd->max = range;
602  else if (ftd->instance == flowmgr_number) {
603  ftd->min = (range * (ftd->instance - 1));
604  ftd->max = flow_config.hash_size;
605  } else {
606  ftd->min = (range * (ftd->instance - 1));
607  ftd->max = (range * ftd->instance);
608  }
609  BUG_ON(ftd->min > flow_config.hash_size || ftd->max > flow_config.hash_size);
610 
611  SCLogDebug("instance %u hash range %u %u", ftd->instance, ftd->min, ftd->max);
612 
613  /* pass thread data back to caller */
614  *data = ftd;
615 
616  ftd->flow_mgr_cnt_clo = StatsRegisterCounter("flow_mgr.closed_pruned", t);
617  ftd->flow_mgr_cnt_new = StatsRegisterCounter("flow_mgr.new_pruned", t);
618  ftd->flow_mgr_cnt_est = StatsRegisterCounter("flow_mgr.est_pruned", t);
619  ftd->flow_mgr_cnt_byp = StatsRegisterCounter("flow_mgr.bypassed_pruned", t);
620  ftd->flow_mgr_spare = StatsRegisterCounter("flow.spare", t);
621  ftd->flow_emerg_mode_enter = StatsRegisterCounter("flow.emerg_mode_entered", t);
622  ftd->flow_emerg_mode_over = StatsRegisterCounter("flow.emerg_mode_over", t);
623  ftd->flow_tcp_reuse = StatsRegisterCounter("flow.tcp_reuse", t);
624 
625  ftd->flow_mgr_flows_checked = StatsRegisterCounter("flow_mgr.flows_checked", t);
626  ftd->flow_mgr_flows_notimeout = StatsRegisterCounter("flow_mgr.flows_notimeout", t);
627  ftd->flow_mgr_flows_timeout = StatsRegisterCounter("flow_mgr.flows_timeout", t);
628  ftd->flow_mgr_flows_timeout_inuse = StatsRegisterCounter("flow_mgr.flows_timeout_inuse", t);
629  ftd->flow_mgr_flows_removed = StatsRegisterCounter("flow_mgr.flows_removed", t);
630 
631  ftd->flow_mgr_rows_checked = StatsRegisterCounter("flow_mgr.rows_checked", t);
632  ftd->flow_mgr_rows_skipped = StatsRegisterCounter("flow_mgr.rows_skipped", t);
633  ftd->flow_mgr_rows_empty = StatsRegisterCounter("flow_mgr.rows_empty", t);
634  ftd->flow_mgr_rows_busy = StatsRegisterCounter("flow_mgr.rows_busy", t);
635  ftd->flow_mgr_rows_maxlen = StatsRegisterCounter("flow_mgr.rows_maxlen", t);
636 
637  PacketPoolInit();
638  return TM_ECODE_OK;
639 }
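/* Example of the hash range split above: with flow_config.hash_size == 65536
 * and flowmgr_number == 2, instance 1 walks rows [0, 32768) and instance 2
 * walks rows [32768, 65536); the last instance always runs up to hash_size,
 * so a thread count that does not divide hash_size leaves no rows
 * uncovered. */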
640 
641 static TmEcode FlowManagerThreadDeinit(ThreadVars *t, void *data)
642 {
643  PacketPoolDestroy();
644  SCFree(data);
645  return TM_ECODE_OK;
646 }
647 
648 
649 /** \brief Thread that manages the flow table and times out flows.
650  *
651  * \param td ThreadVars casted to void ptr
652  *
653  * Keeps an eye on the spare list, alloc flows if needed...
654  */
655 static TmEcode FlowManager(ThreadVars *th_v, void *thread_data)
656 {
657  FlowManagerThreadData *ftd = thread_data;
658  struct timeval ts;
659  uint32_t established_cnt = 0, new_cnt = 0, closing_cnt = 0;
660  int emerg = FALSE;
661  int prev_emerg = FALSE;
662  struct timespec cond_time;
663  int flow_update_delay_sec = FLOW_NORMAL_MODE_UPDATE_DELAY_SEC;
664  int flow_update_delay_nsec = FLOW_NORMAL_MODE_UPDATE_DELAY_NSEC;
665 /* VJ leaving disabled for now, as hosts are only used by tags and the numbers
666  * are really low. Might confuse ppl
667  uint16_t flow_mgr_host_prune = StatsRegisterCounter("hosts.pruned", th_v);
668  uint16_t flow_mgr_host_active = StatsRegisterCounter("hosts.active", th_v);
669  uint16_t flow_mgr_host_spare = StatsRegisterCounter("hosts.spare", th_v);
670 */
671  memset(&ts, 0, sizeof(ts));
672 
673  while (1)
674  {
675  if (TmThreadsCheckFlag(th_v, THV_PAUSE)) {
676  TmThreadsSetFlag(th_v, THV_PAUSED);
677  TmThreadTestThreadUnPaused(th_v);
678  TmThreadsUnsetFlag(th_v, THV_PAUSED);
679  }
680 
681  if (SC_ATOMIC_GET(flow_flags) & FLOW_EMERGENCY) {
682  emerg = TRUE;
683 
684  if (emerg == TRUE && prev_emerg == FALSE) {
685  prev_emerg = TRUE;
686 
687  SCLogDebug("Flow emergency mode entered...");
688 
689  StatsIncr(th_v, ftd->flow_emerg_mode_enter);
690  }
691  }
692 
693  /* Get the time */
694  memset(&ts, 0, sizeof(ts));
695  TimeGet(&ts);
696  SCLogDebug("ts %" PRIdMAX "", (intmax_t)ts.tv_sec);
697 
698  /* see if we still have enough spare flows */
699  if (ftd->instance == 1)
700  FlowUpdateSpareFlows();
701 
702  /* try to time out flows */
703  FlowTimeoutCounters counters = { 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,0};
704  FlowTimeoutHash(&ts, 0 /* check all */, ftd->min, ftd->max, &counters);
705 
706 
707  if (ftd->instance == 1) {
708  DefragTimeoutHash(&ts);
709  //uint32_t hosts_pruned =
710  HostTimeoutHash(&ts);
711  IPPairTimeoutHash(&ts);
712  }
713 /*
714  StatsAddUI64(th_v, flow_mgr_host_prune, (uint64_t)hosts_pruned);
715  uint32_t hosts_active = HostGetActiveCount();
716  StatsSetUI64(th_v, flow_mgr_host_active, (uint64_t)hosts_active);
717  uint32_t hosts_spare = HostGetSpareCount();
718  StatsSetUI64(th_v, flow_mgr_host_spare, (uint64_t)hosts_spare);
719 */
720  StatsAddUI64(th_v, ftd->flow_mgr_cnt_clo, (uint64_t)counters.clo);
721  StatsAddUI64(th_v, ftd->flow_mgr_cnt_new, (uint64_t)counters.new);
722  StatsAddUI64(th_v, ftd->flow_mgr_cnt_est, (uint64_t)counters.est);
723  StatsAddUI64(th_v, ftd->flow_mgr_cnt_byp, (uint64_t)counters.byp);
724  StatsAddUI64(th_v, ftd->flow_tcp_reuse, (uint64_t)counters.tcp_reuse);
725 
726  StatsSetUI64(th_v, ftd->flow_mgr_flows_checked, (uint64_t)counters.flows_checked);
727  StatsSetUI64(th_v, ftd->flow_mgr_flows_notimeout, (uint64_t)counters.flows_notimeout);
728  StatsSetUI64(th_v, ftd->flow_mgr_flows_timeout, (uint64_t)counters.flows_timeout);
729  StatsSetUI64(th_v, ftd->flow_mgr_flows_removed, (uint64_t)counters.flows_removed);
730  StatsSetUI64(th_v, ftd->flow_mgr_flows_timeout_inuse, (uint64_t)counters.flows_timeout_inuse);
731 
732  StatsSetUI64(th_v, ftd->flow_mgr_rows_checked, (uint64_t)counters.rows_checked);
733  StatsSetUI64(th_v, ftd->flow_mgr_rows_skipped, (uint64_t)counters.rows_skipped);
734  StatsSetUI64(th_v, ftd->flow_mgr_rows_maxlen, (uint64_t)counters.rows_maxlen);
735  StatsSetUI64(th_v, ftd->flow_mgr_rows_busy, (uint64_t)counters.rows_busy);
736  StatsSetUI64(th_v, ftd->flow_mgr_rows_empty, (uint64_t)counters.rows_empty);
737 
738  uint32_t len = 0;
739  FQLOCK_LOCK(&flow_spare_q);
740  len = flow_spare_q.len;
741  FQLOCK_UNLOCK(&flow_spare_q);
742  StatsSetUI64(th_v, ftd->flow_mgr_spare, (uint64_t)len);
743 
744  /* Don't fear, FlowManagerThread is here...
745  * clear emergency bit if we have at least xx flows pruned. */
746  if (emerg == TRUE) {
747  SCLogDebug("flow_sparse_q.len = %"PRIu32" prealloc: %"PRIu32
748  "flow_spare_q status: %"PRIu32"%% flows at the queue",
749  len, flow_config.prealloc, len * 100 / flow_config.prealloc);
750  /* only if we have pruned this "emergency_recovery" percentage
751  * of flows, we will unset the emergency bit */
752  if (len * 100 / flow_config.prealloc > flow_config.emergency_recovery) {
753  SC_ATOMIC_AND(flow_flags, ~FLOW_EMERGENCY);
754 
755  FlowTimeoutsReset();
756 
757  emerg = FALSE;
758  prev_emerg = FALSE;
759 
760  flow_update_delay_sec = FLOW_NORMAL_MODE_UPDATE_DELAY_SEC;
761  flow_update_delay_nsec = FLOW_NORMAL_MODE_UPDATE_DELAY_NSEC;
762  SCLogInfo("Flow emergency mode over, back to normal... unsetting"
763  " FLOW_EMERGENCY bit (ts.tv_sec: %"PRIuMAX", "
764  "ts.tv_usec:%"PRIuMAX") flow_spare_q status(): %"PRIu32
765  "%% flows at the queue", (uintmax_t)ts.tv_sec,
766  (uintmax_t)ts.tv_usec, len * 100 / flow_config.prealloc);
767 
768  StatsIncr(th_v, ftd->flow_emerg_mode_over);
769  } else {
770  flow_update_delay_sec = FLOW_EMERG_MODE_UPDATE_DELAY_SEC;
771  flow_update_delay_nsec = FLOW_EMERG_MODE_UPDATE_DELAY_NSEC;
772  }
773  }
774 
775  if (TmThreadsCheckFlag(th_v, THV_KILL)) {
776  StatsSyncCounters(th_v);
777  break;
778  }
779 
780  cond_time.tv_sec = time(NULL) + flow_update_delay_sec;
781  cond_time.tv_nsec = flow_update_delay_nsec;
782  SCCtrlMutexLock(&flow_manager_ctrl_mutex);
783  SCCtrlCondTimedwait(&flow_manager_ctrl_cond, &flow_manager_ctrl_mutex,
784  &cond_time);
785  SCCtrlMutexUnlock(&flow_manager_ctrl_mutex);
786 
787  SCLogDebug("woke up... %s", SC_ATOMIC_GET(flow_flags) & FLOW_EMERGENCY ? "emergency":"");
788 
789  StatsSyncCountersIfSignalled(th_v);
790  }
791 
792  SCLogPerf("%" PRIu32 " new flows, %" PRIu32 " established flows were "
793  "timed out, %"PRIu32" flows in closed state", new_cnt,
794  established_cnt, closing_cnt);
795 
796  return TM_ECODE_OK;
797 }
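/* Summary of the loop above: each manager thread wakes up every
 * flow_update_delay (one second normally, shorter in emergency mode), times
 * out the flows in its slice of the hash, publishes the counters, and
 * instance 1 additionally maintains the spare flow pool and expires defrag
 * trackers, hosts and ip-pairs. Emergency mode is left once the spare queue
 * again holds more than flow_config.emergency_recovery percent of the
 * preallocated flows. */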
798 
799 /** \brief spawn the flow manager thread */
800 void FlowManagerThreadSpawn()
801 {
802 #ifdef AFLFUZZ_DISABLE_MGTTHREADS
803  return;
804 #endif
805  intmax_t setting = 1;
806  (void)ConfGetInt("flow.managers", &setting);
807 
808  if (setting < 1 || setting > 1024) {
809  SCLogError(SC_ERR_INVALID_ARGUMENTS,
810  "invalid flow.managers setting %"PRIdMAX, setting);
811  exit(EXIT_FAILURE);
812  }
813  flowmgr_number = (uint32_t)setting;
814 
815  SCLogConfig("using %u flow manager threads", flowmgr_number);
816  SCCtrlCondInit(&flow_manager_ctrl_cond, NULL);
817  SCCtrlMutexInit(&flow_manager_ctrl_mutex, NULL);
818 
819  StatsRegisterGlobalCounter("flow.memuse", FlowGetMemuse);
820 
821  uint32_t u;
822  for (u = 0; u < flowmgr_number; u++)
823  {
824  ThreadVars *tv_flowmgr = NULL;
825 
826  char name[TM_THREAD_NAME_MAX];
827  snprintf(name, sizeof(name), "%s#%02u", thread_name_flow_mgr, u+1);
828 
829  tv_flowmgr = TmThreadCreateMgmtThreadByName(name,
830  "FlowManager", 0);
831  BUG_ON(tv_flowmgr == NULL);
832 
833  if (tv_flowmgr == NULL) {
834  printf("ERROR: TmThreadsCreate failed\n");
835  exit(1);
836  }
837  if (TmThreadSpawn(tv_flowmgr) != TM_ECODE_OK) {
838  printf("ERROR: TmThreadSpawn failed\n");
839  exit(1);
840  }
841  }
842  return;
843 }
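/* Illustrative suricata.yaml snippet for the settings read above and in
 * FlowRecyclerThreadSpawn() below (values are examples, not defaults):
 *
 *   flow:
 *     managers: 2
 *     recyclers: 2
 */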
844 
845 typedef struct FlowRecyclerThreadData_ {
846  void *output_thread_data;
847 } FlowRecyclerThreadData;
848 
849 static TmEcode FlowRecyclerThreadInit(ThreadVars *t, const void *initdata, void **data)
850 {
851  FlowRecyclerThreadData *ftd = SCCalloc(1, sizeof(FlowRecyclerThreadData));
852  if (ftd == NULL)
853  return TM_ECODE_FAILED;
854 
855  if (OutputFlowLogThreadInit(t, NULL, &ftd->output_thread_data) != TM_ECODE_OK) {
856  SCLogError(SC_ERR_THREAD_INIT, "initializing flow log API for thread failed");
857  SCFree(ftd);
858  return TM_ECODE_FAILED;
859  }
860  SCLogDebug("output_thread_data %p", ftd->output_thread_data);
861 
862  *data = ftd;
863  return TM_ECODE_OK;
864 }
865 
866 static TmEcode FlowRecyclerThreadDeinit(ThreadVars *t, void *data)
867 {
868  FlowRecyclerThreadData *ftd = (FlowRecyclerThreadData *)data;
869  if (ftd->output_thread_data != NULL)
870  OutputFlowLogThreadDeinit(t, ftd->output_thread_data);
871 
872  SCFree(data);
873  return TM_ECODE_OK;
874 }
875 
876 /** \brief Thread that manages timed out flows.
877  *
878  * \param td ThreadVars casted to void ptr
879  */
880 static TmEcode FlowRecycler(ThreadVars *th_v, void *thread_data)
881 {
882  struct timeval ts;
883  struct timespec cond_time;
884  int flow_update_delay_sec = FLOW_NORMAL_MODE_UPDATE_DELAY_SEC;
885  int flow_update_delay_nsec = FLOW_NORMAL_MODE_UPDATE_DELAY_NSEC;
886  uint64_t recycled_cnt = 0;
887  FlowRecyclerThreadData *ftd = (FlowRecyclerThreadData *)thread_data;
888  BUG_ON(ftd == NULL);
889 
890  memset(&ts, 0, sizeof(ts));
891 
892  while (1)
893  {
894  if (TmThreadsCheckFlag(th_v, THV_PAUSE)) {
895  TmThreadsSetFlag(th_v, THV_PAUSED);
896  TmThreadTestThreadUnPaused(th_v);
897  TmThreadsUnsetFlag(th_v, THV_PAUSED);
898  }
899 
900  /* Get the time */
901  memset(&ts, 0, sizeof(ts));
902  TimeGet(&ts);
903  SCLogDebug("ts %" PRIdMAX "", (intmax_t)ts.tv_sec);
904 
905  uint32_t len = 0;
906  FQLOCK_LOCK(&flow_recycle_q);
907  len = flow_recycle_q.len;
908  FQLOCK_UNLOCK(&flow_recycle_q);
909 
910  /* Loop through the queue and clean up all flows in it */
911  if (len) {
912  Flow *f;
913 
914  while ((f = FlowDequeue(&flow_recycle_q)) != NULL) {
915  FLOWLOCK_WRLOCK(f);
916 
917  (void)OutputFlowLog(th_v, ftd->output_thread_data, f);
918 
919  FlowClearMemory (f, f->protomap);
920  FLOWLOCK_UNLOCK(f);
921  FlowMoveToSpare(f);
922  recycled_cnt++;
923  }
924  }
925 
926  SCLogDebug("%u flows to recycle", len);
927 
928  if (TmThreadsCheckFlag(th_v, THV_KILL)) {
929  StatsSyncCounters(th_v);
930  break;
931  }
932 
933  cond_time.tv_sec = time(NULL) + flow_update_delay_sec;
934  cond_time.tv_nsec = flow_update_delay_nsec;
935  SCCtrlMutexLock(&flow_recycler_ctrl_mutex);
936  SCCtrlCondTimedwait(&flow_recycler_ctrl_cond,
937  &flow_recycler_ctrl_mutex, &cond_time);
938  SCCtrlMutexUnlock(&flow_recycler_ctrl_mutex);
939 
940  SCLogDebug("woke up...");
941 
942  StatsSyncCountersIfSignalled(th_v);
943  }
944 
945  SCLogPerf("%"PRIu64" flows processed", recycled_cnt);
946 
947  return TM_ECODE_OK;
948 }
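/* The recycler drains flow_recycle_q: every flow pulled off the queue is run
 * through the flow output loggers (OutputFlowLog()), wiped with
 * FlowClearMemory() and returned to the spare pool with FlowMoveToSpare(),
 * keeping the potentially expensive logging work out of the manager
 * thread(s). */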
949 
950 static int FlowRecyclerReadyToShutdown(void)
951 {
952  uint32_t len = 0;
953  FQLOCK_LOCK(&flow_recycle_q);
954  len = flow_recycle_q.len;
955  FQLOCK_UNLOCK(&flow_recycle_q);
956 
957  return ((len == 0));
958 }
959 
960 /** \brief spawn the flow recycler thread */
961 void FlowRecyclerThreadSpawn()
962 {
963 #ifdef AFLFUZZ_DISABLE_MGTTHREADS
964  return;
965 #endif
966  intmax_t setting = 1;
967  (void)ConfGetInt("flow.recyclers", &setting);
968 
969  if (setting < 1 || setting > 1024) {
970  SCLogError(SC_ERR_INVALID_ARGUMENTS,
971  "invalid flow.recyclers setting %"PRIdMAX, setting);
972  exit(EXIT_FAILURE);
973  }
974  flowrec_number = (uint32_t)setting;
975 
976  SCLogConfig("using %u flow recycler threads", flowrec_number);
977 
978  SCCtrlCondInit(&flow_recycler_ctrl_cond, NULL);
979  SCCtrlMutexInit(&flow_recycler_ctrl_mutex, NULL);
980 
981 
982  uint32_t u;
983  for (u = 0; u < flowrec_number; u++)
984  {
985  ThreadVars *tv_flowmgr = NULL;
986 
987  char name[TM_THREAD_NAME_MAX];
988  snprintf(name, sizeof(name), "%s#%02u", thread_name_flow_rec, u+1);
989 
990  tv_flowmgr = TmThreadCreateMgmtThreadByName(name,
991  "FlowRecycler", 0);
992  BUG_ON(tv_flowmgr == NULL);
993 
994  if (tv_flowmgr == NULL) {
995  printf("ERROR: TmThreadsCreate failed\n");
996  exit(1);
997  }
998  if (TmThreadSpawn(tv_flowmgr) != TM_ECODE_OK) {
999  printf("ERROR: TmThreadSpawn failed\n");
1000  exit(1);
1001  }
1002  }
1003  return;
1004 }
1005 
1006 /**
1007  * \brief Used to disable flow recycler thread(s).
1008  *
1009  * \note this should only be called when the flow manager is already gone
1010  *
1011  * \todo Kinda hackish since it uses the tv name to identify flow recycler
1012  * thread. We need an all weather identification scheme.
1013  */
1014 void FlowDisableFlowRecyclerThread(void)
1015 {
1016 #ifdef AFLFUZZ_DISABLE_MGTTHREADS
1017  return;
1018 #endif
1019  ThreadVars *tv = NULL;
1020  int cnt = 0;
1021 
1022  /* move all flows still in the hash to the recycler queue */
1023  FlowCleanupHash();
1024 
1025  /* make sure all flows are processed */
1026  do {
1027  SCCtrlCondSignal(&flow_recycler_ctrl_cond);
1028  usleep(10);
1029  } while (FlowRecyclerReadyToShutdown() == 0);
1030 
1031  /* wake up threads */
1032  uint32_t u;
1033  for (u = 0; u < flowrec_number; u++)
1034  SCCtrlCondSignal(&flow_recycler_ctrl_cond);
1035 
1036  SCMutexLock(&tv_root_lock);
1037  /* flow recycler thread(s) is/are a part of mgmt threads */
1038  tv = tv_root[TVT_MGMT];
1039  while (tv != NULL)
1040  {
1041  if (strncasecmp(tv->name, thread_name_flow_rec,
1042  strlen(thread_name_flow_rec)) == 0)
1043  {
1044  TmThreadsSetFlag(tv, THV_KILL);
1045  cnt++;
1046  }
1047  tv = tv->next;
1048  }
1049  SCMutexUnlock(&tv_root_lock);
1050 
1051  struct timeval start_ts;
1052  struct timeval cur_ts;
1053  gettimeofday(&start_ts, NULL);
1054 
1055 again:
1056  gettimeofday(&cur_ts, NULL);
1057  if ((cur_ts.tv_sec - start_ts.tv_sec) > 60) {
1058  FatalError(SC_ERR_SHUTDOWN, "unable to get all flow recycler "
1059  "threads to shutdown in time");
1060  }
1061 
1062  SCMutexLock(&tv_root_lock);
1063  tv = tv_root[TVT_MGMT];
1064  while (tv != NULL)
1065  {
1066  if (strncasecmp(tv->name, thread_name_flow_rec,
1067  strlen(thread_name_flow_rec)) == 0)
1068  {
1069  if (!TmThreadsCheckFlag(tv, THV_RUNNING_DONE)) {
1070  SCMutexUnlock(&tv_root_lock);
1071  /* sleep outside lock */
1072  SleepMsec(1);
1073  goto again;
1074  }
1075  }
1076  tv = tv->next;
1077  }
1078  SCMutexUnlock(&tv_root_lock);
1079 
1080  /* wake up threads, another try */
1081  for (u = 0; u < flowrec_number; u++)
1082  SCCtrlCondSignal(&flow_recycler_ctrl_cond);
1083 
1084  /* reset count, so we can kill and respawn (unix socket) */
1085  SC_ATOMIC_SET(flowrec_cnt, 0);
1086  return;
1087 }
1088 
1089 void TmModuleFlowManagerRegister(void)
1090 {
1091  tmm_modules[TMM_FLOWMANAGER].name = "FlowManager";
1092  tmm_modules[TMM_FLOWMANAGER].ThreadInit = FlowManagerThreadInit;
1093  tmm_modules[TMM_FLOWMANAGER].ThreadDeinit = FlowManagerThreadDeinit;
1094 // tmm_modules[TMM_FLOWMANAGER].RegisterTests = FlowManagerRegisterTests;
1095  tmm_modules[TMM_FLOWMANAGER].Management = FlowManager;
1096  tmm_modules[TMM_FLOWMANAGER].cap_flags = 0;
1097  tmm_modules[TMM_FLOWMANAGER].flags = TM_FLAG_MANAGEMENT_TM;
1098  SCLogDebug("%s registered", tmm_modules[TMM_FLOWMANAGER].name);
1099 
1100  SC_ATOMIC_INIT(flowmgr_cnt);
1101  SC_ATOMIC_INIT(flow_timeouts);
1102 }
1103 
1104 void TmModuleFlowRecyclerRegister(void)
1105 {
1106  tmm_modules[TMM_FLOWRECYCLER].name = "FlowRecycler";
1107  tmm_modules[TMM_FLOWRECYCLER].ThreadInit = FlowRecyclerThreadInit;
1108  tmm_modules[TMM_FLOWRECYCLER].ThreadDeinit = FlowRecyclerThreadDeinit;
1109 // tmm_modules[TMM_FLOWRECYCLER].RegisterTests = FlowRecyclerRegisterTests;
1110  tmm_modules[TMM_FLOWRECYCLER].Management = FlowRecycler;
1111  tmm_modules[TMM_FLOWRECYCLER].cap_flags = 0;
1112  tmm_modules[TMM_FLOWRECYCLER].flags = TM_FLAG_MANAGEMENT_TM;
1113  SCLogDebug("%s registered", tmm_modules[TMM_FLOWRECYCLER].name);
1114 
1115  SC_ATOMIC_INIT(flowrec_cnt);
1116 }
1117 
1118 #ifdef UNITTESTS
1119 
1120 /**
1121  * \test Test the timing out of a flow with a fresh TcpSession
1122  * (just initialized, no data segments) in normal mode.
1123  *
1124  * \retval On success it returns 1 and on failure 0.
1125  */
1126 
1127 static int FlowMgrTest01 (void)
1128 {
1129  TcpSession ssn;
1130  Flow f;
1131  FlowBucket fb;
1132  struct timeval ts;
1133 
1134  FlowQueueInit(&flow_spare_q);
1135 
1136  memset(&ssn, 0, sizeof(TcpSession));
1137  memset(&f, 0, sizeof(Flow));
1138  memset(&ts, 0, sizeof(ts));
1139  memset(&fb, 0, sizeof(FlowBucket));
1140 
1141  FBLOCK_INIT(&fb);
1142 
1143  FLOW_INITIALIZE(&f);
1145 
1146  TimeGet(&ts);
1147  f.lastts.tv_sec = ts.tv_sec - 5000;
1148  f.protoctx = &ssn;
1149  f.fb = &fb;
1150 
1151  f.proto = IPPROTO_TCP;
1152 
1153  int32_t next_ts = 0;
1154  int state = SC_ATOMIC_GET(f.flow_state);
1155  if (FlowManagerFlowTimeout(&f, state, &ts, &next_ts) != 1 && FlowManagerFlowTimedOut(&f, &ts) != 1) {
1156  FBLOCK_DESTROY(&fb);
1157  FLOW_DESTROY(&f);
1158  FlowQueueDestroy(&flow_spare_q);
1159  return 0;
1160  }
1161 
1162  FBLOCK_DESTROY(&fb);
1163  FLOW_DESTROY(&f);
1164 
1165  FlowQueueDestroy(&flow_spare_q);
1166  return 1;
1167 }
1168 
1169 /**
1170  * \test Test the timing out of a flow with a TcpSession
1171  * (with data segments) in normal mode.
1172  *
1173  * \retval On success it returns 1 and on failure 0.
1174  */
1175 
1176 static int FlowMgrTest02 (void)
1177 {
1178  TcpSession ssn;
1179  Flow f;
1180  FlowBucket fb;
1181  struct timeval ts;
1182  TcpSegment seg;
1183  TcpStream client;
1184 
1185  FlowQueueInit(&flow_spare_q);
1186 
1187  memset(&ssn, 0, sizeof(TcpSession));
1188  memset(&f, 0, sizeof(Flow));
1189  memset(&fb, 0, sizeof(FlowBucket));
1190  memset(&ts, 0, sizeof(ts));
1191  memset(&seg, 0, sizeof(TcpSegment));
1192  memset(&client, 0, sizeof(TcpStream));
1193 
1194  FBLOCK_INIT(&fb);
1195  FLOW_INITIALIZE(&f);
1197 
1198  TimeGet(&ts);
1199  TCP_SEG_LEN(&seg) = 3;
1200  TCPSEG_RB_INSERT(&client.seg_tree, &seg);
1201  ssn.client = client;
1202  ssn.server = client;
1203  ssn.state = TCP_ESTABLISHED;
1204  f.lastts.tv_sec = ts.tv_sec - 5000;
1205  f.protoctx = &ssn;
1206  f.fb = &fb;
1207  f.proto = IPPROTO_TCP;
1208 
1209  int32_t next_ts = 0;
1210  int state = SC_ATOMIC_GET(f.flow_state);
1211  if (FlowManagerFlowTimeout(&f, state, &ts, &next_ts) != 1 && FlowManagerFlowTimedOut(&f, &ts) != 1) {
1212  FBLOCK_DESTROY(&fb);
1213  FLOW_DESTROY(&f);
1214  FlowQueueDestroy(&flow_spare_q);
1215  return 0;
1216  }
1217  FBLOCK_DESTROY(&fb);
1218  FLOW_DESTROY(&f);
1219  FlowQueueDestroy(&flow_spare_q);
1220  return 1;
1221 
1222 }
1223 
1224 /**
1225  * \test Test the timing out of a flow with a fresh TcpSession
1226  * (just initialized, no data segments) in emergency mode.
1227  *
1228  * \retval On success it returns 1 and on failure 0.
1229  */
1230 
1231 static int FlowMgrTest03 (void)
1232 {
1233  TcpSession ssn;
1234  Flow f;
1235  FlowBucket fb;
1236  struct timeval ts;
1237 
1238  FlowQueueInit(&flow_spare_q);
1239 
1240  memset(&ssn, 0, sizeof(TcpSession));
1241  memset(&f, 0, sizeof(Flow));
1242  memset(&ts, 0, sizeof(ts));
1243  memset(&fb, 0, sizeof(FlowBucket));
1244 
1245  FBLOCK_INIT(&fb);
1246  FLOW_INITIALIZE(&f);
1248 
1249  TimeGet(&ts);
1250  ssn.state = TCP_SYN_SENT;
1251  f.lastts.tv_sec = ts.tv_sec - 300;
1252  f.protoctx = &ssn;
1253  f.fb = &fb;
1254  f.proto = IPPROTO_TCP;
1255  f.flags |= FLOW_EMERGENCY;
1256 
1257  int next_ts = 0;
1258  int state = SC_ATOMIC_GET(f.flow_state);
1259  if (FlowManagerFlowTimeout(&f, state, &ts, &next_ts) != 1 && FlowManagerFlowTimedOut(&f, &ts) != 1) {
1260  FBLOCK_DESTROY(&fb);
1261  FLOW_DESTROY(&f);
1262  FlowQueueDestroy(&flow_spare_q);
1263  return 0;
1264  }
1265 
1266  FBLOCK_DESTROY(&fb);
1267  FLOW_DESTROY(&f);
1268  FlowQueueDestroy(&flow_spare_q);
1269  return 1;
1270 }
1271 
1272 /**
1273  * \test Test the timing out of a flow with a TcpSession
1274  * (with data segments) in emergency mode.
1275  *
1276  * \retval On success it returns 1 and on failure 0.
1277  */
1278 
1279 static int FlowMgrTest04 (void)
1280 {
1281 
1282  TcpSession ssn;
1283  Flow f;
1284  FlowBucket fb;
1285  struct timeval ts;
1286  TcpSegment seg;
1287  TcpStream client;
1288 
1290 
1291  memset(&ssn, 0, sizeof(TcpSession));
1292  memset(&f, 0, sizeof(Flow));
1293  memset(&fb, 0, sizeof(FlowBucket));
1294  memset(&ts, 0, sizeof(ts));
1295  memset(&seg, 0, sizeof(TcpSegment));
1296  memset(&client, 0, sizeof(TcpStream));
1297 
1298  FBLOCK_INIT(&fb);
1299  FLOW_INITIALIZE(&f);
1301 
1302  TimeGet(&ts);
1303  TCP_SEG_LEN(&seg) = 3;
1304  TCPSEG_RB_INSERT(&client.seg_tree, &seg);
1305  ssn.client = client;
1306  ssn.server = client;
1307  ssn.state = TCP_ESTABLISHED;
1308  f.lastts.tv_sec = ts.tv_sec - 5000;
1309  f.protoctx = &ssn;
1310  f.fb = &fb;
1311  f.proto = IPPROTO_TCP;
1312  f.flags |= FLOW_EMERGENCY;
1313 
1314  int next_ts = 0;
1315  int state = SC_ATOMIC_GET(f.flow_state);
1316  if (FlowManagerFlowTimeout(&f, state, &ts, &next_ts) != 1 && FlowManagerFlowTimedOut(&f, &ts) != 1) {
1317  FBLOCK_DESTROY(&fb);
1318  FLOW_DESTROY(&f);
1319  FlowQueueDestroy(&flow_spare_q);
1320  return 0;
1321  }
1322 
1323  FBLOCK_DESTROY(&fb);
1324  FLOW_DESTROY(&f);
1325  FlowQueueDestroy(&flow_spare_q);
1326  return 1;
1327 }
1328 
1329 /**
1330  * \test Test flow allocations when it reaches memcap
1331  *
1332  *
1333  * \retval On success it returns 1 and on failure 0.
1334  */
1335 
1336 static int FlowMgrTest05 (void)
1337 {
1338  int result = 0;
1339 
1341  FlowConfig backup;
1342  memcpy(&backup, &flow_config, sizeof(FlowConfig));
1343 
1344  uint32_t ini = 0;
1345  uint32_t end = flow_spare_q.len;
1346  SC_ATOMIC_SET(flow_config.memcap, 10000);
1347  flow_config.prealloc = 100;
1348 
1349  /* Let's get the flow_spare_q empty */
1350  UTHBuildPacketOfFlows(ini, end, 0);
1351 
1352  /* And now let's try to reach the memcap val */
1353  while (FLOW_CHECK_MEMCAP(sizeof(Flow))) {
1354  ini = end + 1;
1355  end = end + 2;
1356  UTHBuildPacketOfFlows(ini, end, 0);
1357  }
1358 
1359  /* should time out normal */
1360  TimeSetIncrementTime(2000);
1361  ini = end + 1;
1362  end = end + 2;
1363  UTHBuildPacketOfFlows(ini, end, 0);
1364 
1365  struct timeval ts;
1366  TimeGet(&ts);
1367  /* try to time out flows */
1368  FlowTimeoutCounters counters = { 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0,0};
1369  FlowTimeoutHash(&ts, 0 /* check all */, 0, flow_config.hash_size, &counters);
1370 
1371  if (flow_recycle_q.len > 0) {
1372  result = 1;
1373  }
1374 
1375  memcpy(&flow_config, &backup, sizeof(FlowConfig));
1376  FlowShutdown();
1377  return result;
1378 }
1379 #endif /* UNITTESTS */
1380 
1381 /**
1382  * \brief Function to register the Flow Unittests.
1383  */
1384 void FlowMgrRegisterTests(void)
1385 {
1386 #ifdef UNITTESTS
1387  UtRegisterTest("FlowMgrTest01 -- Timeout a flow having fresh TcpSession",
1388  FlowMgrTest01);
1389  UtRegisterTest("FlowMgrTest02 -- Timeout a flow having TcpSession with segments",
1390  FlowMgrTest02);
1391  UtRegisterTest("FlowMgrTest03 -- Timeout a flow in emergency having fresh TcpSession",
1392  FlowMgrTest03);
1393  UtRegisterTest("FlowMgrTest04 -- Timeout a flow in emergency having TcpSession with segments",
1394  FlowMgrTest04);
1395  UtRegisterTest("FlowMgrTest05 -- Test flow Allocations when it reach memcap",
1396  FlowMgrTest05);
1397 #endif /* UNITTESTS */
1398 }