/* Copyright (C) 2007-2014 Open Information Security Foundation
 *
 * You can copy, redistribute or modify this Program under the terms of
 * the GNU General Public License version 2 as published by the Free
 * Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/**
 * \file
 *
 * \author Victor Julien <victor@inliniac.net>
 *
 * Packetpool queue handlers. Packet pool is implemented as a stack.
 */

#include "suricata.h"
#include "packet-queue.h"
#include "decode.h"
#include "detect.h"
#include "detect-uricontent.h"
#include "threads.h"
#include "threadvars.h"
#include "flow.h"
#include "flow-util.h"
#include "host.h"

#include "stream.h"
#include "stream-tcp-reassemble.h"

#include "tm-queuehandlers.h"
#include "tm-threads.h"
#include "tm-modules.h"

#include "pkt-var.h"

#include "tmqh-packetpool.h"

#include "util-debug.h"
#include "util-error.h"
#include "util-profiling.h"
#include "util-device.h"
/* Number of freed packets to save for one pool before returning them to the
 * owning pool's return stack. */
#define MAX_PENDING_RETURN_PACKETS 32
static uint32_t max_pending_return_packets = MAX_PENDING_RETURN_PACKETS;

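/* Rough shape of the per-thread pool this file operates on. This is a
 * simplified sketch for orientation only; the authoritative definition lives
 * in tmqh-packetpool.h, and the exact layout here is an assumption.
 *
 * \code
 *     typedef struct PktPool_ {
 *         Packet *head;                      // local stack, touched only by the owner thread
 *         // batch of packets owned by another thread's pool, waiting to be flushed
 *         struct PktPool_ *pending_pool;
 *         Packet *pending_head;
 *         Packet *pending_tail;
 *         uint32_t pending_count;
 *         PktPoolLockedStack return_stack;   // head + mutex, cond and sync_now, used by
 *                                            // other threads to hand packets back
 *     } PktPool;
 * \endcode
 */
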
#ifdef TLS
__thread PktPool thread_pkt_pool;

static inline PktPool *GetThreadPacketPool(void)
{
    return &thread_pkt_pool;
}
#else
/* __thread not supported. */
static pthread_key_t pkt_pool_thread_key;
static SCMutex pkt_pool_thread_key_mutex = SCMUTEX_INITIALIZER;
static int pkt_pool_thread_key_initialized = 0;

static void PktPoolThreadDestroy(void *buf)
{
    SCFreeAligned(buf);
}

static void TmqhPacketPoolInit(void)
{
    SCMutexLock(&pkt_pool_thread_key_mutex);
    if (pkt_pool_thread_key_initialized) {
        /* Key has already been created. */
        SCMutexUnlock(&pkt_pool_thread_key_mutex);
        return;
    }

    /* Create the pthread key that is used to look up the thread specific
     * data buffer. Needs to be created only once.
     */
    int r = pthread_key_create(&pkt_pool_thread_key, PktPoolThreadDestroy);
    if (r != 0) {
        SCLogError(SC_ERR_MEM_ALLOC, "pthread_key_create failed with %d", r);
        exit(EXIT_FAILURE);
    }

    pkt_pool_thread_key_initialized = 1;
    SCMutexUnlock(&pkt_pool_thread_key_mutex);
}

static PktPool *ThreadPacketPoolCreate(void)
{
    TmqhPacketPoolInit();

    /* Create a new pool for this thread. */
    PktPool *pool = (PktPool *)SCMallocAligned(sizeof(PktPool), CLS);
    if (pool == NULL) {
        SCLogError(SC_ERR_MEM_ALLOC, "malloc failed");
        exit(EXIT_FAILURE);
    }
    memset(pool, 0x0, sizeof(*pool));

    int r = pthread_setspecific(pkt_pool_thread_key, pool);
    if (r != 0) {
        SCLogError(SC_ERR_MEM_ALLOC, "pthread_setspecific failed with %d", r);
        exit(EXIT_FAILURE);
    }

    return pool;
}

static inline PktPool *GetThreadPacketPool(void)
{
    PktPool *pool = (PktPool *)pthread_getspecific(pkt_pool_thread_key);
    if (pool == NULL)
        pool = ThreadPacketPoolCreate();

    return pool;
}
#endif

/**
 * \brief TmqhPacketpoolRegister
 * \initonly
 */
void TmqhPacketpoolRegister (void)
{
    tmqh_table[TMQH_PACKETPOOL].name = "packetpool";
    tmqh_table[TMQH_PACKETPOOL].InHandler = TmqhInputPacketpool;
    tmqh_table[TMQH_PACKETPOOL].OutHandler = TmqhOutputPacketpool;
}

static int PacketPoolIsEmpty(PktPool *pool)
{
    /* Check local stack first. */
    if (pool->head || pool->return_stack.head)
        return 0;

    return 1;
}

void PacketPoolWait(void)
{
    PktPool *my_pool = GetThreadPacketPool();

    if (PacketPoolIsEmpty(my_pool)) {
        SCMutexLock(&my_pool->return_stack.mutex);
        SC_ATOMIC_ADD(my_pool->return_stack.sync_now, 1);
        SCCondWait(&my_pool->return_stack.cond, &my_pool->return_stack.mutex);
        SCMutexUnlock(&my_pool->return_stack.mutex);
    }

    while (PacketPoolIsEmpty(my_pool))
        cc_barrier();
}

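/* Illustrative usage sketch (an assumption, not taken from a real capture
 * module): a receive loop typically blocks in PacketPoolWait() until its pool
 * has packets again, then picks one up with PacketPoolGetPacket().
 *
 * \code
 *     while (run) {
 *         PacketPoolWait();
 *         Packet *p = PacketPoolGetPacket();
 *         if (p == NULL)
 *             continue;   // pool drained between wait and get, retry
 *         // ... fill p from the capture device and hand it to the engine ...
 *     }
 * \endcode
 */
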
/** \brief Wait until we have the requested number of packets in the pool
 *
 * In some cases waiting for packets is undesirable. Especially when
 * a wait would happen under a lock of some kind, other parts of the
 * engine could have to wait.
 *
 * This function only returns when at least N packets are in our pool.
 *
 * If counting in our pool's main stack didn't give us the number we
 * are seeking, we check if the return stack is filled and add those
 * to our main stack. Then we retry.
 *
 * \param n number of packets needed
 */
void PacketPoolWaitForN(int n)
{
    PktPool *my_pool = GetThreadPacketPool();
    Packet *p, *pp;

    while (1) {
        PacketPoolWait();

        /* count packets in our stack */
        int i = 0;
        pp = p = my_pool->head;
        while (p != NULL) {
            if (++i == n)
                return;

            pp = p;
            p = p->next;
        }

        /* check return stack, return to our pool and retry counting */
        if (my_pool->return_stack.head != NULL) {
            SCMutexLock(&my_pool->return_stack.mutex);
            /* Move all the packets from the locked return stack to the local stack. */
            if (pp) {
                pp->next = my_pool->return_stack.head;
            } else {
                my_pool->head = my_pool->return_stack.head;
            }
            my_pool->return_stack.head = NULL;
            SC_ATOMIC_RESET(my_pool->return_stack.sync_now);
            SCMutexUnlock(&my_pool->return_stack.mutex);

        /* or signal that we need packets and wait */
        } else {
            SCMutexLock(&my_pool->return_stack.mutex);
            SC_ATOMIC_ADD(my_pool->return_stack.sync_now, 1);
            SCCondWait(&my_pool->return_stack.cond, &my_pool->return_stack.mutex);
            SCMutexUnlock(&my_pool->return_stack.mutex);
        }
    }
}

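/* Illustrative usage sketch (an assumption, not a quote from a real caller):
 * code that must create several pseudo packets while holding a lock can
 * reserve them up front, so it never blocks on the pool while the lock is
 * held. The lock name below is hypothetical.
 *
 * \code
 *     PacketPoolWaitForN(3);          // make sure 3 packets are available
 *     SCMutexLock(&some_lock);        // hypothetical lock
 *     Packet *p1 = PacketPoolGetPacket();
 *     Packet *p2 = PacketPoolGetPacket();
 *     Packet *p3 = PacketPoolGetPacket();
 *     SCMutexUnlock(&some_lock);
 * \endcode
 */
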
/** \brief Store an initialized packet in the pool
 *
 * \warning Use *only* at init, not at packet runtime
 */
static void PacketPoolStorePacket(Packet *p)
{
    /* Clear the PKT_ALLOC flag, since that indicates to push back
     * onto the ring buffer. */
    p->flags &= ~PKT_ALLOC;
    p->pool = GetThreadPacketPool();

    PacketPoolReturnPacket(p);
}

static void PacketPoolGetReturnedPackets(PktPool *pool)
{
    SCMutexLock(&pool->return_stack.mutex);
    /* Move all the packets from the locked return stack to the local stack. */
    pool->head = pool->return_stack.head;
    pool->return_stack.head = NULL;
    SCMutexUnlock(&pool->return_stack.mutex);
}

/** \brief Get a new packet from the packet pool
 *
 * Only allocates from the thread's local stack. If the local stack is empty,
 * first move all the return stack packets to the local stack and try again;
 * no new packets are allocated here.
 * \retval Packet pointer, or NULL if the pool is empty.
 */
Packet *PacketPoolGetPacket(void)
{
    PktPool *pool = GetThreadPacketPool();
#ifdef DEBUG_VALIDATION
    BUG_ON(pool->initialized == 0);
    BUG_ON(pool->destroyed == 1);
#endif /* DEBUG_VALIDATION */
    if (pool->head) {
        /* Stack is not empty. */
        Packet *p = pool->head;
        pool->head = p->next;
        p->pool = pool;
        PACKET_REINIT(p);
        return p;
    }

    /* Local stack is empty, so check the return stack, which requires
     * locking. */
    PacketPoolGetReturnedPackets(pool);

    /* Try to allocate again. Need to check for not empty again, since the
     * return stack might have been empty too.
     */
    if (pool->head) {
        /* Stack is not empty. */
        Packet *p = pool->head;
        pool->head = p->next;
        p->pool = pool;
        PACKET_REINIT(p);
        return p;
    }

    /* Failed to allocate a packet, so return NULL. */
    /* Optionally, could allocate a new packet here. */
    return NULL;
}

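/* Illustrative usage sketch (an assumption about a typical caller): code that
 * must have a packet even when the pool is exhausted can fall back to
 * PacketGetFromAlloc(), which heap-allocates a packet carrying the PKT_ALLOC
 * flag so it is freed rather than recycled when released.
 *
 * \code
 *     Packet *p = PacketPoolGetPacket();
 *     if (p == NULL)
 *         p = PacketGetFromAlloc();       // may still fail under memory pressure
 *     if (p != NULL) {
 *         // ... use the packet ...
 *         TmqhOutputPacketpool(NULL, p);  // release through the pool handler
 *     }
 * \endcode
 */
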
/** \brief Return a packet to the packet pool
 *
 * Packets owned by this thread's pool go straight back onto its local stack.
 * Packets owned by another thread's pool are batched on a pending list and
 * pushed onto that pool's locked return stack once the batch is full or the
 * owner signalled that it is waiting.
 */
void PacketPoolReturnPacket(Packet *p)
{
    PktPool *my_pool = GetThreadPacketPool();

    PACKET_RELEASE_REFS(p);

    PktPool *pool = p->pool;
    if (pool == NULL) {
        PacketFree(p);
        return;
    }
#ifdef DEBUG_VALIDATION
    BUG_ON(pool->initialized == 0);
    BUG_ON(pool->destroyed == 1);
    BUG_ON(my_pool->initialized == 0);
    BUG_ON(my_pool->destroyed == 1);
#endif /* DEBUG_VALIDATION */

    if (pool == my_pool) {
        /* Push back onto this thread's own stack, so no locking. */
        p->next = my_pool->head;
        my_pool->head = p;
    } else {
        PktPool *pending_pool = my_pool->pending_pool;
        if (pending_pool == NULL) {
            /* No pending packet, so store the current packet. */
            p->next = NULL;
            my_pool->pending_pool = pool;
            my_pool->pending_head = p;
            my_pool->pending_tail = p;
            my_pool->pending_count = 1;
        } else if (pending_pool == pool) {
            /* Another packet for the pending pool list. */
            p->next = my_pool->pending_head;
            my_pool->pending_head = p;
            my_pool->pending_count++;
            if (SC_ATOMIC_GET(pool->return_stack.sync_now) || my_pool->pending_count > max_pending_return_packets) {
                /* Return the entire list of pending packets. */
                SCMutexLock(&pool->return_stack.mutex);
                my_pool->pending_tail->next = pool->return_stack.head;
                pool->return_stack.head = my_pool->pending_head;
                SC_ATOMIC_RESET(pool->return_stack.sync_now);
                SCMutexUnlock(&pool->return_stack.mutex);
                SCCondSignal(&pool->return_stack.cond);
                /* Clear the list of pending packets to return. */
                my_pool->pending_pool = NULL;
                my_pool->pending_head = NULL;
                my_pool->pending_tail = NULL;
                my_pool->pending_count = 0;
            }
        } else {
            /* Push onto return stack for this pool */
            SCMutexLock(&pool->return_stack.mutex);
            p->next = pool->return_stack.head;
            pool->return_stack.head = p;
            SC_ATOMIC_RESET(pool->return_stack.sync_now);
            SCMutexUnlock(&pool->return_stack.mutex);
            SCCondSignal(&pool->return_stack.cond);
        }
    }
}

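/* Illustrative sketch (an assumption, with a hypothetical worker queue): a
 * worker that keeps releasing packets owned by a capture thread's pool only
 * takes that pool's return_stack mutex roughly once every
 * max_pending_return_packets releases, or immediately when the owner set
 * sync_now because it is starved. The release normally happens through
 * p->ReleasePacket, which for pool-owned packets leads back to
 * PacketPoolReturnPacket().
 *
 * \code
 *     Packet *p;
 *     while ((p = PacketDequeue(&worker_queue)) != NULL) {   // hypothetical queue
 *         // ... detect / log ...
 *         TmqhOutputPacketpool(NULL, p);
 *     }
 * \endcode
 */
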
void PacketPoolInitEmpty(void)
{
#ifndef TLS
    TmqhPacketPoolInit();
#endif

    PktPool *my_pool = GetThreadPacketPool();

#ifdef DEBUG_VALIDATION
    BUG_ON(my_pool->initialized);
    my_pool->initialized = 1;
    my_pool->destroyed = 0;
#endif /* DEBUG_VALIDATION */

    SCMutexInit(&my_pool->return_stack.mutex, NULL);
    SCCondInit(&my_pool->return_stack.cond, NULL);
    SC_ATOMIC_INIT(my_pool->return_stack.sync_now);
}

void PacketPoolInit(void)
{
    extern intmax_t max_pending_packets;

#ifndef TLS
    TmqhPacketPoolInit();
#endif

    PktPool *my_pool = GetThreadPacketPool();

#ifdef DEBUG_VALIDATION
    BUG_ON(my_pool->initialized);
    my_pool->initialized = 1;
    my_pool->destroyed = 0;
#endif /* DEBUG_VALIDATION */

    SCMutexInit(&my_pool->return_stack.mutex, NULL);
    SCCondInit(&my_pool->return_stack.cond, NULL);
    SC_ATOMIC_INIT(my_pool->return_stack.sync_now);

    /* pre allocate packets */
    SCLogDebug("preallocating packets... packet size %" PRIuMAX "",
            (uintmax_t)SIZE_OF_PACKET);
    int i = 0;
    for (i = 0; i < max_pending_packets; i++) {
        Packet *p = PacketGetFromAlloc();
        if (unlikely(p == NULL)) {
            SCLogError(SC_ERR_FATAL, "Fatal error encountered while allocating a packet. Exiting...");
            exit(EXIT_FAILURE);
        }
        PacketPoolStorePacket(p);
    }

    //SCLogInfo("preallocated %"PRIiMAX" packets. Total memory %"PRIuMAX"",
    //        max_pending_packets, (uintmax_t)(max_pending_packets*SIZE_OF_PACKET));
}

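/* Illustrative lifecycle sketch (an assumption about how a thread module
 * would combine the calls in this file; the callback names and signatures are
 * hypothetical): packet-producing threads preallocate a full pool with
 * PacketPoolInit(), purely consuming threads only need the mutex/cond setup
 * from PacketPoolInitEmpty(), and both tear down with PacketPoolDestroy().
 *
 * \code
 *     static TmEcode ExampleThreadInit(ThreadVars *tv, const void *initdata, void **data)
 *     {
 *         PacketPoolInit();            // or PacketPoolInitEmpty() for pure consumers
 *         return TM_ECODE_OK;
 *     }
 *
 *     static TmEcode ExampleThreadDeinit(ThreadVars *tv, void *data)
 *     {
 *         PacketPoolDestroy();
 *         return TM_ECODE_OK;
 *     }
 * \endcode
 */
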
void PacketPoolDestroy(void)
{
    Packet *p = NULL;
    PktPool *my_pool = GetThreadPacketPool();

#ifdef DEBUG_VALIDATION
    BUG_ON(my_pool->destroyed);
#endif /* DEBUG_VALIDATION */

    if (my_pool && my_pool->pending_pool != NULL) {
        p = my_pool->pending_head;
        while (p) {
            Packet *next_p = p->next;
            PacketFree(p);
            p = next_p;
            my_pool->pending_count--;
        }
#ifdef DEBUG_VALIDATION
        BUG_ON(my_pool->pending_count);
#endif /* DEBUG_VALIDATION */
        my_pool->pending_pool = NULL;
        my_pool->pending_head = NULL;
        my_pool->pending_tail = NULL;
    }

    while ((p = PacketPoolGetPacket()) != NULL) {
        PacketFree(p);
    }

    SC_ATOMIC_DESTROY(my_pool->return_stack.sync_now);

#ifdef DEBUG_VALIDATION
    my_pool->initialized = 0;
    my_pool->destroyed = 1;
#endif /* DEBUG_VALIDATION */
}

Packet *TmqhInputPacketpool(ThreadVars *tv)
{
    return PacketPoolGetPacket();
}

void TmqhOutputPacketpool(ThreadVars *t, Packet *p)
{
    bool proot = false;

    SCEnter();
    SCLogDebug("Packet %p, p->root %p, alloced %s", p, p->root, p->flags & PKT_ALLOC ? "true" : "false");

    if (IS_TUNNEL_PKT(p)) {
        SCLogDebug("Packet %p is a tunnel packet: %s",
                p, p->root ? "upper layer" : "tunnel root");

        /* get a lock to access root packet fields */
        SCMutex *m = p->root ? &p->root->tunnel_mutex : &p->tunnel_mutex;
        SCMutexLock(m);

        if (IS_TUNNEL_ROOT_PKT(p)) {
            SCLogDebug("IS_TUNNEL_ROOT_PKT == TRUE");
            const uint16_t outstanding = TUNNEL_PKT_TPR(p) - TUNNEL_PKT_RTV(p);
            SCLogDebug("root pkt: outstanding %u", outstanding);
            if (outstanding == 0) {
                SCLogDebug("no tunnel packets outstanding, no more tunnel "
                        "packet(s) depending on this root");
                /* if this packet is the root and there are no
                 * more tunnel packets to consider
                 *
                 * return it to the pool */
            } else {
                SCLogDebug("tunnel root Packet %p: outstanding > 0, so "
                        "packets are still depending on this root, setting "
                        "SET_TUNNEL_PKT_VERDICTED", p);
                /* if this is the root and there are more tunnel
                 * packets, don't return this to the pool yet. It's still
                 * referenced by the tunnel packets, and we will return it
                 * when we handle them */
                SET_TUNNEL_PKT_VERDICTED(p);

                PACKET_PROFILING_END(p);
                SCMutexUnlock(m);
                SCReturn;
            }
        } else {
            SCLogDebug("NOT IS_TUNNEL_ROOT_PKT, so tunnel pkt");

            TUNNEL_INCR_PKT_RTV_NOLOCK(p);
            const uint16_t outstanding = TUNNEL_PKT_TPR(p) - TUNNEL_PKT_RTV(p);
            SCLogDebug("tunnel pkt: outstanding %u", outstanding);
            /* all tunnel packets are processed except us. Root already
             * processed. So return tunnel pkt and root packet to the
             * pool. */
            if (outstanding == 0 &&
                    p->root && IS_TUNNEL_PKT_VERDICTED(p->root))
            {
                SCLogDebug("root verdicted == true && no outstanding");

                /* handle freeing the root as well */
                SCLogDebug("setting proot = 1 for root pkt, p->root %p "
                        "(tunnel packet %p)", p->root, p);
                proot = true;

                /* fall through */

            } else {
                /* root not ready yet, or not the last tunnel packet,
                 * so get rid of the tunnel pkt only */

                SCLogDebug("NOT IS_TUNNEL_PKT_VERDICTED (%s) || "
                        "outstanding > 0 (%u)",
                        (p->root && IS_TUNNEL_PKT_VERDICTED(p->root)) ? "true" : "false",
                        outstanding);

                /* fall through */
            }
        }
        SCMutexUnlock(m);

        SCLogDebug("tunnel stuff done, move on (proot %d)", proot);
    }

    /* we're done with the tunnel root now as well */
    if (proot == true) {
        SCLogDebug("getting rid of root pkt... alloc'd %s", p->root->flags & PKT_ALLOC ? "true" : "false");

        PACKET_RELEASE_REFS(p->root);
        p->root->ReleasePacket(p->root);
        p->root = NULL;
    }

    PACKET_PROFILING_END(p);

    PACKET_RELEASE_REFS(p);
    p->ReleasePacket(p);

    SCReturn;
}

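/* Worked example (an assumption; the numbers are invented for illustration)
 * of the tunnel accounting above: a root packet decodes into two tunnel
 * packets, so TPR (tunnel packets created from the root) is 2 and RTV (those
 * already returned) starts at 0.
 *
 * \code
 *     // release order            TPR  RTV  outstanding  action
 *     // root released first       2    0        2       mark root VERDICTED, keep it
 *     // tunnel pkt 1 released     2    1        1       release tunnel pkt only
 *     // tunnel pkt 2 released     2    2        0       root verdicted: proot = true,
 *     //                                                  release tunnel pkt and root
 * \endcode
 */
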
/**
 * \brief Release all the packets in the queue back to the packetpool. Mainly
 *        used by threads that have failed and want to return their packets
 *        to the packetpool.
 *
 * \param pq Pointer to the packetqueue from which the packets have to be
 *           returned back to the packetpool
 *
 * \warning this function assumes that the pq does not use locking
 */
void TmqhReleasePacketsToPacketPool(PacketQueue *pq)
{
    Packet *p = NULL;

    if (pq == NULL)
        return;

    while ((p = PacketDequeue(pq)) != NULL)
        TmqhOutputPacketpool(NULL, p);

    return;
}

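/* Illustrative usage sketch (an assumption, with a hypothetical local queue):
 * a thread that has to bail out drains whatever it still had queued so the
 * packets go back to their owners instead of leaking.
 *
 * \code
 *     PacketQueue failed_queue;
 *     memset(&failed_queue, 0, sizeof(failed_queue));
 *     // ... packets were queued here, e.g. with PacketEnqueue() ...
 *     TmqhReleasePacketsToPacketPool(&failed_queue);   // queue must be private/unlocked
 * \endcode
 */
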
/** number of packets to keep reserved when calculating the pending
 * return packets count. This assumes we need at max 10 packets in one
 * PacketPoolWaitForN call. The actual number is 9 now, so this has a
 * bit of margin. */
#define RESERVED_PACKETS 10

/**
 * \brief Set the max_pending_return_packets value
 *
 * Set it to the max pending packets value, divided by the number
 * of listener threads. Normally, in autofp these are the stream/detect/log
 * worker threads (see the worked example after this function).
 *
 * The max_pending_return_packets value needs to stay below the packet
 * pool size of the 'producers' (normally pkt capture threads but also
 * flow timeout injection) to avoid a deadlock where all the 'workers'
 * keep packets in their return pools, while the capture thread can't
 * continue because its pool is empty.
 */
void PacketPoolPostRunmodes(void)
{
    extern intmax_t max_pending_packets;
    intmax_t pending_packets = max_pending_packets;
    if (pending_packets < RESERVED_PACKETS) {
        FatalError(SC_ERR_INVALID_ARGUMENT, "'max-pending-packets' setting "
                "must be at least %d", RESERVED_PACKETS);
    }
    uint32_t threads = TmThreadCountThreadsByTmmFlags(TM_FLAG_DETECT_TM);
    if (threads == 0)
        return;

    uint32_t packets = (pending_packets / threads) - 1;
    if (packets < max_pending_return_packets)
        max_pending_return_packets = packets;

    /* make sure to have a margin in the return logic */
    if (max_pending_return_packets >= RESERVED_PACKETS)
        max_pending_return_packets -= RESERVED_PACKETS;

    SCLogDebug("detect threads %u, max packets %u, max_pending_return_packets %u",
            threads, packets, max_pending_return_packets);
}
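
/* Worked example (an assumption; the runmode numbers are invented, using the
 * default MAX_PENDING_RETURN_PACKETS of 32): with max-pending-packets = 1024
 * and 4 detect threads, packets = (1024 / 4) - 1 = 255. That is not smaller
 * than 32, so max_pending_return_packets stays 32, and after subtracting
 * RESERVED_PACKETS (10) each worker batches at most 22 packets before pushing
 * them back to the owning pool. With a small pool, say max-pending-packets =
 * 64 and 4 threads, packets = 15, so the limit drops to 15 - 10 = 5. */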