Moved scheduler_t to libhydra.
[strongswan.git] / src / libhydra / processing / scheduler.h
/*
 * Copyright (C) 2009 Tobias Brunner
 * Copyright (C) 2005-2007 Martin Willi
 * Copyright (C) 2005 Jan Hutter
 * Hochschule fuer Technik Rapperswil
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

/**
 * @defgroup scheduler scheduler
 * @{ @ingroup hprocessing
 */

#ifndef SCHEDULER_H_
#define SCHEDULER_H_

typedef struct scheduler_t scheduler_t;

#include <library.h>
#include <processing/jobs/job.h>

/**
 * The scheduler queues timed events which are then passed to the processor.
 *
 * The scheduler is implemented as a heap. A heap is a special kind of tree-
 * based data structure that satisfies the following property: if B is a child
 * node of A, then key(A) >= (or <=) key(B). So either the element with the
 * greatest (max-heap) or the smallest (min-heap) key is the root of the heap.
 * We use a min-heap with the key being the absolute unix time at which an
 * event is scheduled. So the root is always the event that will fire next.
 *
 * An earlier implementation of the scheduler used a sorted linked list to store
 * the events. That had the advantage that removing the next event was extremely
 * fast; likewise, adding an event scheduled before or after all other events
 * was equally fast (all in O(1)). The problem was, though, that adding an event
 * in between got slower as the number of events grew larger (O(n)).
 * For each connection there can be several events: IKE-rekey, NAT-keepalive,
 * retransmissions, expire (half-open), and others. So a gateway that may have
 * to handle thousands of concurrent connections has to be able to queue a
 * large number of events as fast as possible. Locking makes this even worse:
 * to provide thread-safety, no events can be processed while an event is being
 * queued, so making the insertion fast is even more important.
 *
 * That's the advantage of the heap. Adding an element to the heap can be
 * achieved in O(log n) - on the other hand, removing the root node also
 * requires O(log n) operations. Consider 10000 queued events. Inserting a new
 * event in the list implementation required up to 10000 comparisons. In the
 * heap implementation, the worst case is about 13.3 comparisons (log2 of
 * 10000). That's a drastic improvement.
 *
 * The implementation itself uses a binary tree mapped to a one-based array to
 * store the elements. This reduces storage overhead and simplifies navigation:
 * the children of the node at position n are at positions 2n and 2n+1 (likewise,
 * the parent of the node at position n is at position [n/2], using integer
 * division). Thus, navigating up and down the tree is reduced to simple index
 * computations.
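 *
 * In code, that index arithmetic might look like this (a simplified sketch;
 * the macro names are made up for illustration):
 * @code
 * // one-based array: heap[1] is the root, index 0 is unused
 * #define PARENT(n)       ((n) / 2)
 * #define LEFT_CHILD(n)   ((n) * 2)
 * #define RIGHT_CHILD(n)  ((n) * 2 + 1)
 * @endcode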
 *
 * Adding an element to the heap works as follows: The heap is always filled
 * from left to right until a row is full; then the next row is filled. Mapped
 * to an array, this becomes as simple as putting the new element into the first
 * free position. In a one-based array that position equals the number of
 * elements currently stored in the heap. Then the heap property has to be
 * restored, i.e. the new element has to be "bubbled up" the tree until the
 * parent node's key is smaller or the element has become the new root of the
 * tree.
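 *
 * A sketch of this "bubble up" step, assuming a hypothetical event_t with a
 * numeric key and a one-based array heap[] holding count elements (the real
 * implementation may use different names and compare timevals instead):
 * @code
 * u_int pos = ++count;
 * heap[pos] = event;
 * while (pos > 1 && heap[pos / 2]->key > heap[pos]->key)
 * {
 *     // swap with the parent until the parent's key is smaller
 *     event_t *tmp = heap[pos / 2];
 *     heap[pos / 2] = heap[pos];
 *     heap[pos] = tmp;
 *     pos /= 2;
 * }
 * @endcode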
 *
 * Removing the next event from the heap works similarly. The event itself is
 * the root node and stored at position 1 of the array. After removing it, the
 * root has to be replaced and the heap property has to be restored. This is
 * done by moving the bottom element (last row, rightmost element) to the root
 * and then "sifting it down" by swapping it with child nodes until neither
 * child has a smaller key or it is again a leaf node.
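 *
 * And a matching sketch of the "sift down" step, using the same hypothetical
 * names as above:
 * @code
 * event_t *next = heap[1];
 * heap[1] = heap[count--];
 * u_int pos = 1, child;
 *
 * while ((child = pos * 2) <= count)
 * {
 *     // descend towards the child with the smaller key
 *     if (child + 1 <= count && heap[child + 1]->key < heap[child]->key)
 *     {
 *         child++;
 *     }
 *     if (heap[pos]->key <= heap[child]->key)
 *     {
 *         break;    // heap property restored
 *     }
 *     event_t *tmp = heap[pos];
 *     heap[pos] = heap[child];
 *     heap[child] = tmp;
 *     pos = child;
 * }
 * @endcode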
 */
struct scheduler_t {

	/**
	 * Adds an event to the queue, using a relative time offset in seconds.
	 *
	 * @param job		job to schedule
	 * @param s			relative time to schedule job, in seconds
	 */
	void (*schedule_job) (scheduler_t *this, job_t *job, u_int32_t s);

	/**
	 * Adds an event to the queue, using a relative time offset in milliseconds.
	 *
	 * @param job		job to schedule
	 * @param ms		relative time to schedule job, in milliseconds
	 */
	void (*schedule_job_ms) (scheduler_t *this, job_t *job, u_int32_t ms);

	/**
	 * Adds an event to the queue, using an absolute time.
	 *
	 * The passed timeval should be calculated based on the time_monotonic()
	 * function.
	 *
	 * @param job		job to schedule
	 * @param tv		absolute time to schedule job
	 */
	void (*schedule_job_tv) (scheduler_t *this, job_t *job, timeval_t tv);

	/**
	 * Returns the number of jobs scheduled.
	 *
	 * @return			number of scheduled jobs
	 */
	u_int (*get_job_load) (scheduler_t *this);

	/**
	 * Destroys a scheduler object.
	 */
	void (*destroy) (scheduler_t *this);
};

/**
 * Create a scheduler.
 *
 * @return		scheduler_t object
 */
scheduler_t *scheduler_create(void);
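
/*
 * A minimal usage sketch, assuming a job_t instance "job" that was created
 * elsewhere (e.g. with one of the constructors in processing/jobs/):
 *
 *   scheduler_t *scheduler = scheduler_create();
 *
 *   // fire the job after a relative delay of 30 seconds or 500 milliseconds
 *   scheduler->schedule_job(scheduler, job, 30);
 *   scheduler->schedule_job_ms(scheduler, job, 500);
 *
 *   // schedule at an absolute (monotonic) time, one minute from now
 *   timeval_t tv;
 *   time_monotonic(&tv);
 *   tv.tv_sec += 60;
 *   scheduler->schedule_job_tv(scheduler, job, tv);
 *
 *   u_int load = scheduler->get_job_load(scheduler);   // number of queued jobs
 *
 *   scheduler->destroy(scheduler);
 */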

#endif /** SCHEDULER_H_ @}*/