/*
 * Copyright (C) 2009 Tobias Brunner
 * Copyright (C) 2005-2007 Martin Willi
 * Copyright (C) 2005 Jan Hutter
 * Hochschule fuer Technik Rapperswil
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

/**
 * @defgroup scheduler scheduler
 * @{ @ingroup processing
 */

#ifndef SCHEDULER_H_
#define SCHEDULER_H_

typedef struct scheduler_t scheduler_t;

#include <sys/time.h>

#include <library.h>
#include <processing/jobs/job.h>

/**
 * The scheduler queues timed events which are then passed to the processor.
 *
 * The scheduler is implemented as a heap. A heap is a special kind of tree-
 * based data structure that satisfies the following property: if B is a child
 * node of A, then key(A) >= (or <=) key(B). So either the element with the
 * greatest (max-heap) or the smallest (min-heap) key is the root of the heap.
 * We use a min-heap with the key being the absolute UNIX time at which an
 * event is scheduled. So the root is always the event that will fire next.
 *
 * An earlier implementation of the scheduler used a sorted linked list to store
 * the events. That had the advantage that removing the next event was extremely
 * fast; likewise, adding an event scheduled before or after all other events
 * was equally fast (all in O(1)). The problem, though, was that inserting an
 * event in-between got slower as the number of events grew larger (O(n)).
 * For each connection there can be several events: IKE rekeying, NAT keepalive,
 * retransmissions, expiry of half-open connections, and others. So a gateway
 * that may have to handle thousands of concurrent connections has to be able
 * to queue a large number of events as fast as possible. Locking makes this
 * even worse: to provide thread-safety, no events can be processed while an
 * event is being queued, so making the insertion fast is even more important.
 *
 * That's the advantage of the heap. Adding an element to the heap can be
 * achieved in O(log n); on the other hand, removing the root node also
 * requires O(log n) operations. Consider 10000 queued events: inserting a new
 * event in the list implementation required up to 10000 comparisons, while in
 * the heap implementation the worst case is about log2(10000) ≈ 13.3
 * comparisons. That's a drastic improvement.
 *
 * The implementation itself uses a binary tree mapped to a one-based array to
 * store the elements. This reduces storage overhead and simplifies navigation:
 * the children of the node at position n are at positions 2n and 2n+1
 * (likewise, the parent of the node at position n is at position [n/2]). Thus,
 * navigating up and down the tree is reduced to simple index computations.
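 *
 * As a rough sketch (the macro names below are illustrative only, not part of
 * the actual implementation), these index computations could be written as:
 *
 * @code
 * // one-based array: heap[1] is the root, heap[0] stays unused
 * #define HEAP_PARENT(i)  ((i) / 2)
 * #define HEAP_LEFT(i)    ((i) * 2)
 * #define HEAP_RIGHT(i)   ((i) * 2 + 1)
 * @endcode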
 *
 * Adding an element to the heap works as follows: The heap is always filled
 * from left to right, until a row is full, then the next row is filled. Mapped
 * to an array this gets as simple as putting the new element into the first
 * free position. In a one-based array that position equals the number of
 * elements currently stored in the heap. Then the heap property has to be
 * restored, i.e. the new element has to be "bubbled up" the tree until the
 * parent node's key is smaller or the new element has become the root of the
 * tree.
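 *
 * A minimal sketch of that insertion, assuming hypothetical private names (an
 * event_t with a timeval_t "time" member, a one-based "heap" array of event_t*
 * and an "event_count" counter; none of these are part of this interface):
 *
 * @code
 * u_int position;
 *
 * // put the new event into the first free one-based position
 * position = ++this->event_count;
 * // bubble up: move parents down while they are scheduled later than event
 * while (position > 1 &&
 *        timercmp(&this->heap[position / 2]->time, &event->time, >))
 * {
 *     this->heap[position] = this->heap[position / 2];
 *     position /= 2;
 * }
 * this->heap[position] = event;
 * @endcode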
 *
 * Removing the next event from the heap works similarly. The event itself is
 * the root node and stored at position 1 of the array. After removing it, the
 * root has to be replaced and the heap property has to be restored. This is
 * done by moving the bottom element (last row, rightmost element) to the root
 * and then letting it "seep down" by swapping it with its child nodes until
 * neither child has a smaller key or it has become a leaf node again.
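 *
 * Again as a sketch only, using the same hypothetical names as above, the
 * "seep down" step could look roughly like this:
 *
 * @code
 * u_int position = 1, child;
 * event_t *tmp;
 *
 * // the bottom element has already been moved to this->heap[1]
 * while ((child = position * 2) <= this->event_count)
 * {
 *     if (child + 1 <= this->event_count &&
 *         timercmp(&this->heap[child + 1]->time, &this->heap[child]->time, <))
 *     {   // the right child fires earlier than the left one
 *         child++;
 *     }
 *     if (!timercmp(&this->heap[child]->time, &this->heap[position]->time, <))
 *     {   // neither child fires earlier, heap property restored
 *         break;
 *     }
 *     // swap with the earlier-firing child and continue one level below
 *     tmp = this->heap[position];
 *     this->heap[position] = this->heap[child];
 *     this->heap[child] = tmp;
 *     position = child;
 * }
 * @endcode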
 */
struct scheduler_t {

	/**
	 * Adds an event to the queue, using a relative time offset in seconds.
	 *
	 * @param job		job to schedule
	 * @param s			relative time to schedule job, in seconds
	 */
	void (*schedule_job) (scheduler_t *this, job_t *job, u_int32_t s);

	/**
	 * Adds an event to the queue, using a relative time offset in milliseconds.
	 *
	 * @param job		job to schedule
	 * @param ms		relative time to schedule job, in ms
	 */
	void (*schedule_job_ms) (scheduler_t *this, job_t *job, u_int32_t ms);

	/**
	 * Adds an event to the queue, using an absolute time.
	 *
	 * @param job		job to schedule
	 * @param tv		absolute time to schedule job
	 */
	void (*schedule_job_tv) (scheduler_t *this, job_t *job, timeval_t tv);

	/**
	 * Returns number of jobs scheduled.
	 *
	 * @return			number of scheduled jobs
	 */
	u_int (*get_job_load) (scheduler_t *this);

	/**
	 * Destroys a scheduler object.
	 */
	void (*destroy) (scheduler_t *this);
};
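
/*
 * Usage sketch (not part of this interface): assuming charon->scheduler holds
 * the daemon's scheduler_t instance and job points to a previously created
 * job_t, events are queued like this:
 *
 *   // let the job fire in 30 seconds
 *   charon->scheduler->schedule_job(charon->scheduler, job, 30);
 *
 *   // or, with millisecond resolution, in 250ms
 *   charon->scheduler->schedule_job_ms(charon->scheduler, job, 250);
 */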

/**
 * Create a scheduler.
 *
 * @return		scheduler_t object
 */
scheduler_t *scheduler_create(void);

#endif /** SCHEDULER_H_ @}*/