ike-sa-manager: Log a checkin/failure message for every checkout
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2016 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20 #include <inttypes.h>
21
22 #include "ike_sa_manager.h"
23
24 #include <daemon.h>
25 #include <sa/ike_sa_id.h>
26 #include <bus/bus.h>
27 #include <threading/thread.h>
28 #include <threading/condvar.h>
29 #include <threading/mutex.h>
30 #include <threading/rwlock.h>
31 #include <collections/linked_list.h>
32 #include <crypto/hashers/hasher.h>
33 #include <processing/jobs/delete_ike_sa_job.h>
34
35 /* the default size of the hash table (MUST be a power of 2) */
36 #define DEFAULT_HASHTABLE_SIZE 1
37
38 /* the maximum size of the hash table (MUST be a power of 2) */
39 #define MAX_HASHTABLE_SIZE (1 << 30)
40
41 /* the default number of segments (MUST be a power of 2) */
42 #define DEFAULT_SEGMENT_COUNT 1
43
44 typedef struct entry_t entry_t;
45
/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Thread by which this IKE_SA is currently checked out, if any
	 */
	thread_t *checked_out;

	/**
	 * Does this SA drive out new threads?
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads?
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 * (stored in an unsigned field, so the sentinel is UINT32_MAX)
	 */
	u_int32_t processing;
};
118
119 /**
120 * Implementation of entry_t.destroy.
121 */
122 static status_t entry_destroy(entry_t *this)
123 {
124 /* also destroy IKE SA */
125 this->ike_sa->destroy(this->ike_sa);
126 this->ike_sa_id->destroy(this->ike_sa_id);
127 chunk_free(&this->init_hash);
128 DESTROY_IF(this->other);
129 DESTROY_IF(this->my_id);
130 DESTROY_IF(this->other_id);
131 this->condvar->destroy(this->condvar);
132 free(this);
133 return SUCCESS;
134 }
135
136 /**
137 * Creates a new entry for the ike_sa_t list.
138 */
139 static entry_t *entry_create()
140 {
141 entry_t *this;
142
143 INIT(this,
144 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
145 .processing = -1,
146 );
147
148 return this;
149 }
150
/**
 * Function that matches entry_t objects by ike_sa_id_t.
 *
 * Matches on full SPI equality, or - if one of the responder SPIs is still
 * zero - on the initiator SPI alone, so an SA we initiated can be found
 * before the peer's response assigned the responder SPI.
 */
static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
{
	if (id->equals(id, entry->ike_sa_id))
	{
		return TRUE;
	}
	/* for IKEv1 the initiator flag can't be derived from the IKE header, so
	 * it is ignored when comparing */
	if ((id->get_responder_spi(id) == 0 ||
		 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
		(id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
		 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
		id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
	{
		/* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
		return TRUE;
	}
	return FALSE;
}
171
/**
 * Function that matches entry_t objects by ike_sa_t pointers.
 */
static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
{
	/* simple pointer identity comparison */
	return entry->ike_sa == ike_sa;
}
179
180 /**
181 * Hash function for ike_sa_id_t objects.
182 */
183 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
184 {
185 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
186 * locally unique, so we use our randomly allocated SPI whether we are
187 * initiator or responder to ensure a good distribution. The latter is not
188 * possible for IKEv1 as we don't know whether we are original initiator or
189 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
190 * SPIs (Cookies) to be allocated near random (we allocate them randomly
191 * anyway) it seems safe to always use the initiator SPI. */
192 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
193 ike_sa_id->is_initiator(ike_sa_id))
194 {
195 return ike_sa_id->get_initiator_spi(ike_sa_id);
196 }
197 return ike_sa_id->get_responder_spi(ike_sa_id);
198 }
199
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer.
 */
struct half_open_t {
	/** chunk of remote host address (cloned; released in half_open_destroy()) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};
215
/**
 * Destroys a half_open_t object.
 */
static void half_open_destroy(half_open_t *this)
{
	/* frees the cloned address chunk */
	chunk_free(&this->other);
	free(this);
}
224
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct to manage the IKE_SAs established between a pair of identities,
 * required for duplicate checking.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
240
/**
 * Destroys a connected_peers_t object.
 */
static void connected_peers_destroy(connected_peers_t *this)
{
	this->my_id->destroy(this->my_id);
	this->other_id->destroy(this->other_id);
	/* NOTE(review): the sas list is destroyed without destroying its
	 * elements; the IDs are removed/destroyed in remove_connected_peers(),
	 * presumably leaving the list empty here - confirm against callers */
	this->sas->destroy(this->sas);
	free(this);
}
248
249 /**
250 * Function that matches connected_peers_t objects by the given ids.
251 */
252 static inline bool connected_peers_match(connected_peers_t *connected_peers,
253 identification_t *my_id, identification_t *other_id,
254 int family)
255 {
256 return my_id->equals(my_id, connected_peers->my_id) &&
257 other_id->equals(other_id, connected_peers->other_id) &&
258 (!family || family == connected_peers->family);
259 }
260
typedef struct init_hash_t init_hash_t;

/**
 * Hash of an initial IKE message, cached to detect retransmissions.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};
270
271 typedef struct segment_t segment_t;
272
273 /**
274 * Struct to manage segments of the hash table.
275 */
276 struct segment_t {
277 /** mutex to access a segment exclusively */
278 mutex_t *mutex;
279
280 /** the number of entries in this segment */
281 u_int count;
282 };
283
284 typedef struct shareable_segment_t shareable_segment_t;
285
286 /**
287 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
288 */
289 struct shareable_segment_t {
290 /** rwlock to access a segment non-/exclusively */
291 rwlock_t *lock;
292
293 /** the number of entries in this segment - in case of the "half-open table"
294 * it's the sum of all half_open_t.count in a segment. */
295 u_int count;
296 };
297
298 typedef struct table_item_t table_item_t;
299
300 /**
301 * Instead of using linked_list_t for each bucket we store the data in our own
302 * list to save memory.
303 */
304 struct table_item_t {
305 /** data of this item */
306 void *value;
307
308 /** next item in the overflow list */
309 table_item_t *next;
310 };
311
312 typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;
313
314 /**
315 * Additional private members of ike_sa_manager_t.
316 */
317 struct private_ike_sa_manager_t {
318 /**
319 * Public interface of ike_sa_manager_t.
320 */
321 ike_sa_manager_t public;
322
323 /**
324 * Hash table with entries for the ike_sa_t objects.
325 */
326 table_item_t **ike_sa_table;
327
328 /**
329 * The size of the hash table.
330 */
331 u_int table_size;
332
333 /**
334 * Mask to map the hashes to table rows.
335 */
336 u_int table_mask;
337
338 /**
339 * Segments of the hash table.
340 */
341 segment_t *segments;
342
343 /**
344 * The number of segments.
345 */
346 u_int segment_count;
347
348 /**
349 * Mask to map a table row to a segment.
350 */
351 u_int segment_mask;
352
353 /**
354 * Hash table with half_open_t objects.
355 */
356 table_item_t **half_open_table;
357
358 /**
359 * Segments of the "half-open" hash table.
360 */
361 shareable_segment_t *half_open_segments;
362
363 /**
364 * Total number of half-open IKE_SAs.
365 */
366 refcount_t half_open_count;
367
368 /**
369 * Total number of half-open IKE_SAs as responder.
370 */
371 refcount_t half_open_count_responder;
372
373 /**
374 * Hash table with connected_peers_t objects.
375 */
376 table_item_t **connected_peers_table;
377
378 /**
379 * Segments of the "connected peers" hash table.
380 */
381 shareable_segment_t *connected_peers_segments;
382
383 /**
384 * Hash table with init_hash_t objects.
385 */
386 table_item_t **init_hashes_table;
387
388 /**
389 * Segments of the "hashes" hash table.
390 */
391 segment_t *init_hashes_segments;
392
393 /**
394 * RNG to get random SPIs for our side
395 */
396 rng_t *rng;
397
398 /**
399 * Registered callback for IKE SPIs
400 */
401 struct {
402 spi_cb_t cb;
403 void *data;
404 } spi_cb;
405
406 /**
407 * Lock to access the RNG instance and the callback
408 */
409 rwlock_t *spi_lock;
410
411 /**
412 * reuse existing IKE_SAs in checkout_by_config
413 */
414 bool reuse_ikesa;
415
416 /**
417 * Configured IKE_SA limit, if any
418 */
419 u_int ikesa_limit;
420 };
421
422 /**
423 * Acquire a lock to access the segment of the table row with the given index.
424 * It also works with the segment index directly.
425 */
426 static inline void lock_single_segment(private_ike_sa_manager_t *this,
427 u_int index)
428 {
429 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
430 lock->lock(lock);
431 }
432
433 /**
434 * Release the lock required to access the segment of the table row with the given index.
435 * It also works with the segment index directly.
436 */
437 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
438 u_int index)
439 {
440 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
441 lock->unlock(lock);
442 }
443
444 /**
445 * Lock all segments
446 */
447 static void lock_all_segments(private_ike_sa_manager_t *this)
448 {
449 u_int i;
450
451 for (i = 0; i < this->segment_count; i++)
452 {
453 this->segments[i].mutex->lock(this->segments[i].mutex);
454 }
455 }
456
457 /**
458 * Unlock all segments
459 */
460 static void unlock_all_segments(private_ike_sa_manager_t *this)
461 {
462 u_int i;
463
464 for (i = 0; i < this->segment_count; i++)
465 {
466 this->segments[i].mutex->unlock(this->segments[i].mutex);
467 }
468 }
469
470 typedef struct private_enumerator_t private_enumerator_t;
471
472 /**
473 * hash table enumerator implementation
474 */
475 struct private_enumerator_t {
476
477 /**
478 * implements enumerator interface
479 */
480 enumerator_t enumerator;
481
482 /**
483 * associated ike_sa_manager_t
484 */
485 private_ike_sa_manager_t *manager;
486
487 /**
488 * current segment index
489 */
490 u_int segment;
491
492 /**
493 * currently enumerating entry
494 */
495 entry_t *entry;
496
497 /**
498 * current table row index
499 */
500 u_int row;
501
502 /**
503 * current table item
504 */
505 table_item_t *current;
506
507 /**
508 * previous table item
509 */
510 table_item_t *prev;
511 };
512
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	if (this->entry)
	{
		/* an entry was out from the previous call, wake threads waiting on it */
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{	/* advance within the current bucket's overflow list */
				this->current = this->current->next;
			}
			else
			{	/* entering a new row: take the segment lock; it is held as
				 * long as we are positioned on an item and released either
				 * below, when the row is exhausted, or in enumerator_destroy */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			/* the rows belonging to a segment are interleaved by segment count */
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
549
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	if (this->entry)
	{
		/* wake threads waiting on the last enumerated entry */
		this->entry->condvar->signal(this->entry->condvar);
	}
	if (this->current)
	{
		/* still positioned on an item, so the segment lock is still held */
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
563
564 /**
565 * Creates an enumerator to enumerate the entries in the hash table.
566 */
567 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
568 {
569 private_enumerator_t *enumerator;
570
571 INIT(enumerator,
572 .enumerator = {
573 .enumerate = (void*)_enumerate,
574 .destroy = _enumerator_destroy,
575 },
576 .manager = this,
577 );
578 return &enumerator->enumerator;
579 }
580
581 /**
582 * Put an entry into the hash table.
583 * Note: The caller has to unlock the returned segment.
584 */
585 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
586 {
587 table_item_t *current, *item;
588 u_int row, segment;
589
590 INIT(item,
591 .value = entry,
592 );
593
594 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
595 segment = row & this->segment_mask;
596
597 lock_single_segment(this, segment);
598 current = this->ike_sa_table[row];
599 if (current)
600 { /* insert at the front of current bucket */
601 item->next = current;
602 }
603 this->ike_sa_table[row] = item;
604 this->segments[segment].count++;
605 return segment;
606 }
607
608 /**
609 * Remove an entry from the hash table.
610 * Note: The caller MUST have a lock on the segment of this entry.
611 */
612 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
613 {
614 table_item_t *item, *prev = NULL;
615 u_int row, segment;
616
617 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
618 segment = row & this->segment_mask;
619 item = this->ike_sa_table[row];
620 while (item)
621 {
622 if (item->value == entry)
623 {
624 if (prev)
625 {
626 prev->next = item->next;
627 }
628 else
629 {
630 this->ike_sa_table[row] = item->next;
631 }
632 this->segments[segment].count--;
633 free(item);
634 break;
635 }
636 prev = item;
637 item = item->next;
638 }
639 }
640
/**
 * Remove the entry at the current enumerator position.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		/* step back so the next enumerate() call continues after the
		 * removed item */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{	/* the bucket head was removed: current is now NULL, so release
			 * the segment lock here; enumerate() reacquires it when it
			 * re-enters the row */
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
666
667 /**
668 * Find an entry using the provided match function to compare the entries for
669 * equality.
670 */
671 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
672 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
673 linked_list_match_t match, void *param)
674 {
675 table_item_t *item;
676 u_int row, seg;
677
678 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
679 seg = row & this->segment_mask;
680
681 lock_single_segment(this, seg);
682 item = this->ike_sa_table[row];
683 while (item)
684 {
685 if (match(item->value, param))
686 {
687 *entry = item->value;
688 *segment = seg;
689 /* the locked segment has to be unlocked by the caller */
690 return SUCCESS;
691 }
692 item = item->next;
693 }
694 unlock_single_segment(this, seg);
695 return NOT_FOUND;
696 }
697
/**
 * Find an entry by ike_sa_id_t. Also matches entries whose responder SPI is
 * not yet set (see entry_match_by_id).
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
								ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_id, ike_sa_id);
}
708
/**
 * Find an entry by IKE_SA pointer. The ike_sa_id is only used to locate the
 * hash table row; the match itself is by pointer identity.
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
								ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_sa, ike_sa);
}
719
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 *
 * The caller must hold the segment's mutex; it is released while waiting on
 * the entry's condvar and held again when this returns.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
749
/**
 * Put a half-open SA into the hash table, updating the per-peer and global
 * half-open counters (used for DoS detection).
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	/* look for an existing per-peer entry with the same remote address */
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first half-open SA for this address, create a new bucket item */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	if (!ike_id->is_initiator(ike_id))
	{	/* track SAs we responded to separately */
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
801
/**
 * Remove a half-open SA from the hash table, decrementing the per-peer and
 * global counters and freeing the per-peer entry when it reaches zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{	/* this was an SA we responded to */
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{	/* no half-open SAs left for this peer, unlink and free */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
853
/**
 * Put an SA between two peers into the hash table. A no-op if the SA's ID is
 * already registered for this identity pair.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	/* both identity encodings are combined into the row hash */
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
					(linked_list_match_t)entry->ike_sa_id->equals,
					NULL, entry->ike_sa_id) == SUCCESS)
			{	/* this IKE_SA is already registered, nothing to do */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first IKE_SA between these two identities, create a new entry */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
912
/**
 * Remove an SA between two peers from the hash table, destroying the
 * identity-pair entry once its last IKE_SA ID is removed.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	/* must mirror the hash calculation in put_connected_peers() */
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* remove and destroy the cloned ID stored for this IKE_SA */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* last SA between these identities removed */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
976
977 /**
978 * Get a random SPI for new IKE_SAs
979 */
980 static u_int64_t get_spi(private_ike_sa_manager_t *this)
981 {
982 u_int64_t spi;
983
984 this->spi_lock->read_lock(this->spi_lock);
985 if (this->spi_cb.cb)
986 {
987 spi = this->spi_cb.cb(this->spi_cb.data);
988 }
989 else if (!this->rng ||
990 !this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
991 {
992 spi = 0;
993 }
994 this->spi_lock->unlock(this->spi_lock);
995 return spi;
996 }
997
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * @param hasher	hasher to use (not destroyed)
 * @param message	initial IKE message to hash
 * @param hash		receives the allocated hash on success
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{ /* only hash the source IP, port and SPI for fragmented init messages */
		u_int16_t port;
		u_int64_t spi;

		src = message->get_source(message);
		/* calls with a NULL out-chunk feed data into the hasher without
		 * finalizing; only the last call produces the hash */
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{ /* include the source for Main Mode as the hash will be the same if
	   * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1038
1039 /**
1040 * Check if we already have created an IKE_SA based on the initial IKE message
1041 * with the given hash.
1042 * If not the hash is stored, the hash data is not(!) cloned.
1043 *
1044 * Also, the local SPI is returned. In case of a retransmit this is already
1045 * stored together with the hash, otherwise it is newly allocated and should
1046 * be used to create the IKE_SA.
1047 *
1048 * @returns ALREADY_DONE if the message with the given hash has been seen before
1049 * NOT_FOUND if the message hash was not found
1050 * FAILED if the SPI allocation failed
1051 */
1052 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1053 chunk_t init_hash, u_int64_t *our_spi)
1054 {
1055 table_item_t *item;
1056 u_int row, segment;
1057 mutex_t *mutex;
1058 init_hash_t *init;
1059 u_int64_t spi;
1060
1061 row = chunk_hash(init_hash) & this->table_mask;
1062 segment = row & this->segment_mask;
1063 mutex = this->init_hashes_segments[segment].mutex;
1064 mutex->lock(mutex);
1065 item = this->init_hashes_table[row];
1066 while (item)
1067 {
1068 init_hash_t *current = item->value;
1069
1070 if (chunk_equals(init_hash, current->hash))
1071 {
1072 *our_spi = current->our_spi;
1073 mutex->unlock(mutex);
1074 return ALREADY_DONE;
1075 }
1076 item = item->next;
1077 }
1078
1079 spi = get_spi(this);
1080 if (!spi)
1081 {
1082 return FAILED;
1083 }
1084
1085 INIT(init,
1086 .hash = {
1087 .len = init_hash.len,
1088 .ptr = init_hash.ptr,
1089 },
1090 .our_spi = spi,
1091 );
1092 INIT(item,
1093 .value = init,
1094 .next = this->init_hashes_table[row],
1095 );
1096 this->init_hashes_table[row] = item;
1097 *our_spi = init->our_spi;
1098 mutex->unlock(mutex);
1099 return NOT_FOUND;
1100 }
1101
1102 /**
1103 * Remove the hash of an initial IKE message from the cache.
1104 */
1105 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1106 {
1107 table_item_t *item, *prev = NULL;
1108 u_int row, segment;
1109 mutex_t *mutex;
1110
1111 row = chunk_hash(init_hash) & this->table_mask;
1112 segment = row & this->segment_mask;
1113 mutex = this->init_hashes_segments[segment].mutex;
1114 mutex->lock(mutex);
1115 item = this->init_hashes_table[row];
1116 while (item)
1117 {
1118 init_hash_t *current = item->value;
1119
1120 if (chunk_equals(init_hash, current->hash))
1121 {
1122 if (prev)
1123 {
1124 prev->next = item->next;
1125 }
1126 else
1127 {
1128 this->init_hashes_table[row] = item->next;
1129 }
1130 free(current);
1131 free(item);
1132 break;
1133 }
1134 prev = item;
1135 item = item->next;
1136 }
1137 mutex->unlock(mutex);
1138 }
1139
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout %N SA with SPIs %.16"PRIx64"_i %.16"PRIx64"_r",
		 ike_version_names, ike_sa_id->get_ike_version(ike_sa_id),
		 be64toh(ike_sa_id->get_initiator_spi(ike_sa_id)),
		 be64toh(ike_sa_id->get_responder_spi(ike_sa_id)));

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* mark the SA as checked out by the current thread */
			entry->checked_out = thread_current();
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	/* publish the checked-out SA (or NULL on failure) on the bus */
	charon->bus->set_sa(charon->bus, ike_sa);

	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1171
1172 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1173 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1174 {
1175 ike_sa_id_t *ike_sa_id;
1176 ike_sa_t *ike_sa;
1177 u_int8_t ike_version;
1178 u_int64_t spi;
1179
1180 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1181
1182 spi = get_spi(this);
1183 if (!spi)
1184 {
1185 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1186 return NULL;
1187 }
1188
1189 if (initiator)
1190 {
1191 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1192 }
1193 else
1194 {
1195 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1196 }
1197 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1198 ike_sa_id->destroy(ike_sa_id);
1199
1200 if (ike_sa)
1201 {
1202 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1203 ike_sa->get_unique_id(ike_sa));
1204 }
1205 return ike_sa;
1206 }
1207
1208 /**
1209 * Get the message ID or message hash to detect early retransmissions
1210 */
1211 static u_int32_t get_message_id_or_hash(message_t *message)
1212 {
1213 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION)
1214 {
1215 /* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1216 * Mode, where all three messages use the same message ID */
1217 if (message->get_message_id(message) == 0 ||
1218 message->get_exchange_type(message) == QUICK_MODE)
1219 {
1220 return chunk_hash(message->get_packet_data(message));
1221 }
1222 }
1223 return message->get_message_id(message);
1224 }
1225
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout %N SA by message with SPIs %.16"PRIx64"_i "
		 "%.16"PRIx64"_r", ike_version_names, id->get_ike_version(id),
		 be64toh(id->get_initiator_spi(id)),
		 be64toh(id->get_responder_spi(id)));

	/* detect initial messages (IKEv2 IKE_SA_INIT requests, IKEv1 Main or
	 * Aggressive Mode) that may require creating a new IKE_SA */
	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		u_int64_t our_spi;
		chunk_t hash;

		/* hash the raw initial message so retransmits of it can be detected */
		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			goto out;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						/* register the new SA and hand it out checked out to
						 * the current thread; entry takes ownership of id */
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;
						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						segment = put_entry(this, entry);
						entry->checked_out = thread_current();
						unlock_single_segment(this, segment);

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));
						goto out;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* SA creation failed or limit hit, forget the init hash so a
				 * later retransmit may be handled again */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				goto out;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				goto out;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree,
		 * use the previously allocated responder SPI to find the entry */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = thread_current();
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{	/* apply the responder SPI seen in the message to the SA */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		/* the SPIs don't match any known IKE_SA, raise an alert */
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);

out:
	charon->bus->set_sa(charon->bus, ike_sa);
	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1391
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa && peer_cfg->get_ike_version(peer_cfg) != IKEV1)
	{	/* IKE_SA reuse disabled by config (not possible for IKEv1) */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		goto out;
	}

	/* scan the whole table for an existing SA with an equal config */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (!wait_for_entry(this, entry, segment))
		{
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
		{	/* skip IKE_SAs which are not usable, wake other waiting threads */
			entry->condvar->signal(entry->condvar);
			continue;
		}

		/* both the peer config and its IKE config have to match */
		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				entry->checked_out = thread_current();
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
		/* other threads might be waiting for this entry */
		entry->condvar->signal(entry->condvar);
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);

out:
	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1456
METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
	private_ike_sa_manager_t *this, u_int32_t id)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by unique ID %u", id);

	/* unique IDs are not indexed, so scan the whole table */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
			{
				ike_sa = entry->ike_sa;
				entry->checked_out = thread_current();
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	if (ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
			 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
	}
	else
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1496
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
	private_ike_sa_manager_t *this, char *name, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by%s name '%s'", child ? " child" : "", name);

	/* names are not indexed, so scan the whole table */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a policy name ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (streq(child_sa->get_name(child_sa), name))
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a connection name */
			{
				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = thread_current();
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	charon->bus->set_sa(charon->bus, ike_sa);

	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1556
1557 /**
1558 * enumerator filter function, waiting variant
1559 */
1560 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1561 entry_t **in, ike_sa_t **out, u_int *segment)
1562 {
1563 if (wait_for_entry(this, *in, *segment))
1564 {
1565 *out = (*in)->ike_sa;
1566 charon->bus->set_sa(charon->bus, *out);
1567 return TRUE;
1568 }
1569 return FALSE;
1570 }
1571
1572 /**
1573 * enumerator filter function, skipping variant
1574 */
1575 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1576 entry_t **in, ike_sa_t **out, u_int *segment)
1577 {
1578 if (!(*in)->driveout_new_threads &&
1579 !(*in)->driveout_waiting_threads &&
1580 !(*in)->checked_out)
1581 {
1582 *out = (*in)->ike_sa;
1583 charon->bus->set_sa(charon->bus, *out);
1584 return TRUE;
1585 }
1586 return FALSE;
1587 }
1588
/**
 * Reset threads SA after enumeration
 */
static void reset_sa(void *data)
{
	/* cleanup callback argument is unused; just clear the thread's SA */
	charon->bus->set_sa(charon->bus, NULL);
}
1596
1597 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1598 private_ike_sa_manager_t* this, bool wait)
1599 {
1600 return enumerator_create_filter(create_table_enumerator(this),
1601 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1602 this, reset_sa);
1603 }
1604
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = NULL;
		entry->processing = -1;
		/* keep the half-open table in sync with the SA's current state */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* no entry found, the SA is not yet registered with the manager
		 * (presumably created via checkout_new()); add a new entry */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		segment = put_entry(this, entry);
	}
	DBG2(DBG_MGR, "checkin of IKE_SA successful");

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_unqiueness() will try to check out this SA */
				entry->checked_out = thread_current();
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = NULL;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		/* register the identity pair for later uniqueness checks */
		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1724
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored checkin and destroy of IKE_SA during shutdown");
			entry->checked_out = NULL;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* remove the entry from all auxiliary tables referencing it */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "checkin and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to checkin and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1793
/**
 * Cleanup function for create_id_enumerator
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	/* destroy the cloned IKE_SA IDs along with the list itself */
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1801
METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
	private_ike_sa_manager_t *this, identification_t *me,
	identification_t *other, int family)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	linked_list_t *ids = NULL;

	/* hash both identities to locate the connected_peers bucket */
	row = chunk_hash_inc(other->get_encoding(other),
						 chunk_hash(me->get_encoding(me))) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->read_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, me, other, family))
		{
			/* clone the IDs so enumeration can proceed without the lock */
			ids = current->sas->clone_offset(current->sas,
											 offsetof(ike_sa_id_t, clone));
			break;
		}
		item = item->next;
	}
	lock->unlock(lock);

	if (!ids)
	{
		return enumerator_create_empty();
	}
	/* the cleaner destroys the cloned IDs once the enumerator is destroyed */
	return enumerator_create_cleaner(ids->create_enumerator(ids),
									 (void*)id_enumerator_cleanup, ids);
}
1839
/**
 * Move all CHILD_SAs and virtual IPs from old to new
 */
static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
{
	enumerator_t *enumerator;
	child_sa_t *child_sa;
	host_t *vip;
	int chcount = 0, vipcount = 0;

	/* announce the migration target before moving anything */
	charon->bus->children_migrate(charon->bus, new->get_id(new),
								  new->get_unique_id(new));
	enumerator = old->create_child_sa_enumerator(old);
	while (enumerator->enumerate(enumerator, &child_sa))
	{
		old->remove_child_sa(old, enumerator);
		new->add_child_sa(new, child_sa);
		chcount++;
	}
	enumerator->destroy(enumerator);

	enumerator = old->create_virtual_ip_enumerator(old, FALSE);
	while (enumerator->enumerate(enumerator, &vip))
	{
		new->add_virtual_ip(new, FALSE, vip);
		vipcount++;
	}
	enumerator->destroy(enumerator);
	/* this does not release the addresses, which is good, but it does trigger
	 * an assign_vips(FALSE) event... */
	old->clear_virtual_ips(old, FALSE);
	/* ...trigger the analogous event on the new SA */
	charon->bus->set_sa(charon->bus, new);
	charon->bus->assign_vips(charon->bus, new, TRUE);
	charon->bus->children_migrate(charon->bus, NULL, 0);
	/* restore the old SA as the thread's current SA for the caller */
	charon->bus->set_sa(charon->bus, old);

	if (chcount || vipcount)
	{
		DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
			 "children and %d virtual IPs", chcount, vipcount);
	}
}
1883
/**
 * Delete an existing IKE_SA due to a unique replace policy
 *
 * @return		SUCCESS if the delete was scheduled/delayed, otherwise the
 *				result of duplicate->delete() (checked for DESTROY_ME by
 *				the caller)
 */
static status_t enforce_replace(private_ike_sa_manager_t *this,
								ike_sa_t *duplicate, ike_sa_t *new,
								identification_t *other, host_t *host)
{
	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);

	if (host->equals(host, duplicate->get_other_host(duplicate)))
	{
		/* looks like a reauthentication attempt */
		if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
			new->get_version(new) == IKEV1)
		{
			/* IKEv1 implicitly takes over children, IKEv2 recreates them
			 * explicitly. */
			adopt_children_and_vips(duplicate, new);
		}
		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
		 * peers need to complete the new SA first, otherwise the quick modes
		 * might get lost. For IKEv2 we do the same, as we want overlapping
		 * CHILD_SAs to keep connectivity up. */
		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
		return SUCCESS;
	}
	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
		 "uniqueness policy", other);
	return duplicate->delete(duplicate);
}
1915
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{	/* uniqueness checking disabled, nothing to do */
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	/* enumerate all registered IKE_SAs between the same identity pair */
	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{	/* peer sent INITIAL_CONTACT, destroy duplicates unconditionally */
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			/* only consider established/rekeying SAs with the same config */
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
										duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
2003
2004 METHOD(ike_sa_manager_t, has_contact, bool,
2005 private_ike_sa_manager_t *this, identification_t *me,
2006 identification_t *other, int family)
2007 {
2008 table_item_t *item;
2009 u_int row, segment;
2010 rwlock_t *lock;
2011 bool found = FALSE;
2012
2013 row = chunk_hash_inc(other->get_encoding(other),
2014 chunk_hash(me->get_encoding(me))) & this->table_mask;
2015 segment = row & this->segment_mask;
2016 lock = this->connected_peers_segments[segment].lock;
2017 lock->read_lock(lock);
2018 item = this->connected_peers_table[row];
2019 while (item)
2020 {
2021 if (connected_peers_match(item->value, me, other, family))
2022 {
2023 found = TRUE;
2024 break;
2025 }
2026 item = item->next;
2027 }
2028 lock->unlock(lock);
2029
2030 return found;
2031 }
2032
2033 METHOD(ike_sa_manager_t, get_count, u_int,
2034 private_ike_sa_manager_t *this)
2035 {
2036 u_int segment, count = 0;
2037 mutex_t *mutex;
2038
2039 for (segment = 0; segment < this->segment_count; segment++)
2040 {
2041 mutex = this->segments[segment & this->segment_mask].mutex;
2042 mutex->lock(mutex);
2043 count += this->segments[segment].count;
2044 mutex->unlock(mutex);
2045 }
2046 return count;
2047 }
2048
2049 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
2050 private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
2051 {
2052 table_item_t *item;
2053 u_int row, segment;
2054 rwlock_t *lock;
2055 chunk_t addr;
2056 u_int count = 0;
2057
2058 if (ip)
2059 {
2060 addr = ip->get_address(ip);
2061 row = chunk_hash(addr) & this->table_mask;
2062 segment = row & this->segment_mask;
2063 lock = this->half_open_segments[segment].lock;
2064 lock->read_lock(lock);
2065 item = this->half_open_table[row];
2066 while (item)
2067 {
2068 half_open_t *half_open = item->value;
2069
2070 if (chunk_equals(addr, half_open->other))
2071 {
2072 count = responder_only ? half_open->count_responder
2073 : half_open->count;
2074 break;
2075 }
2076 item = item->next;
2077 }
2078 lock->unlock(lock);
2079 }
2080 else
2081 {
2082 count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
2083 : (u_int)ref_cur(&this->half_open_count);
2084 }
2085 return count;
2086 }
2087
METHOD(ike_sa_manager_t, set_spi_cb, void,
	private_ike_sa_manager_t *this, spi_cb_t callback, void *data)
{
	/* update callback and context atomically with regard to other users
	 * of spi_lock (e.g. flush() clears them under the same lock) */
	this->spi_lock->write_lock(this->spi_lock);
	this->spi_cb.cb = callback;
	this->spi_cb.data = data;
	this->spi_lock->unlock(this->spi_lock);
}
2096
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries, cleaning up the auxiliary tables */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* disable SPI generation; clear callback and RNG under the write lock */
	this->spi_lock->write_lock(this->spi_lock);
	this->rng->destroy(this->rng);
	this->rng = NULL;
	this->spi_cb.cb = NULL;
	this->spi_cb.data = NULL;
	this->spi_lock->unlock(this->spi_lock);
}
2186
METHOD(ike_sa_manager_t, destroy, void,
	private_ike_sa_manager_t *this)
{
	u_int i;

	/* these are already cleared in flush() above */
	free(this->ike_sa_table);
	free(this->half_open_table);
	free(this->connected_peers_table);
	free(this->init_hashes_table);
	/* release the per-segment locks of all four tables */
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex->destroy(this->segments[i].mutex);
		this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
		this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
		this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
	}
	free(this->segments);
	free(this->half_open_segments);
	free(this->connected_peers_segments);
	free(this->init_hashes_segments);

	this->spi_lock->destroy(this->spi_lock);
	free(this);
}
2212
2213 /**
2214 * This function returns the next-highest power of two for the given number.
2215 * The algorithm works by setting all bits on the right-hand side of the most
2216 * significant 1 to 1 and then increments the whole number so it rolls over
2217 * to the nearest power of two. Note: returns 0 for n == 0
2218 */
2219 static u_int get_nearest_powerof2(u_int n)
2220 {
2221 u_int i;
2222
2223 --n;
2224 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2225 {
2226 n |= n >> i;
2227 }
2228 return ++n;
2229 }
2230
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.set_spi_cb = _set_spi_cb,
			.destroy = _destroy,
		},
	);

	/* a weak RNG is sufficient for SPI generation */
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}
	this->spi_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	/* optional limit on the total number of IKE_SAs, 0 means unlimited */
	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* table and segment sizes are rounded up to powers of two and clamped */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
												lib->settings, "%s.ikesa_table_size",
												DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
												lib->settings, "%s.ikesa_table_segments",
												DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->segments[i].count = 0;
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->half_open_segments[i].count = 0;
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->connected_peers_segments[i].count = 0;
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->init_hashes_segments[i].count = 0;
	}

	/* whether checkout_by_config() may reuse existing IKE_SAs */
	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}