ike-sa-manager: Remove superfluous assignment
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2016 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20 #include <inttypes.h>
21
22 #include "ike_sa_manager.h"
23
24 #include <daemon.h>
25 #include <sa/ike_sa_id.h>
26 #include <bus/bus.h>
27 #include <threading/thread.h>
28 #include <threading/condvar.h>
29 #include <threading/mutex.h>
30 #include <threading/rwlock.h>
31 #include <collections/linked_list.h>
32 #include <crypto/hashers/hasher.h>
33 #include <processing/jobs/delete_ike_sa_job.h>
34
35 /* the default size of the hash table (MUST be a power of 2) */
36 #define DEFAULT_HASHTABLE_SIZE 1
37
38 /* the maximum size of the hash table (MUST be a power of 2) */
39 #define MAX_HASHTABLE_SIZE (1 << 30)
40
41 /* the default number of segments (MUST be a power of 2) */
42 #define DEFAULT_SEGMENT_COUNT 1
43
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Thread by which this IKE_SA is currently checked out, if any
	 */
	thread_t *checked_out;

	/**
	 * Whether threads that newly try to check out this IKE_SA are driven out.
	 */
	bool driveout_new_threads;

	/**
	 * Whether threads already waiting for this IKE_SA are driven out.
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 */
	uint32_t processing;
};
118
119 /**
120 * Implementation of entry_t.destroy.
121 */
122 static status_t entry_destroy(entry_t *this)
123 {
124 /* also destroy IKE SA */
125 this->ike_sa->destroy(this->ike_sa);
126 this->ike_sa_id->destroy(this->ike_sa_id);
127 chunk_free(&this->init_hash);
128 DESTROY_IF(this->other);
129 DESTROY_IF(this->my_id);
130 DESTROY_IF(this->other_id);
131 this->condvar->destroy(this->condvar);
132 free(this);
133 return SUCCESS;
134 }
135
136 /**
137 * Creates a new entry for the ike_sa_t list.
138 */
139 static entry_t *entry_create()
140 {
141 entry_t *this;
142
143 INIT(this,
144 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
145 .processing = -1,
146 );
147
148 return this;
149 }
150
151 /**
152 * Function that matches entry_t objects by ike_sa_id_t.
153 */
154 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
155 {
156 if (id->equals(id, entry->ike_sa_id))
157 {
158 return TRUE;
159 }
160 if ((id->get_responder_spi(id) == 0 ||
161 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
162 (id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
163 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
164 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
165 {
166 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
167 return TRUE;
168 }
169 return FALSE;
170 }
171
172 /**
173 * Function that matches entry_t objects by ike_sa_t pointers.
174 */
175 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
176 {
177 return entry->ike_sa == ike_sa;
178 }
179
180 /**
181 * Hash function for ike_sa_id_t objects.
182 */
183 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
184 {
185 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
186 * locally unique, so we use our randomly allocated SPI whether we are
187 * initiator or responder to ensure a good distribution. The latter is not
188 * possible for IKEv1 as we don't know whether we are original initiator or
189 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
190 * SPIs (Cookies) to be allocated near random (we allocate them randomly
191 * anyway) it seems safe to always use the initiator SPI. */
192 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
193 ike_sa_id->is_initiator(ike_sa_id))
194 {
195 return ike_sa_id->get_initiator_spi(ike_sa_id);
196 }
197 return ike_sa_id->get_responder_spi(ike_sa_id);
198 }
199
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer.
 */
struct half_open_t {
	/** chunk of remote host address (cloned, owned by this object) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};
215
216 /**
217 * Destroys a half_open_t object.
218 */
219 static void half_open_destroy(half_open_t *this)
220 {
221 chunk_free(&this->other);
222 free(this);
223 }
224
typedef struct connected_peers_t connected_peers_t;

/**
 * Tracks all established IKE_SAs between a specific pair of identities,
 * used for duplicate checking.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
240
241 static void connected_peers_destroy(connected_peers_t *this)
242 {
243 this->my_id->destroy(this->my_id);
244 this->other_id->destroy(this->other_id);
245 this->sas->destroy(this->sas);
246 free(this);
247 }
248
249 /**
250 * Function that matches connected_peers_t objects by the given ids.
251 */
252 static inline bool connected_peers_match(connected_peers_t *connected_peers,
253 identification_t *my_id, identification_t *other_id,
254 int family)
255 {
256 return my_id->equals(my_id, connected_peers->my_id) &&
257 other_id->equals(other_id, connected_peers->other_id) &&
258 (!family || family == connected_peers->family);
259 }
260
typedef struct init_hash_t init_hash_t;

/**
 * Associates the hash of an initial IKE message with the SPI we allocated
 * for it, so retransmits can be detected and answered with the same SPI.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	uint64_t our_spi;
};
270
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table.
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment */
	u_int count;
};
283
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
297
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list, NULL at the end of a bucket */
	table_item_t *next;
};
311
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table (always a power of two).
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows.
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table.
	 */
	segment_t *segments;

	/**
	 * The number of segments (always a power of two).
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment.
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects, keyed by remote address.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs.
	 */
	refcount_t half_open_count;

	/**
	 * Total number of half-open IKE_SAs as responder.
	 */
	refcount_t half_open_count_responder;

	/**
	 * Hash table with connected_peers_t objects, keyed by both identities.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects, used for retransmit detection.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * Registered callback for IKE SPIs
	 */
	struct {
		spi_cb_t cb;
		void *data;
	} spi_cb;

	/**
	 * Lock to access the RNG instance and the callback
	 */
	rwlock_t *spi_lock;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, 0 if unlimited
	 */
	u_int ikesa_limit;
};
421
422 /**
423 * Acquire a lock to access the segment of the table row with the given index.
424 * It also works with the segment index directly.
425 */
426 static inline void lock_single_segment(private_ike_sa_manager_t *this,
427 u_int index)
428 {
429 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
430 lock->lock(lock);
431 }
432
433 /**
434 * Release the lock required to access the segment of the table row with the given index.
435 * It also works with the segment index directly.
436 */
437 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
438 u_int index)
439 {
440 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
441 lock->unlock(lock);
442 }
443
444 /**
445 * Lock all segments
446 */
447 static void lock_all_segments(private_ike_sa_manager_t *this)
448 {
449 u_int i;
450
451 for (i = 0; i < this->segment_count; i++)
452 {
453 this->segments[i].mutex->lock(this->segments[i].mutex);
454 }
455 }
456
457 /**
458 * Unlock all segments
459 */
460 static void unlock_all_segments(private_ike_sa_manager_t *this)
461 {
462 u_int i;
463
464 for (i = 0; i < this->segment_count; i++)
465 {
466 this->segments[i].mutex->unlock(this->segments[i].mutex);
467 }
468 }
469
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry, NULL if none is checked out
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item (its segment is locked while non-NULL)
	 */
	table_item_t *current;

	/**
	 * previous table item
	 */
	table_item_t *prev;
};
512
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	/* signal the previously enumerated entry, other threads may be waiting
	 * on its condvar */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{	/* entering a new row: lock its segment before reading it */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{	/* keep the segment locked while the caller uses the entry */
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			/* the rows of a segment are interleaved, so advance by count */
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
549
550 METHOD(enumerator_t, enumerator_destroy, void,
551 private_enumerator_t *this)
552 {
553 if (this->entry)
554 {
555 this->entry->condvar->signal(this->entry->condvar);
556 }
557 if (this->current)
558 {
559 unlock_single_segment(this->manager, this->segment);
560 }
561 free(this);
562 }
563
564 /**
565 * Creates an enumerator to enumerate the entries in the hash table.
566 */
567 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
568 {
569 private_enumerator_t *enumerator;
570
571 INIT(enumerator,
572 .enumerator = {
573 .enumerate = (void*)_enumerate,
574 .destroy = _enumerator_destroy,
575 },
576 .manager = this,
577 );
578 return &enumerator->enumerator;
579 }
580
581 /**
582 * Put an entry into the hash table.
583 * Note: The caller has to unlock the returned segment.
584 */
585 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
586 {
587 table_item_t *current, *item;
588 u_int row, segment;
589
590 INIT(item,
591 .value = entry,
592 );
593
594 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
595 segment = row & this->segment_mask;
596
597 lock_single_segment(this, segment);
598 current = this->ike_sa_table[row];
599 if (current)
600 { /* insert at the front of current bucket */
601 item->next = current;
602 }
603 this->ike_sa_table[row] = item;
604 this->segments[segment].count++;
605 return segment;
606 }
607
608 /**
609 * Remove an entry from the hash table.
610 * Note: The caller MUST have a lock on the segment of this entry.
611 */
612 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
613 {
614 table_item_t *item, *prev = NULL;
615 u_int row, segment;
616
617 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
618 segment = row & this->segment_mask;
619 item = this->ike_sa_table[row];
620 while (item)
621 {
622 if (item->value == entry)
623 {
624 if (prev)
625 {
626 prev->next = item->next;
627 }
628 else
629 {
630 this->ike_sa_table[row] = item->next;
631 }
632 this->segments[segment].count--;
633 free(item);
634 break;
635 }
636 prev = item;
637 item = item->next;
638 }
639 }
640
/**
 * Remove the entry at the current enumerator position.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		/* step back so the enumerator continues at the next item */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{	/* removed the row's head: current is now NULL, so the
			 * enumerator no longer holds this segment's lock - release it */
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
666
667 /**
668 * Find an entry using the provided match function to compare the entries for
669 * equality.
670 */
671 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
672 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
673 linked_list_match_t match, void *param)
674 {
675 table_item_t *item;
676 u_int row, seg;
677
678 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
679 seg = row & this->segment_mask;
680
681 lock_single_segment(this, seg);
682 item = this->ike_sa_table[row];
683 while (item)
684 {
685 if (match(item->value, param))
686 {
687 *entry = item->value;
688 *segment = seg;
689 /* the locked segment has to be unlocked by the caller */
690 return SUCCESS;
691 }
692 item = item->next;
693 }
694 unlock_single_segment(this, seg);
695 return NOT_FOUND;
696 }
697
/**
 * Find an entry by ike_sa_id_t, matching half-completed IKE_SAs as well.
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
						ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_id, ike_sa_id);
}
708
/**
 * Find an entry by IKE_SA pointer. The ike_sa_id is only used to locate the
 * hash table row, the comparison is done on the pointer itself.
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
			ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_sa, ike_sa);
}
719
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 *
 * The caller must hold the mutex of the given segment; the condvar wait
 * below releases and reacquires it while blocking.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
749
/**
 * Put a half-open SA into the hash table.
 *
 * The table is keyed by the raw remote address; per-peer and global
 * counters are maintained, separately for SAs where we are the responder.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first half-open SA with this peer, create a new counter object
		 * and insert it at the front of the bucket */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	if (!ike_id->is_initiator(ike_id))
	{
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
801
/**
 * Remove a half-open SA from the hash table, decrementing the per-peer and
 * global counters and destroying the counter object if it drops to zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{	/* no more half-open SAs with this peer, unlink and free */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
853
/**
 * Put an SA between two peers into the hash table.
 *
 * The table is keyed by both identities; a clone of the IKE_SA ID is
 * appended to the peer pair's list unless it is already registered.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
					(linked_list_match_t)entry->ike_sa_id->equals,
					NULL, entry->ike_sa_id) == SUCCESS)
			{	/* this IKE_SA is already registered for the peer pair */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first IKE_SA between these two identities */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
912
/**
 * Remove an SA between two peers from the hash table, destroying the peer
 * pair object once its list of IKE_SAs becomes empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* remove the matching IKE_SA ID from the peer pair's list */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no more IKE_SAs between these identities, unlink and free */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
976
977 /**
978 * Get a random SPI for new IKE_SAs
979 */
980 static uint64_t get_spi(private_ike_sa_manager_t *this)
981 {
982 uint64_t spi;
983
984 this->spi_lock->read_lock(this->spi_lock);
985 if (this->spi_cb.cb)
986 {
987 spi = this->spi_cb.cb(this->spi_cb.data);
988 }
989 else if (!this->rng ||
990 !this->rng->get_bytes(this->rng, sizeof(spi), (uint8_t*)&spi))
991 {
992 spi = 0;
993 }
994 this->spi_lock->unlock(this->spi_lock);
995 return spi;
996 }
997
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * @param hasher	hasher to use (its state is consumed incrementally)
 * @param message	initial IKE message to hash
 * @param hash		receives the allocated hash on success
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		uint16_t port;
		uint64_t spi;

		src = message->get_source(message);
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1038
1039 /**
1040 * Check if we already have created an IKE_SA based on the initial IKE message
1041 * with the given hash.
1042 * If not the hash is stored, the hash data is not(!) cloned.
1043 *
1044 * Also, the local SPI is returned. In case of a retransmit this is already
1045 * stored together with the hash, otherwise it is newly allocated and should
1046 * be used to create the IKE_SA.
1047 *
1048 * @returns ALREADY_DONE if the message with the given hash has been seen before
1049 * NOT_FOUND if the message hash was not found
1050 * FAILED if the SPI allocation failed
1051 */
1052 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1053 chunk_t init_hash, uint64_t *our_spi)
1054 {
1055 table_item_t *item;
1056 u_int row, segment;
1057 mutex_t *mutex;
1058 init_hash_t *init;
1059 uint64_t spi;
1060
1061 row = chunk_hash(init_hash) & this->table_mask;
1062 segment = row & this->segment_mask;
1063 mutex = this->init_hashes_segments[segment].mutex;
1064 mutex->lock(mutex);
1065 item = this->init_hashes_table[row];
1066 while (item)
1067 {
1068 init_hash_t *current = item->value;
1069
1070 if (chunk_equals(init_hash, current->hash))
1071 {
1072 *our_spi = current->our_spi;
1073 mutex->unlock(mutex);
1074 return ALREADY_DONE;
1075 }
1076 item = item->next;
1077 }
1078
1079 spi = get_spi(this);
1080 if (!spi)
1081 {
1082 return FAILED;
1083 }
1084
1085 INIT(init,
1086 .hash = {
1087 .len = init_hash.len,
1088 .ptr = init_hash.ptr,
1089 },
1090 .our_spi = spi,
1091 );
1092 INIT(item,
1093 .value = init,
1094 .next = this->init_hashes_table[row],
1095 );
1096 this->init_hashes_table[row] = item;
1097 *our_spi = init->our_spi;
1098 mutex->unlock(mutex);
1099 return NOT_FOUND;
1100 }
1101
1102 /**
1103 * Remove the hash of an initial IKE message from the cache.
1104 */
1105 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1106 {
1107 table_item_t *item, *prev = NULL;
1108 u_int row, segment;
1109 mutex_t *mutex;
1110
1111 row = chunk_hash(init_hash) & this->table_mask;
1112 segment = row & this->segment_mask;
1113 mutex = this->init_hashes_segments[segment].mutex;
1114 mutex->lock(mutex);
1115 item = this->init_hashes_table[row];
1116 while (item)
1117 {
1118 init_hash_t *current = item->value;
1119
1120 if (chunk_equals(init_hash, current->hash))
1121 {
1122 if (prev)
1123 {
1124 prev->next = item->next;
1125 }
1126 else
1127 {
1128 this->init_hashes_table[row] = item->next;
1129 }
1130 free(current);
1131 free(item);
1132 break;
1133 }
1134 prev = item;
1135 item = item->next;
1136 }
1137 mutex->unlock(mutex);
1138 }
1139
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout %N SA with SPIs %.16"PRIx64"_i %.16"PRIx64"_r",
		 ike_version_names, ike_sa_id->get_ike_version(ike_sa_id),
		 be64toh(ike_sa_id->get_initiator_spi(ike_sa_id)),
		 be64toh(ike_sa_id->get_responder_spi(ike_sa_id)));

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		if (wait_for_entry(this, entry, segment))
		{	/* mark the SA as exclusively checked out by this thread */
			entry->checked_out = thread_current();
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	/* announce the checked out SA (or NULL on failure) on the bus */
	charon->bus->set_sa(charon->bus, ike_sa);

	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1171
1172 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1173 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1174 {
1175 ike_sa_id_t *ike_sa_id;
1176 ike_sa_t *ike_sa;
1177 uint8_t ike_version;
1178 uint64_t spi;
1179
1180 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1181
1182 spi = get_spi(this);
1183 if (!spi)
1184 {
1185 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1186 return NULL;
1187 }
1188
1189 if (initiator)
1190 {
1191 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1192 }
1193 else
1194 {
1195 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1196 }
1197 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1198 ike_sa_id->destroy(ike_sa_id);
1199
1200 if (ike_sa)
1201 {
1202 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1203 ike_sa->get_unique_id(ike_sa));
1204 }
1205 return ike_sa;
1206 }
1207
1208 /**
1209 * Get the message ID or message hash to detect early retransmissions
1210 */
1211 static uint32_t get_message_id_or_hash(message_t *message)
1212 {
1213 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION)
1214 {
1215 /* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1216 * Mode, where all three messages use the same message ID */
1217 if (message->get_message_id(message) == 0 ||
1218 message->get_exchange_type(message) == QUICK_MODE)
1219 {
1220 return chunk_hash(message->get_packet_data(message));
1221 }
1222 }
1223 return message->get_message_id(message);
1224 }
1225
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout %N SA by message with SPIs %.16"PRIx64"_i "
		 "%.16"PRIx64"_r", ike_version_names, id->get_ike_version(id),
		 be64toh(id->get_initiator_spi(id)),
		 be64toh(id->get_responder_spi(id)));

	/* a missing responder SPI and MID 0 hint at an initial message for a new
	 * IKE_SA, verify via exchange type before treating it as such */
	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		uint64_t our_spi;
		chunk_t hash;

		/* hash the raw message to detect retransmits of the initial message */
		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			goto out;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						/* the entry takes ownership of id and hash, the new
						 * SA is handed out checked out by this thread */
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;
						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						segment = put_entry(this, entry);
						entry->checked_out = thread_current();
						unlock_single_segment(this, segment);

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));
						goto out;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit, drop the registered hash */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				goto out;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				goto out;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	/* regular lookup of an existing IKE_SA by its ID */
	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = thread_current();
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{	/* apply the responder SPI from the message, e.g. when the
				 * initiator receives the IKE_SA_INIT response */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);

out:
	/* publish the checked out SA (or NULL) as this thread's current SA */
	charon->bus->set_sa(charon->bus, ike_sa);
	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1391
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa && peer_cfg->get_ike_version(peer_cfg) != IKEV1)
	{	/* IKE_SA reuse disabled by config (not possible for IKEv1) */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		goto out;
	}

	/* search all segments for an existing IKE_SA with an equal config */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (!wait_for_entry(this, entry, segment))
		{
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING ||
			entry->ike_sa->get_state(entry->ike_sa) == IKE_REKEYED)
		{	/* skip IKE_SAs which are not usable, wake other waiting threads */
			entry->condvar->signal(entry->condvar);
			continue;
		}

		/* both the peer config and its IKE config must match to reuse the SA */
		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				entry->checked_out = thread_current();
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
		/* other threads might be waiting for this entry */
		entry->condvar->signal(entry->condvar);
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);

out:
	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1457
1458 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1459 private_ike_sa_manager_t *this, uint32_t id)
1460 {
1461 enumerator_t *enumerator;
1462 entry_t *entry;
1463 ike_sa_t *ike_sa = NULL;
1464 u_int segment;
1465
1466 DBG2(DBG_MGR, "checkout IKE_SA by unique ID %u", id);
1467
1468 enumerator = create_table_enumerator(this);
1469 while (enumerator->enumerate(enumerator, &entry, &segment))
1470 {
1471 if (wait_for_entry(this, entry, segment))
1472 {
1473 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1474 {
1475 ike_sa = entry->ike_sa;
1476 entry->checked_out = thread_current();
1477 break;
1478 }
1479 /* other threads might be waiting for this entry */
1480 entry->condvar->signal(entry->condvar);
1481 }
1482 }
1483 enumerator->destroy(enumerator);
1484
1485 if (ike_sa)
1486 {
1487 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1488 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1489 }
1490 else
1491 {
1492 DBG2(DBG_MGR, "IKE_SA checkout not successful");
1493 }
1494 charon->bus->set_sa(charon->bus, ike_sa);
1495 return ike_sa;
1496 }
1497
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
	private_ike_sa_manager_t *this, char *name, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by%s name '%s'", child ? " child" : "", name);

	/* scan all segments for a matching IKE_SA or CHILD_SA name */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a policy name ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (streq(child_sa->get_name(child_sa), name))
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a connection name */
			{
				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = thread_current();
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	/* publish the checked out SA (or NULL) as this thread's current SA */
	charon->bus->set_sa(charon->bus, ike_sa);

	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1557
1558 /**
1559 * enumerator filter function, waiting variant
1560 */
1561 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1562 entry_t **in, ike_sa_t **out, u_int *segment)
1563 {
1564 if (wait_for_entry(this, *in, *segment))
1565 {
1566 *out = (*in)->ike_sa;
1567 charon->bus->set_sa(charon->bus, *out);
1568 return TRUE;
1569 }
1570 return FALSE;
1571 }
1572
1573 /**
1574 * enumerator filter function, skipping variant
1575 */
1576 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1577 entry_t **in, ike_sa_t **out, u_int *segment)
1578 {
1579 if (!(*in)->driveout_new_threads &&
1580 !(*in)->driveout_waiting_threads &&
1581 !(*in)->checked_out)
1582 {
1583 *out = (*in)->ike_sa;
1584 charon->bus->set_sa(charon->bus, *out);
1585 return TRUE;
1586 }
1587 return FALSE;
1588 }
1589
/**
 * Cleanup callback for create_enumerator(), resets the thread's current
 * IKE_SA on the bus after enumeration
 */
static void reset_sa(void *data)
{
	charon->bus->set_sa(charon->bus, NULL);
}
1597
1598 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1599 private_ike_sa_manager_t* this, bool wait)
1600 {
1601 return enumerator_create_filter(create_table_enumerator(this),
1602 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1603 this, reset_sa);
1604 }
1605
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = NULL;
		entry->processing = -1;
		/* update the half-open table to reflect the SA's current state */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* the SA is new to the manager, create an entry for it */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		segment = put_entry(this, entry);
	}
	DBG2(DBG_MGR, "checkin of IKE_SA successful");

	/* apply identities for duplicate test, but only once per entry */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_uniqueness() will try to check out this SA */
				entry->checked_out = thread_current();
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = NULL;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	/* reset the thread's current SA on the bus */
	charon->bus->set_sa(charon->bus, NULL);
}
1725
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored checkin and destroy of IKE_SA during shutdown");
			entry->checked_out = NULL;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		/* no thread references the entry anymore, remove it from the table */
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* drop the entry from the auxiliary tables it might be in */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "checkin and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to checkin and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	/* reset the thread's current SA on the bus */
	charon->bus->set_sa(charon->bus, NULL);
}
1794
/**
 * Cleanup function for create_id_enumerator, destroys the cloned list of
 * IKE_SA IDs including the contained IDs
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1802
1803 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
1804 private_ike_sa_manager_t *this, identification_t *me,
1805 identification_t *other, int family)
1806 {
1807 table_item_t *item;
1808 u_int row, segment;
1809 rwlock_t *lock;
1810 linked_list_t *ids = NULL;
1811
1812 row = chunk_hash_inc(other->get_encoding(other),
1813 chunk_hash(me->get_encoding(me))) & this->table_mask;
1814 segment = row & this->segment_mask;
1815
1816 lock = this->connected_peers_segments[segment].lock;
1817 lock->read_lock(lock);
1818 item = this->connected_peers_table[row];
1819 while (item)
1820 {
1821 connected_peers_t *current = item->value;
1822
1823 if (connected_peers_match(current, me, other, family))
1824 {
1825 ids = current->sas->clone_offset(current->sas,
1826 offsetof(ike_sa_id_t, clone));
1827 break;
1828 }
1829 item = item->next;
1830 }
1831 lock->unlock(lock);
1832
1833 if (!ids)
1834 {
1835 return enumerator_create_empty();
1836 }
1837 return enumerator_create_cleaner(ids->create_enumerator(ids),
1838 (void*)id_enumerator_cleanup, ids);
1839 }
1840
/**
 * Move all CHILD_SAs and virtual IPs from old to new
 *
 * @param old	IKE_SA to adopt CHILD_SAs and virtual IPs from
 * @param new	IKE_SA that takes them over
 */
static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
{
	enumerator_t *enumerator;
	child_sa_t *child_sa;
	host_t *vip;
	int chcount = 0, vipcount = 0;

	/* announce the migration before moving anything */
	charon->bus->children_migrate(charon->bus, new->get_id(new),
								  new->get_unique_id(new));
	enumerator = old->create_child_sa_enumerator(old);
	while (enumerator->enumerate(enumerator, &child_sa))
	{
		old->remove_child_sa(old, enumerator);
		new->add_child_sa(new, child_sa);
		chcount++;
	}
	enumerator->destroy(enumerator);

	enumerator = old->create_virtual_ip_enumerator(old, FALSE);
	while (enumerator->enumerate(enumerator, &vip))
	{
		new->add_virtual_ip(new, FALSE, vip);
		vipcount++;
	}
	enumerator->destroy(enumerator);
	/* this does not release the addresses, which is good, but it does trigger
	 * an assign_vips(FALSE) event... */
	old->clear_virtual_ips(old, FALSE);
	/* ...trigger the analogous event on the new SA */
	charon->bus->set_sa(charon->bus, new);
	charon->bus->assign_vips(charon->bus, new, TRUE);
	charon->bus->children_migrate(charon->bus, NULL, 0);
	charon->bus->set_sa(charon->bus, old);

	if (chcount || vipcount)
	{
		DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
			 "children and %d virtual IPs", chcount, vipcount);
	}
}
1884
/**
 * Delete an existing IKE_SA due to a unique replace policy
 *
 * @param duplicate		existing IKE_SA to replace
 * @param new			new IKE_SA that replaces it
 * @param other			remote identity, for logging
 * @param host			remote host of the new IKE_SA
 * @return				status of the delete on the duplicate
 */
static status_t enforce_replace(private_ike_sa_manager_t *this,
								ike_sa_t *duplicate, ike_sa_t *new,
								identification_t *other, host_t *host)
{
	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);

	if (host->equals(host, duplicate->get_other_host(duplicate)))
	{
		/* looks like a reauthentication attempt */
		if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
			new->get_version(new) == IKEV1)
		{
			/* IKEv1 implicitly takes over children, IKEv2 recreates them
			 * explicitly. */
			adopt_children_and_vips(duplicate, new);
		}
		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
		 * peers need to complete the new SA first, otherwise the quick modes
		 * might get lost. For IKEv2 we do the same, as we want overlapping
		 * CHILD_SAs to keep connectivity up. */
		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
		return SUCCESS;
	}
	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
		 "uniqueness policy", other);
	return duplicate->delete(duplicate);
}
1916
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{	/* uniqueness not enforced for this config */
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	/* enumerate all SAs established between the same identities */
	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{	/* INITIAL_CONTACT received, destroy duplicates unconditionally */
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		/* only enforce uniqueness between SAs with equal configs */
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
										duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
2004
2005 METHOD(ike_sa_manager_t, has_contact, bool,
2006 private_ike_sa_manager_t *this, identification_t *me,
2007 identification_t *other, int family)
2008 {
2009 table_item_t *item;
2010 u_int row, segment;
2011 rwlock_t *lock;
2012 bool found = FALSE;
2013
2014 row = chunk_hash_inc(other->get_encoding(other),
2015 chunk_hash(me->get_encoding(me))) & this->table_mask;
2016 segment = row & this->segment_mask;
2017 lock = this->connected_peers_segments[segment].lock;
2018 lock->read_lock(lock);
2019 item = this->connected_peers_table[row];
2020 while (item)
2021 {
2022 if (connected_peers_match(item->value, me, other, family))
2023 {
2024 found = TRUE;
2025 break;
2026 }
2027 item = item->next;
2028 }
2029 lock->unlock(lock);
2030
2031 return found;
2032 }
2033
2034 METHOD(ike_sa_manager_t, get_count, u_int,
2035 private_ike_sa_manager_t *this)
2036 {
2037 u_int segment, count = 0;
2038 mutex_t *mutex;
2039
2040 for (segment = 0; segment < this->segment_count; segment++)
2041 {
2042 mutex = this->segments[segment & this->segment_mask].mutex;
2043 mutex->lock(mutex);
2044 count += this->segments[segment].count;
2045 mutex->unlock(mutex);
2046 }
2047 return count;
2048 }
2049
2050 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
2051 private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
2052 {
2053 table_item_t *item;
2054 u_int row, segment;
2055 rwlock_t *lock;
2056 chunk_t addr;
2057 u_int count = 0;
2058
2059 if (ip)
2060 {
2061 addr = ip->get_address(ip);
2062 row = chunk_hash(addr) & this->table_mask;
2063 segment = row & this->segment_mask;
2064 lock = this->half_open_segments[segment].lock;
2065 lock->read_lock(lock);
2066 item = this->half_open_table[row];
2067 while (item)
2068 {
2069 half_open_t *half_open = item->value;
2070
2071 if (chunk_equals(addr, half_open->other))
2072 {
2073 count = responder_only ? half_open->count_responder
2074 : half_open->count;
2075 break;
2076 }
2077 item = item->next;
2078 }
2079 lock->unlock(lock);
2080 }
2081 else
2082 {
2083 count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
2084 : (u_int)ref_cur(&this->half_open_count);
2085 }
2086 return count;
2087 }
2088
METHOD(ike_sa_manager_t, set_spi_cb, void,
	private_ike_sa_manager_t *this, spi_cb_t callback, void *data)
{
	/* write-lock so SPI allocation never sees a callback paired with
	 * mismatched user data */
	this->spi_lock->write_lock(this->spi_lock);
	this->spi_cb.cb = callback;
	this->spi_cb.data = data;
	this->spi_lock->unlock(this->spi_lock);
}
2097
/**
 * Destroy all entries, removing them from the main and auxiliary tables.
 * Must be called with all segments locked.
 */
static void destroy_all_entries(private_ike_sa_manager_t *this)
{
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		/* remove the entry from all auxiliary tables it is registered in */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
}
2129
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	destroy_all_entries(this);
	unlock_all_segments(this);

	/* disable SPI generation, so no new SAs can be created after flushing */
	this->spi_lock->write_lock(this->spi_lock);
	DESTROY_IF(this->rng);
	this->rng = NULL;
	this->spi_cb.cb = NULL;
	this->spi_cb.data = NULL;
	this->spi_lock->unlock(this->spi_lock);
}
2198
METHOD(ike_sa_manager_t, destroy, void,
	private_ike_sa_manager_t *this)
{
	u_int i;

	/* in case new SAs were checked in after flush() was called */
	lock_all_segments(this);
	destroy_all_entries(this);
	unlock_all_segments(this);

	/* release the hash tables and their per-segment locks */
	free(this->ike_sa_table);
	free(this->half_open_table);
	free(this->connected_peers_table);
	free(this->init_hashes_table);
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex->destroy(this->segments[i].mutex);
		this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
		this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
		this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
	}
	free(this->segments);
	free(this->half_open_segments);
	free(this->connected_peers_segments);
	free(this->init_hashes_segments);

	this->spi_lock->destroy(this->spi_lock);
	free(this);
}
2228
2229 /**
2230 * This function returns the next-highest power of two for the given number.
2231 * The algorithm works by setting all bits on the right-hand side of the most
2232 * significant 1 to 1 and then increments the whole number so it rolls over
2233 * to the nearest power of two. Note: returns 0 for n == 0
2234 */
2235 static u_int get_nearest_powerof2(u_int n)
2236 {
2237 u_int i;
2238
2239 --n;
2240 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2241 {
2242 n |= n >> i;
2243 }
2244 return ++n;
2245 }
2246
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.set_spi_cb = _set_spi_cb,
			.destroy = _destroy,
		},
	);

	/* the RNG is used to allocate SPIs, without it the manager is useless */
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}
	this->spi_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	/* optional limit on the number of concurrently managed IKE_SAs (0 = off) */
	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* table size and segment count are rounded to powers of two so masking
	 * can be used instead of modulo */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_size",
											DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
										lib->settings, "%s.ikesa_table_segments",
										DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	}

	/* whether checkout_by_config() may reuse existing IKE_SAs */
	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}