ike-sa-manager: Store a reference to the thread that checked out an IKE_SA
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2016 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/thread.h>
27 #include <threading/condvar.h>
28 #include <threading/mutex.h>
29 #include <threading/rwlock.h>
30 #include <collections/linked_list.h>
31 #include <crypto/hashers/hasher.h>
32 #include <processing/jobs/delete_ike_sa_job.h>
33
/* the default size of the hash table (MUST be a power of 2); deliberately
 * minimal — presumably the table is grown/configured elsewhere, confirm */
#define DEFAULT_HASHTABLE_SIZE 1

/* the maximum size of the hash table (MUST be a power of 2) */
#define MAX_HASHTABLE_SIZE (1 << 30)

/* the default number of segments, i.e. locks (MUST be a power of 2) */
#define DEFAULT_SEGMENT_COUNT 1
42
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Thread by which this IKE_SA is currently checked out, if any.
	 * NULL while the entry is not checked out.
	 */
	thread_t *checked_out;

	/**
	 * Does this SA drive out new threads?
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads?
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * Hash of the IKE_SA_INIT message, used to detect retransmissions.
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * Message ID or hash of currently processing message, -1 if none
	 * (unsigned field, so -1 is stored as 0xFFFFFFFF).
	 */
	u_int32_t processing;
};
117
118 /**
119 * Implementation of entry_t.destroy.
120 */
121 static status_t entry_destroy(entry_t *this)
122 {
123 /* also destroy IKE SA */
124 this->ike_sa->destroy(this->ike_sa);
125 this->ike_sa_id->destroy(this->ike_sa_id);
126 chunk_free(&this->init_hash);
127 DESTROY_IF(this->other);
128 DESTROY_IF(this->my_id);
129 DESTROY_IF(this->other_id);
130 this->condvar->destroy(this->condvar);
131 free(this);
132 return SUCCESS;
133 }
134
135 /**
136 * Creates a new entry for the ike_sa_t list.
137 */
138 static entry_t *entry_create()
139 {
140 entry_t *this;
141
142 INIT(this,
143 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
144 .processing = -1,
145 );
146
147 return this;
148 }
149
150 /**
151 * Function that matches entry_t objects by ike_sa_id_t.
152 */
153 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
154 {
155 if (id->equals(id, entry->ike_sa_id))
156 {
157 return TRUE;
158 }
159 if ((id->get_responder_spi(id) == 0 ||
160 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
161 (id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
162 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
163 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
164 {
165 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
166 return TRUE;
167 }
168 return FALSE;
169 }
170
171 /**
172 * Function that matches entry_t objects by ike_sa_t pointers.
173 */
174 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
175 {
176 return entry->ike_sa == ike_sa;
177 }
178
179 /**
180 * Hash function for ike_sa_id_t objects.
181 */
182 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
183 {
184 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
185 * locally unique, so we use our randomly allocated SPI whether we are
186 * initiator or responder to ensure a good distribution. The latter is not
187 * possible for IKEv1 as we don't know whether we are original initiator or
188 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
189 * SPIs (Cookies) to be allocated near random (we allocate them randomly
190 * anyway) it seems safe to always use the initiator SPI. */
191 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
192 ike_sa_id->is_initiator(ike_sa_id))
193 {
194 return ike_sa_id->get_initiator_spi(ike_sa_id);
195 }
196 return ike_sa_id->get_responder_spi(ike_sa_id);
197 }
198
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer (keyed by remote address).
 */
struct half_open_t {
	/** chunk of remote host address (cloned, owned by this struct) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};
214
215 /**
216 * Destroys a half_open_t object.
217 */
218 static void half_open_destroy(half_open_t *this)
219 {
220 chunk_free(&this->other);
221 free(this);
222 }
223
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct tracking all established IKE_SAs between one pair of identities,
 * used for duplicate checking.
 */
struct connected_peers_t {
	/** own identity (cloned, owned by this struct) */
	identification_t *my_id;

	/** remote identity (cloned, owned by this struct) */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
239
/**
 * Destroys a connected_peers_t object.
 * NOTE(review): the sas list is assumed to be empty here (callers remove
 * the entries first) — the ike_sa_id_t elements are not destroyed.
 */
static void connected_peers_destroy(connected_peers_t *this)
{
	this->my_id->destroy(this->my_id);
	this->other_id->destroy(this->other_id);
	this->sas->destroy(this->sas);
	free(this);
}
247
248 /**
249 * Function that matches connected_peers_t objects by the given ids.
250 */
251 static inline bool connected_peers_match(connected_peers_t *connected_peers,
252 identification_t *my_id, identification_t *other_id,
253 int family)
254 {
255 return my_id->equals(my_id, connected_peers->my_id) &&
256 other_id->equals(other_id, connected_peers->other_id) &&
257 (!family || family == connected_peers->family);
258 }
259
typedef struct init_hash_t init_hash_t;

/**
 * Cached hash of an initial IKE message, used for retransmission detection.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};
269
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table (a segment is the group of
 * table rows protected by one lock).
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment */
	u_int count;
};
282
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash
 * tables, which allow shared (read) access.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
296
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked overflow list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list, NULL at the tail */
	table_item_t *next;
};
310
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table (a power of 2).
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows (table_size - 1).
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table (each protects table_size/segment_count rows).
	 */
	segment_t *segments;

	/**
	 * The number of segments (a power of 2).
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment (segment_count - 1).
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects, keyed by remote address.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs.
	 */
	refcount_t half_open_count;

	/**
	 * Total number of half-open IKE_SAs as responder.
	 */
	refcount_t half_open_count_responder;

	/**
	 * Hash table with connected_peers_t objects, keyed by the identity pair.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects, for retransmission detection.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * Registered callback for IKE SPIs, overrides the RNG when set
	 */
	struct {
		spi_cb_t cb;
		void *data;
	} spi_cb;

	/**
	 * Lock to access the RNG instance and the callback
	 */
	rwlock_t *spi_lock;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, 0 if unlimited
	 */
	u_int ikesa_limit;
};
420
421 /**
422 * Acquire a lock to access the segment of the table row with the given index.
423 * It also works with the segment index directly.
424 */
425 static inline void lock_single_segment(private_ike_sa_manager_t *this,
426 u_int index)
427 {
428 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
429 lock->lock(lock);
430 }
431
432 /**
433 * Release the lock required to access the segment of the table row with the given index.
434 * It also works with the segment index directly.
435 */
436 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
437 u_int index)
438 {
439 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
440 lock->unlock(lock);
441 }
442
443 /**
444 * Lock all segments
445 */
446 static void lock_all_segments(private_ike_sa_manager_t *this)
447 {
448 u_int i;
449
450 for (i = 0; i < this->segment_count; i++)
451 {
452 this->segments[i].mutex->lock(this->segments[i].mutex);
453 }
454 }
455
456 /**
457 * Unlock all segments
458 */
459 static void unlock_all_segments(private_ike_sa_manager_t *this)
460 {
461 u_int i;
462
463 for (i = 0; i < this->segment_count; i++)
464 {
465 this->segments[i].mutex->unlock(this->segments[i].mutex);
466 }
467 }
468
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry; its condvar is signaled when advancing
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item; while non-NULL the current segment stays locked
	 */
	table_item_t *current;

	/**
	 * previous table item, needed by remove_entry_at() to unlink current
	 */
	table_item_t *prev;
};
511
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	/* the segment lock is held between calls while an item is current; it is
	 * only acquired when entering a segment and released when leaving it */
	if (this->entry)
	{
		/* we left the previous entry, wake threads waiting on it */
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		/* rows of a segment are interleaved: row % segment_count == segment */
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{	/* advance within the current bucket's overflow list */
				this->current = this->current->next;
			}
			else
			{	/* entering a new row: lock its segment first */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				/* keep the segment locked until the next call */
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
548
549 METHOD(enumerator_t, enumerator_destroy, void,
550 private_enumerator_t *this)
551 {
552 if (this->entry)
553 {
554 this->entry->condvar->signal(this->entry->condvar);
555 }
556 if (this->current)
557 {
558 unlock_single_segment(this->manager, this->segment);
559 }
560 free(this);
561 }
562
563 /**
564 * Creates an enumerator to enumerate the entries in the hash table.
565 */
566 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
567 {
568 private_enumerator_t *enumerator;
569
570 INIT(enumerator,
571 .enumerator = {
572 .enumerate = (void*)_enumerate,
573 .destroy = _enumerator_destroy,
574 },
575 .manager = this,
576 );
577 return &enumerator->enumerator;
578 }
579
580 /**
581 * Put an entry into the hash table.
582 * Note: The caller has to unlock the returned segment.
583 */
584 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
585 {
586 table_item_t *current, *item;
587 u_int row, segment;
588
589 INIT(item,
590 .value = entry,
591 );
592
593 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
594 segment = row & this->segment_mask;
595
596 lock_single_segment(this, segment);
597 current = this->ike_sa_table[row];
598 if (current)
599 { /* insert at the front of current bucket */
600 item->next = current;
601 }
602 this->ike_sa_table[row] = item;
603 this->segments[segment].count++;
604 return segment;
605 }
606
607 /**
608 * Remove an entry from the hash table.
609 * Note: The caller MUST have a lock on the segment of this entry.
610 */
611 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
612 {
613 table_item_t *item, *prev = NULL;
614 u_int row, segment;
615
616 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
617 segment = row & this->segment_mask;
618 item = this->ike_sa_table[row];
619 while (item)
620 {
621 if (item->value == entry)
622 {
623 if (prev)
624 {
625 prev->next = item->next;
626 }
627 else
628 {
629 this->ike_sa_table[row] = item->next;
630 }
631 this->segments[segment].count--;
632 free(item);
633 break;
634 }
635 prev = item;
636 item = item->next;
637 }
638 }
639
640 /**
641 * Remove the entry at the current enumerator position.
642 */
643 static void remove_entry_at(private_enumerator_t *this)
644 {
645 this->entry = NULL;
646 if (this->current)
647 {
648 table_item_t *current = this->current;
649
650 this->manager->segments[this->segment].count--;
651 this->current = this->prev;
652
653 if (this->prev)
654 {
655 this->prev->next = current->next;
656 }
657 else
658 {
659 this->manager->ike_sa_table[this->row] = current->next;
660 unlock_single_segment(this->manager, this->segment);
661 }
662 free(current);
663 }
664 }
665
666 /**
667 * Find an entry using the provided match function to compare the entries for
668 * equality.
669 */
670 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
671 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
672 linked_list_match_t match, void *param)
673 {
674 table_item_t *item;
675 u_int row, seg;
676
677 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
678 seg = row & this->segment_mask;
679
680 lock_single_segment(this, seg);
681 item = this->ike_sa_table[row];
682 while (item)
683 {
684 if (match(item->value, param))
685 {
686 *entry = item->value;
687 *segment = seg;
688 /* the locked segment has to be unlocked by the caller */
689 return SUCCESS;
690 }
691 item = item->next;
692 }
693 unlock_single_segment(this, seg);
694 return NOT_FOUND;
695 }
696
697 /**
698 * Find an entry by ike_sa_id_t.
699 * Note: On SUCCESS, the caller has to unlock the segment.
700 */
701 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
702 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
703 {
704 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
705 (linked_list_match_t)entry_match_by_id, ike_sa_id);
706 }
707
708 /**
709 * Find an entry by IKE_SA pointer.
710 * Note: On SUCCESS, the caller has to unlock the segment.
711 */
712 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
713 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
714 {
715 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
716 (linked_list_match_t)entry_match_by_sa, ike_sa);
717 }
718
719 /**
720 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
721 * acquirable.
722 */
723 static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
724 u_int segment)
725 {
726 if (entry->driveout_new_threads)
727 {
728 /* we are not allowed to get this */
729 return FALSE;
730 }
731 while (entry->checked_out && !entry->driveout_waiting_threads)
732 {
733 /* so wait until we can get it for us.
734 * we register us as waiting. */
735 entry->waiting_threads++;
736 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
737 entry->waiting_threads--;
738 }
739 /* hm, a deletion request forbids us to get this SA, get next one */
740 if (entry->driveout_waiting_threads)
741 {
742 /* we must signal here, others may be waiting on it, too */
743 entry->condvar->signal(entry->condvar);
744 return FALSE;
745 }
746 return TRUE;
747 }
748
749 /**
750 * Put a half-open SA into the hash table.
751 */
752 static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
753 {
754 table_item_t *item;
755 u_int row, segment;
756 rwlock_t *lock;
757 ike_sa_id_t *ike_id;
758 half_open_t *half_open;
759 chunk_t addr;
760
761 ike_id = entry->ike_sa_id;
762 addr = entry->other->get_address(entry->other);
763 row = chunk_hash(addr) & this->table_mask;
764 segment = row & this->segment_mask;
765 lock = this->half_open_segments[segment].lock;
766 lock->write_lock(lock);
767 item = this->half_open_table[row];
768 while (item)
769 {
770 half_open = item->value;
771
772 if (chunk_equals(addr, half_open->other))
773 {
774 break;
775 }
776 item = item->next;
777 }
778
779 if (!item)
780 {
781 INIT(half_open,
782 .other = chunk_clone(addr),
783 );
784 INIT(item,
785 .value = half_open,
786 .next = this->half_open_table[row],
787 );
788 this->half_open_table[row] = item;
789 }
790 half_open->count++;
791 ref_get(&this->half_open_count);
792 if (!ike_id->is_initiator(ike_id))
793 {
794 half_open->count_responder++;
795 ref_get(&this->half_open_count_responder);
796 }
797 this->half_open_segments[segment].count++;
798 lock->unlock(lock);
799 }
800
801 /**
802 * Remove a half-open SA from the hash table.
803 */
804 static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
805 {
806 table_item_t *item, *prev = NULL;
807 u_int row, segment;
808 rwlock_t *lock;
809 ike_sa_id_t *ike_id;
810 chunk_t addr;
811
812 ike_id = entry->ike_sa_id;
813 addr = entry->other->get_address(entry->other);
814 row = chunk_hash(addr) & this->table_mask;
815 segment = row & this->segment_mask;
816 lock = this->half_open_segments[segment].lock;
817 lock->write_lock(lock);
818 item = this->half_open_table[row];
819 while (item)
820 {
821 half_open_t *half_open = item->value;
822
823 if (chunk_equals(addr, half_open->other))
824 {
825 if (!ike_id->is_initiator(ike_id))
826 {
827 half_open->count_responder--;
828 ignore_result(ref_put(&this->half_open_count_responder));
829 }
830 ignore_result(ref_put(&this->half_open_count));
831 if (--half_open->count == 0)
832 {
833 if (prev)
834 {
835 prev->next = item->next;
836 }
837 else
838 {
839 this->half_open_table[row] = item->next;
840 }
841 half_open_destroy(half_open);
842 free(item);
843 }
844 this->half_open_segments[segment].count--;
845 break;
846 }
847 prev = item;
848 item = item->next;
849 }
850 lock->unlock(lock);
851 }
852
853 /**
854 * Put an SA between two peers into the hash table.
855 */
856 static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
857 {
858 table_item_t *item;
859 u_int row, segment;
860 rwlock_t *lock;
861 connected_peers_t *connected_peers;
862 chunk_t my_id, other_id;
863 int family;
864
865 my_id = entry->my_id->get_encoding(entry->my_id);
866 other_id = entry->other_id->get_encoding(entry->other_id);
867 family = entry->other->get_family(entry->other);
868 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
869 segment = row & this->segment_mask;
870 lock = this->connected_peers_segments[segment].lock;
871 lock->write_lock(lock);
872 item = this->connected_peers_table[row];
873 while (item)
874 {
875 connected_peers = item->value;
876
877 if (connected_peers_match(connected_peers, entry->my_id,
878 entry->other_id, family))
879 {
880 if (connected_peers->sas->find_first(connected_peers->sas,
881 (linked_list_match_t)entry->ike_sa_id->equals,
882 NULL, entry->ike_sa_id) == SUCCESS)
883 {
884 lock->unlock(lock);
885 return;
886 }
887 break;
888 }
889 item = item->next;
890 }
891
892 if (!item)
893 {
894 INIT(connected_peers,
895 .my_id = entry->my_id->clone(entry->my_id),
896 .other_id = entry->other_id->clone(entry->other_id),
897 .family = family,
898 .sas = linked_list_create(),
899 );
900 INIT(item,
901 .value = connected_peers,
902 .next = this->connected_peers_table[row],
903 );
904 this->connected_peers_table[row] = item;
905 }
906 connected_peers->sas->insert_last(connected_peers->sas,
907 entry->ike_sa_id->clone(entry->ike_sa_id));
908 this->connected_peers_segments[segment].count++;
909 lock->unlock(lock);
910 }
911
912 /**
913 * Remove an SA between two peers from the hash table.
914 */
915 static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
916 {
917 table_item_t *item, *prev = NULL;
918 u_int row, segment;
919 rwlock_t *lock;
920 chunk_t my_id, other_id;
921 int family;
922
923 my_id = entry->my_id->get_encoding(entry->my_id);
924 other_id = entry->other_id->get_encoding(entry->other_id);
925 family = entry->other->get_family(entry->other);
926
927 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
928 segment = row & this->segment_mask;
929
930 lock = this->connected_peers_segments[segment].lock;
931 lock->write_lock(lock);
932 item = this->connected_peers_table[row];
933 while (item)
934 {
935 connected_peers_t *current = item->value;
936
937 if (connected_peers_match(current, entry->my_id, entry->other_id,
938 family))
939 {
940 enumerator_t *enumerator;
941 ike_sa_id_t *ike_sa_id;
942
943 enumerator = current->sas->create_enumerator(current->sas);
944 while (enumerator->enumerate(enumerator, &ike_sa_id))
945 {
946 if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
947 {
948 current->sas->remove_at(current->sas, enumerator);
949 ike_sa_id->destroy(ike_sa_id);
950 this->connected_peers_segments[segment].count--;
951 break;
952 }
953 }
954 enumerator->destroy(enumerator);
955 if (current->sas->get_count(current->sas) == 0)
956 {
957 if (prev)
958 {
959 prev->next = item->next;
960 }
961 else
962 {
963 this->connected_peers_table[row] = item->next;
964 }
965 connected_peers_destroy(current);
966 free(item);
967 }
968 break;
969 }
970 prev = item;
971 item = item->next;
972 }
973 lock->unlock(lock);
974 }
975
976 /**
977 * Get a random SPI for new IKE_SAs
978 */
979 static u_int64_t get_spi(private_ike_sa_manager_t *this)
980 {
981 u_int64_t spi;
982
983 this->spi_lock->read_lock(this->spi_lock);
984 if (this->spi_cb.cb)
985 {
986 spi = this->spi_cb.cb(this->spi_cb.data);
987 }
988 else if (!this->rng ||
989 !this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
990 {
991 spi = 0;
992 }
993 this->spi_lock->unlock(this->spi_lock);
994 return spi;
995 }
996
997 /**
998 * Calculate the hash of the initial IKE message. Memory for the hash is
999 * allocated on success.
1000 *
1001 * @returns TRUE on success
1002 */
1003 static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
1004 {
1005 host_t *src;
1006
1007 if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
1008 { /* only hash the source IP, port and SPI for fragmented init messages */
1009 u_int16_t port;
1010 u_int64_t spi;
1011
1012 src = message->get_source(message);
1013 if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
1014 {
1015 return FALSE;
1016 }
1017 port = src->get_port(src);
1018 if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
1019 {
1020 return FALSE;
1021 }
1022 spi = message->get_initiator_spi(message);
1023 return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
1024 }
1025 if (message->get_exchange_type(message) == ID_PROT)
1026 { /* include the source for Main Mode as the hash will be the same if
1027 * SPIs are reused by two initiators that use the same proposal */
1028 src = message->get_source(message);
1029
1030 if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
1031 {
1032 return FALSE;
1033 }
1034 }
1035 return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
1036 }
1037
1038 /**
1039 * Check if we already have created an IKE_SA based on the initial IKE message
1040 * with the given hash.
1041 * If not the hash is stored, the hash data is not(!) cloned.
1042 *
1043 * Also, the local SPI is returned. In case of a retransmit this is already
1044 * stored together with the hash, otherwise it is newly allocated and should
1045 * be used to create the IKE_SA.
1046 *
1047 * @returns ALREADY_DONE if the message with the given hash has been seen before
1048 * NOT_FOUND if the message hash was not found
1049 * FAILED if the SPI allocation failed
1050 */
1051 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1052 chunk_t init_hash, u_int64_t *our_spi)
1053 {
1054 table_item_t *item;
1055 u_int row, segment;
1056 mutex_t *mutex;
1057 init_hash_t *init;
1058 u_int64_t spi;
1059
1060 row = chunk_hash(init_hash) & this->table_mask;
1061 segment = row & this->segment_mask;
1062 mutex = this->init_hashes_segments[segment].mutex;
1063 mutex->lock(mutex);
1064 item = this->init_hashes_table[row];
1065 while (item)
1066 {
1067 init_hash_t *current = item->value;
1068
1069 if (chunk_equals(init_hash, current->hash))
1070 {
1071 *our_spi = current->our_spi;
1072 mutex->unlock(mutex);
1073 return ALREADY_DONE;
1074 }
1075 item = item->next;
1076 }
1077
1078 spi = get_spi(this);
1079 if (!spi)
1080 {
1081 return FAILED;
1082 }
1083
1084 INIT(init,
1085 .hash = {
1086 .len = init_hash.len,
1087 .ptr = init_hash.ptr,
1088 },
1089 .our_spi = spi,
1090 );
1091 INIT(item,
1092 .value = init,
1093 .next = this->init_hashes_table[row],
1094 );
1095 this->init_hashes_table[row] = item;
1096 *our_spi = init->our_spi;
1097 mutex->unlock(mutex);
1098 return NOT_FOUND;
1099 }
1100
1101 /**
1102 * Remove the hash of an initial IKE message from the cache.
1103 */
1104 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1105 {
1106 table_item_t *item, *prev = NULL;
1107 u_int row, segment;
1108 mutex_t *mutex;
1109
1110 row = chunk_hash(init_hash) & this->table_mask;
1111 segment = row & this->segment_mask;
1112 mutex = this->init_hashes_segments[segment].mutex;
1113 mutex->lock(mutex);
1114 item = this->init_hashes_table[row];
1115 while (item)
1116 {
1117 init_hash_t *current = item->value;
1118
1119 if (chunk_equals(init_hash, current->hash))
1120 {
1121 if (prev)
1122 {
1123 prev->next = item->next;
1124 }
1125 else
1126 {
1127 this->init_hashes_table[row] = item->next;
1128 }
1129 free(current);
1130 free(item);
1131 break;
1132 }
1133 prev = item;
1134 item = item->next;
1135 }
1136 mutex->unlock(mutex);
1137 }
1138
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA");

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* record which thread holds the SA (used to identify the
			 * checking-out thread, e.g. for diagnostics) */
			entry->checked_out = thread_current();
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		/* get_entry_by_id() returned with the segment locked */
		unlock_single_segment(this, segment);
	}
	/* publish the checked out SA (or NULL) on the bus for this thread */
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1162
1163 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1164 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1165 {
1166 ike_sa_id_t *ike_sa_id;
1167 ike_sa_t *ike_sa;
1168 u_int8_t ike_version;
1169 u_int64_t spi;
1170
1171 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1172
1173 spi = get_spi(this);
1174 if (!spi)
1175 {
1176 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1177 return NULL;
1178 }
1179
1180 if (initiator)
1181 {
1182 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1183 }
1184 else
1185 {
1186 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1187 }
1188 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1189 ike_sa_id->destroy(ike_sa_id);
1190
1191 if (ike_sa)
1192 {
1193 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1194 ike_sa->get_unique_id(ike_sa));
1195 }
1196 return ike_sa;
1197 }
1198
1199 /**
1200 * Get the message ID or message hash to detect early retransmissions
1201 */
1202 static u_int32_t get_message_id_or_hash(message_t *message)
1203 {
1204 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION)
1205 {
1206 /* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1207 * Mode, where all three messages use the same message ID */
1208 if (message->get_message_id(message) == 0 ||
1209 message->get_exchange_type(message) == QUICK_MODE)
1210 {
1211 return chunk_hash(message->get_packet_data(message));
1212 }
1213 }
1214 return message->get_message_id(message);
1215 }
1216
/**
 * Check out the IKE_SA a decoded message is addressed to.
 *
 * For initial requests (IKEv2 IKE_SA_INIT, IKEv1 Main/Aggressive Mode with
 * a zero responder SPI) a new IKE_SA is created, unless a hash of the
 * message shows we already handled this init message (retransmission).
 * All other messages are looked up by their SPIs.  Returns NULL if the SA
 * is unknown, another thread is already processing the same message, or
 * creating a new SA fails.  The returned SA is checked out by the calling
 * thread and set as the bus' current SA.
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout IKE_SA by message");

	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		/* no responder SPI assigned yet: check whether this is an initial
		 * request that should set up a new IKE_SA */
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		u_int64_t our_spi;
		chunk_t hash;

		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			return NULL;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;
						/* remember which message we process, to detect
						 * retransmits arriving while we work on it */
						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						/* put_entry() returns with the segment locked */
						segment = put_entry(this, entry);
						entry->checked_out = thread_current();
						unlock_single_segment(this, segment);

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));

						charon->bus->set_sa(charon->bus, ike_sa);
						return ike_sa;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit: forget the init hash again so
				 * a later attempt is not treated as a retransmit */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				return NULL;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				return NULL;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = thread_current();
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{	/* apply the responder SPI we allocated, e.g. when handling a
				 * retransmitted init message for an existing entry */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1375
/**
 * Check out an existing IKE_SA that uses the given peer config, so it can be
 * reused for additional CHILD_SAs.  If reuse is disabled (IKEv2 only) or no
 * matching SA exists, a new IKE_SA is created instead.  May return NULL if
 * creating a new SA fails.
 */
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa && peer_cfg->get_ike_version(peer_cfg) != IKEV1)
	{	/* IKE_SA reuse disabled by config (not possible for IKEv1) */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		return ike_sa;
	}

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (!wait_for_entry(this, entry, segment))
		{
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
		{	/* skip IKE_SAs which are not usable, wake other waiting threads */
			entry->condvar->signal(entry->condvar);
			continue;
		}

		/* reuse only if both the peer config and its IKE config match */
		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				entry->checked_out = thread_current();
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
		/* other threads might be waiting for this entry */
		entry->condvar->signal(entry->condvar);
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1434
/**
 * Check out the IKE_SA with the given unique ID, or return NULL if no such
 * SA exists.  Linearly scans the SA table, so the lookup is O(n).
 */
METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
	private_ike_sa_manager_t *this, u_int32_t id)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by ID %u", id);

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
			{
				ike_sa = entry->ike_sa;
				entry->checked_out = thread_current();
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	if (ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
			 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1470
/**
 * Check out an IKE_SA by configuration name.  If child is TRUE, the name is
 * matched against the names of the SA's CHILD_SAs instead of the IKE_SA's
 * own connection name.  Returns NULL if nothing matches.
 */
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
	private_ike_sa_manager_t *this, char *name, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a policy name ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (streq(child_sa->get_name(child_sa), name))
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a connection name */
			{
				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = thread_current();
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1523
1524 /**
1525 * enumerator filter function, waiting variant
1526 */
1527 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1528 entry_t **in, ike_sa_t **out, u_int *segment)
1529 {
1530 if (wait_for_entry(this, *in, *segment))
1531 {
1532 *out = (*in)->ike_sa;
1533 charon->bus->set_sa(charon->bus, *out);
1534 return TRUE;
1535 }
1536 return FALSE;
1537 }
1538
1539 /**
1540 * enumerator filter function, skipping variant
1541 */
1542 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1543 entry_t **in, ike_sa_t **out, u_int *segment)
1544 {
1545 if (!(*in)->driveout_new_threads &&
1546 !(*in)->driveout_waiting_threads &&
1547 !(*in)->checked_out)
1548 {
1549 *out = (*in)->ike_sa;
1550 charon->bus->set_sa(charon->bus, *out);
1551 return TRUE;
1552 }
1553 return FALSE;
1554 }
1555
1556 /**
1557 * Reset threads SA after enumeration
1558 */
1559 static void reset_sa(void *data)
1560 {
1561 charon->bus->set_sa(charon->bus, NULL);
1562 }
1563
1564 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1565 private_ike_sa_manager_t* this, bool wait)
1566 {
1567 return enumerator_create_filter(create_table_enumerator(this),
1568 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1569 this, reset_sa);
1570 }
1571
/**
 * Check an IKE_SA back into the manager, making it available to other
 * threads again.  Also maintains the half-open and connected-peers tables
 * and, for IKEv1 with INITIAL_CONTACT, enforces uniqueness before the SA is
 * registered as a connected peer.
 */
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = NULL;
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		DBG2(DBG_MGR, "check-in of IKE_SA successful.");
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* unknown SA (e.g. created with checkout_new()), register it now */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		/* put_entry() returns with the segment locked */
		segment = put_entry(this, entry);
	}

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_uniqueness() will try to check out this SA */
				entry->checked_out = thread_current();
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = NULL;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1691
/**
 * Check an IKE_SA back in and destroy it.  Waiting threads are driven out
 * of the entry first so nobody ends up using a destroyed SA.
 */
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
			entry->checked_out = NULL;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up all auxiliary tables the entry may be registered in */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		/* destroys the entry and the contained IKE_SA */
		entry_destroy(entry);

		DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1760
1761 /**
1762 * Cleanup function for create_id_enumerator
1763 */
1764 static void id_enumerator_cleanup(linked_list_t *ids)
1765 {
1766 ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
1767 }
1768
/**
 * Create an enumerator over the IDs of all IKE_SAs between the given
 * identities (for the given address family), as tracked in the
 * connected-peers table.  The IDs are cloned, so the enumerator stays valid
 * after the table lock is released.
 */
METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
	private_ike_sa_manager_t *this, identification_t *me,
	identification_t *other, int family)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	linked_list_t *ids = NULL;

	/* hash both identities to find the connected-peers bucket */
	row = chunk_hash_inc(other->get_encoding(other),
						 chunk_hash(me->get_encoding(me))) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->read_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, me, other, family))
		{
			/* clone the ID list so it outlives the read lock */
			ids = current->sas->clone_offset(current->sas,
											offsetof(ike_sa_id_t, clone));
			break;
		}
		item = item->next;
	}
	lock->unlock(lock);

	if (!ids)
	{
		return enumerator_create_empty();
	}
	return enumerator_create_cleaner(ids->create_enumerator(ids),
									 (void*)id_enumerator_cleanup, ids);
}
1806
/**
 * Move all CHILD_SAs and virtual IPs from old to new, raising the
 * appropriate migrate/assign_vips bus events.  Used when a reauthentication
 * replaces an existing IKE_SA.
 *
 * @param old		IKE_SA to strip of its children and virtual IPs
 * @param new		IKE_SA adopting them
 */
static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
{
	enumerator_t *enumerator;
	child_sa_t *child_sa;
	host_t *vip;
	int chcount = 0, vipcount = 0;

	/* announce where the children are about to migrate to */
	charon->bus->children_migrate(charon->bus, new->get_id(new),
								  new->get_unique_id(new));
	enumerator = old->create_child_sa_enumerator(old);
	while (enumerator->enumerate(enumerator, &child_sa))
	{
		old->remove_child_sa(old, enumerator);
		new->add_child_sa(new, child_sa);
		chcount++;
	}
	enumerator->destroy(enumerator);

	enumerator = old->create_virtual_ip_enumerator(old, FALSE);
	while (enumerator->enumerate(enumerator, &vip))
	{
		new->add_virtual_ip(new, FALSE, vip);
		vipcount++;
	}
	enumerator->destroy(enumerator);
	/* this does not release the addresses, which is good, but it does trigger
	 * an assign_vips(FALSE) event... */
	old->clear_virtual_ips(old, FALSE);
	/* ...trigger the analogous event on the new SA */
	charon->bus->set_sa(charon->bus, new);
	charon->bus->assign_vips(charon->bus, new, TRUE);
	charon->bus->children_migrate(charon->bus, NULL, 0);
	/* restore the old SA as the bus' current SA for the caller */
	charon->bus->set_sa(charon->bus, old);

	if (chcount || vipcount)
	{
		DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
			 "children and %d virtual IPs", chcount, vipcount);
	}
}
1850
/**
 * Delete an existing IKE_SA due to a unique replace policy.
 *
 * If the duplicate comes from the same remote host it is treated as a
 * reauthentication: for IKEv1 children/VIPs are adopted, and the old SA is
 * deleted with a delay so the peer can finish establishing the new SA.
 *
 * @param duplicate		existing IKE_SA to replace
 * @param new			newly established IKE_SA
 * @param other			remote identity, for logging
 * @param host			remote host of the new IKE_SA
 * @return				SUCCESS, or DESTROY_ME as returned by delete()
 */
static status_t enforce_replace(private_ike_sa_manager_t *this,
								ike_sa_t *duplicate, ike_sa_t *new,
								identification_t *other, host_t *host)
{
	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);

	if (host->equals(host, duplicate->get_other_host(duplicate)))
	{
		/* looks like a reauthentication attempt */
		if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
			new->get_version(new) == IKEV1)
		{
			/* IKEv1 implicitly takes over children, IKEv2 recreates them
			 * explicitly. */
			adopt_children_and_vips(duplicate, new);
		}
		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
		 * peers need to complete the new SA first, otherwise the quick modes
		 * might get lost. For IKEv2 we do the same, as we want overlapping
		 * CHILD_SAs to keep connectivity up. */
		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
		return SUCCESS;
	}
	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
		 "uniqueness policy", other);
	return duplicate->delete(duplicate);
}
1882
/**
 * Enforce the peer config's uniqueness policy against all established
 * IKE_SAs with the same pair of identities.  Returns TRUE if the caller
 * should cancel establishing ike_sa (unique-keep policy found an existing
 * SA to keep).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{	/* uniqueness not enforced for this config */
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		/* check out each candidate duplicate; may block on other threads */
		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
										duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
1970
1971 METHOD(ike_sa_manager_t, has_contact, bool,
1972 private_ike_sa_manager_t *this, identification_t *me,
1973 identification_t *other, int family)
1974 {
1975 table_item_t *item;
1976 u_int row, segment;
1977 rwlock_t *lock;
1978 bool found = FALSE;
1979
1980 row = chunk_hash_inc(other->get_encoding(other),
1981 chunk_hash(me->get_encoding(me))) & this->table_mask;
1982 segment = row & this->segment_mask;
1983 lock = this->connected_peers_segments[segment].lock;
1984 lock->read_lock(lock);
1985 item = this->connected_peers_table[row];
1986 while (item)
1987 {
1988 if (connected_peers_match(item->value, me, other, family))
1989 {
1990 found = TRUE;
1991 break;
1992 }
1993 item = item->next;
1994 }
1995 lock->unlock(lock);
1996
1997 return found;
1998 }
1999
2000 METHOD(ike_sa_manager_t, get_count, u_int,
2001 private_ike_sa_manager_t *this)
2002 {
2003 u_int segment, count = 0;
2004 mutex_t *mutex;
2005
2006 for (segment = 0; segment < this->segment_count; segment++)
2007 {
2008 mutex = this->segments[segment & this->segment_mask].mutex;
2009 mutex->lock(mutex);
2010 count += this->segments[segment].count;
2011 mutex->unlock(mutex);
2012 }
2013 return count;
2014 }
2015
2016 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
2017 private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
2018 {
2019 table_item_t *item;
2020 u_int row, segment;
2021 rwlock_t *lock;
2022 chunk_t addr;
2023 u_int count = 0;
2024
2025 if (ip)
2026 {
2027 addr = ip->get_address(ip);
2028 row = chunk_hash(addr) & this->table_mask;
2029 segment = row & this->segment_mask;
2030 lock = this->half_open_segments[segment].lock;
2031 lock->read_lock(lock);
2032 item = this->half_open_table[row];
2033 while (item)
2034 {
2035 half_open_t *half_open = item->value;
2036
2037 if (chunk_equals(addr, half_open->other))
2038 {
2039 count = responder_only ? half_open->count_responder
2040 : half_open->count;
2041 break;
2042 }
2043 item = item->next;
2044 }
2045 lock->unlock(lock);
2046 }
2047 else
2048 {
2049 count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
2050 : (u_int)ref_cur(&this->half_open_count);
2051 }
2052 return count;
2053 }
2054
2055 METHOD(ike_sa_manager_t, set_spi_cb, void,
2056 private_ike_sa_manager_t *this, spi_cb_t callback, void *data)
2057 {
2058 this->spi_lock->write_lock(this->spi_lock);
2059 this->spi_cb.cb = callback;
2060 this->spi_cb.data = data;
2061 this->spi_lock->unlock(this->spi_lock);
2062 }
2063
/**
 * Flush the manager on shutdown: drive all threads out of the entries,
 * initiate deletion of every IKE_SA and destroy all entries.  Also disables
 * further SPI generation.  Runs with all segments locked.
 */
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads  */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		/* unregister the entry from all auxiliary tables before destroying */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* disable SPI generation; no new IKE_SAs may be created after flush() */
	this->spi_lock->write_lock(this->spi_lock);
	this->rng->destroy(this->rng);
	this->rng = NULL;
	this->spi_cb.cb = NULL;
	this->spi_cb.data = NULL;
	this->spi_lock->unlock(this->spi_lock);
}
2153
/**
 * Destroy the manager.  flush() must have been called before, so only the
 * (already empty) tables, the per-segment locks and the manager itself
 * remain to be freed.
 */
METHOD(ike_sa_manager_t, destroy, void,
	private_ike_sa_manager_t *this)
{
	u_int i;

	/* these are already cleared in flush() above */
	free(this->ike_sa_table);
	free(this->half_open_table);
	free(this->connected_peers_table);
	free(this->init_hashes_table);
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex->destroy(this->segments[i].mutex);
		this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
		this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
		this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
	}
	free(this->segments);
	free(this->half_open_segments);
	free(this->connected_peers_segments);
	free(this->init_hashes_segments);

	this->spi_lock->destroy(this->spi_lock);
	free(this);
}
2179
2180 /**
2181 * This function returns the next-highest power of two for the given number.
2182 * The algorithm works by setting all bits on the right-hand side of the most
2183 * significant 1 to 1 and then increments the whole number so it rolls over
2184 * to the nearest power of two. Note: returns 0 for n == 0
2185 */
2186 static u_int get_nearest_powerof2(u_int n)
2187 {
2188 u_int i;
2189
2190 --n;
2191 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2192 {
2193 n |= n >> i;
2194 }
2195 return ++n;
2196 }
2197
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.set_spi_cb = _set_spi_cb,
			.destroy = _destroy,
		},
	);

	/* RNG used to allocate SPIs; a weak RNG is sufficient for that */
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}
	this->spi_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	/* 0 disables the IKE_SA limit check in checkout_by_message() */
	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* table size and segment count are rounded to powers of two so masking
	 * can be used instead of modulo */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_size",
											DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_segments",
											DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->segments[i].count = 0;
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->half_open_segments[i].count = 0;
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->connected_peers_segments[i].count = 0;
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->init_hashes_segments[i].count = 0;
	}

	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}