4adf22c15d6e1dbb00fb01bdb69e613236ff3291
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2015 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31 #include <processing/jobs/delete_ike_sa_job.h>
32
33 /* the default size of the hash table (MUST be a power of 2) */
34 #define DEFAULT_HASHTABLE_SIZE 1
35
36 /* the maximum size of the hash table (MUST be a power of 2) */
37 #define MAX_HASHTABLE_SIZE (1 << 30)
38
39 /* the default number of segments (MUST be a power of 2) */
40 #define DEFAULT_SEGMENT_COUNT 1
41
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Is this ike_sa currently checked out?
	 */
	bool checked_out;

	/**
	 * Does this SA drive out new threads?
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads?
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 * (the field is unsigned, so -1 is stored as the all-ones sentinel)
	 */
	u_int32_t processing;
};
116
117 /**
118 * Implementation of entry_t.destroy.
119 */
120 static status_t entry_destroy(entry_t *this)
121 {
122 /* also destroy IKE SA */
123 this->ike_sa->destroy(this->ike_sa);
124 this->ike_sa_id->destroy(this->ike_sa_id);
125 chunk_free(&this->init_hash);
126 DESTROY_IF(this->other);
127 DESTROY_IF(this->my_id);
128 DESTROY_IF(this->other_id);
129 this->condvar->destroy(this->condvar);
130 free(this);
131 return SUCCESS;
132 }
133
134 /**
135 * Creates a new entry for the ike_sa_t list.
136 */
137 static entry_t *entry_create()
138 {
139 entry_t *this;
140
141 INIT(this,
142 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
143 .processing = -1,
144 );
145
146 return this;
147 }
148
149 /**
150 * Function that matches entry_t objects by ike_sa_id_t.
151 */
152 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
153 {
154 if (id->equals(id, entry->ike_sa_id))
155 {
156 return TRUE;
157 }
158 if ((id->get_responder_spi(id) == 0 ||
159 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
160 (id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
161 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
162 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
163 {
164 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
165 return TRUE;
166 }
167 return FALSE;
168 }
169
/**
 * Function that matches entry_t objects by ike_sa_t pointers.
 * Pure pointer identity, no deep comparison.
 */
static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
{
	return entry->ike_sa == ike_sa;
}
177
178 /**
179 * Hash function for ike_sa_id_t objects.
180 */
181 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
182 {
183 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
184 * locally unique, so we use our randomly allocated SPI whether we are
185 * initiator or responder to ensure a good distribution. The latter is not
186 * possible for IKEv1 as we don't know whether we are original initiator or
187 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
188 * SPIs (Cookies) to be allocated near random (we allocate them randomly
189 * anyway) it seems safe to always use the initiator SPI. */
190 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
191 ike_sa_id->is_initiator(ike_sa_id))
192 {
193 return ike_sa_id->get_initiator_spi(ike_sa_id);
194 }
195 return ike_sa_id->get_responder_spi(ike_sa_id);
196 }
197
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer.
 * Keyed by the raw address chunk of the remote host.
 */
struct half_open_t {
	/** chunk of remote host address (cloned, owned by this struct) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};
213
/**
 * Destroys a half_open_t object.
 * The address chunk is a clone (see put_half_open()), so release it here.
 */
static void half_open_destroy(half_open_t *this)
{
	chunk_free(&this->other);
	free(this);
}
222
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct tracking all IKE_SAs between one pair of identities,
 * used for duplicate checking.
 */
struct connected_peers_t {
	/** own identity (cloned, owned by this struct) */
	identification_t *my_id;

	/** remote identity (cloned, owned by this struct) */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
238
/**
 * Destroys a connected_peers_t object, including both identities and
 * the SA list (the contained ike_sa_id_t objects are removed beforehand
 * in remove_connected_peers()).
 */
static void connected_peers_destroy(connected_peers_t *this)
{
	this->my_id->destroy(this->my_id);
	this->other_id->destroy(this->other_id);
	this->sas->destroy(this->sas);
	free(this);
}
246
247 /**
248 * Function that matches connected_peers_t objects by the given ids.
249 */
250 static inline bool connected_peers_match(connected_peers_t *connected_peers,
251 identification_t *my_id, identification_t *other_id,
252 int family)
253 {
254 return my_id->equals(my_id, connected_peers->my_id) &&
255 other_id->equals(other_id, connected_peers->other_id) &&
256 (!family || family == connected_peers->family);
257 }
258
typedef struct init_hash_t init_hash_t;

/**
 * Cache record mapping the hash of an initial IKE message to the local SPI
 * allocated for it, used to detect retransmissions of init messages.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};
268
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table (exclusive locking).
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment */
	u_int count;
};
281
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 * Uses a read/write lock instead of a mutex so lookups can run concurrently.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
295
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list, NULL at the end of a bucket */
	table_item_t *next;
};
309
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table (a power of 2).
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows (table_size - 1).
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table (each covers every segment_count-th row).
	 */
	segment_t *segments;

	/**
	 * The number of segments (a power of 2).
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment (segment_count - 1).
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects, keyed by remote address.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs.
	 */
	refcount_t half_open_count;

	/**
	 * Total number of half-open IKE_SAs as responder.
	 */
	refcount_t half_open_count_responder;

	/**
	 * Hash table with connected_peers_t objects, keyed by identity pair.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects, for init message retransmission
	 * detection.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * Lock to access the RNG instance
	 */
	rwlock_t *rng_lock;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, 0 if unlimited
	 */
	u_int ikesa_limit;
};
411
412 /**
413 * Acquire a lock to access the segment of the table row with the given index.
414 * It also works with the segment index directly.
415 */
416 static inline void lock_single_segment(private_ike_sa_manager_t *this,
417 u_int index)
418 {
419 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
420 lock->lock(lock);
421 }
422
423 /**
424 * Release the lock required to access the segment of the table row with the given index.
425 * It also works with the segment index directly.
426 */
427 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
428 u_int index)
429 {
430 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
431 lock->unlock(lock);
432 }
433
434 /**
435 * Lock all segments
436 */
437 static void lock_all_segments(private_ike_sa_manager_t *this)
438 {
439 u_int i;
440
441 for (i = 0; i < this->segment_count; i++)
442 {
443 this->segments[i].mutex->lock(this->segments[i].mutex);
444 }
445 }
446
447 /**
448 * Unlock all segments
449 */
450 static void unlock_all_segments(private_ike_sa_manager_t *this)
451 {
452 u_int i;
453
454 for (i = 0; i < this->segment_count; i++)
455 {
456 this->segments[i].mutex->unlock(this->segments[i].mutex);
457 }
458 }
459
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 *
 * Invariant: while "current" is set, the lock of the segment "segment"
 * is held by the enumerating thread (see enumerate()/enumerator_destroy()).
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry (condvar is signaled when moving on)
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item
	 */
	table_item_t *current;

	/**
	 * previous table item (needed by remove_entry_at() to unlink current)
	 */
	table_item_t *prev;
};
502
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	/* Enumerates all entries of the IKE_SA table. A segment lock is acquired
	 * when entering a row and held for as long as "current" is set, so the
	 * returned entry stays valid until the next enumerate() call. */
	if (this->entry)
	{
		/* we moved on, wake up threads waiting for the previous entry */
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{	/* advance within the bucket, segment lock already held */
				this->current = this->current->next;
			}
			else
			{	/* entering a new row: lock its segment first */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			/* row exhausted: release lock, jump to this segment's next row */
			unlock_single_segment(this->manager, this->segment);
			this->row += this->manager->segment_count;
		}
		/* segment i owns rows i, i+segment_count, i+2*segment_count, ... */
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
539
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	if (this->entry)
	{	/* wake up threads waiting for the entry we still reference */
		this->entry->condvar->signal(this->entry->condvar);
	}
	if (this->current)
	{	/* a set "current" means we still hold this segment's lock */
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
553
554 /**
555 * Creates an enumerator to enumerate the entries in the hash table.
556 */
557 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
558 {
559 private_enumerator_t *enumerator;
560
561 INIT(enumerator,
562 .enumerator = {
563 .enumerate = (void*)_enumerate,
564 .destroy = _enumerator_destroy,
565 },
566 .manager = this,
567 );
568 return &enumerator->enumerator;
569 }
570
571 /**
572 * Put an entry into the hash table.
573 * Note: The caller has to unlock the returned segment.
574 */
575 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
576 {
577 table_item_t *current, *item;
578 u_int row, segment;
579
580 INIT(item,
581 .value = entry,
582 );
583
584 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
585 segment = row & this->segment_mask;
586
587 lock_single_segment(this, segment);
588 current = this->ike_sa_table[row];
589 if (current)
590 { /* insert at the front of current bucket */
591 item->next = current;
592 }
593 this->ike_sa_table[row] = item;
594 this->segments[segment].count++;
595 return segment;
596 }
597
598 /**
599 * Remove an entry from the hash table.
600 * Note: The caller MUST have a lock on the segment of this entry.
601 */
602 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
603 {
604 table_item_t *item, *prev = NULL;
605 u_int row, segment;
606
607 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
608 segment = row & this->segment_mask;
609 item = this->ike_sa_table[row];
610 while (item)
611 {
612 if (item->value == entry)
613 {
614 if (prev)
615 {
616 prev->next = item->next;
617 }
618 else
619 {
620 this->ike_sa_table[row] = item->next;
621 }
622 this->segments[segment].count--;
623 free(item);
624 break;
625 }
626 prev = item;
627 item = item->next;
628 }
629 }
630
/**
 * Remove the entry at the current enumerator position.
 *
 * Note the asymmetric unlock: when the removed item was the head of its
 * bucket, "current" becomes NULL and the segment lock is released here,
 * preserving the invariant that the lock is held iff "current" is set
 * (see enumerate()/enumerator_destroy()).
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		/* step back so the next enumerate() advances past the removed item */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
656
657 /**
658 * Find an entry using the provided match function to compare the entries for
659 * equality.
660 */
661 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
662 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
663 linked_list_match_t match, void *param)
664 {
665 table_item_t *item;
666 u_int row, seg;
667
668 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
669 seg = row & this->segment_mask;
670
671 lock_single_segment(this, seg);
672 item = this->ike_sa_table[row];
673 while (item)
674 {
675 if (match(item->value, param))
676 {
677 *entry = item->value;
678 *segment = seg;
679 /* the locked segment has to be unlocked by the caller */
680 return SUCCESS;
681 }
682 item = item->next;
683 }
684 unlock_single_segment(this, seg);
685 return NOT_FOUND;
686 }
687
/**
 * Find an entry by ike_sa_id_t (uses entry_match_by_id(), so half-completed
 * initiated SAs match too).
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
						ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_id, ike_sa_id);
}
698
/**
 * Find an entry by IKE_SA pointer (the id is only used to select the row).
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
			ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_sa, ike_sa);
}
709
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 *
 * Must be called with the entry's segment mutex held; the condvar wait
 * releases and re-acquires that mutex.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
739
/**
 * Put a half-open SA into the hash table, keyed by the remote address.
 * Increments the per-host and global half-open counters; responder-side
 * SAs are additionally counted separately (for DoS detection).
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	/* look for an existing record for this host */
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first half-open SA with this host: create a record, clone the
		 * address chunk as the host object is not owned by us */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	if (!ike_id->is_initiator(ike_id))
	{
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
791
/**
 * Remove a half-open SA from the hash table, decrementing the counters
 * put_half_open() incremented. The per-host record is destroyed once its
 * count drops to zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{	/* last half-open SA with this host: unlink and destroy */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
843
/**
 * Put an SA between two peers into the hash table, keyed by the pair of
 * identities. Adding the same ike_sa_id_t twice is a no-op.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
					(linked_list_match_t)entry->ike_sa_id->equals,
					NULL, entry->ike_sa_id) == SUCCESS)
			{	/* this SA is already registered for the pair */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first SA between this pair of identities: create a record,
		 * cloning the identities as the entry does not outlive us */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
902
/**
 * Remove an SA between two peers from the hash table. The per-pair record
 * is destroyed once its SA list becomes empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* remove this SA's id from the pair's list (ids were cloned
			 * in put_connected_peers(), so destroy the removed one) */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no SAs left between this pair: unlink and destroy record */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
966
967 /**
968 * Get a random SPI for new IKE_SAs
969 */
970 static u_int64_t get_spi(private_ike_sa_manager_t *this)
971 {
972 u_int64_t spi;
973
974 this->rng_lock->read_lock(this->rng_lock);
975 if (!this->rng ||
976 !this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
977 {
978 spi = 0;
979 }
980 this->rng_lock->unlock(this->rng_lock);
981 return spi;
982 }
983
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * For IKEv1 fragments only source IP, port and SPI are hashed; for Main
 * Mode the source address is mixed in before hashing the packet data.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		u_int16_t port;
		u_int64_t spi;

		/* feed address and port into the hasher state (NULL = no output yet) */
		src = message->get_source(message);
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1024
1025 /**
1026 * Check if we already have created an IKE_SA based on the initial IKE message
1027 * with the given hash.
1028 * If not the hash is stored, the hash data is not(!) cloned.
1029 *
1030 * Also, the local SPI is returned. In case of a retransmit this is already
1031 * stored together with the hash, otherwise it is newly allocated and should
1032 * be used to create the IKE_SA.
1033 *
1034 * @returns ALREADY_DONE if the message with the given hash has been seen before
1035 * NOT_FOUND if the message hash was not found
1036 * FAILED if the SPI allocation failed
1037 */
1038 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1039 chunk_t init_hash, u_int64_t *our_spi)
1040 {
1041 table_item_t *item;
1042 u_int row, segment;
1043 mutex_t *mutex;
1044 init_hash_t *init;
1045 u_int64_t spi;
1046
1047 row = chunk_hash(init_hash) & this->table_mask;
1048 segment = row & this->segment_mask;
1049 mutex = this->init_hashes_segments[segment].mutex;
1050 mutex->lock(mutex);
1051 item = this->init_hashes_table[row];
1052 while (item)
1053 {
1054 init_hash_t *current = item->value;
1055
1056 if (chunk_equals(init_hash, current->hash))
1057 {
1058 *our_spi = current->our_spi;
1059 mutex->unlock(mutex);
1060 return ALREADY_DONE;
1061 }
1062 item = item->next;
1063 }
1064
1065 spi = get_spi(this);
1066 if (!spi)
1067 {
1068 return FAILED;
1069 }
1070
1071 INIT(init,
1072 .hash = {
1073 .len = init_hash.len,
1074 .ptr = init_hash.ptr,
1075 },
1076 .our_spi = spi,
1077 );
1078 INIT(item,
1079 .value = init,
1080 .next = this->init_hashes_table[row],
1081 );
1082 this->init_hashes_table[row] = item;
1083 *our_spi = init->our_spi;
1084 mutex->unlock(mutex);
1085 return NOT_FOUND;
1086 }
1087
1088 /**
1089 * Remove the hash of an initial IKE message from the cache.
1090 */
1091 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1092 {
1093 table_item_t *item, *prev = NULL;
1094 u_int row, segment;
1095 mutex_t *mutex;
1096
1097 row = chunk_hash(init_hash) & this->table_mask;
1098 segment = row & this->segment_mask;
1099 mutex = this->init_hashes_segments[segment].mutex;
1100 mutex->lock(mutex);
1101 item = this->init_hashes_table[row];
1102 while (item)
1103 {
1104 init_hash_t *current = item->value;
1105
1106 if (chunk_equals(init_hash, current->hash))
1107 {
1108 if (prev)
1109 {
1110 prev->next = item->next;
1111 }
1112 else
1113 {
1114 this->init_hashes_table[row] = item->next;
1115 }
1116 free(current);
1117 free(item);
1118 break;
1119 }
1120 prev = item;
1121 item = item->next;
1122 }
1123 mutex->unlock(mutex);
1124 }
1125
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	/* Check out the IKE_SA with the given id for exclusive use by this
	 * thread, blocking until it is available; returns NULL if not found
	 * or the entry is being driven out. */
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA");

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		if (wait_for_entry(this, entry, segment))
		{
			entry->checked_out = TRUE;
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		/* get_entry_by_id() left the segment locked on SUCCESS */
		unlock_single_segment(this, segment);
	}
	/* publish the checked-out SA (or NULL) on the bus for this thread */
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1149
1150 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1151 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1152 {
1153 ike_sa_id_t *ike_sa_id;
1154 ike_sa_t *ike_sa;
1155 u_int8_t ike_version;
1156 u_int64_t spi;
1157
1158 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1159
1160 spi = get_spi(this);
1161 if (!spi)
1162 {
1163 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1164 return NULL;
1165 }
1166
1167 if (initiator)
1168 {
1169 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1170 }
1171 else
1172 {
1173 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1174 }
1175 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1176 ike_sa_id->destroy(ike_sa_id);
1177
1178 if (ike_sa)
1179 {
1180 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1181 ike_sa->get_unique_id(ike_sa));
1182 }
1183 return ike_sa;
1184 }
1185
1186 /**
1187 * Get the message ID or message hash to detect early retransmissions
1188 */
1189 static u_int32_t get_message_id_or_hash(message_t *message)
1190 {
1191 /* Use the message ID, or the message hash in IKEv1 Main/Aggressive mode */
1192 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION &&
1193 message->get_message_id(message) == 0)
1194 {
1195 return chunk_hash(message->get_packet_data(message));
1196 }
1197 return message->get_message_id(message);
1198 }
1199
/*
 * Check out the IKE_SA an inbound message belongs to.  For initial messages
 * (IKEv2 IKE_SA_INIT requests, IKEv1 Main/Aggressive mode) a new IKE_SA is
 * created, unless the message is a retransmit of an init message we already
 * handled.  Returns NULL if the message should be ignored (hashing failed,
 * SPI allocation failed, IKE_SA limit reached, unknown SPI, or the SA is
 * already processing this very message).
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout IKE_SA by message");

	/* detect initial messages: no responder SPI assigned yet and MID 0 */
	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		u_int64_t our_spi;
		chunk_t hash;

		/* hash the raw init message so retransmits can be recognized */
		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			return NULL;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						/* the entry takes ownership of the cloned id */
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;

						segment = put_entry(this, entry);
						entry->checked_out = TRUE;
						unlock_single_segment(this, segment);

						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));

						charon->bus->set_sa(charon->bus, ike_sa);
						return ike_sa;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit: undo the init-hash entry */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				return NULL;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				return NULL;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree,
		 * apply the previously allocated SPI and fall through to the regular
		 * lookup below */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = TRUE;
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1359
1360 METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
1361 private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
1362 {
1363 enumerator_t *enumerator;
1364 entry_t *entry;
1365 ike_sa_t *ike_sa = NULL;
1366 peer_cfg_t *current_peer;
1367 ike_cfg_t *current_ike;
1368 u_int segment;
1369
1370 DBG2(DBG_MGR, "checkout IKE_SA by config");
1371
1372 if (!this->reuse_ikesa)
1373 { /* IKE_SA reuse disable by config */
1374 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1375 charon->bus->set_sa(charon->bus, ike_sa);
1376 return ike_sa;
1377 }
1378
1379 enumerator = create_table_enumerator(this);
1380 while (enumerator->enumerate(enumerator, &entry, &segment))
1381 {
1382 if (!wait_for_entry(this, entry, segment))
1383 {
1384 continue;
1385 }
1386 if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
1387 { /* skip IKE_SAs which are not usable */
1388 continue;
1389 }
1390
1391 current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
1392 if (current_peer && current_peer->equals(current_peer, peer_cfg))
1393 {
1394 current_ike = current_peer->get_ike_cfg(current_peer);
1395 if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
1396 {
1397 entry->checked_out = TRUE;
1398 ike_sa = entry->ike_sa;
1399 DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
1400 ike_sa->get_unique_id(ike_sa),
1401 current_peer->get_name(current_peer));
1402 break;
1403 }
1404 }
1405 }
1406 enumerator->destroy(enumerator);
1407
1408 if (!ike_sa)
1409 { /* no IKE_SA using such a config, hand out a new */
1410 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1411 }
1412 charon->bus->set_sa(charon->bus, ike_sa);
1413 return ike_sa;
1414 }
1415
1416 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1417 private_ike_sa_manager_t *this, u_int32_t id)
1418 {
1419 enumerator_t *enumerator;
1420 entry_t *entry;
1421 ike_sa_t *ike_sa = NULL;
1422 u_int segment;
1423
1424 DBG2(DBG_MGR, "checkout IKE_SA by ID %u", id);
1425
1426 enumerator = create_table_enumerator(this);
1427 while (enumerator->enumerate(enumerator, &entry, &segment))
1428 {
1429 if (wait_for_entry(this, entry, segment))
1430 {
1431 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1432 {
1433 ike_sa = entry->ike_sa;
1434 entry->checked_out = TRUE;
1435 break;
1436 }
1437 }
1438 }
1439 enumerator->destroy(enumerator);
1440
1441 if (ike_sa)
1442 {
1443 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1444 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1445 }
1446 charon->bus->set_sa(charon->bus, ike_sa);
1447 return ike_sa;
1448 }
1449
/*
 * Check out an IKE_SA by connection name, or, if child is TRUE, by the
 * policy name of one of its CHILD_SAs.  Scans the whole table; returns the
 * first match, or NULL if no entry matches.
 */
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
	private_ike_sa_manager_t *this, char *name, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a policy name ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (streq(child_sa->get_name(child_sa), name))
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a connection name */
			{
				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = TRUE;
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
		}
	}
	enumerator->destroy(enumerator);

	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1500
1501 /**
1502 * enumerator filter function, waiting variant
1503 */
1504 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1505 entry_t **in, ike_sa_t **out, u_int *segment)
1506 {
1507 if (wait_for_entry(this, *in, *segment))
1508 {
1509 *out = (*in)->ike_sa;
1510 charon->bus->set_sa(charon->bus, *out);
1511 return TRUE;
1512 }
1513 return FALSE;
1514 }
1515
1516 /**
1517 * enumerator filter function, skipping variant
1518 */
1519 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1520 entry_t **in, ike_sa_t **out, u_int *segment)
1521 {
1522 if (!(*in)->driveout_new_threads &&
1523 !(*in)->driveout_waiting_threads &&
1524 !(*in)->checked_out)
1525 {
1526 *out = (*in)->ike_sa;
1527 charon->bus->set_sa(charon->bus, *out);
1528 return TRUE;
1529 }
1530 return FALSE;
1531 }
1532
/**
 * Reset the thread's current IKE_SA on the bus after enumeration
 * (cleanup callback of create_enumerator; data is unused)
 */
static void reset_sa(void *data)
{
	charon->bus->set_sa(charon->bus, NULL);
}
1540
METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
	private_ike_sa_manager_t* this, bool wait)
{
	/* the filter either blocks until an entry becomes available (wait) or
	 * skips checked-out/doomed entries; reset_sa clears the thread's
	 * current SA on the bus when the enumerator is destroyed */
	return enumerator_create_filter(create_table_enumerator(this),
		wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
		this, reset_sa);
}
1548
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = FALSE;
		entry->processing = -1;
		/* maintain the half-open SA bookkeeping based on the SA's state */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		DBG2(DBG_MGR, "check-in of IKE_SA successful.");
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* no entry found: the SA was never in the table, create a new entry
		 * for it */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		segment = put_entry(this, entry);
	}

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_uniqueness() will try to check out this SA */
				entry->checked_out = TRUE;
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = FALSE;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1668
/*
 * Check an IKE_SA back in and destroy it, removing its entry from the
 * manager.  Waiting threads are driven out before the entry is removed.
 */
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
			entry->checked_out = FALSE;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* drop the entry from all auxiliary lookup tables it is part of */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1737
/**
 * Cleanup function for create_id_enumerator, destroys the cloned list of
 * IKE_SA IDs together with its elements
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1745
METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
	private_ike_sa_manager_t *this, identification_t *me,
	identification_t *other, int family)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	linked_list_t *ids = NULL;

	/* hash both identities to locate the connected-peers bucket */
	row = chunk_hash_inc(other->get_encoding(other),
						 chunk_hash(me->get_encoding(me))) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->read_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, me, other, family))
		{
			/* clone the ID list so the enumerator can outlive the lock */
			ids = current->sas->clone_offset(current->sas,
											 offsetof(ike_sa_id_t, clone));
			break;
		}
		item = item->next;
	}
	lock->unlock(lock);

	if (!ids)
	{
		return enumerator_create_empty();
	}
	return enumerator_create_cleaner(ids->create_enumerator(ids),
									 (void*)id_enumerator_cleanup, ids);
}
1783
/**
 * Move all CHILD_SAs and virtual IPs from old to new
 *
 * CHILD_SAs are detached from the old SA and attached to the new one,
 * virtual IPs are copied over, and the matching migrate/assign_vips events
 * are raised on the bus for both SAs.
 */
static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
{
	enumerator_t *enumerator;
	child_sa_t *child_sa;
	host_t *vip;
	int chcount = 0, vipcount = 0;

	charon->bus->children_migrate(charon->bus, new->get_id(new),
								  new->get_unique_id(new));
	enumerator = old->create_child_sa_enumerator(old);
	while (enumerator->enumerate(enumerator, &child_sa))
	{
		old->remove_child_sa(old, enumerator);
		new->add_child_sa(new, child_sa);
		chcount++;
	}
	enumerator->destroy(enumerator);

	enumerator = old->create_virtual_ip_enumerator(old, FALSE);
	while (enumerator->enumerate(enumerator, &vip))
	{
		new->add_virtual_ip(new, FALSE, vip);
		vipcount++;
	}
	enumerator->destroy(enumerator);
	/* this does not release the addresses, which is good, but it does trigger
	 * an assign_vips(FALSE) event... */
	old->clear_virtual_ips(old, FALSE);
	/* ...trigger the analogous event on the new SA */
	charon->bus->set_sa(charon->bus, new);
	charon->bus->assign_vips(charon->bus, new, TRUE);
	charon->bus->children_migrate(charon->bus, NULL, 0);
	charon->bus->set_sa(charon->bus, old);

	if (chcount || vipcount)
	{
		DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
			 "children and %d virtual IPs", chcount, vipcount);
	}
}
1827
/**
 * Delete an existing IKE_SA due to a unique replace policy
 *
 * If the duplicate comes from the same host this looks like a
 * reauthentication: for IKEv1 the children and virtual IPs are adopted by
 * the new SA, and the delete of the old SA is delayed so overlapping SAs
 * keep connectivity up.  Otherwise the duplicate is deleted immediately.
 *
 * @return		SUCCESS for a delayed delete, result of delete() otherwise
 */
static status_t enforce_replace(private_ike_sa_manager_t *this,
								ike_sa_t *duplicate, ike_sa_t *new,
								identification_t *other, host_t *host)
{
	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);

	if (host->equals(host, duplicate->get_other_host(duplicate)))
	{
		/* looks like a reauthentication attempt */
		if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
			new->get_version(new) == IKEV1)
		{
			/* IKEv1 implicitly takes over children, IKEv2 recreates them
			 * explicitly. */
			adopt_children_and_vips(duplicate, new);
		}
		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
		 * peers need to complete the new SA first, otherwise the quick modes
		 * might get lost. For IKEv2 we do the same, as we want overlapping
		 * CHILD_SAs to keep connectivity up. */
		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
		return SUCCESS;
	}
	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
		 "uniqueness policy", other);
	return duplicate->delete(duplicate);
}
1859
/*
 * Enforce the peer config's uniqueness policy against established SAs with
 * the same identities.  Returns TRUE if the new IKE_SA should be abandoned
 * (UNIQUE_KEEP with an existing SA that is preferred over the new one).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	/* walk all known SAs between the same pair of identities */
	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{	/* INITIAL_CONTACT received, destroy the duplicate outright */
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
											duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
1947
1948 METHOD(ike_sa_manager_t, has_contact, bool,
1949 private_ike_sa_manager_t *this, identification_t *me,
1950 identification_t *other, int family)
1951 {
1952 table_item_t *item;
1953 u_int row, segment;
1954 rwlock_t *lock;
1955 bool found = FALSE;
1956
1957 row = chunk_hash_inc(other->get_encoding(other),
1958 chunk_hash(me->get_encoding(me))) & this->table_mask;
1959 segment = row & this->segment_mask;
1960 lock = this->connected_peers_segments[segment].lock;
1961 lock->read_lock(lock);
1962 item = this->connected_peers_table[row];
1963 while (item)
1964 {
1965 if (connected_peers_match(item->value, me, other, family))
1966 {
1967 found = TRUE;
1968 break;
1969 }
1970 item = item->next;
1971 }
1972 lock->unlock(lock);
1973
1974 return found;
1975 }
1976
1977 METHOD(ike_sa_manager_t, get_count, u_int,
1978 private_ike_sa_manager_t *this)
1979 {
1980 u_int segment, count = 0;
1981 mutex_t *mutex;
1982
1983 for (segment = 0; segment < this->segment_count; segment++)
1984 {
1985 mutex = this->segments[segment & this->segment_mask].mutex;
1986 mutex->lock(mutex);
1987 count += this->segments[segment].count;
1988 mutex->unlock(mutex);
1989 }
1990 return count;
1991 }
1992
/*
 * Get the number of half-open IKE_SAs, either globally or for a single peer
 * address; responder_only restricts the count to SAs where we act as
 * responder.
 */
METHOD(ike_sa_manager_t, get_half_open_count, u_int,
	private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t addr;
	u_int count = 0;

	if (ip)
	{	/* look up the per-address counter in the half-open table */
		addr = ip->get_address(ip);
		row = chunk_hash(addr) & this->table_mask;
		segment = row & this->segment_mask;
		lock = this->half_open_segments[segment].lock;
		lock->read_lock(lock);
		item = this->half_open_table[row];
		while (item)
		{
			half_open_t *half_open = item->value;

			if (chunk_equals(addr, half_open->other))
			{
				count = responder_only ? half_open->count_responder
									   : half_open->count;
				break;
			}
			item = item->next;
		}
		lock->unlock(lock);
	}
	else
	{	/* no address given, return the global counters */
		count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
							   : (u_int)ref_cur(&this->half_open_count);
	}
	return count;
}
2031
/*
 * Shut the manager down: drive all threads out of the stored IKE_SAs,
 * initiate deletion of every SA, destroy all entries and finally the RNG.
 * All segments stay locked for the whole operation.
 */
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* destroy the manager's RNG; checked for NULL by its users */
	this->rng_lock->write_lock(this->rng_lock);
	this->rng->destroy(this->rng);
	this->rng = NULL;
	this->rng_lock->unlock(this->rng_lock);
}
2119
2120 METHOD(ike_sa_manager_t, destroy, void,
2121 private_ike_sa_manager_t *this)
2122 {
2123 u_int i;
2124
2125 /* these are already cleared in flush() above */
2126 free(this->ike_sa_table);
2127 free(this->half_open_table);
2128 free(this->connected_peers_table);
2129 free(this->init_hashes_table);
2130 for (i = 0; i < this->segment_count; i++)
2131 {
2132 this->segments[i].mutex->destroy(this->segments[i].mutex);
2133 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
2134 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
2135 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
2136 }
2137 free(this->segments);
2138 free(this->half_open_segments);
2139 free(this->connected_peers_segments);
2140 free(this->init_hashes_segments);
2141
2142 this->rng_lock->destroy(this->rng_lock);
2143 free(this);
2144 }
2145
/**
 * Round a number up to the next-highest power of two.
 *
 * Smears the most significant set bit into all lower positions, then
 * increments so the value rolls over to the nearest power of two.  Numbers
 * that already are a power of two are returned unchanged.
 * Note: returns 0 for n == 0
 */
static u_int get_nearest_powerof2(u_int n)
{
	u_int bits;

	n--;
	for (bits = 1; bits < sizeof(u_int) * 8; bits <<= 1)
	{
		n |= n >> bits;
	}
	return n + 1;
}
2163
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.destroy = _destroy,
		},
	);

	/* the manager does not work without an RNG, fail hard if none exists */
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}
	this->rng_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	/* optional limit on the number of concurrent IKE_SAs, 0 = unlimited */
	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* table size and segment count are rounded up to powers of two so
	 * bucket selection works with masking instead of modulo */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
									lib->settings, "%s.ikesa_table_size",
									DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
									lib->settings, "%s.ikesa_table_segments",
									DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->segments[i].count = 0;
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->half_open_segments[i].count = 0;
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->connected_peers_segments[i].count = 0;
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->init_hashes_segments[i].count = 0;
	}

	/* whether checkout_by_config() may reuse existing IKE_SAs */
	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}