389cbfe3bd866969f786b72963b7b0e0f0f1cd11
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2015 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31 #include <processing/jobs/delete_ike_sa_job.h>
32
/* the default size of the hash table (MUST be a power of 2) */
#define DEFAULT_HASHTABLE_SIZE 1

/* the maximum size of the hash table (MUST be a power of 2) */
#define MAX_HASHTABLE_SIZE (1 << 30)

/* the default number of segments (MUST be a power of 2) */
#define DEFAULT_SEGMENT_COUNT 1
/* NOTE(review): defaults of 1 look like minimal fallbacks; presumably the
 * effective sizes come from configuration where the manager is created —
 * confirm at the constructor (not visible in this chunk) */
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Is this ike_sa currently checked out (i.e. exclusively held by a thread)?
	 */
	bool checked_out;

	/**
	 * Does this SA drive out new threads? (new checkout attempts fail)
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads? (threads blocked on the condvar give up)
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 * (stored in an unsigned field, so the sentinel is effectively UINT32_MAX)
	 */
	u_int32_t processing;
};
116
117 /**
118 * Implementation of entry_t.destroy.
119 */
120 static status_t entry_destroy(entry_t *this)
121 {
122 /* also destroy IKE SA */
123 this->ike_sa->destroy(this->ike_sa);
124 this->ike_sa_id->destroy(this->ike_sa_id);
125 chunk_free(&this->init_hash);
126 DESTROY_IF(this->other);
127 DESTROY_IF(this->my_id);
128 DESTROY_IF(this->other_id);
129 this->condvar->destroy(this->condvar);
130 free(this);
131 return SUCCESS;
132 }
133
/**
 * Creates a new entry for the ike_sa_t list.
 *
 * All members not listed are zero-initialized by INIT; 'processing' starts
 * at -1 to indicate no message is currently being processed.
 */
static entry_t *entry_create()
{
	entry_t *this;

	INIT(this,
		.condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
		.processing = -1,
	);

	return this;
}
148
/**
 * Function that matches entry_t objects by ike_sa_id_t.
 *
 * Matches either on full SPI equality, or - if one side's responder SPI is
 * still zero - on the initiator SPI alone, which covers IKE_SAs we initiated
 * but for which no response has been received yet.
 */
static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
{
	/* exact match on both SPIs (and initiator flag) */
	if (id->equals(id, entry->ike_sa_id))
	{
		return TRUE;
	}
	/* for IKEv1 the initiator flag is not derivable from the header, so it
	 * is ignored there; for IKEv2 it has to agree */
	if ((id->get_responder_spi(id) == 0 ||
		 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
		(id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
		 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
		id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
	{
		/* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
		return TRUE;
	}
	return FALSE;
}
169
170 /**
171 * Function that matches entry_t objects by ike_sa_t pointers.
172 */
173 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
174 {
175 return entry->ike_sa == ike_sa;
176 }
177
178 /**
179 * Hash function for ike_sa_id_t objects.
180 */
181 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
182 {
183 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
184 * locally unique, so we use our randomly allocated SPI whether we are
185 * initiator or responder to ensure a good distribution. The latter is not
186 * possible for IKEv1 as we don't know whether we are original initiator or
187 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
188 * SPIs (Cookies) to be allocated near random (we allocate them randomly
189 * anyway) it seems safe to always use the initiator SPI. */
190 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
191 ike_sa_id->is_initiator(ike_sa_id))
192 {
193 return ike_sa_id->get_initiator_spi(ike_sa_id);
194 }
195 return ike_sa_id->get_responder_spi(ike_sa_id);
196 }
197
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer (keyed by remote address).
 */
struct half_open_t {
	/** chunk of remote host address (cloned, owned by this struct) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};
213
/**
 * Destroys a half_open_t object, releasing the cloned address chunk.
 */
static void half_open_destroy(half_open_t *this)
{
	chunk_free(&this->other);
	free(this);
}
222
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct to track established IKE_SAs between a pair of identities,
 * used for duplicate checking.
 */
struct connected_peers_t {
	/** own identity (cloned, owned by this struct) */
	identification_t *my_id;

	/** remote identity (cloned, owned by this struct) */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
238
239 static void connected_peers_destroy(connected_peers_t *this)
240 {
241 this->my_id->destroy(this->my_id);
242 this->other_id->destroy(this->other_id);
243 this->sas->destroy(this->sas);
244 free(this);
245 }
246
247 /**
248 * Function that matches connected_peers_t objects by the given ids.
249 */
250 static inline bool connected_peers_match(connected_peers_t *connected_peers,
251 identification_t *my_id, identification_t *other_id,
252 int family)
253 {
254 return my_id->equals(my_id, connected_peers->my_id) &&
255 other_id->equals(other_id, connected_peers->other_id) &&
256 (!family || family == connected_peers->family);
257 }
258
typedef struct init_hash_t init_hash_t;

/**
 * Cached hash of an initial IKE message, used to detect retransmits.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};
268
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table (lock striping).
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment */
	u_int count;
};
281
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 * Uses a rwlock instead of a mutex to allow shared read access.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
295
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked overflow list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list, NULL at the end of the bucket */
	table_item_t *next;
};
309
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table.
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows
	 * (presumably table_size - 1, as sizes are powers of two — set elsewhere).
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table (lock striping over the rows).
	 */
	segment_t *segments;

	/**
	 * The number of segments.
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment.
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects, keyed by remote address.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs.
	 */
	refcount_t half_open_count;

	/**
	 * Total number of half-open IKE_SAs as responder.
	 */
	refcount_t half_open_count_responder;

	/**
	 * Hash table with connected_peers_t objects, keyed by the identity pair.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects, for retransmit detection.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * Lock to access the RNG instance
	 */
	rwlock_t *rng_lock;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, if any (0 means unlimited)
	 */
	u_int ikesa_limit;
};
411
412 /**
413 * Acquire a lock to access the segment of the table row with the given index.
414 * It also works with the segment index directly.
415 */
416 static inline void lock_single_segment(private_ike_sa_manager_t *this,
417 u_int index)
418 {
419 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
420 lock->lock(lock);
421 }
422
423 /**
424 * Release the lock required to access the segment of the table row with the given index.
425 * It also works with the segment index directly.
426 */
427 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
428 u_int index)
429 {
430 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
431 lock->unlock(lock);
432 }
433
434 /**
435 * Lock all segments
436 */
437 static void lock_all_segments(private_ike_sa_manager_t *this)
438 {
439 u_int i;
440
441 for (i = 0; i < this->segment_count; i++)
442 {
443 this->segments[i].mutex->lock(this->segments[i].mutex);
444 }
445 }
446
447 /**
448 * Unlock all segments
449 */
450 static void unlock_all_segments(private_ike_sa_manager_t *this)
451 {
452 u_int i;
453
454 for (i = 0; i < this->segment_count; i++)
455 {
456 this->segments[i].mutex->unlock(this->segments[i].mutex);
457 }
458 }
459
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry (threads waiting on its condvar are
	 * signalled when the enumerator moves on or is destroyed)
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item (while non-NULL, the lock of the current segment
	 * is held by this enumerator)
	 */
	table_item_t *current;

	/**
	 * previous table item (needed by remove_entry_at to unlink the current)
	 */
	table_item_t *prev;
};
502
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	/* release the entry handed out by the previous call: signal threads
	 * waiting on its condvar (the segment lock is still held here) */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{	/* advance within the current bucket's overflow list */
				this->current = this->current->next;
			}
			else
			{	/* entering a new row: acquire its segment lock first */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{	/* keep the segment locked while the caller uses the entry;
				 * it is released when we move past this row or on destroy */
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			/* rows of a segment are interleaved: row % segment_count == segment */
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
539
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	if (this->entry)
	{	/* wake up threads waiting for the entry we still hold */
		this->entry->condvar->signal(this->entry->condvar);
	}
	if (this->current)
	{	/* the segment of the current row is still locked, release it */
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
553
/**
 * Creates an enumerator to enumerate the entries in the hash table.
 *
 * The returned enumerator locks one segment at a time; it must be
 * destroyed to release any lock still held.
 */
static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
{
	private_enumerator_t *enumerator;

	INIT(enumerator,
		.enumerator = {
			.enumerate = (void*)_enumerate,
			.destroy = _enumerator_destroy,
		},
		.manager = this,
	);
	return &enumerator->enumerator;
}
570
571 /**
572 * Put an entry into the hash table.
573 * Note: The caller has to unlock the returned segment.
574 */
575 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
576 {
577 table_item_t *current, *item;
578 u_int row, segment;
579
580 INIT(item,
581 .value = entry,
582 );
583
584 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
585 segment = row & this->segment_mask;
586
587 lock_single_segment(this, segment);
588 current = this->ike_sa_table[row];
589 if (current)
590 { /* insert at the front of current bucket */
591 item->next = current;
592 }
593 this->ike_sa_table[row] = item;
594 this->segments[segment].count++;
595 return segment;
596 }
597
/**
 * Remove an entry from the hash table.
 * Note: The caller MUST have a lock on the segment of this entry.
 *
 * Only unlinks and frees the table item; the entry itself is not destroyed.
 */
static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;

	row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
	segment = row & this->segment_mask;
	item = this->ike_sa_table[row];
	while (item)
	{
		if (item->value == entry)
		{	/* unlink from the bucket's overflow list */
			if (prev)
			{
				prev->next = item->next;
			}
			else
			{
				this->ike_sa_table[row] = item->next;
			}
			this->segments[segment].count--;
			free(item);
			break;
		}
		prev = item;
		item = item->next;
	}
}
630
/**
 * Remove the entry at the current enumerator position.
 *
 * Keeps the enumerator state consistent: current is moved back to prev so
 * the next enumerate() call advances correctly.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{	/* removed the bucket head: current is now NULL, so the next
			 * enumerate() call will re-lock this segment - release it here
			 * to avoid a recursive lock */
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
656
/**
 * Find an entry using the provided match function to compare the entries for
 * equality.
 *
 * The row is derived from ike_sa_id, so the match function is only applied
 * to candidates within the corresponding bucket.
 */
static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
					ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
					linked_list_match_t match, void *param)
{
	table_item_t *item;
	u_int row, seg;

	row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
	seg = row & this->segment_mask;

	lock_single_segment(this, seg);
	item = this->ike_sa_table[row];
	while (item)
	{
		if (match(item->value, param))
		{
			*entry = item->value;
			*segment = seg;
			/* the locked segment has to be unlocked by the caller */
			return SUCCESS;
		}
		item = item->next;
	}
	unlock_single_segment(this, seg);
	return NOT_FOUND;
}
687
/**
 * Find an entry by ike_sa_id_t (allows matching half-completed SPIs,
 * see entry_match_by_id).
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
						ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_id, ike_sa_id);
}
698
/**
 * Find an entry by IKE_SA pointer; ike_sa_id is still needed to locate
 * the hash table row.
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
			ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_sa, ike_sa);
}
709
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 *
 * Must be called with the entry's segment mutex held; the condvar wait
 * atomically releases and reacquires that mutex.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
739
/**
 * Put a half-open SA into the hash table.
 *
 * Increments the per-address and global half-open counters; creates a new
 * half_open_t bucket entry for the peer's address if none exists yet.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	/* look for an existing counter for this remote address */
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first half-open SA for this address, prepend a new bucket entry */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	if (!ike_id->is_initiator(ike_id))
	{	/* SAs we merely responded to are counted separately */
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
791
/**
 * Remove a half-open SA from the hash table.
 *
 * Decrements the counters incremented by put_half_open() and destroys the
 * per-address half_open_t once its count drops to zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{	/* no more half-open SAs with this address, unlink and free */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
843
/**
 * Put an SA between two peers into the hash table.
 *
 * Stores a clone of the entry's IKE_SA ID in the connected_peers_t of the
 * identity pair, creating that object first if it does not exist. Does
 * nothing if the ID is already registered.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	/* row is derived from both identity encodings */
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
					(linked_list_match_t)entry->ike_sa_id->equals,
					NULL, entry->ike_sa_id) == SUCCESS)
			{	/* this IKE_SA ID is already registered for the pair */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first SA between these identities, prepend a new bucket entry */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
902
/**
 * Remove an SA between two peers from the hash table.
 *
 * Removes the entry's IKE_SA ID from the identity pair's list and destroys
 * the connected_peers_t once its list becomes empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	/* must match the row computation in put_connected_peers() */
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* find and remove the cloned IKE_SA ID from the pair's list */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no more SAs between these identities, unlink and free */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
966
967 /**
968 * Get a random SPI for new IKE_SAs
969 */
970 static u_int64_t get_spi(private_ike_sa_manager_t *this)
971 {
972 u_int64_t spi;
973
974 this->rng_lock->read_lock(this->rng_lock);
975 if (!this->rng ||
976 !this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
977 {
978 spi = 0;
979 }
980 this->rng_lock->unlock(this->rng_lock);
981 return spi;
982 }
983
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * @param hasher	hasher to use (not destroyed here, caller retains ownership)
 * @param message	initial IKE message to hash
 * @param hash		receives the allocated hash on success
 * @returns 		TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		u_int16_t port;
		u_int64_t spi;

		src = message->get_source(message);
		/* NULL hash argument appends data to the hasher state without
		 * finalizing it */
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1024
1025 /**
1026 * Check if we already have created an IKE_SA based on the initial IKE message
1027 * with the given hash.
1028 * If not the hash is stored, the hash data is not(!) cloned.
1029 *
1030 * Also, the local SPI is returned. In case of a retransmit this is already
1031 * stored together with the hash, otherwise it is newly allocated and should
1032 * be used to create the IKE_SA.
1033 *
1034 * @returns ALREADY_DONE if the message with the given hash has been seen before
1035 * NOT_FOUND if the message hash was not found
1036 * FAILED if the SPI allocation failed
1037 */
1038 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1039 chunk_t init_hash, u_int64_t *our_spi)
1040 {
1041 table_item_t *item;
1042 u_int row, segment;
1043 mutex_t *mutex;
1044 init_hash_t *init;
1045 u_int64_t spi;
1046
1047 row = chunk_hash(init_hash) & this->table_mask;
1048 segment = row & this->segment_mask;
1049 mutex = this->init_hashes_segments[segment].mutex;
1050 mutex->lock(mutex);
1051 item = this->init_hashes_table[row];
1052 while (item)
1053 {
1054 init_hash_t *current = item->value;
1055
1056 if (chunk_equals(init_hash, current->hash))
1057 {
1058 *our_spi = current->our_spi;
1059 mutex->unlock(mutex);
1060 return ALREADY_DONE;
1061 }
1062 item = item->next;
1063 }
1064
1065 spi = get_spi(this);
1066 if (!spi)
1067 {
1068 return FAILED;
1069 }
1070
1071 INIT(init,
1072 .hash = {
1073 .len = init_hash.len,
1074 .ptr = init_hash.ptr,
1075 },
1076 .our_spi = spi,
1077 );
1078 INIT(item,
1079 .value = init,
1080 .next = this->init_hashes_table[row],
1081 );
1082 this->init_hashes_table[row] = item;
1083 *our_spi = init->our_spi;
1084 mutex->unlock(mutex);
1085 return NOT_FOUND;
1086 }
1087
/**
 * Remove the hash of an initial IKE message from the cache.
 *
 * Frees the init_hash_t container but not the hash data itself, which is
 * owned by the caller (it was not cloned when stored).
 */
static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	mutex_t *mutex;

	row = chunk_hash(init_hash) & this->table_mask;
	segment = row & this->segment_mask;
	mutex = this->init_hashes_segments[segment].mutex;
	mutex->lock(mutex);
	item = this->init_hashes_table[row];
	while (item)
	{
		init_hash_t *current = item->value;

		if (chunk_equals(init_hash, current->hash))
		{	/* unlink from the bucket and free the containers only */
			if (prev)
			{
				prev->next = item->next;
			}
			else
			{
				this->init_hashes_table[row] = item->next;
			}
			free(current);
			free(item);
			break;
		}
		prev = item;
		item = item->next;
	}
	mutex->unlock(mutex);
}
1125
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA");

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		/* block until no other thread holds the entry; may fail if the
		 * SA is being driven out */
		if (wait_for_entry(this, entry, segment))
		{
			entry->checked_out = TRUE;
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	/* publish the checked out SA (or NULL) on the bus for this thread */
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1149
1150 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1151 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1152 {
1153 ike_sa_id_t *ike_sa_id;
1154 ike_sa_t *ike_sa;
1155 u_int8_t ike_version;
1156 u_int64_t spi;
1157
1158 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1159
1160 spi = get_spi(this);
1161 if (!spi)
1162 {
1163 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1164 return NULL;
1165 }
1166
1167 if (initiator)
1168 {
1169 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1170 }
1171 else
1172 {
1173 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1174 }
1175 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1176 ike_sa_id->destroy(ike_sa_id);
1177
1178 if (ike_sa)
1179 {
1180 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1181 ike_sa->get_unique_id(ike_sa));
1182 }
1183 return ike_sa;
1184 }
1185
1186 /**
1187 * Get the message ID or message hash to detect early retransmissions
1188 */
1189 static u_int32_t get_message_id_or_hash(message_t *message)
1190 {
1191 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION)
1192 {
1193 /* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1194 * Mode, where all three messages use the same message ID */
1195 if (message->get_message_id(message) == 0 ||
1196 message->get_exchange_type(message) == QUICK_MODE)
1197 {
1198 return chunk_hash(message->get_packet_data(message));
1199 }
1200 }
1201 return message->get_message_id(message);
1202 }
1203
/**
 * Check out the IKE_SA a received message belongs to.
 *
 * For initial messages (IKEv2 IKE_SA_INIT requests, IKEv1 ID_PROT/AGGRESSIVE
 * with a zero responder SPI and MID 0) a hash of the message is registered to
 * detect retransmissions: an unseen init creates a fresh, checked-out IKE_SA,
 * a known one is routed to the existing SA via the stored responder SPI.
 * Returns NULL if the message should be ignored (hashing/SPI allocation
 * failed, IKE_SA limit hit, SA not found, or the same request is already
 * being processed by another thread).
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout IKE_SA by message");

	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		u_int64_t our_spi;
		chunk_t hash;

		/* hash the raw packet to recognize retransmits of this init message */
		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			return NULL;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						entry = entry_create();
						entry->ike_sa = ike_sa;
						/* entry takes ownership of the cloned id */
						entry->ike_sa_id = id;

						segment = put_entry(this, entry);
						entry->checked_out = TRUE;
						unlock_single_segment(this, segment);

						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));

						charon->bus->set_sa(charon->bus, ike_sa);
						return ike_sa;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit: unregister the init hash again */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				return NULL;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				return NULL;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = TRUE;
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{
				/* adopt the responder SPI allocated for the retransmitted init */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1363
/**
 * Check out an IKE_SA for the given peer config, reusing an existing, usable
 * IKE_SA with equal peer and IKE configs if reuse_ikesa is enabled, creating
 * a fresh one otherwise.  The thread's bus SA is set to the returned SA.
 */
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa)
	{	/* IKE_SA reuse disabled by config */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		return ike_sa;
	}

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (!wait_for_entry(this, entry, segment))
		{
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
		{	/* skip IKE_SAs which are not usable, wake other waiting threads */
			entry->condvar->signal(entry->condvar);
			continue;
		}

		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				entry->checked_out = TRUE;
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
		/* other threads might be waiting for this entry */
		entry->condvar->signal(entry->condvar);
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1422
1423 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1424 private_ike_sa_manager_t *this, u_int32_t id)
1425 {
1426 enumerator_t *enumerator;
1427 entry_t *entry;
1428 ike_sa_t *ike_sa = NULL;
1429 u_int segment;
1430
1431 DBG2(DBG_MGR, "checkout IKE_SA by ID %u", id);
1432
1433 enumerator = create_table_enumerator(this);
1434 while (enumerator->enumerate(enumerator, &entry, &segment))
1435 {
1436 if (wait_for_entry(this, entry, segment))
1437 {
1438 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1439 {
1440 ike_sa = entry->ike_sa;
1441 entry->checked_out = TRUE;
1442 break;
1443 }
1444 /* other threads might be waiting for this entry */
1445 entry->condvar->signal(entry->condvar);
1446 }
1447 }
1448 enumerator->destroy(enumerator);
1449
1450 if (ike_sa)
1451 {
1452 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1453 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1454 }
1455 charon->bus->set_sa(charon->bus, ike_sa);
1456 return ike_sa;
1457 }
1458
/**
 * Check out an IKE_SA by configuration name: either by the IKE_SA's own
 * connection name, or, if child is TRUE, by the policy name of one of its
 * CHILD_SAs.  Returns NULL if no match is found.
 */
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
	private_ike_sa_manager_t *this, char *name, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a policy name ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (streq(child_sa->get_name(child_sa), name))
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a connection name */
			{
				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = TRUE;
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1511
1512 /**
1513 * enumerator filter function, waiting variant
1514 */
1515 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1516 entry_t **in, ike_sa_t **out, u_int *segment)
1517 {
1518 if (wait_for_entry(this, *in, *segment))
1519 {
1520 *out = (*in)->ike_sa;
1521 charon->bus->set_sa(charon->bus, *out);
1522 return TRUE;
1523 }
1524 return FALSE;
1525 }
1526
1527 /**
1528 * enumerator filter function, skipping variant
1529 */
1530 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1531 entry_t **in, ike_sa_t **out, u_int *segment)
1532 {
1533 if (!(*in)->driveout_new_threads &&
1534 !(*in)->driveout_waiting_threads &&
1535 !(*in)->checked_out)
1536 {
1537 *out = (*in)->ike_sa;
1538 charon->bus->set_sa(charon->bus, *out);
1539 return TRUE;
1540 }
1541 return FALSE;
1542 }
1543
/**
 * Reset the thread's current bus IKE_SA after enumeration; undoes the
 * set_sa() done by the filter functions above when the enumerator is
 * destroyed.  The data argument is unused.
 */
static void reset_sa(void *data)
{
	charon->bus->set_sa(charon->bus, NULL);
}
1551
1552 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1553 private_ike_sa_manager_t* this, bool wait)
1554 {
1555 return enumerator_create_filter(create_table_enumerator(this),
1556 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1557 this, reset_sa);
1558 }
1559
/**
 * Check an IKE_SA back into the manager, updating its entry's SPIs and
 * half-open/connected-peers bookkeeping, and waking threads waiting for it.
 * An SA without an entry (e.g. created via checkout_new) is registered here.
 */
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = FALSE;
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		DBG2(DBG_MGR, "check-in of IKE_SA successful.");
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* no entry yet (e.g. SA created with checkout_new), register it now */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		segment = put_entry(this, entry);
	}

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_uniqueness() will try to check out this SA */
				entry->checked_out = TRUE;
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = FALSE;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1679
/**
 * Check in and destroy an IKE_SA: remove its entry from the table after
 * driving out all threads waiting on it, then release all lookup-table
 * references (half-open, connected peers, init hash) and destroy the entry.
 */
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
			entry->checked_out = FALSE;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up the auxiliary hash tables outside the segment lock */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1748
/**
 * Cleanup function for create_id_enumerator: destroys the cloned list of
 * IKE_SA IDs along with the contained ike_sa_id_t objects.
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1756
1757 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
1758 private_ike_sa_manager_t *this, identification_t *me,
1759 identification_t *other, int family)
1760 {
1761 table_item_t *item;
1762 u_int row, segment;
1763 rwlock_t *lock;
1764 linked_list_t *ids = NULL;
1765
1766 row = chunk_hash_inc(other->get_encoding(other),
1767 chunk_hash(me->get_encoding(me))) & this->table_mask;
1768 segment = row & this->segment_mask;
1769
1770 lock = this->connected_peers_segments[segment].lock;
1771 lock->read_lock(lock);
1772 item = this->connected_peers_table[row];
1773 while (item)
1774 {
1775 connected_peers_t *current = item->value;
1776
1777 if (connected_peers_match(current, me, other, family))
1778 {
1779 ids = current->sas->clone_offset(current->sas,
1780 offsetof(ike_sa_id_t, clone));
1781 break;
1782 }
1783 item = item->next;
1784 }
1785 lock->unlock(lock);
1786
1787 if (!ids)
1788 {
1789 return enumerator_create_empty();
1790 }
1791 return enumerator_create_cleaner(ids->create_enumerator(ids),
1792 (void*)id_enumerator_cleanup, ids);
1793 }
1794
/**
 * Move all CHILD_SAs and virtual IPs from old to new, emitting
 * children_migrate and assign_vips events so plugins can track the move.
 * Used when a reauthentication replaces an IKE_SA.
 *
 * @param old	IKE_SA to strip of children and virtual IPs
 * @param new	IKE_SA adopting them
 */
static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
{
	enumerator_t *enumerator;
	child_sa_t *child_sa;
	host_t *vip;
	int chcount = 0, vipcount = 0;

	charon->bus->children_migrate(charon->bus, new->get_id(new),
								  new->get_unique_id(new));
	enumerator = old->create_child_sa_enumerator(old);
	while (enumerator->enumerate(enumerator, &child_sa))
	{
		old->remove_child_sa(old, enumerator);
		new->add_child_sa(new, child_sa);
		chcount++;
	}
	enumerator->destroy(enumerator);

	enumerator = old->create_virtual_ip_enumerator(old, FALSE);
	while (enumerator->enumerate(enumerator, &vip))
	{
		new->add_virtual_ip(new, FALSE, vip);
		vipcount++;
	}
	enumerator->destroy(enumerator);
	/* this does not release the addresses, which is good, but it does trigger
	 * an assign_vips(FALSE) event... */
	old->clear_virtual_ips(old, FALSE);
	/* ...trigger the analogous event on the new SA */
	charon->bus->set_sa(charon->bus, new);
	charon->bus->assign_vips(charon->bus, new, TRUE);
	charon->bus->children_migrate(charon->bus, NULL, 0);
	/* restore the caller's bus SA, which is the old one */
	charon->bus->set_sa(charon->bus, old);

	if (chcount || vipcount)
	{
		DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
			 "children and %d virtual IPs", chcount, vipcount);
	}
}
1838
/**
 * Delete an existing IKE_SA due to a unique replace policy.
 *
 * If the duplicate is with the same remote host it is treated as a
 * reauthentication: children/VIPs are adopted for IKEv1 and the delete of
 * the old SA is delayed so overlapping SAs keep connectivity up.
 *
 * @return	SUCCESS if the delete was scheduled asynchronously, otherwise
 *			the result of delete() on the duplicate (the caller destroys
 *			the SA on DESTROY_ME)
 */
static status_t enforce_replace(private_ike_sa_manager_t *this,
								ike_sa_t *duplicate, ike_sa_t *new,
								identification_t *other, host_t *host)
{
	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);

	if (host->equals(host, duplicate->get_other_host(duplicate)))
	{
		/* looks like a reauthentication attempt */
		if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
			new->get_version(new) == IKEV1)
		{
			/* IKEv1 implicitly takes over children, IKEv2 recreates them
			 * explicitly. */
			adopt_children_and_vips(duplicate, new);
		}
		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
		 * peers need to complete the new SA first, otherwise the quick modes
		 * might get lost. For IKEv2 we do the same, as we want overlapping
		 * CHILD_SAs to keep connectivity up. */
		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
		return SUCCESS;
	}
	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
		 "uniqueness policy", other);
	return duplicate->delete(duplicate);
}
1870
/**
 * Enforce the peer config's uniqueness policy against all other IKE_SAs
 * between the same identities.  With force_replace, all duplicates are
 * destroyed unconditionally.  Returns TRUE if the new ike_sa should be
 * cancelled because an existing SA is kept (UNIQUE_KEEP).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	/* enumerates a cloned ID list, so checkout/checkin below is safe */
	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
											duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
1958
1959 METHOD(ike_sa_manager_t, has_contact, bool,
1960 private_ike_sa_manager_t *this, identification_t *me,
1961 identification_t *other, int family)
1962 {
1963 table_item_t *item;
1964 u_int row, segment;
1965 rwlock_t *lock;
1966 bool found = FALSE;
1967
1968 row = chunk_hash_inc(other->get_encoding(other),
1969 chunk_hash(me->get_encoding(me))) & this->table_mask;
1970 segment = row & this->segment_mask;
1971 lock = this->connected_peers_segments[segment].lock;
1972 lock->read_lock(lock);
1973 item = this->connected_peers_table[row];
1974 while (item)
1975 {
1976 if (connected_peers_match(item->value, me, other, family))
1977 {
1978 found = TRUE;
1979 break;
1980 }
1981 item = item->next;
1982 }
1983 lock->unlock(lock);
1984
1985 return found;
1986 }
1987
1988 METHOD(ike_sa_manager_t, get_count, u_int,
1989 private_ike_sa_manager_t *this)
1990 {
1991 u_int segment, count = 0;
1992 mutex_t *mutex;
1993
1994 for (segment = 0; segment < this->segment_count; segment++)
1995 {
1996 mutex = this->segments[segment & this->segment_mask].mutex;
1997 mutex->lock(mutex);
1998 count += this->segments[segment].count;
1999 mutex->unlock(mutex);
2000 }
2001 return count;
2002 }
2003
2004 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
2005 private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
2006 {
2007 table_item_t *item;
2008 u_int row, segment;
2009 rwlock_t *lock;
2010 chunk_t addr;
2011 u_int count = 0;
2012
2013 if (ip)
2014 {
2015 addr = ip->get_address(ip);
2016 row = chunk_hash(addr) & this->table_mask;
2017 segment = row & this->segment_mask;
2018 lock = this->half_open_segments[segment].lock;
2019 lock->read_lock(lock);
2020 item = this->half_open_table[row];
2021 while (item)
2022 {
2023 half_open_t *half_open = item->value;
2024
2025 if (chunk_equals(addr, half_open->other))
2026 {
2027 count = responder_only ? half_open->count_responder
2028 : half_open->count;
2029 break;
2030 }
2031 item = item->next;
2032 }
2033 lock->unlock(lock);
2034 }
2035 else
2036 {
2037 count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
2038 : (u_int)ref_cur(&this->half_open_count);
2039 }
2040 return count;
2041 }
2042
/**
 * Flush the manager on shutdown: with all segments locked, drive out all
 * threads, wait until every entry is released, initiate deletion of each
 * IKE_SA (firing down events for established IKEv2 SAs, as the delete will
 * never be processed), and finally destroy all entries and the RNG.
 */
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* drop the RNG so no new SPIs can be allocated after the flush */
	this->rng_lock->write_lock(this->rng_lock);
	this->rng->destroy(this->rng);
	this->rng = NULL;
	this->rng_lock->unlock(this->rng_lock);
}
2130
/**
 * Destroy the manager.  flush() must have run before, so all hash tables are
 * empty; only the tables themselves, the per-segment locks and the RNG lock
 * remain to be released.
 */
METHOD(ike_sa_manager_t, destroy, void,
	private_ike_sa_manager_t *this)
{
	u_int i;

	/* these are already cleared in flush() above */
	free(this->ike_sa_table);
	free(this->half_open_table);
	free(this->connected_peers_table);
	free(this->init_hashes_table);
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex->destroy(this->segments[i].mutex);
		this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
		this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
		this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
	}
	free(this->segments);
	free(this->half_open_segments);
	free(this->connected_peers_segments);
	free(this->init_hashes_segments);

	this->rng_lock->destroy(this->rng_lock);
	free(this);
}
2156
/**
 * Returns the smallest power of two that is greater than or equal to n.
 * Special cases match the original bit-smearing implementation: returns 0
 * for n == 0, and wraps to 0 if n exceeds the largest representable power
 * of two.
 */
static u_int get_nearest_powerof2(u_int n)
{
	u_int pow2;

	if (n == 0)
	{
		return 0;
	}
	for (pow2 = 1; pow2 && pow2 < n; pow2 <<= 1)
	{
		/* keep doubling until pow2 >= n, or until it wraps to zero */
	}
	return pow2;
}
2174
/*
 * Described in header.
 *
 * Sets up the main IKE_SA hash table plus three auxiliary tables (half-open
 * SAs, connected peers, seen init-message hashes), all sharing the same
 * table/segment geometry.  Returns NULL if no weak RNG (used for SPI
 * allocation) is available.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.destroy = _destroy,
		},
	);

	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}
	this->rng_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	/* 0 means no limit on the number of IKE_SAs */
	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* round the configured sizes up to powers of two so bit masks can be
	 * used instead of modulo operations; clamp to sane bounds */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_size",
											DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_segments",
											DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->segments[i].count = 0;
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->half_open_segments[i].count = 0;
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->connected_peers_segments[i].count = 0;
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->init_hashes_segments[i].count = 0;
	}

	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}