ike-sa-manager: Signal entries that we don't actually check out
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2015 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31 #include <processing/jobs/delete_ike_sa_job.h>
32
/* the default size of the hash table (MUST be a power of 2) */
#define DEFAULT_HASHTABLE_SIZE 1

/* the maximum size of the hash table (MUST be a power of 2) */
#define MAX_HASHTABLE_SIZE (1 << 30)

/* the default number of segments (MUST be a power of 2) */
#define DEFAULT_SEGMENT_COUNT 1
41
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Is this ike_sa currently checked out (i.e. in exclusive use by a thread)?
	 */
	bool checked_out;

	/**
	 * Does this SA drive out new threads? (no new thread may acquire it)
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads? (threads blocked on the condvar
	 * must give up instead of acquiring the entry)
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 * (stored in an unsigned field, so -1 wraps to the maximum value)
	 */
	u_int32_t processing;
};
116
/**
 * Implementation of entry_t.destroy.
 *
 * Destroys the entry and everything it owns, including the contained
 * IKE_SA itself.  Always returns SUCCESS.
 */
static status_t entry_destroy(entry_t *this)
{
	/* also destroy IKE SA */
	this->ike_sa->destroy(this->ike_sa);
	this->ike_sa_id->destroy(this->ike_sa_id);
	chunk_free(&this->init_hash);
	DESTROY_IF(this->other);
	DESTROY_IF(this->my_id);
	DESTROY_IF(this->other_id);
	this->condvar->destroy(this->condvar);
	free(this);
	return SUCCESS;
}
133
/**
 * Creates a new entry for the ike_sa_t list.
 *
 * All fields except the condvar and .processing are zero-initialized;
 * ike_sa, ike_sa_id etc. must be set by the caller.  .processing is set
 * to -1, meaning "no message currently being processed".
 */
static entry_t *entry_create()
{
	entry_t *this;

	INIT(this,
		.condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
		.processing = -1,
	);

	return this;
}
148
149 /**
150 * Function that matches entry_t objects by ike_sa_id_t.
151 */
152 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
153 {
154 if (id->equals(id, entry->ike_sa_id))
155 {
156 return TRUE;
157 }
158 if ((id->get_responder_spi(id) == 0 ||
159 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
160 (id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
161 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
162 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
163 {
164 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
165 return TRUE;
166 }
167 return FALSE;
168 }
169
170 /**
171 * Function that matches entry_t objects by ike_sa_t pointers.
172 */
173 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
174 {
175 return entry->ike_sa == ike_sa;
176 }
177
178 /**
179 * Hash function for ike_sa_id_t objects.
180 */
181 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
182 {
183 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
184 * locally unique, so we use our randomly allocated SPI whether we are
185 * initiator or responder to ensure a good distribution. The latter is not
186 * possible for IKEv1 as we don't know whether we are original initiator or
187 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
188 * SPIs (Cookies) to be allocated near random (we allocate them randomly
189 * anyway) it seems safe to always use the initiator SPI. */
190 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
191 ike_sa_id->is_initiator(ike_sa_id))
192 {
193 return ike_sa_id->get_initiator_spi(ike_sa_id);
194 }
195 return ike_sa_id->get_responder_spi(ike_sa_id);
196 }
197
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer.
 */
struct half_open_t {
	/** chunk of remote host address (owned, freed in half_open_destroy) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};

/**
 * Destroys a half_open_t object, releasing the cloned address chunk.
 */
static void half_open_destroy(half_open_t *this)
{
	chunk_free(&this->other);
	free(this);
}
222
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct to track established IKE_SAs between a pair of identities,
 * used for duplicate checking.
 */
struct connected_peers_t {
	/** own identity (owned clone) */
	identification_t *my_id;

	/** remote identity (owned clone) */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};

/**
 * Destroys a connected_peers_t object.  Note: the ike_sa_id_t objects in
 * .sas are destroyed individually where they are removed, not here.
 */
static void connected_peers_destroy(connected_peers_t *this)
{
	this->my_id->destroy(this->my_id);
	this->other_id->destroy(this->other_id);
	this->sas->destroy(this->sas);
	free(this);
}

/**
 * Function that matches connected_peers_t objects by the given ids.
 * A family of 0 acts as a wildcard and matches any address family.
 */
static inline bool connected_peers_match(connected_peers_t *connected_peers,
							identification_t *my_id, identification_t *other_id,
							int family)
{
	return my_id->equals(my_id, connected_peers->my_id) &&
		   other_id->equals(other_id, connected_peers->other_id) &&
		   (!family || family == connected_peers->family);
}
258
typedef struct init_hash_t init_hash_t;

/**
 * Cached hash of an initial IKE message, used to detect retransmissions.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};

typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table.
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment */
	u_int count;
};

typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};

typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list */
	table_item_t *next;
};
309
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table.
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows (table_size - 1, as the size
	 * is a power of two).
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table.
	 */
	segment_t *segments;

	/**
	 * The number of segments.
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment.
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs.
	 */
	refcount_t half_open_count;

	/**
	 * Total number of half-open IKE_SAs as responder.
	 */
	refcount_t half_open_count_responder;

	/**
	 * Hash table with connected_peers_t objects.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * Lock to access the RNG instance
	 */
	rwlock_t *rng_lock;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, if any (0 means unlimited)
	 */
	u_int ikesa_limit;
};
411
412 /**
413 * Acquire a lock to access the segment of the table row with the given index.
414 * It also works with the segment index directly.
415 */
416 static inline void lock_single_segment(private_ike_sa_manager_t *this,
417 u_int index)
418 {
419 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
420 lock->lock(lock);
421 }
422
423 /**
424 * Release the lock required to access the segment of the table row with the given index.
425 * It also works with the segment index directly.
426 */
427 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
428 u_int index)
429 {
430 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
431 lock->unlock(lock);
432 }
433
434 /**
435 * Lock all segments
436 */
437 static void lock_all_segments(private_ike_sa_manager_t *this)
438 {
439 u_int i;
440
441 for (i = 0; i < this->segment_count; i++)
442 {
443 this->segments[i].mutex->lock(this->segments[i].mutex);
444 }
445 }
446
447 /**
448 * Unlock all segments
449 */
450 static void unlock_all_segments(private_ike_sa_manager_t *this)
451 {
452 u_int i;
453
454 for (i = 0; i < this->segment_count; i++)
455 {
456 this->segments[i].mutex->unlock(this->segments[i].mutex);
457 }
458 }
459
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry (its segment lock is held while non-NULL)
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item
	 */
	table_item_t *current;

	/**
	 * previous table item (needed to unlink items in remove_entry_at)
	 */
	table_item_t *prev;
};
502
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	/* signal the previously returned entry: other threads may be waiting on
	 * its condvar even though we never checked it out ourselves */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	/* the rows of a segment are interleaved: segment s owns rows
	 * s, s + segment_count, s + 2*segment_count, ... */
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{	/* entering a new row: take the segment lock; it is held as
				 * long as an entry of this segment is being enumerated */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				/* return with the segment still locked */
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
539
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	/* wake up threads possibly waiting on the last enumerated entry */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
	}
	/* if we stopped mid-row, we still hold that segment's lock */
	if (this->current)
	{
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
553
/**
 * Creates an enumerator to enumerate the entries in the hash table.
 *
 * The enumerator holds the lock of the segment it is currently in; callers
 * must destroy it to release any held lock.
 */
static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
{
	private_enumerator_t *enumerator;

	INIT(enumerator,
		.enumerator = {
			.enumerate = (void*)_enumerate,
			.destroy = _enumerator_destroy,
		},
		.manager = this,
	);
	return &enumerator->enumerator;
}
570
/**
 * Put an entry into the hash table.
 *
 * The entry is inserted at the front of its bucket.
 *
 * Note: The caller has to unlock the returned segment.
 */
static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *current, *item;
	u_int row, segment;

	INIT(item,
		.value = entry,
	);

	row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
	segment = row & this->segment_mask;

	lock_single_segment(this, segment);
	current = this->ike_sa_table[row];
	if (current)
	{	/* insert at the front of current bucket */
		item->next = current;
	}
	this->ike_sa_table[row] = item;
	this->segments[segment].count++;
	return segment;
}
597
/**
 * Remove an entry from the hash table.
 *
 * Only the table item is freed; the entry itself is not destroyed.
 *
 * Note: The caller MUST have a lock on the segment of this entry.
 */
static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;

	row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
	segment = row & this->segment_mask;
	item = this->ike_sa_table[row];
	while (item)
	{
		if (item->value == entry)
		{
			if (prev)
			{
				prev->next = item->next;
			}
			else
			{
				this->ike_sa_table[row] = item->next;
			}
			this->segments[segment].count--;
			free(item);
			break;
		}
		prev = item;
		item = item->next;
	}
}
630
/**
 * Remove the entry at the current enumerator position.
 *
 * If the removed item was the head of its row (prev == NULL), the segment
 * lock is released here, because the enumerator will re-lock it when it
 * enters the row again via the now-empty "current" pointer.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		/* step back so the enumerator continues with current->next */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
656
/**
 * Find an entry using the provided match function to compare the entries for
 * equality.
 *
 * The row to search is derived from ike_sa_id; on SUCCESS, *entry and
 * *segment are set and the segment lock is held.
 */
static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
					ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
					linked_list_match_t match, void *param)
{
	table_item_t *item;
	u_int row, seg;

	row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
	seg = row & this->segment_mask;

	lock_single_segment(this, seg);
	item = this->ike_sa_table[row];
	while (item)
	{
		if (match(item->value, param))
		{
			*entry = item->value;
			*segment = seg;
			/* the locked segment has to be unlocked by the caller */
			return SUCCESS;
		}
		item = item->next;
	}
	unlock_single_segment(this, seg);
	return NOT_FOUND;
}
687
/**
 * Find an entry by ike_sa_id_t.
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
						ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_id, ike_sa_id);
}
698
/**
 * Find an entry by IKE_SA pointer.  The ike_sa_id is only used to locate the
 * hash table row; matching is done on the pointer itself.
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
			ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_sa, ike_sa);
}
709
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 *
 * Must be called with the entry's segment lock held; the condvar wait
 * releases and re-acquires that mutex.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
739
/**
 * Put a half-open SA into the hash table.
 *
 * Creates (or reuses) the per-peer half_open_t keyed by the remote address
 * and increments both the per-peer and the global counters; the responder
 * counters are only touched for SAs we did not initiate.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* no entry for this peer yet, create one */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	if (!ike_id->is_initiator(ike_id))
	{
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
791
/**
 * Remove a half-open SA from the hash table.
 *
 * Decrements the per-peer and global counters and destroys the per-peer
 * half_open_t once its count drops to zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{	/* this was the last half-open SA with this peer */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
843
/**
 * Put an SA between two peers into the hash table.
 *
 * The table is keyed by the (my_id, other_id) pair; the IKE_SA's ID is
 * cloned into the per-pair SA list.  Inserting the same IKE_SA ID twice
 * is a no-op.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
					(linked_list_match_t)entry->ike_sa_id->equals,
					NULL, entry->ike_sa_id) == SUCCESS)
			{	/* this IKE_SA is already registered for this pair */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first SA between these two identities, create a new record */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
902
/**
 * Remove an SA between two peers from the hash table.
 *
 * Destroys the stored clone of the IKE_SA ID, and destroys the whole
 * connected_peers_t record once its SA list becomes empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no more SAs between these identities */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
966
/**
 * Get a random SPI for new IKE_SAs.
 *
 * @return	a random non-zero SPI, or 0 if the RNG is unavailable or failed
 */
static u_int64_t get_spi(private_ike_sa_manager_t *this)
{
	u_int64_t spi;

	this->rng_lock->read_lock(this->rng_lock);
	if (!this->rng ||
		!this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
	{
		spi = 0;
	}
	this->rng_lock->unlock(this->rng_lock);
	return spi;
}
983
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * For fragmented IKEv1 init messages only source IP, port and initiator SPI
 * are hashed; for Main Mode the source address is mixed in before hashing
 * the full packet data.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		u_int16_t port;
		u_int64_t spi;

		src = message->get_source(message);
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1024
1025 /**
1026 * Check if we already have created an IKE_SA based on the initial IKE message
1027 * with the given hash.
1028 * If not the hash is stored, the hash data is not(!) cloned.
1029 *
1030 * Also, the local SPI is returned. In case of a retransmit this is already
1031 * stored together with the hash, otherwise it is newly allocated and should
1032 * be used to create the IKE_SA.
1033 *
1034 * @returns ALREADY_DONE if the message with the given hash has been seen before
1035 * NOT_FOUND if the message hash was not found
1036 * FAILED if the SPI allocation failed
1037 */
1038 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1039 chunk_t init_hash, u_int64_t *our_spi)
1040 {
1041 table_item_t *item;
1042 u_int row, segment;
1043 mutex_t *mutex;
1044 init_hash_t *init;
1045 u_int64_t spi;
1046
1047 row = chunk_hash(init_hash) & this->table_mask;
1048 segment = row & this->segment_mask;
1049 mutex = this->init_hashes_segments[segment].mutex;
1050 mutex->lock(mutex);
1051 item = this->init_hashes_table[row];
1052 while (item)
1053 {
1054 init_hash_t *current = item->value;
1055
1056 if (chunk_equals(init_hash, current->hash))
1057 {
1058 *our_spi = current->our_spi;
1059 mutex->unlock(mutex);
1060 return ALREADY_DONE;
1061 }
1062 item = item->next;
1063 }
1064
1065 spi = get_spi(this);
1066 if (!spi)
1067 {
1068 return FAILED;
1069 }
1070
1071 INIT(init,
1072 .hash = {
1073 .len = init_hash.len,
1074 .ptr = init_hash.ptr,
1075 },
1076 .our_spi = spi,
1077 );
1078 INIT(item,
1079 .value = init,
1080 .next = this->init_hashes_table[row],
1081 );
1082 this->init_hashes_table[row] = item;
1083 *our_spi = init->our_spi;
1084 mutex->unlock(mutex);
1085 return NOT_FOUND;
1086 }
1087
/**
 * Remove the hash of an initial IKE message from the cache.
 *
 * Frees the init_hash_t record but not the hash data itself, which is
 * owned by the caller (it was never cloned when stored).
 */
static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	mutex_t *mutex;

	row = chunk_hash(init_hash) & this->table_mask;
	segment = row & this->segment_mask;
	mutex = this->init_hashes_segments[segment].mutex;
	mutex->lock(mutex);
	item = this->init_hashes_table[row];
	while (item)
	{
		init_hash_t *current = item->value;

		if (chunk_equals(init_hash, current->hash))
		{
			if (prev)
			{
				prev->next = item->next;
			}
			else
			{
				this->init_hashes_table[row] = item->next;
			}
			free(current);
			free(item);
			break;
		}
		prev = item;
		item = item->next;
	}
	mutex->unlock(mutex);
}
1125
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA");

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		/* block until no other thread uses the entry, then mark it ours */
		if (wait_for_entry(this, entry, segment))
		{
			entry->checked_out = TRUE;
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	/* publish the checked out SA on the bus (NULL clears it on failure) */
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1149
METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
	private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
{
	ike_sa_id_t *ike_sa_id;
	ike_sa_t *ike_sa;
	u_int8_t ike_version;
	u_int64_t spi;

	ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;

	spi = get_spi(this);
	if (!spi)
	{
		DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
		return NULL;
	}

	/* our randomly allocated SPI goes on the side we play */
	if (initiator)
	{
		ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
	}
	else
	{
		ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
	}
	/* NOTE(review): the new SA is not inserted into the hash table here -
	 * presumably that happens on a later checkin; confirm with callers */
	ike_sa = ike_sa_create(ike_sa_id, initiator, version);
	ike_sa_id->destroy(ike_sa_id);

	if (ike_sa)
	{
		DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
				ike_sa->get_unique_id(ike_sa));
	}
	return ike_sa;
}
1185
1186 /**
1187 * Get the message ID or message hash to detect early retransmissions
1188 */
1189 static u_int32_t get_message_id_or_hash(message_t *message)
1190 {
1191 /* Use the message ID, or the message hash in IKEv1 Main/Aggressive mode */
1192 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION &&
1193 message->get_message_id(message) == 0)
1194 {
1195 return chunk_hash(message->get_packet_data(message));
1196 }
1197 return message->get_message_id(message);
1198 }
1199
/**
 * Described in header: find (or, for initial requests, create) the IKE_SA
 * an inbound message belongs to and check it out exclusively. Returns NULL
 * when the message should be ignored (already processing this message ID,
 * hashing/SPI-allocation failure, IKE_SA limit hit, or unknown SPI).
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout IKE_SA by message");

	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		/* no responder SPI and MID 0: may be the very first message of a new
		 * IKE_SA, detect per major version and exchange type */
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		u_int64_t our_spi;
		chunk_t hash;

		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			return NULL;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						/* entry takes ownership of ike_sa and the cloned id */
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;

						segment = put_entry(this, entry);
						entry->checked_out = TRUE;
						unlock_single_segment(this, segment);

						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));

						charon->bus->set_sa(charon->bus, ike_sa);
						return ike_sa;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit: undo the init-hash tracking */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				return NULL;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				return NULL;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = TRUE;
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{	/* adopt the responder SPI learned from the message */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1359
/**
 * Described in header: check out an existing IKE_SA matching the given
 * peer config (if reuse is enabled), or hand out a newly created one.
 */
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa)
	{	/* IKE_SA reuse disabled by config */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		return ike_sa;
	}

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (!wait_for_entry(this, entry, segment))
		{
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
		{	/* skip IKE_SAs which are not usable, wake other waiting threads */
			entry->condvar->signal(entry->condvar);
			continue;
		}

		/* an SA matches only if both its peer and IKE configs are equal */
		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				entry->checked_out = TRUE;
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
		/* other threads might be waiting for this entry */
		entry->condvar->signal(entry->condvar);
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1418
/**
 * Described in header: check out the IKE_SA with the given unique ID,
 * or return NULL if no such SA exists.
 */
METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
	private_ike_sa_manager_t *this, u_int32_t id)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by ID %u", id);

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
			{
				ike_sa = entry->ike_sa;
				entry->checked_out = TRUE;
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	if (ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
			 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1454
/**
 * Described in header: check out an IKE_SA by its connection name, or, if
 * child is TRUE, by the policy name of one of its CHILD_SAs.
 */
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
	private_ike_sa_manager_t *this, char *name, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a policy name ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (streq(child_sa->get_name(child_sa), name))
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a connection name */
			{
				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = TRUE;
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1507
/**
 * enumerator filter function, waiting variant
 *
 * Blocks until the entry becomes available (or the SA is driven out) and
 * publishes the enumerated SA on the bus for the enumerating thread.
 */
static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
								   entry_t **in, ike_sa_t **out, u_int *segment)
{
	if (wait_for_entry(this, *in, *segment))
	{
		*out = (*in)->ike_sa;
		charon->bus->set_sa(charon->bus, *out);
		return TRUE;
	}
	return FALSE;
}
1522
/**
 * enumerator filter function, skipping variant
 *
 * Skips entries that are checked out or being driven out instead of
 * waiting for them; matching SAs are published on the bus.
 */
static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
								   entry_t **in, ike_sa_t **out, u_int *segment)
{
	if (!(*in)->driveout_new_threads &&
		!(*in)->driveout_waiting_threads &&
		!(*in)->checked_out)
	{
		*out = (*in)->ike_sa;
		charon->bus->set_sa(charon->bus, *out);
		return TRUE;
	}
	return FALSE;
}
1539
/**
 * Reset threads SA after enumeration
 *
 * Used as enumerator cleanup callback; data is unused, it merely clears
 * the per-thread SA the filter functions set on the bus.
 */
static void reset_sa(void *data)
{
	charon->bus->set_sa(charon->bus, NULL);
}
1547
/**
 * Described in header: enumerate all IKE_SAs, either waiting for
 * checked-out entries (wait = TRUE) or skipping them (wait = FALSE).
 */
METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
	private_ike_sa_manager_t* this, bool wait)
{
	/* reset_sa() clears the bus SA set by the filters on destruction */
	return enumerator_create_filter(create_table_enumerator(this),
		wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
		this, reset_sa);
}
1555
/**
 * Described in header: return a checked-out IKE_SA to the manager, updating
 * half-open and connected-peer tracking and waking waiting threads.
 */
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = FALSE;
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		DBG2(DBG_MGR, "check-in of IKE_SA successful.");
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* no entry found: the SA was created outside the manager (e.g. via
		 * checkout_new()), register it now */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		segment = put_entry(this, entry);
	}

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_uniqueness() will try to check out this SA */
				entry->checked_out = TRUE;
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = FALSE;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1675
/**
 * Described in header: check a previously checked-out IKE_SA back in and
 * destroy it, after driving out all threads still waiting for it.
 */
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
			entry->checked_out = FALSE;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up any auxiliary table entries before destroying */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1744
/**
 * Cleanup function for create_id_enumerator
 *
 * Destroys the list along with the cloned ike_sa_id_t objects it contains.
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1752
/**
 * Described in header: enumerate the IKE_SA IDs of all SAs between the
 * given identity pair (and address family), as tracked in the
 * connected-peers table.
 */
METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
	private_ike_sa_manager_t *this, identification_t *me,
	identification_t *other, int family)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	linked_list_t *ids = NULL;

	/* bucket derived from the combined hash over both identities */
	row = chunk_hash_inc(other->get_encoding(other),
						 chunk_hash(me->get_encoding(me))) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->read_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, me, other, family))
		{
			/* clone the IDs so the enumerator stays valid without the lock */
			ids = current->sas->clone_offset(current->sas,
											 offsetof(ike_sa_id_t, clone));
			break;
		}
		item = item->next;
	}
	lock->unlock(lock);

	if (!ids)
	{
		return enumerator_create_empty();
	}
	return enumerator_create_cleaner(ids->create_enumerator(ids),
									 (void*)id_enumerator_cleanup, ids);
}
1790
/**
 * Move all CHILD_SAs and virtual IPs from old to new
 *
 * Used during IKEv1 reauthentication: the new IKE_SA takes over the
 * children and virtual IPs of the SA it replaces. Bus events are fired so
 * listeners can follow the migration; note the event ordering below.
 */
static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
{
	enumerator_t *enumerator;
	child_sa_t *child_sa;
	host_t *vip;
	int chcount = 0, vipcount = 0;

	charon->bus->children_migrate(charon->bus, new->get_id(new),
								  new->get_unique_id(new));
	enumerator = old->create_child_sa_enumerator(old);
	while (enumerator->enumerate(enumerator, &child_sa))
	{
		old->remove_child_sa(old, enumerator);
		new->add_child_sa(new, child_sa);
		chcount++;
	}
	enumerator->destroy(enumerator);

	enumerator = old->create_virtual_ip_enumerator(old, FALSE);
	while (enumerator->enumerate(enumerator, &vip))
	{
		new->add_virtual_ip(new, FALSE, vip);
		vipcount++;
	}
	enumerator->destroy(enumerator);
	/* this does not release the addresses, which is good, but it does trigger
	 * an assign_vips(FALSE) event... */
	old->clear_virtual_ips(old, FALSE);
	/* ...trigger the analogous event on the new SA */
	charon->bus->set_sa(charon->bus, new);
	charon->bus->assign_vips(charon->bus, new, TRUE);
	charon->bus->children_migrate(charon->bus, NULL, 0);
	charon->bus->set_sa(charon->bus, old);

	if (chcount || vipcount)
	{
		DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
			 "children and %d virtual IPs", chcount, vipcount);
	}
}
1834
/**
 * Delete an existing IKE_SA due to a unique replace policy
 *
 * @param duplicate		existing IKE_SA to be replaced
 * @param new			IKE_SA replacing it
 * @param other			remote identity, for logging
 * @param host			remote host of the new SA
 * @return				SUCCESS, or the result of duplicate->delete()
 */
static status_t enforce_replace(private_ike_sa_manager_t *this,
								ike_sa_t *duplicate, ike_sa_t *new,
								identification_t *other, host_t *host)
{
	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);

	if (host->equals(host, duplicate->get_other_host(duplicate)))
	{
		/* looks like a reauthentication attempt */
		if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
			new->get_version(new) == IKEV1)
		{
			/* IKEv1 implicitly takes over children, IKEv2 recreates them
			 * explicitly. */
			adopt_children_and_vips(duplicate, new);
		}
		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
		 * peers need to complete the new SA first, otherwise the quick modes
		 * might get lost. For IKEv2 we do the same, as we want overlapping
		 * CHILD_SAs to keep connectivity up. */
		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
		return SUCCESS;
	}
	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
		 "uniqueness policy", other);
	return duplicate->delete(duplicate);
}
1866
/**
 * Described in header: enforce the peer config's uniqueness policy against
 * any existing IKE_SAs with the same identity pair. Returns TRUE if the
 * new SA should be canceled (UNIQUE_KEEP with an established duplicate).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		/* check out each candidate; it may be gone by now */
		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{	/* INITIAL_CONTACT: destroy duplicates unconditionally */
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
										duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
1954
1955 METHOD(ike_sa_manager_t, has_contact, bool,
1956 private_ike_sa_manager_t *this, identification_t *me,
1957 identification_t *other, int family)
1958 {
1959 table_item_t *item;
1960 u_int row, segment;
1961 rwlock_t *lock;
1962 bool found = FALSE;
1963
1964 row = chunk_hash_inc(other->get_encoding(other),
1965 chunk_hash(me->get_encoding(me))) & this->table_mask;
1966 segment = row & this->segment_mask;
1967 lock = this->connected_peers_segments[segment].lock;
1968 lock->read_lock(lock);
1969 item = this->connected_peers_table[row];
1970 while (item)
1971 {
1972 if (connected_peers_match(item->value, me, other, family))
1973 {
1974 found = TRUE;
1975 break;
1976 }
1977 item = item->next;
1978 }
1979 lock->unlock(lock);
1980
1981 return found;
1982 }
1983
1984 METHOD(ike_sa_manager_t, get_count, u_int,
1985 private_ike_sa_manager_t *this)
1986 {
1987 u_int segment, count = 0;
1988 mutex_t *mutex;
1989
1990 for (segment = 0; segment < this->segment_count; segment++)
1991 {
1992 mutex = this->segments[segment & this->segment_mask].mutex;
1993 mutex->lock(mutex);
1994 count += this->segments[segment].count;
1995 mutex->unlock(mutex);
1996 }
1997 return count;
1998 }
1999
2000 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
2001 private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
2002 {
2003 table_item_t *item;
2004 u_int row, segment;
2005 rwlock_t *lock;
2006 chunk_t addr;
2007 u_int count = 0;
2008
2009 if (ip)
2010 {
2011 addr = ip->get_address(ip);
2012 row = chunk_hash(addr) & this->table_mask;
2013 segment = row & this->segment_mask;
2014 lock = this->half_open_segments[segment].lock;
2015 lock->read_lock(lock);
2016 item = this->half_open_table[row];
2017 while (item)
2018 {
2019 half_open_t *half_open = item->value;
2020
2021 if (chunk_equals(addr, half_open->other))
2022 {
2023 count = responder_only ? half_open->count_responder
2024 : half_open->count;
2025 break;
2026 }
2027 item = item->next;
2028 }
2029 lock->unlock(lock);
2030 }
2031 else
2032 {
2033 count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
2034 : (u_int)ref_cur(&this->half_open_count);
2035 }
2036 return count;
2037 }
2038
/**
 * Described in header: shut down the manager by driving out all threads,
 * initiating deletion of every IKE_SA and destroying all entries. Runs in
 * four phases with all segments locked throughout.
 */
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* release the RNG so no more SPIs can be allocated */
	this->rng_lock->write_lock(this->rng_lock);
	this->rng->destroy(this->rng);
	this->rng = NULL;
	this->rng_lock->unlock(this->rng_lock);
}
2126
2127 METHOD(ike_sa_manager_t, destroy, void,
2128 private_ike_sa_manager_t *this)
2129 {
2130 u_int i;
2131
2132 /* these are already cleared in flush() above */
2133 free(this->ike_sa_table);
2134 free(this->half_open_table);
2135 free(this->connected_peers_table);
2136 free(this->init_hashes_table);
2137 for (i = 0; i < this->segment_count; i++)
2138 {
2139 this->segments[i].mutex->destroy(this->segments[i].mutex);
2140 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
2141 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
2142 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
2143 }
2144 free(this->segments);
2145 free(this->half_open_segments);
2146 free(this->connected_peers_segments);
2147 free(this->init_hashes_segments);
2148
2149 this->rng_lock->destroy(this->rng_lock);
2150 free(this);
2151 }
2152
/**
 * This function returns the next-highest power of two for the given number.
 *
 * It smears the most significant set bit into every lower position, then
 * adds one so the value rolls over to the next power of two. Exact powers
 * of two are returned unchanged. Note: returns 0 for n == 0.
 */
static u_int get_nearest_powerof2(u_int n)
{
	u_int shift;

	n--;
	for (shift = 1; shift < sizeof(n) * 8; shift <<= 1)
	{
		n |= n >> shift;
	}
	return n + 1;
}
2170
/*
 * Described in header.
 *
 * Creates the manager and allocates four hash tables sharing the same
 * size/segmentation parameters: the main IKE_SA table, the half-open SA
 * table, the connected-peers table (duplicate checking) and the init-hash
 * table (retransmission detection). Returns NULL if no RNG is available.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.destroy = _destroy,
		},
	);

	/* used to allocate SPIs; weak randomness is sufficient here */
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}
	this->rng_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* both table size and segment count are rounded to powers of two so the
	 * masks below can be used instead of modulo operations */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
									lib->settings, "%s.ikesa_table_size",
									DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
									lib->settings, "%s.ikesa_table_segments",
									DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->segments[i].count = 0;
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->half_open_segments[i].count = 0;
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->connected_peers_segments[i].count = 0;
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->init_hashes_segments[i].count = 0;
	}

	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}