3d150090999396bf0c263c4bb85398eb5e2ef368
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2015 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31 #include <processing/jobs/delete_ike_sa_job.h>
32
33 /* the default size of the hash table (MUST be a power of 2) */
34 #define DEFAULT_HASHTABLE_SIZE 1
35
36 /* the maximum size of the hash table (MUST be a power of 2) */
37 #define MAX_HASHTABLE_SIZE (1 << 30)
38
39 /* the default number of segments (MUST be a power of 2) */
40 #define DEFAULT_SEGMENT_COUNT 1
41
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 *
 * An entry is protected by the mutex of the hash table segment it lives in;
 * all flag/counter fields below are only accessed with that mutex held.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Is this ike_sa currently checked out (in exclusive use by a thread)?
	 */
	bool checked_out;

	/**
	 * Does this SA drive out new threads (no new checkouts allowed)?
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads (threads blocked on the condvar
	 * give up instead of checking out)?
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 * (stored in an unsigned field, so -1 wraps to 0xFFFFFFFF)
	 */
	u_int32_t processing;
};
116
117 /**
118 * Implementation of entry_t.destroy.
119 */
120 static status_t entry_destroy(entry_t *this)
121 {
122 /* also destroy IKE SA */
123 this->ike_sa->destroy(this->ike_sa);
124 this->ike_sa_id->destroy(this->ike_sa_id);
125 chunk_free(&this->init_hash);
126 DESTROY_IF(this->other);
127 DESTROY_IF(this->my_id);
128 DESTROY_IF(this->other_id);
129 this->condvar->destroy(this->condvar);
130 free(this);
131 return SUCCESS;
132 }
133
134 /**
135 * Creates a new entry for the ike_sa_t list.
136 */
137 static entry_t *entry_create()
138 {
139 entry_t *this;
140
141 INIT(this,
142 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
143 .processing = -1,
144 );
145
146 return this;
147 }
148
/**
 * Function that matches entry_t objects by ike_sa_id_t.
 *
 * Matches either on full SPI equality, or treats an entry as a match when
 * one side's responder SPI is still zero (i.e. an IKE_SA we initiated but
 * for which no response has been received yet).
 */
static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
{
	/* exact match: both SPIs and the initiator flag agree */
	if (id->equals(id, entry->ike_sa_id))
	{
		return TRUE;
	}
	/* half-complete match: one responder SPI is still unset; for IKEv1 the
	 * initiator flag can't be derived from the header, so it is ignored */
	if ((id->get_responder_spi(id) == 0 ||
		 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
		(id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
		 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
		id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
	{
		/* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
		return TRUE;
	}
	return FALSE;
}
169
170 /**
171 * Function that matches entry_t objects by ike_sa_t pointers.
172 */
173 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
174 {
175 return entry->ike_sa == ike_sa;
176 }
177
/**
 * Hash function for ike_sa_id_t objects.
 *
 * Returns the locally allocated SPI (truncated to u_int), which the caller
 * masks with table_mask to select a table row.
 */
static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
{
	/* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
	 * locally unique, so we use our randomly allocated SPI whether we are
	 * initiator or responder to ensure a good distribution. The latter is not
	 * possible for IKEv1 as we don't know whether we are original initiator or
	 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
	 * SPIs (Cookies) to be allocated near random (we allocate them randomly
	 * anyway) it seems safe to always use the initiator SPI. */
	if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
		ike_sa_id->is_initiator(ike_sa_id))
	{
		return ike_sa_id->get_initiator_spi(ike_sa_id);
	}
	return ike_sa_id->get_responder_spi(ike_sa_id);
}
197
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer (keyed by remote address).
 */
struct half_open_t {
	/** chunk of remote host address */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};
213
214 /**
215 * Destroys a half_open_t object.
216 */
217 static void half_open_destroy(half_open_t *this)
218 {
219 chunk_free(&this->other);
220 free(this);
221 }
222
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct to track established IKE_SAs between a pair of identities,
 * used for duplicate checking.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
238
/**
 * Destroy a connected_peers_t object, its identities and its SA id list.
 * Note: the list entries themselves are destroyed by the callers before
 * the list becomes empty (see remove_connected_peers).
 */
static void connected_peers_destroy(connected_peers_t *this)
{
	this->my_id->destroy(this->my_id);
	this->other_id->destroy(this->other_id);
	this->sas->destroy(this->sas);
	free(this);
}
246
247 /**
248 * Function that matches connected_peers_t objects by the given ids.
249 */
250 static inline bool connected_peers_match(connected_peers_t *connected_peers,
251 identification_t *my_id, identification_t *other_id,
252 int family)
253 {
254 return my_id->equals(my_id, connected_peers->my_id) &&
255 other_id->equals(other_id, connected_peers->other_id) &&
256 (!family || family == connected_peers->family);
257 }
258
typedef struct init_hash_t init_hash_t;

/**
 * Struct mapping the hash of an initial IKE message to the local SPI
 * allocated for it, used to detect retransmitted init messages.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};
268
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table (a segment covers every
 * segment_count-th table row and is locked as a unit).
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment */
	u_int count;
};
281
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 * Uses an rwlock instead of a mutex so readers can access a segment in parallel.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
295
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list (NULL at the end of a bucket) */
	table_item_t *next;
};
309
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 *
 * All four hash tables share table_size/table_mask; each table has its own
 * array of segment locks.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table (a power of 2).
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows (table_size - 1).
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table.
	 */
	segment_t *segments;

	/**
	 * The number of segments (a power of 2).
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment (segment_count - 1).
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs.
	 */
	refcount_t half_open_count;

	/**
	 * Total number of half-open IKE_SAs as responder.
	 */
	refcount_t half_open_count_responder;

	/**
	 * Hash table with connected_peers_t objects.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * Registered callback for IKE SPIs
	 */
	struct {
		spi_cb_t cb;
		void *data;
	} spi_cb;

	/**
	 * Lock to access the RNG instance and the callback
	 */
	rwlock_t *spi_lock;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, 0 if unlimited
	 */
	u_int ikesa_limit;
};
419
420 /**
421 * Acquire a lock to access the segment of the table row with the given index.
422 * It also works with the segment index directly.
423 */
424 static inline void lock_single_segment(private_ike_sa_manager_t *this,
425 u_int index)
426 {
427 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
428 lock->lock(lock);
429 }
430
431 /**
432 * Release the lock required to access the segment of the table row with the given index.
433 * It also works with the segment index directly.
434 */
435 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
436 u_int index)
437 {
438 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
439 lock->unlock(lock);
440 }
441
442 /**
443 * Lock all segments
444 */
445 static void lock_all_segments(private_ike_sa_manager_t *this)
446 {
447 u_int i;
448
449 for (i = 0; i < this->segment_count; i++)
450 {
451 this->segments[i].mutex->lock(this->segments[i].mutex);
452 }
453 }
454
455 /**
456 * Unlock all segments
457 */
458 static void unlock_all_segments(private_ike_sa_manager_t *this)
459 {
460 u_int i;
461
462 for (i = 0; i < this->segment_count; i++)
463 {
464 this->segments[i].mutex->unlock(this->segments[i].mutex);
465 }
466 }
467
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 *
 * Enumerates one segment at a time; while an entry is enumerated its
 * segment lock is held (see enumerate/enumerator_destroy).
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item
	 */
	table_item_t *current;

	/**
	 * previous table item
	 */
	table_item_t *prev;
};
510
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	/* signal waiting threads that the previously enumerated entry is
	 * available again */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	/* walk one segment at a time; a segment covers every segment_count-th
	 * row of the table */
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{	/* entering a new row: acquire the segment lock (kept locked
				 * until the segment is exhausted or the enumerator stops) */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{	/* return with the segment still locked; released on the
				 * next call or in enumerator_destroy */
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
547
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	/* wake up threads waiting for the last enumerated entry */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
	}
	/* release the segment lock still held from the last enumerate() call */
	if (this->current)
	{
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
561
/**
 * Creates an enumerator to enumerate the entries in the hash table.
 *
 * The returned enumerator yields (entry_t*, u_int segment) pairs and keeps
 * the current entry's segment locked between calls.
 */
static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
{
	private_enumerator_t *enumerator;

	INIT(enumerator,
		.enumerator = {
			.enumerate = (void*)_enumerate,
			.destroy = _enumerator_destroy,
		},
		.manager = this,
	);
	return &enumerator->enumerator;
}
578
/**
 * Put an entry into the hash table.
 * Note: The caller has to unlock the returned segment.
 *
 * @return	index of the segment that remains locked
 */
static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *current, *item;
	u_int row, segment;

	INIT(item,
		.value = entry,
	);

	row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
	segment = row & this->segment_mask;

	lock_single_segment(this, segment);
	current = this->ike_sa_table[row];
	if (current)
	{ /* insert at the front of current bucket */
		item->next = current;
	}
	this->ike_sa_table[row] = item;
	this->segments[segment].count++;
	return segment;
}
605
/**
 * Remove an entry from the hash table (frees the table item only, not the
 * entry itself).
 * Note: The caller MUST have a lock on the segment of this entry.
 */
static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;

	row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
	segment = row & this->segment_mask;
	item = this->ike_sa_table[row];
	while (item)
	{
		if (item->value == entry)
		{	/* unlink the item from the bucket's overflow list */
			if (prev)
			{
				prev->next = item->next;
			}
			else
			{
				this->ike_sa_table[row] = item->next;
			}
			this->segments[segment].count--;
			free(item);
			break;
		}
		prev = item;
		item = item->next;
	}
}
638
/**
 * Remove the entry at the current enumerator position.
 *
 * If the removed item was the head of its bucket the segment lock is
 * released here; the next enumerate() call re-acquires it when it enters
 * the next row.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		/* step back so the next enumerate() continues after the removed item */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
664
/**
 * Find an entry using the provided match function to compare the entries for
 * equality.
 *
 * The ike_sa_id is used only to select the table row; the match function
 * decides the actual hit.
 *
 * @return	SUCCESS with *entry/*segment set (segment left locked),
 *			NOT_FOUND otherwise (segment unlocked)
 */
static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
					ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
					linked_list_match_t match, void *param)
{
	table_item_t *item;
	u_int row, seg;

	row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
	seg = row & this->segment_mask;

	lock_single_segment(this, seg);
	item = this->ike_sa_table[row];
	while (item)
	{
		if (match(item->value, param))
		{
			*entry = item->value;
			*segment = seg;
			/* the locked segment has to be unlocked by the caller */
			return SUCCESS;
		}
		item = item->next;
	}
	unlock_single_segment(this, seg);
	return NOT_FOUND;
}
695
/**
 * Find an entry by ike_sa_id_t (uses entry_match_by_id, so half-completed
 * SAs with an unset responder SPI match as well).
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
						ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_id, ike_sa_id);
}
706
/**
 * Find an entry by IKE_SA pointer (the id is only needed to locate the
 * table row).
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
			ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_sa, ike_sa);
}
717
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 *
 * Must be called with the entry's segment mutex held; the condvar wait
 * releases it while blocked and re-acquires it before returning.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
747
/**
 * Put a half-open SA into the hash table (keyed by the remote address) and
 * update the global half-open counters.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	/* look for an existing counter for this address */
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first half-open SA for this address: create a counter object */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	if (!ike_id->is_initiator(ike_id))
	{
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
799
/**
 * Remove a half-open SA from the hash table and update the global
 * half-open counters; destroys the per-address counter object once its
 * count drops to zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{	/* last half-open SA for this address: unlink and free */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
851
/**
 * Put an SA between two peers into the hash table (keyed by the encoded
 * identities). Does nothing if the SA id is already registered for this
 * identity pair.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
					(linked_list_match_t)entry->ike_sa_id->equals,
					NULL, entry->ike_sa_id) == SUCCESS)
			{	/* this SA id is already registered, nothing to do */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first SA between these identities: create the tracking object */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
910
/**
 * Remove an SA between two peers from the hash table; the tracking object
 * is destroyed once its SA list becomes empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* remove this entry's SA id from the tracked list */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no more SAs between these identities: unlink and free */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
974
/**
 * Get a random SPI for new IKE_SAs.
 *
 * Uses the registered SPI callback if one is set, otherwise the RNG.
 *
 * @return	a new SPI, or 0 on failure (no RNG / RNG error)
 */
static u_int64_t get_spi(private_ike_sa_manager_t *this)
{
	u_int64_t spi;

	this->spi_lock->read_lock(this->spi_lock);
	if (this->spi_cb.cb)
	{
		spi = this->spi_cb.cb(this->spi_cb.data);
	}
	else if (!this->rng ||
			 !this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
	{
		spi = 0;
	}
	this->spi_lock->unlock(this->spi_lock);
	return spi;
}
995
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * For IKEv1 fragments only source address/port and initiator SPI are hashed;
 * for Main Mode the source address is mixed in before the packet data.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		u_int16_t port;
		u_int64_t spi;

		src = message->get_source(message);
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1036
1037 /**
1038 * Check if we already have created an IKE_SA based on the initial IKE message
1039 * with the given hash.
1040 * If not the hash is stored, the hash data is not(!) cloned.
1041 *
1042 * Also, the local SPI is returned. In case of a retransmit this is already
1043 * stored together with the hash, otherwise it is newly allocated and should
1044 * be used to create the IKE_SA.
1045 *
1046 * @returns ALREADY_DONE if the message with the given hash has been seen before
1047 * NOT_FOUND if the message hash was not found
1048 * FAILED if the SPI allocation failed
1049 */
1050 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1051 chunk_t init_hash, u_int64_t *our_spi)
1052 {
1053 table_item_t *item;
1054 u_int row, segment;
1055 mutex_t *mutex;
1056 init_hash_t *init;
1057 u_int64_t spi;
1058
1059 row = chunk_hash(init_hash) & this->table_mask;
1060 segment = row & this->segment_mask;
1061 mutex = this->init_hashes_segments[segment].mutex;
1062 mutex->lock(mutex);
1063 item = this->init_hashes_table[row];
1064 while (item)
1065 {
1066 init_hash_t *current = item->value;
1067
1068 if (chunk_equals(init_hash, current->hash))
1069 {
1070 *our_spi = current->our_spi;
1071 mutex->unlock(mutex);
1072 return ALREADY_DONE;
1073 }
1074 item = item->next;
1075 }
1076
1077 spi = get_spi(this);
1078 if (!spi)
1079 {
1080 return FAILED;
1081 }
1082
1083 INIT(init,
1084 .hash = {
1085 .len = init_hash.len,
1086 .ptr = init_hash.ptr,
1087 },
1088 .our_spi = spi,
1089 );
1090 INIT(item,
1091 .value = init,
1092 .next = this->init_hashes_table[row],
1093 );
1094 this->init_hashes_table[row] = item;
1095 *our_spi = init->our_spi;
1096 mutex->unlock(mutex);
1097 return NOT_FOUND;
1098 }
1099
/**
 * Remove the hash of an initial IKE message from the cache.
 *
 * Frees the init_hash_t object but not the hash data itself (it was never
 * cloned; the owning entry releases it).
 */
static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	mutex_t *mutex;

	row = chunk_hash(init_hash) & this->table_mask;
	segment = row & this->segment_mask;
	mutex = this->init_hashes_segments[segment].mutex;
	mutex->lock(mutex);
	item = this->init_hashes_table[row];
	while (item)
	{
		init_hash_t *current = item->value;

		if (chunk_equals(init_hash, current->hash))
		{
			if (prev)
			{
				prev->next = item->next;
			}
			else
			{
				this->init_hashes_table[row] = item->next;
			}
			free(current);
			free(item);
			break;
		}
		prev = item;
		item = item->next;
	}
	mutex->unlock(mutex);
}
1137
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA");

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		/* block until no other thread has the SA checked out (may fail if
		 * the SA is being driven out) */
		if (wait_for_entry(this, entry, segment))
		{
			entry->checked_out = TRUE;
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	/* publish the checked out SA (or NULL) on the bus for logging context */
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1161
1162 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1163 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1164 {
1165 ike_sa_id_t *ike_sa_id;
1166 ike_sa_t *ike_sa;
1167 u_int8_t ike_version;
1168 u_int64_t spi;
1169
1170 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1171
1172 spi = get_spi(this);
1173 if (!spi)
1174 {
1175 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1176 return NULL;
1177 }
1178
1179 if (initiator)
1180 {
1181 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1182 }
1183 else
1184 {
1185 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1186 }
1187 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1188 ike_sa_id->destroy(ike_sa_id);
1189
1190 if (ike_sa)
1191 {
1192 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1193 ike_sa->get_unique_id(ike_sa));
1194 }
1195 return ike_sa;
1196 }
1197
1198 /**
1199 * Get the message ID or message hash to detect early retransmissions
1200 */
1201 static u_int32_t get_message_id_or_hash(message_t *message)
1202 {
1203 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION)
1204 {
1205 /* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1206 * Mode, where all three messages use the same message ID */
1207 if (message->get_message_id(message) == 0 ||
1208 message->get_exchange_type(message) == QUICK_MODE)
1209 {
1210 return chunk_hash(message->get_packet_data(message));
1211 }
1212 }
1213 return message->get_message_id(message);
1214 }
1215
/**
 * Check out the IKE_SA an inbound message belongs to.  For initial requests
 * (IKEv2 IKE_SA_INIT, IKEv1 Main/Aggressive Mode) a new IKE_SA is created,
 * unless the message is a retransmit of an init message already seen.
 * Returns NULL if the message should be ignored (already being processed,
 * unknown SPI, hashing/SPI-allocation failure, or IKE_SA limit reached).
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout IKE_SA by message");

	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		/* no responder SPI assigned yet: check whether this is an initial
		 * request that legitimately starts a new IKE_SA */
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		u_int64_t our_spi;
		chunk_t hash;

		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			return NULL;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						/* entry takes ownership of id and hash */
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;
						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						segment = put_entry(this, entry);
						entry->checked_out = TRUE;
						unlock_single_segment(this, segment);

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));

						charon->bus->set_sa(charon->bus, ike_sa);
						return ike_sa;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* SA creation failed/rejected, drop the hash again so a
				 * retransmit may be handled as a fresh attempt */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				return NULL;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				return NULL;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = TRUE;
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{	/* take over the responder SPI from the message for an SA
				 * that does not have one assigned yet */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1374
/**
 * Check out an existing usable IKE_SA whose peer and IKE configs equal the
 * given peer config (for IKE_SA reuse), or hand out a newly created one if
 * reuse is disabled or no matching SA exists.
 */
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa)
	{	/* IKE_SA reuse disabled by config */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		return ike_sa;
	}

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (!wait_for_entry(this, entry, segment))
		{
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
		{	/* skip IKE_SAs which are not usable, wake other waiting threads */
			entry->condvar->signal(entry->condvar);
			continue;
		}

		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				entry->checked_out = TRUE;
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
		/* other threads might be waiting for this entry */
		entry->condvar->signal(entry->condvar);
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1433
1434 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1435 private_ike_sa_manager_t *this, u_int32_t id)
1436 {
1437 enumerator_t *enumerator;
1438 entry_t *entry;
1439 ike_sa_t *ike_sa = NULL;
1440 u_int segment;
1441
1442 DBG2(DBG_MGR, "checkout IKE_SA by ID %u", id);
1443
1444 enumerator = create_table_enumerator(this);
1445 while (enumerator->enumerate(enumerator, &entry, &segment))
1446 {
1447 if (wait_for_entry(this, entry, segment))
1448 {
1449 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1450 {
1451 ike_sa = entry->ike_sa;
1452 entry->checked_out = TRUE;
1453 break;
1454 }
1455 /* other threads might be waiting for this entry */
1456 entry->condvar->signal(entry->condvar);
1457 }
1458 }
1459 enumerator->destroy(enumerator);
1460
1461 if (ike_sa)
1462 {
1463 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1464 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1465 }
1466 charon->bus->set_sa(charon->bus, ike_sa);
1467 return ike_sa;
1468 }
1469
/**
 * Check out an IKE_SA by connection name, or, if child is TRUE, by the
 * policy name of one of its CHILD_SAs.  Returns NULL if none matches.
 */
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
	private_ike_sa_manager_t *this, char *name, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a policy name ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (streq(child_sa->get_name(child_sa), name))
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a connection name */
			{
				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = TRUE;
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1522
1523 /**
1524 * enumerator filter function, waiting variant
1525 */
1526 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1527 entry_t **in, ike_sa_t **out, u_int *segment)
1528 {
1529 if (wait_for_entry(this, *in, *segment))
1530 {
1531 *out = (*in)->ike_sa;
1532 charon->bus->set_sa(charon->bus, *out);
1533 return TRUE;
1534 }
1535 return FALSE;
1536 }
1537
1538 /**
1539 * enumerator filter function, skipping variant
1540 */
1541 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1542 entry_t **in, ike_sa_t **out, u_int *segment)
1543 {
1544 if (!(*in)->driveout_new_threads &&
1545 !(*in)->driveout_waiting_threads &&
1546 !(*in)->checked_out)
1547 {
1548 *out = (*in)->ike_sa;
1549 charon->bus->set_sa(charon->bus, *out);
1550 return TRUE;
1551 }
1552 return FALSE;
1553 }
1554
/**
 * Reset threads SA after enumeration
 *
 * Used as cleanup callback of the filtered enumerator; data is unused.
 */
static void reset_sa(void *data)
{
	charon->bus->set_sa(charon->bus, NULL);
}
1562
/**
 * Enumerate all registered IKE_SAs; with wait = TRUE the enumeration blocks
 * until checked-out entries become available, otherwise they are skipped.
 */
METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
	private_ike_sa_manager_t* this, bool wait)
{
	return enumerator_create_filter(create_table_enumerator(this),
		wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
		this, reset_sa);
}
1570
/**
 * Check an IKE_SA back into the manager: update its stored ID, maintain the
 * half-open and connected-peers tables according to its current state, and
 * wake threads waiting for the entry.
 */
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = FALSE;
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		DBG2(DBG_MGR, "check-in of IKE_SA successful.");
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* SA is not (or no longer) in the table, (re-)add it */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		segment = put_entry(this, entry);
	}

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_uniqueness() will try to check out this SA */
				entry->checked_out = TRUE;
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = FALSE;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1690
/**
 * Check an IKE_SA in and destroy it: drive out all waiting threads, remove
 * the entry from all lookup tables and destroy it together with the SA.
 */
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
			entry->checked_out = FALSE;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up the auxiliary hash tables the entry may still be in */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1759
/**
 * Cleanup function for create_id_enumerator
 *
 * Destroys the list of cloned ike_sa_id_t objects handed to the enumerator.
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1767
1768 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
1769 private_ike_sa_manager_t *this, identification_t *me,
1770 identification_t *other, int family)
1771 {
1772 table_item_t *item;
1773 u_int row, segment;
1774 rwlock_t *lock;
1775 linked_list_t *ids = NULL;
1776
1777 row = chunk_hash_inc(other->get_encoding(other),
1778 chunk_hash(me->get_encoding(me))) & this->table_mask;
1779 segment = row & this->segment_mask;
1780
1781 lock = this->connected_peers_segments[segment].lock;
1782 lock->read_lock(lock);
1783 item = this->connected_peers_table[row];
1784 while (item)
1785 {
1786 connected_peers_t *current = item->value;
1787
1788 if (connected_peers_match(current, me, other, family))
1789 {
1790 ids = current->sas->clone_offset(current->sas,
1791 offsetof(ike_sa_id_t, clone));
1792 break;
1793 }
1794 item = item->next;
1795 }
1796 lock->unlock(lock);
1797
1798 if (!ids)
1799 {
1800 return enumerator_create_empty();
1801 }
1802 return enumerator_create_cleaner(ids->create_enumerator(ids),
1803 (void*)id_enumerator_cleanup, ids);
1804 }
1805
/**
 * Move all CHILD_SAs and virtual IPs from old to new
 */
static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
{
	enumerator_t *enumerator;
	child_sa_t *child_sa;
	host_t *vip;
	int chcount = 0, vipcount = 0;

	/* announce the migration target before moving the children */
	charon->bus->children_migrate(charon->bus, new->get_id(new),
								  new->get_unique_id(new));
	enumerator = old->create_child_sa_enumerator(old);
	while (enumerator->enumerate(enumerator, &child_sa))
	{
		old->remove_child_sa(old, enumerator);
		new->add_child_sa(new, child_sa);
		chcount++;
	}
	enumerator->destroy(enumerator);

	/* copy over the virtual IPs assigned to the peer */
	enumerator = old->create_virtual_ip_enumerator(old, FALSE);
	while (enumerator->enumerate(enumerator, &vip))
	{
		new->add_virtual_ip(new, FALSE, vip);
		vipcount++;
	}
	enumerator->destroy(enumerator);
	/* this does not release the addresses, which is good, but it does trigger
	 * an assign_vips(FALSE) event... */
	old->clear_virtual_ips(old, FALSE);
	/* ...trigger the analogous event on the new SA */
	charon->bus->set_sa(charon->bus, new);
	charon->bus->assign_vips(charon->bus, new, TRUE);
	charon->bus->children_migrate(charon->bus, NULL, 0);
	charon->bus->set_sa(charon->bus, old);

	if (chcount || vipcount)
	{
		DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
			 "children and %d virtual IPs", chcount, vipcount);
	}
}
1849
1850 /**
1851 * Delete an existing IKE_SA due to a unique replace policy
1852 */
1853 static status_t enforce_replace(private_ike_sa_manager_t *this,
1854 ike_sa_t *duplicate, ike_sa_t *new,
1855 identification_t *other, host_t *host)
1856 {
1857 charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);
1858
1859 if (host->equals(host, duplicate->get_other_host(duplicate)))
1860 {
1861 /* looks like a reauthentication attempt */
1862 if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
1863 new->get_version(new) == IKEV1)
1864 {
1865 /* IKEv1 implicitly takes over children, IKEv2 recreates them
1866 * explicitly. */
1867 adopt_children_and_vips(duplicate, new);
1868 }
1869 /* For IKEv1 we have to delay the delete for the old IKE_SA. Some
1870 * peers need to complete the new SA first, otherwise the quick modes
1871 * might get lost. For IKEv2 we do the same, as we want overlapping
1872 * CHILD_SAs to keep connectivity up. */
1873 lib->scheduler->schedule_job(lib->scheduler, (job_t*)
1874 delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
1875 return SUCCESS;
1876 }
1877 DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
1878 "uniqueness policy", other);
1879 return duplicate->delete(duplicate);
1880 }
1881
/**
 * Enforce the peer config's uniqueness policy against all established SAs
 * with the same identities.  Returns TRUE if the new ike_sa should be
 * cancelled (UNIQUE_KEEP with an existing SA from the same address).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
										duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
1969
1970 METHOD(ike_sa_manager_t, has_contact, bool,
1971 private_ike_sa_manager_t *this, identification_t *me,
1972 identification_t *other, int family)
1973 {
1974 table_item_t *item;
1975 u_int row, segment;
1976 rwlock_t *lock;
1977 bool found = FALSE;
1978
1979 row = chunk_hash_inc(other->get_encoding(other),
1980 chunk_hash(me->get_encoding(me))) & this->table_mask;
1981 segment = row & this->segment_mask;
1982 lock = this->connected_peers_segments[segment].lock;
1983 lock->read_lock(lock);
1984 item = this->connected_peers_table[row];
1985 while (item)
1986 {
1987 if (connected_peers_match(item->value, me, other, family))
1988 {
1989 found = TRUE;
1990 break;
1991 }
1992 item = item->next;
1993 }
1994 lock->unlock(lock);
1995
1996 return found;
1997 }
1998
1999 METHOD(ike_sa_manager_t, get_count, u_int,
2000 private_ike_sa_manager_t *this)
2001 {
2002 u_int segment, count = 0;
2003 mutex_t *mutex;
2004
2005 for (segment = 0; segment < this->segment_count; segment++)
2006 {
2007 mutex = this->segments[segment & this->segment_mask].mutex;
2008 mutex->lock(mutex);
2009 count += this->segments[segment].count;
2010 mutex->unlock(mutex);
2011 }
2012 return count;
2013 }
2014
2015 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
2016 private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
2017 {
2018 table_item_t *item;
2019 u_int row, segment;
2020 rwlock_t *lock;
2021 chunk_t addr;
2022 u_int count = 0;
2023
2024 if (ip)
2025 {
2026 addr = ip->get_address(ip);
2027 row = chunk_hash(addr) & this->table_mask;
2028 segment = row & this->segment_mask;
2029 lock = this->half_open_segments[segment].lock;
2030 lock->read_lock(lock);
2031 item = this->half_open_table[row];
2032 while (item)
2033 {
2034 half_open_t *half_open = item->value;
2035
2036 if (chunk_equals(addr, half_open->other))
2037 {
2038 count = responder_only ? half_open->count_responder
2039 : half_open->count;
2040 break;
2041 }
2042 item = item->next;
2043 }
2044 lock->unlock(lock);
2045 }
2046 else
2047 {
2048 count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
2049 : (u_int)ref_cur(&this->half_open_count);
2050 }
2051 return count;
2052 }
2053
/**
 * Install a callback (and user data) used to allocate SPIs; protected by
 * spi_lock so it doesn't race with concurrent SPI allocation.
 */
METHOD(ike_sa_manager_t, set_spi_cb, void,
	private_ike_sa_manager_t *this, spi_cb_t callback, void *data)
{
	this->spi_lock->write_lock(this->spi_lock);
	this->spi_cb.cb = callback;
	this->spi_cb.data = data;
	this->spi_lock->unlock(this->spi_lock);
}
2062
/**
 * Shut the manager down in four phases: mark all entries to drive threads
 * out, wait for them to leave, initiate deletion of all IKE_SAs, then
 * destroy all entries and disable the SPI generator.
 */
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* disable SPI generation so no new SAs can be created */
	this->spi_lock->write_lock(this->spi_lock);
	this->rng->destroy(this->rng);
	this->rng = NULL;
	this->spi_cb.cb = NULL;
	this->spi_cb.data = NULL;
	this->spi_lock->unlock(this->spi_lock);
}
2152
2153 METHOD(ike_sa_manager_t, destroy, void,
2154 private_ike_sa_manager_t *this)
2155 {
2156 u_int i;
2157
2158 /* these are already cleared in flush() above */
2159 free(this->ike_sa_table);
2160 free(this->half_open_table);
2161 free(this->connected_peers_table);
2162 free(this->init_hashes_table);
2163 for (i = 0; i < this->segment_count; i++)
2164 {
2165 this->segments[i].mutex->destroy(this->segments[i].mutex);
2166 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
2167 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
2168 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
2169 }
2170 free(this->segments);
2171 free(this->half_open_segments);
2172 free(this->connected_peers_segments);
2173 free(this->init_hashes_segments);
2174
2175 this->spi_lock->destroy(this->spi_lock);
2176 free(this);
2177 }
2178
2179 /**
2180 * This function returns the next-highest power of two for the given number.
2181 * The algorithm works by setting all bits on the right-hand side of the most
2182 * significant 1 to 1 and then increments the whole number so it rolls over
2183 * to the nearest power of two. Note: returns 0 for n == 0
2184 */
2185 static u_int get_nearest_powerof2(u_int n)
2186 {
2187 u_int i;
2188
2189 --n;
2190 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2191 {
2192 n |= n >> i;
2193 }
2194 return ++n;
2195 }
2196
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.set_spi_cb = _set_spi_cb,
			.destroy = _destroy,
		},
	);

	/* the RNG is used to generate SPIs, a weak one is sufficient */
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}
	this->spi_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	/* 0 disables the limit on concurrent IKE_SAs */
	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* table size and segment count are rounded to powers of two so masking
	 * can be used instead of modulo */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_size",
											DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
										lib->settings, "%s.ikesa_table_segments",
										DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->segments[i].count = 0;
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->half_open_segments[i].count = 0;
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->connected_peers_segments[i].count = 0;
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->init_hashes_segments[i].count = 0;
	}

	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}