4625df5b84bb0ab140b41d73df69c00857fd9d41
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2015 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31 #include <processing/jobs/delete_ike_sa_job.h>
32
33 /* the default size of the hash table (MUST be a power of 2) */
34 #define DEFAULT_HASHTABLE_SIZE 1
35
36 /* the maximum size of the hash table (MUST be a power of 2) */
37 #define MAX_HASHTABLE_SIZE (1 << 30)
38
39 /* the default number of segments (MUST be a power of 2) */
40 #define DEFAULT_SEGMENT_COUNT 1
41
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until the ike_sa_t object is free for
	 * use again (used together with the segment mutex, see wait_for_entry()).
	 */
	condvar_t *condvar;

	/**
	 * Is this ike_sa currently checked out by a thread?
	 */
	bool checked_out;

	/**
	 * Does this SA drive out new threads? (no new thread may acquire it)
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads?
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 * (the sentinel is stored as (u_int32_t)-1 since the type is unsigned)
	 */
	u_int32_t processing;
};
116
117 /**
118 * Implementation of entry_t.destroy.
119 */
120 static status_t entry_destroy(entry_t *this)
121 {
122 /* also destroy IKE SA */
123 this->ike_sa->destroy(this->ike_sa);
124 this->ike_sa_id->destroy(this->ike_sa_id);
125 chunk_free(&this->init_hash);
126 DESTROY_IF(this->other);
127 DESTROY_IF(this->my_id);
128 DESTROY_IF(this->other_id);
129 this->condvar->destroy(this->condvar);
130 free(this);
131 return SUCCESS;
132 }
133
134 /**
135 * Creates a new entry for the ike_sa_t list.
136 */
137 static entry_t *entry_create()
138 {
139 entry_t *this;
140
141 INIT(this,
142 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
143 .processing = -1,
144 );
145
146 return this;
147 }
148
149 /**
150 * Function that matches entry_t objects by ike_sa_id_t.
151 */
152 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
153 {
154 if (id->equals(id, entry->ike_sa_id))
155 {
156 return TRUE;
157 }
158 if ((id->get_responder_spi(id) == 0 ||
159 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
160 (id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
161 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
162 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
163 {
164 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
165 return TRUE;
166 }
167 return FALSE;
168 }
169
170 /**
171 * Function that matches entry_t objects by ike_sa_t pointers.
172 */
173 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
174 {
175 return entry->ike_sa == ike_sa;
176 }
177
178 /**
179 * Hash function for ike_sa_id_t objects.
180 */
181 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
182 {
183 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
184 * locally unique, so we use our randomly allocated SPI whether we are
185 * initiator or responder to ensure a good distribution. The latter is not
186 * possible for IKEv1 as we don't know whether we are original initiator or
187 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
188 * SPIs (Cookies) to be allocated near random (we allocate them randomly
189 * anyway) it seems safe to always use the initiator SPI. */
190 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
191 ike_sa_id->is_initiator(ike_sa_id))
192 {
193 return ike_sa_id->get_initiator_spi(ike_sa_id);
194 }
195 return ike_sa_id->get_responder_spi(ike_sa_id);
196 }
197
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer, keyed by the remote address.
 */
struct half_open_t {
	/** chunk of remote host address */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};
213
214 /**
215 * Destroys a half_open_t object.
216 */
217 static void half_open_destroy(half_open_t *this)
218 {
219 chunk_free(&this->other);
220 free(this);
221 }
222
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct to track the IKE_SAs established between a pair of identities,
 * required for duplicate checking.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
238
239 static void connected_peers_destroy(connected_peers_t *this)
240 {
241 this->my_id->destroy(this->my_id);
242 this->other_id->destroy(this->other_id);
243 this->sas->destroy(this->sas);
244 free(this);
245 }
246
247 /**
248 * Function that matches connected_peers_t objects by the given ids.
249 */
250 static inline bool connected_peers_match(connected_peers_t *connected_peers,
251 identification_t *my_id, identification_t *other_id,
252 int family)
253 {
254 return my_id->equals(my_id, connected_peers->my_id) &&
255 other_id->equals(other_id, connected_peers->other_id) &&
256 (!family || family == connected_peers->family);
257 }
258
typedef struct init_hash_t init_hash_t;

/**
 * Struct to remember the hash of an already handled initial IKE message,
 * used to detect retransmissions (see check_and_put_init_hash()).
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};
268
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table (a segment covers every
 * segment_count-th table row, see lock_single_segment()).
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment */
	u_int count;
};
281
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
295
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list, NULL at the end of a bucket */
	table_item_t *next;
};
309
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table.
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows.
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table, each protected by its own mutex.
	 */
	segment_t *segments;

	/**
	 * The number of segments.
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment.
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects, keyed by remote host address.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs.
	 */
	refcount_t half_open_count;

	/**
	 * Total number of half-open IKE_SAs as responder.
	 */
	refcount_t half_open_count_responder;

	/**
	 * Hash table with connected_peers_t objects, keyed by the two identities.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects, for retransmission detection.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * Registered callback for IKE SPIs
	 */
	struct {
		spi_cb_t cb;
		void *data;
	} spi_cb;

	/**
	 * Lock to access the RNG instance and the callback
	 */
	rwlock_t *spi_lock;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, if any (0 means no limit)
	 */
	u_int ikesa_limit;
};
419
420 /**
421 * Acquire a lock to access the segment of the table row with the given index.
422 * It also works with the segment index directly.
423 */
424 static inline void lock_single_segment(private_ike_sa_manager_t *this,
425 u_int index)
426 {
427 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
428 lock->lock(lock);
429 }
430
431 /**
432 * Release the lock required to access the segment of the table row with the given index.
433 * It also works with the segment index directly.
434 */
435 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
436 u_int index)
437 {
438 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
439 lock->unlock(lock);
440 }
441
442 /**
443 * Lock all segments
444 */
445 static void lock_all_segments(private_ike_sa_manager_t *this)
446 {
447 u_int i;
448
449 for (i = 0; i < this->segment_count; i++)
450 {
451 this->segments[i].mutex->lock(this->segments[i].mutex);
452 }
453 }
454
455 /**
456 * Unlock all segments
457 */
458 static void unlock_all_segments(private_ike_sa_manager_t *this)
459 {
460 u_int i;
461
462 for (i = 0; i < this->segment_count; i++)
463 {
464 this->segments[i].mutex->unlock(this->segments[i].mutex);
465 }
466 }
467
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index (the segment stays locked while an entry of it
	 * is being enumerated)
	 */
	u_int segment;

	/**
	 * currently enumerating entry (its condvar is signaled when moving on)
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item
	 */
	table_item_t *current;

	/**
	 * previous table item (needed by remove_entry_at() to unlink current)
	 */
	table_item_t *prev;
};
510
/**
 * Enumerate entries segment by segment.  The segment of the returned entry
 * stays locked until the next enumerate() call or destroy().
 */
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	/* signal the previously enumerated entry, others may be waiting on it */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{	/* advance within the current (already locked) row */
				this->current = this->current->next;
			}
			else
			{	/* entering a new row: lock its segment first */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				/* NOTE: segment lock is kept, released on next call/destroy */
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			/* rows of a segment are interleaved with stride segment_count */
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
547
/**
 * Clean up the enumerator: wake any waiters on the last entry and release
 * the segment lock if enumeration stopped mid-segment.
 */
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	if (this->entry)
	{
		/* others may be waiting on the entry we enumerated last */
		this->entry->condvar->signal(this->entry->condvar);
	}
	if (this->current)
	{
		/* enumerate() left this segment locked */
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
561
562 /**
563 * Creates an enumerator to enumerate the entries in the hash table.
564 */
565 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
566 {
567 private_enumerator_t *enumerator;
568
569 INIT(enumerator,
570 .enumerator = {
571 .enumerate = (void*)_enumerate,
572 .destroy = _enumerator_destroy,
573 },
574 .manager = this,
575 );
576 return &enumerator->enumerator;
577 }
578
579 /**
580 * Put an entry into the hash table.
581 * Note: The caller has to unlock the returned segment.
582 */
583 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
584 {
585 table_item_t *current, *item;
586 u_int row, segment;
587
588 INIT(item,
589 .value = entry,
590 );
591
592 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
593 segment = row & this->segment_mask;
594
595 lock_single_segment(this, segment);
596 current = this->ike_sa_table[row];
597 if (current)
598 { /* insert at the front of current bucket */
599 item->next = current;
600 }
601 this->ike_sa_table[row] = item;
602 this->segments[segment].count++;
603 return segment;
604 }
605
606 /**
607 * Remove an entry from the hash table.
608 * Note: The caller MUST have a lock on the segment of this entry.
609 */
610 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
611 {
612 table_item_t *item, *prev = NULL;
613 u_int row, segment;
614
615 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
616 segment = row & this->segment_mask;
617 item = this->ike_sa_table[row];
618 while (item)
619 {
620 if (item->value == entry)
621 {
622 if (prev)
623 {
624 prev->next = item->next;
625 }
626 else
627 {
628 this->ike_sa_table[row] = item->next;
629 }
630 this->segments[segment].count--;
631 free(item);
632 break;
633 }
634 prev = item;
635 item = item->next;
636 }
637 }
638
/**
 * Remove the entry at the current enumerator position.
 *
 * If the removed item was the head of its row, the segment lock is released
 * here; enumerate() re-locks it when it starts on this row again.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		/* step back so the next enumerate() advances to current->next */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
664
665 /**
666 * Find an entry using the provided match function to compare the entries for
667 * equality.
668 */
669 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
670 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
671 linked_list_match_t match, void *param)
672 {
673 table_item_t *item;
674 u_int row, seg;
675
676 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
677 seg = row & this->segment_mask;
678
679 lock_single_segment(this, seg);
680 item = this->ike_sa_table[row];
681 while (item)
682 {
683 if (match(item->value, param))
684 {
685 *entry = item->value;
686 *segment = seg;
687 /* the locked segment has to be unlocked by the caller */
688 return SUCCESS;
689 }
690 item = item->next;
691 }
692 unlock_single_segment(this, seg);
693 return NOT_FOUND;
694 }
695
696 /**
697 * Find an entry by ike_sa_id_t.
698 * Note: On SUCCESS, the caller has to unlock the segment.
699 */
700 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
701 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
702 {
703 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
704 (linked_list_match_t)entry_match_by_id, ike_sa_id);
705 }
706
707 /**
708 * Find an entry by IKE_SA pointer.
709 * Note: On SUCCESS, the caller has to unlock the segment.
710 */
711 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
712 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
713 {
714 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
715 (linked_list_match_t)entry_match_by_sa, ike_sa);
716 }
717
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 *
 * Must be called with the entry's segment mutex held; the condvar wait
 * below atomically releases and reacquires that mutex.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
747
/**
 * Put a half-open SA into the hash table, keyed by the remote address.
 * Updates the per-peer, per-segment and global half-open counters.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	/* look for an existing counter object for this peer address */
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first half-open SA with this peer, create a new counter object */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	if (!ike_id->is_initiator(ike_id))
	{	/* SAs we responded to are counted separately */
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
799
/**
 * Remove a half-open SA from the hash table, decrementing all counters
 * updated by put_half_open().
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{	/* that was the last half-open SA with this peer, unlink
				 * and destroy the counter object */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
851
/**
 * Put an SA between two peers into the hash table, keyed by the hash of
 * both identities.  Does nothing if the SA's ID is already registered.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
									(linked_list_match_t)entry->ike_sa_id->equals,
									NULL, entry->ike_sa_id) == SUCCESS)
			{	/* this SA is already registered between these peers */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first SA between these two identities, create a new object */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
910
/**
 * Remove an SA between two peers from the hash table, destroying the
 * connected_peers_t object once its last SA is gone.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* find and remove this entry's SA ID from the peer's list */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no more SAs between these identities, unlink and free */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
974
975 /**
976 * Get a random SPI for new IKE_SAs
977 */
978 static u_int64_t get_spi(private_ike_sa_manager_t *this)
979 {
980 u_int64_t spi;
981
982 this->spi_lock->read_lock(this->spi_lock);
983 if (this->spi_cb.cb)
984 {
985 spi = this->spi_cb.cb(this->spi_cb.data);
986 }
987 else if (!this->rng ||
988 !this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
989 {
990 spi = 0;
991 }
992 this->spi_lock->unlock(this->spi_lock);
993 return spi;
994 }
995
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * NOTE(review): intermediate allocate_hash() calls pass NULL, apparently
 * just feeding data into the hasher state; the final call produces *hash.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		u_int16_t port;
		u_int64_t spi;

		src = message->get_source(message);
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1036
1037 /**
1038 * Check if we already have created an IKE_SA based on the initial IKE message
1039 * with the given hash.
1040 * If not the hash is stored, the hash data is not(!) cloned.
1041 *
1042 * Also, the local SPI is returned. In case of a retransmit this is already
1043 * stored together with the hash, otherwise it is newly allocated and should
1044 * be used to create the IKE_SA.
1045 *
1046 * @returns ALREADY_DONE if the message with the given hash has been seen before
1047 * NOT_FOUND if the message hash was not found
1048 * FAILED if the SPI allocation failed
1049 */
1050 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1051 chunk_t init_hash, u_int64_t *our_spi)
1052 {
1053 table_item_t *item;
1054 u_int row, segment;
1055 mutex_t *mutex;
1056 init_hash_t *init;
1057 u_int64_t spi;
1058
1059 row = chunk_hash(init_hash) & this->table_mask;
1060 segment = row & this->segment_mask;
1061 mutex = this->init_hashes_segments[segment].mutex;
1062 mutex->lock(mutex);
1063 item = this->init_hashes_table[row];
1064 while (item)
1065 {
1066 init_hash_t *current = item->value;
1067
1068 if (chunk_equals(init_hash, current->hash))
1069 {
1070 *our_spi = current->our_spi;
1071 mutex->unlock(mutex);
1072 return ALREADY_DONE;
1073 }
1074 item = item->next;
1075 }
1076
1077 spi = get_spi(this);
1078 if (!spi)
1079 {
1080 return FAILED;
1081 }
1082
1083 INIT(init,
1084 .hash = {
1085 .len = init_hash.len,
1086 .ptr = init_hash.ptr,
1087 },
1088 .our_spi = spi,
1089 );
1090 INIT(item,
1091 .value = init,
1092 .next = this->init_hashes_table[row],
1093 );
1094 this->init_hashes_table[row] = item;
1095 *our_spi = init->our_spi;
1096 mutex->unlock(mutex);
1097 return NOT_FOUND;
1098 }
1099
1100 /**
1101 * Remove the hash of an initial IKE message from the cache.
1102 */
1103 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1104 {
1105 table_item_t *item, *prev = NULL;
1106 u_int row, segment;
1107 mutex_t *mutex;
1108
1109 row = chunk_hash(init_hash) & this->table_mask;
1110 segment = row & this->segment_mask;
1111 mutex = this->init_hashes_segments[segment].mutex;
1112 mutex->lock(mutex);
1113 item = this->init_hashes_table[row];
1114 while (item)
1115 {
1116 init_hash_t *current = item->value;
1117
1118 if (chunk_equals(init_hash, current->hash))
1119 {
1120 if (prev)
1121 {
1122 prev->next = item->next;
1123 }
1124 else
1125 {
1126 this->init_hashes_table[row] = item->next;
1127 }
1128 free(current);
1129 free(item);
1130 break;
1131 }
1132 prev = item;
1133 item = item->next;
1134 }
1135 mutex->unlock(mutex);
1136 }
1137
/**
 * Check out an IKE_SA by its ID, blocking until no other thread uses it.
 * Returns NULL if the SA is not found or is being driven out.
 */
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA");

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		/* wait until no other thread has the SA checked out */
		if (wait_for_entry(this, entry, segment))
		{
			entry->checked_out = TRUE;
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		/* get_entry_by_id() returned with the segment locked */
		unlock_single_segment(this, segment);
	}
	/* publish the checked out SA on the bus (NULL if checkout failed) */
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1161
1162 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1163 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1164 {
1165 ike_sa_id_t *ike_sa_id;
1166 ike_sa_t *ike_sa;
1167 u_int8_t ike_version;
1168 u_int64_t spi;
1169
1170 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1171
1172 spi = get_spi(this);
1173 if (!spi)
1174 {
1175 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1176 return NULL;
1177 }
1178
1179 if (initiator)
1180 {
1181 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1182 }
1183 else
1184 {
1185 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1186 }
1187 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1188 ike_sa_id->destroy(ike_sa_id);
1189
1190 if (ike_sa)
1191 {
1192 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1193 ike_sa->get_unique_id(ike_sa));
1194 }
1195 return ike_sa;
1196 }
1197
1198 /**
1199 * Get the message ID or message hash to detect early retransmissions
1200 */
1201 static u_int32_t get_message_id_or_hash(message_t *message)
1202 {
1203 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION)
1204 {
1205 /* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1206 * Mode, where all three messages use the same message ID */
1207 if (message->get_message_id(message) == 0 ||
1208 message->get_exchange_type(message) == QUICK_MODE)
1209 {
1210 return chunk_hash(message->get_packet_data(message));
1211 }
1212 }
1213 return message->get_message_id(message);
1214 }
1215
/*
 * Check out the IKE_SA a message belongs to.  For initial requests
 * (IKE_SA_INIT, ID_PROT, AGGRESSIVE) a new IKE_SA is created, unless the
 * message is a replay of an init message already seen.  Returns NULL if no
 * SA can be found/created or the message is currently being processed by
 * another thread.
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout IKE_SA by message");

	/* detect initial messages: no responder SPI and MID 0 */
	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		u_int64_t our_spi;
		chunk_t hash;

		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			return NULL;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						entry = entry_create();
						entry->ike_sa = ike_sa;
						/* ownership of the cloned id moves to the entry */
						entry->ike_sa_id = id;

						segment = put_entry(this, entry);
						entry->checked_out = TRUE;
						unlock_single_segment(this, segment);

						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));

						charon->bus->set_sa(charon->bus, ike_sa);
						return ike_sa;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit: forget the init hash again */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				return NULL;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				return NULL;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = TRUE;
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{	/* adopt the responder SPI from a retransmitted init reply */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1375
/*
 * Check out an existing IKE_SA matching the given peer config, or create a
 * new one if none is usable (or reuse is disabled).
 */
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa)
	{	/* IKE_SA reuse disabled by config */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		return ike_sa;
	}

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (!wait_for_entry(this, entry, segment))
		{
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
		{	/* skip IKE_SAs which are not usable, wake other waiting threads */
			entry->condvar->signal(entry->condvar);
			continue;
		}

		/* both peer and IKE config must match for reuse */
		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				entry->checked_out = TRUE;
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
		/* other threads might be waiting for this entry */
		entry->condvar->signal(entry->condvar);
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1434
/*
 * Check out the IKE_SA with the given unique ID, NULL if not found.
 */
METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
	private_ike_sa_manager_t *this, u_int32_t id)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by ID %u", id);

	/* unique IDs are not hashed, so scan the whole table */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
			{
				ike_sa = entry->ike_sa;
				entry->checked_out = TRUE;
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	if (ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
			 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1470
/*
 * Check out an IKE_SA by connection name, or - if child is TRUE - by the
 * policy name of one of its CHILD_SAs.  NULL if no match is found.
 */
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
	private_ike_sa_manager_t *this, char *name, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a policy name ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (streq(child_sa->get_name(child_sa), name))
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a connection name */
			{
				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = TRUE;
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
			/* other threads might be waiting for this entry */
			entry->condvar->signal(entry->condvar);
		}
	}
	enumerator->destroy(enumerator);

	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1523
1524 /**
1525 * enumerator filter function, waiting variant
1526 */
1527 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1528 entry_t **in, ike_sa_t **out, u_int *segment)
1529 {
1530 if (wait_for_entry(this, *in, *segment))
1531 {
1532 *out = (*in)->ike_sa;
1533 charon->bus->set_sa(charon->bus, *out);
1534 return TRUE;
1535 }
1536 return FALSE;
1537 }
1538
1539 /**
1540 * enumerator filter function, skipping variant
1541 */
1542 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1543 entry_t **in, ike_sa_t **out, u_int *segment)
1544 {
1545 if (!(*in)->driveout_new_threads &&
1546 !(*in)->driveout_waiting_threads &&
1547 !(*in)->checked_out)
1548 {
1549 *out = (*in)->ike_sa;
1550 charon->bus->set_sa(charon->bus, *out);
1551 return TRUE;
1552 }
1553 return FALSE;
1554 }
1555
/**
 * Reset threads SA after enumeration (cleanup callback for the filtered
 * enumerator; the data argument is unused).
 */
static void reset_sa(void *data)
{
	charon->bus->set_sa(charon->bus, NULL);
}
1563
/*
 * Enumerate all IKE_SAs, either waiting for busy entries (wait = TRUE) or
 * skipping them.  reset_sa() clears the bus SA when enumeration ends.
 */
METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
	private_ike_sa_manager_t* this, bool wait)
{
	return enumerator_create_filter(create_table_enumerator(this),
									wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
									this, reset_sa);
}
1571
/*
 * Check an IKE_SA back into the manager, updating its hash table entry,
 * half-open accounting and the connected-peers table used for uniqueness
 * checks.  An SA not yet registered (e.g. handed out by checkout_new) gets
 * a new entry here.
 */
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = FALSE;
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		DBG2(DBG_MGR, "check-in of IKE_SA successful.");
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* not found: register a fresh entry for this SA */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		segment = put_entry(this, entry);
	}

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_uniqueness() will try to check out this SA */
				entry->checked_out = TRUE;
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = FALSE;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1691
/*
 * Check an IKE_SA in and destroy it, removing it from all lookup tables.
 * Waiting threads are driven out first so nobody references the entry when
 * it is destroyed.
 */
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
			entry->checked_out = FALSE;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up any auxiliary table state the entry still holds */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1760
/**
 * Cleanup function for create_id_enumerator: destroys the cloned list of
 * IKE_SA IDs including its elements.
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1768
/*
 * Enumerate the IDs of all IKE_SAs between the given identities.  The IDs
 * are cloned under the read lock so the enumerator can be used without
 * holding any manager lock.
 */
METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
	private_ike_sa_manager_t *this, identification_t *me,
	identification_t *other, int family)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	linked_list_t *ids = NULL;

	/* hash both identities to find the connected_peers bucket */
	row = chunk_hash_inc(other->get_encoding(other),
						 chunk_hash(me->get_encoding(me))) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->read_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, me, other, family))
		{
			ids = current->sas->clone_offset(current->sas,
											 offsetof(ike_sa_id_t, clone));
			break;
		}
		item = item->next;
	}
	lock->unlock(lock);

	if (!ids)
	{
		return enumerator_create_empty();
	}
	return enumerator_create_cleaner(ids->create_enumerator(ids),
									 (void*)id_enumerator_cleanup, ids);
}
1806
/**
 * Move all CHILD_SAs and virtual IPs from old to new, raising the
 * appropriate migrate/assign_vips bus events in the process.
 */
static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
{
	enumerator_t *enumerator;
	child_sa_t *child_sa;
	host_t *vip;
	int chcount = 0, vipcount = 0;

	charon->bus->children_migrate(charon->bus, new->get_id(new),
								  new->get_unique_id(new));
	enumerator = old->create_child_sa_enumerator(old);
	while (enumerator->enumerate(enumerator, &child_sa))
	{
		old->remove_child_sa(old, enumerator);
		new->add_child_sa(new, child_sa);
		chcount++;
	}
	enumerator->destroy(enumerator);

	enumerator = old->create_virtual_ip_enumerator(old, FALSE);
	while (enumerator->enumerate(enumerator, &vip))
	{
		new->add_virtual_ip(new, FALSE, vip);
		vipcount++;
	}
	enumerator->destroy(enumerator);
	/* this does not release the addresses, which is good, but it does trigger
	 * an assign_vips(FALSE) event... */
	old->clear_virtual_ips(old, FALSE);
	/* ...trigger the analogous event on the new SA */
	charon->bus->set_sa(charon->bus, new);
	charon->bus->assign_vips(charon->bus, new, TRUE);
	charon->bus->children_migrate(charon->bus, NULL, 0);
	/* restore the old SA as the thread's current SA */
	charon->bus->set_sa(charon->bus, old);

	if (chcount || vipcount)
	{
		DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
			 "children and %d virtual IPs", chcount, vipcount);
	}
}
1850
/**
 * Delete an existing IKE_SA due to a unique replace policy.  If the
 * duplicate comes from the same host (reauthentication), its deletion is
 * delayed and, for IKEv1, children/VIPs are adopted first.
 */
static status_t enforce_replace(private_ike_sa_manager_t *this,
								ike_sa_t *duplicate, ike_sa_t *new,
								identification_t *other, host_t *host)
{
	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);

	if (host->equals(host, duplicate->get_other_host(duplicate)))
	{
		/* looks like a reauthentication attempt */
		if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
			new->get_version(new) == IKEV1)
		{
			/* IKEv1 implicitly takes over children, IKEv2 recreates them
			 * explicitly. */
			adopt_children_and_vips(duplicate, new);
		}
		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
		 * peers need to complete the new SA first, otherwise the quick modes
		 * might get lost. For IKEv2 we do the same, as we want overlapping
		 * CHILD_SAs to keep connectivity up. */
		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
		return SUCCESS;
	}
	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
		 "uniqueness policy", other);
	return duplicate->delete(duplicate);
}
1882
/*
 * Enforce the peer config's uniqueness policy against all duplicate
 * IKE_SAs between the same identities.  Returns TRUE if the new SA should
 * be cancelled (UNIQUE_KEEP with an existing SA to a different host).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{
			/* INITIAL_CONTACT: tear down the duplicate unconditionally */
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
										duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
1970
1971 METHOD(ike_sa_manager_t, has_contact, bool,
1972 private_ike_sa_manager_t *this, identification_t *me,
1973 identification_t *other, int family)
1974 {
1975 table_item_t *item;
1976 u_int row, segment;
1977 rwlock_t *lock;
1978 bool found = FALSE;
1979
1980 row = chunk_hash_inc(other->get_encoding(other),
1981 chunk_hash(me->get_encoding(me))) & this->table_mask;
1982 segment = row & this->segment_mask;
1983 lock = this->connected_peers_segments[segment].lock;
1984 lock->read_lock(lock);
1985 item = this->connected_peers_table[row];
1986 while (item)
1987 {
1988 if (connected_peers_match(item->value, me, other, family))
1989 {
1990 found = TRUE;
1991 break;
1992 }
1993 item = item->next;
1994 }
1995 lock->unlock(lock);
1996
1997 return found;
1998 }
1999
2000 METHOD(ike_sa_manager_t, get_count, u_int,
2001 private_ike_sa_manager_t *this)
2002 {
2003 u_int segment, count = 0;
2004 mutex_t *mutex;
2005
2006 for (segment = 0; segment < this->segment_count; segment++)
2007 {
2008 mutex = this->segments[segment & this->segment_mask].mutex;
2009 mutex->lock(mutex);
2010 count += this->segments[segment].count;
2011 mutex->unlock(mutex);
2012 }
2013 return count;
2014 }
2015
/*
 * Return the number of half-open IKE_SAs, either for a specific peer
 * address or globally.  With responder_only only SAs where we are the
 * responder are counted.
 */
METHOD(ike_sa_manager_t, get_half_open_count, u_int,
	private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t addr;
	u_int count = 0;

	if (ip)
	{
		/* per-address count, tracked in the half_open hash table */
		addr = ip->get_address(ip);
		row = chunk_hash(addr) & this->table_mask;
		segment = row & this->segment_mask;
		lock = this->half_open_segments[segment].lock;
		lock->read_lock(lock);
		item = this->half_open_table[row];
		while (item)
		{
			half_open_t *half_open = item->value;

			if (chunk_equals(addr, half_open->other))
			{
				count = responder_only ? half_open->count_responder
									   : half_open->count;
				break;
			}
			item = item->next;
		}
		lock->unlock(lock);
	}
	else
	{
		/* global count, tracked in atomic refcounters */
		count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
							   : (u_int)ref_cur(&this->half_open_count);
	}
	return count;
}
2054
/*
 * Install a custom SPI generation callback, protected by the SPI rwlock so
 * concurrent SPI allocations see a consistent cb/data pair.
 */
METHOD(ike_sa_manager_t, set_spi_cb, void,
	private_ike_sa_manager_t *this, spi_cb_t callback, void *data)
{
	this->spi_lock->write_lock(this->spi_lock);
	this->spi_cb.cb = callback;
	this->spi_cb.data = data;
	this->spi_lock->unlock(this->spi_lock);
}
2063
/*
 * Delete and destroy all managed IKE_SAs in four ordered phases: set
 * driveout flags, wait for all threads to leave, initiate deletion, then
 * destroy the entries.  Also disables further SPI generation.
 */
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* disable SPI generation; the RNG is gone after this point */
	this->spi_lock->write_lock(this->spi_lock);
	this->rng->destroy(this->rng);
	this->rng = NULL;
	this->spi_cb.cb = NULL;
	this->spi_cb.data = NULL;
	this->spi_lock->unlock(this->spi_lock);
}
2153
/*
 * Destroy the manager.  Assumes flush() has been called before, so the
 * tables only need to be freed, not emptied.
 */
METHOD(ike_sa_manager_t, destroy, void,
	private_ike_sa_manager_t *this)
{
	u_int i;

	/* these are already cleared in flush() above */
	free(this->ike_sa_table);
	free(this->half_open_table);
	free(this->connected_peers_table);
	free(this->init_hashes_table);
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex->destroy(this->segments[i].mutex);
		this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
		this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
		this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
	}
	free(this->segments);
	free(this->half_open_segments);
	free(this->connected_peers_segments);
	free(this->init_hashes_segments);

	this->spi_lock->destroy(this->spi_lock);
	free(this);
}
2179
/**
 * Round n up to the next-highest power of two (n itself if it already is
 * one).  Works by smearing the most significant set bit of n-1 into all
 * lower bit positions and incrementing the result.
 * Note: returns 0 for n == 0, and wraps to 0 for n > the largest
 * representable power of two.
 */
static u_int get_nearest_powerof2(u_int n)
{
	u_int smeared = n - 1;
	u_int shift = 1;

	while (shift < sizeof(u_int) * 8)
	{
		smeared |= smeared >> shift;
		shift <<= 1;
	}
	return smeared + 1;
}
2197
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.set_spi_cb = _set_spi_cb,
			.destroy = _destroy,
		},
	);

	/* RNG_WEAK is sufficient, SPIs only need to be unique, not secret */
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}
	this->spi_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* table size and segment count are rounded up to powers of two so the
	 * masks below can replace modulo operations */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_size",
											DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_segments",
											DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->segments[i].count = 0;
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->half_open_segments[i].count = 0;
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->connected_peers_segments[i].count = 0;
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->init_hashes_segments[i].count = 0;
	}

	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}