Store IKEv2 IKE_SAs by local SPI in the IKE_SA manager hash table.
[strongswan.git] src/libcharon/sa/ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2012 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <utils/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31
32 /* the default size of the hash table (MUST be a power of 2) */
33 #define DEFAULT_HASHTABLE_SIZE 1
34
35 /* the maximum size of the hash table (MUST be a power of 2) */
36 #define MAX_HASHTABLE_SIZE (1 << 30)
37
38 /* the default number of segments (MUST be a power of 2) */
39 #define DEFAULT_SEGMENT_COUNT 1
40
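/* A hash value is mapped to a table row with (hash & table_mask) and a row to
 * its segment with (row & segment_mask), which is why both sizes above MUST
 * be powers of two. */
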
41 typedef struct entry_t entry_t;
42
43 /**
44 * An entry in the linked list, contains IKE_SA, locking and lookup data.
45 */
46 struct entry_t {
47
48 /**
49 * Number of threads waiting for this ike_sa_t object.
50 */
51 int waiting_threads;
52
53 /**
54 * Condvar where threads can wait until ike_sa_t object is free for use again.
55 */
56 condvar_t *condvar;
57
58 /**
59 * Is this ike_sa currently checked out?
60 */
61 bool checked_out;
62
63 /**
64 * Does this SA drive out new threads?
65 */
66 bool driveout_new_threads;
67
68 /**
69 * Does this SA drive out waiting threads?
70 */
71 bool driveout_waiting_threads;
72
73 /**
74 * Identification of an IKE_SA (SPIs).
75 */
76 ike_sa_id_t *ike_sa_id;
77
78 /**
79 * The contained ike_sa_t object.
80 */
81 ike_sa_t *ike_sa;
82
83 /**
84 * hash of the IKE_SA_INIT message, used to detect retransmissions
85 */
86 chunk_t init_hash;
87
88 /**
89 * remote host address, required for DoS detection and duplicate
90 * checking (host with same my_id and other_id is *not* considered
91 * a duplicate if the address family differs)
92 */
93 host_t *other;
94
95 /**
96 * As responder: Is this SA half-open?
97 */
98 bool half_open;
99
100 /**
101 * own identity, required for duplicate checking
102 */
103 identification_t *my_id;
104
105 /**
106 * remote identity, required for duplicate checking
107 */
108 identification_t *other_id;
109
110 /**
111 * message ID currently being processed, if any
112 */
113 u_int32_t message_id;
114 };
115
116 /**
117 * Destroys an entry_t object, including the contained ike_sa_t.
118 */
119 static status_t entry_destroy(entry_t *this)
120 {
121 /* also destroy IKE SA */
122 this->ike_sa->destroy(this->ike_sa);
123 this->ike_sa_id->destroy(this->ike_sa_id);
124 chunk_free(&this->init_hash);
125 DESTROY_IF(this->other);
126 DESTROY_IF(this->my_id);
127 DESTROY_IF(this->other_id);
128 this->condvar->destroy(this->condvar);
129 free(this);
130 return SUCCESS;
131 }
132
133 /**
134 * Creates a new entry for the ike_sa_t list.
135 */
136 static entry_t *entry_create()
137 {
138 entry_t *this = malloc_thing(entry_t);
139
140 this->waiting_threads = 0;
141 this->condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
142
143 /* we only set the checked_out flag when we actually hand the SA out */
144 this->checked_out = FALSE;
145 this->driveout_new_threads = FALSE;
146 this->driveout_waiting_threads = FALSE;
147 this->message_id = -1;
148 this->init_hash = chunk_empty;
149 this->other = NULL;
150 this->half_open = FALSE;
151 this->my_id = NULL;
152 this->other_id = NULL;
153 this->ike_sa_id = NULL;
154 this->ike_sa = NULL;
155
156 return this;
157 }
158
159 /**
160 * Function that matches entry_t objects by ike_sa_id_t.
161 */
162 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
163 {
164 if (id->equals(id, entry->ike_sa_id))
165 {
166 return TRUE;
167 }
168 if ((id->get_responder_spi(id) == 0 ||
169 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
170 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
171 {
172 /* this is TRUE for IKE_SAs that we initiated but for which we have not yet received a response */
173 return TRUE;
174 }
175 return FALSE;
176 }
177
178 /**
179 * Function that matches entry_t objects by ike_sa_t pointers.
180 */
181 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
182 {
183 return entry->ike_sa == ike_sa;
184 }
185
186 /**
187 * Hash function for ike_sa_id_t objects.
188 */
189 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
190 {
191 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
192 * locally unique, so we use our randomly allocated SPI whether we are
193 * initiator or responder to ensure a good distribution. This is not
194 * possible for IKEv1, as we don't know whether we are the original initiator
195 * or not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
196 * that SPIs (Cookies) be allocated close to random (we allocate them randomly
197 * anyway), it seems safe to always use the initiator SPI there. */
198 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
199 ike_sa_id->is_initiator(ike_sa_id))
200 {
201 return ike_sa_id->get_initiator_spi(ike_sa_id);
202 }
203 return ike_sa_id->get_responder_spi(ike_sa_id);
204 }
205
206 typedef struct half_open_t half_open_t;
207
208 /**
209 * Struct to manage half-open IKE_SAs per peer.
210 */
211 struct half_open_t {
212 /** chunk of remote host address */
213 chunk_t other;
214
215 /** the number of half-open IKE_SAs with that host */
216 u_int count;
217 };
218
219 /**
220 * Destroys a half_open_t object.
221 */
222 static void half_open_destroy(half_open_t *this)
223 {
224 chunk_free(&this->other);
225 free(this);
226 }
227
228 /**
229 * Function that matches half_open_t objects by the given IP address chunk.
230 */
231 static bool half_open_match(half_open_t *half_open, chunk_t *addr)
232 {
233 return chunk_equals(*addr, half_open->other);
234 }
235
236 typedef struct connected_peers_t connected_peers_t;
237
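/**
 * Struct to manage the set of IKE_SAs between two given identities, used for
 * duplicate checking.
 */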
238 struct connected_peers_t {
239 /** own identity */
240 identification_t *my_id;
241
242 /** remote identity */
243 identification_t *other_id;
244
245 /** ip address family of peer */
246 int family;
247
248 /** list of ike_sa_id_t objects of IKE_SAs between the two identities */
249 linked_list_t *sas;
250 };
251
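/**
 * Destroys a connected_peers_t object.
 */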
252 static void connected_peers_destroy(connected_peers_t *this)
253 {
254 this->my_id->destroy(this->my_id);
255 this->other_id->destroy(this->other_id);
256 this->sas->destroy(this->sas);
257 free(this);
258 }
259
260 /**
261 * Function that matches connected_peers_t objects by the given ids.
262 */
263 static bool connected_peers_match(connected_peers_t *connected_peers,
264 identification_t *my_id, identification_t *other_id,
265 uintptr_t family)
266 {
267 return my_id->equals(my_id, connected_peers->my_id) &&
268 other_id->equals(other_id, connected_peers->other_id) &&
269 (!family || family == connected_peers->family);
270 }
271
272 typedef struct segment_t segment_t;
273
274 /**
275 * Struct to manage segments of the hash table.
276 */
277 struct segment_t {
278 /** mutex to access a segment exclusively */
279 mutex_t *mutex;
280
281 /** the number of entries in this segment */
282 u_int count;
283 };
284
285 typedef struct shareable_segment_t shareable_segment_t;
286
287 /**
288 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
289 */
290 struct shareable_segment_t {
291 /** rwlock to access a segment non-/exclusively */
292 rwlock_t *lock;
293
294 /** the number of entries in this segment - in case of the "half-open table"
295 * it's the sum of all half_open_t.count in a segment. */
296 u_int count;
297 };
298
299 typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;
300
301 /**
302 * Additional private members of ike_sa_manager_t.
303 */
304 struct private_ike_sa_manager_t {
305 /**
306 * Public interface of ike_sa_manager_t.
307 */
308 ike_sa_manager_t public;
309
310 /**
311 * Hash table with entries for the ike_sa_t objects.
312 */
313 linked_list_t **ike_sa_table;
314
315 /**
316 * The size of the hash table.
317 */
318 u_int table_size;
319
320 /**
321 * Mask to map the hashes to table rows.
322 */
323 u_int table_mask;
324
325 /**
326 * Segments of the hash table.
327 */
328 segment_t *segments;
329
330 /**
331 * The number of segments.
332 */
333 u_int segment_count;
334
335 /**
336 * Mask to map a table row to a segment.
337 */
338 u_int segment_mask;
339
340 /**
341 * Hash table with half_open_t objects.
342 */
343 linked_list_t **half_open_table;
344
345 /**
346 * Segments of the "half-open" hash table.
347 */
348 shareable_segment_t *half_open_segments;
349
350 /**
351 * Hash table with connected_peers_t objects.
352 */
353 linked_list_t **connected_peers_table;
354
355 /**
356 * Segments of the "connected peers" hash table.
357 */
358 shareable_segment_t *connected_peers_segments;
359
360 /**
361 * Hash table with chunk_t objects.
362 */
363 linked_list_t **init_hashes_table;
364
365 /**
366 * Segments of the "hashes" hash table.
367 */
368 segment_t *init_hashes_segments;
369
370 /**
371 * RNG to get random SPIs for our side
372 */
373 rng_t *rng;
374
375 /**
376 * SHA1 hasher for IKE_SA_INIT retransmit detection
377 */
378 hasher_t *hasher;
379
380 /**
381 * reuse existing IKE_SAs in checkout_by_config
382 */
383 bool reuse_ikesa;
384 };
385
386 /**
387 * Acquire a lock to access the segment of the table row with the given index.
388 * It also works with the segment index directly.
389 */
390 static void lock_single_segment(private_ike_sa_manager_t *this, u_int index)
391 {
392 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
393
394 lock->lock(lock);
395 }
396
397 /**
398 * Release the lock required to access the segment of the table row with the given index.
399 * It also works with the segment index directly.
400 */
401 static void unlock_single_segment(private_ike_sa_manager_t *this, u_int index)
402 {
403 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
404
405 lock->unlock(lock);
406 }
407
408 /**
409 * Lock all segments
410 */
411 static void lock_all_segments(private_ike_sa_manager_t *this)
412 {
413 u_int i;
414
415 for (i = 0; i < this->segment_count; i++)
416 {
417 this->segments[i].mutex->lock(this->segments[i].mutex);
418 }
419 }
420
421 /**
422 * Unlock all segments
423 */
424 static void unlock_all_segments(private_ike_sa_manager_t *this)
425 {
426 u_int i;
427
428 for (i = 0; i < this->segment_count; i++)
429 {
430 this->segments[i].mutex->unlock(this->segments[i].mutex);
431 }
432 }
433
434 typedef struct private_enumerator_t private_enumerator_t;
435
436 /**
437 * hash table enumerator implementation
438 */
439 struct private_enumerator_t {
440
441 /**
442 * implements enumerator interface
443 */
444 enumerator_t enumerator;
445
446 /**
447 * associated ike_sa_manager_t
448 */
449 private_ike_sa_manager_t *manager;
450
451 /**
452 * current segment index
453 */
454 u_int segment;
455
456 /**
457 * currently enumerating entry
458 */
459 entry_t *entry;
460
461 /**
462 * current table row index
463 */
464 u_int row;
465
466 /**
467 * enumerator for the current table row
468 */
469 enumerator_t *current;
470 };
471
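/**
 * Enumerate entries in the IKE_SA hash table. The segment currently being
 * enumerated stays locked, and the condvar of the previously returned entry
 * is signaled on each subsequent call.
 */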
472 METHOD(enumerator_t, enumerate, bool,
473 private_enumerator_t *this, entry_t **entry, u_int *segment)
474 {
475 if (this->entry)
476 {
477 this->entry->condvar->signal(this->entry->condvar);
478 this->entry = NULL;
479 }
480 while (this->segment < this->manager->segment_count)
481 {
482 while (this->row < this->manager->table_size)
483 {
484 if (this->current)
485 {
486 entry_t *item;
487
488 if (this->current->enumerate(this->current, &item))
489 {
490 *entry = this->entry = item;
491 *segment = this->segment;
492 return TRUE;
493 }
494 this->current->destroy(this->current);
495 this->current = NULL;
496 unlock_single_segment(this->manager, this->segment);
497 }
498 else
499 {
500 linked_list_t *list;
501
502 lock_single_segment(this->manager, this->segment);
503 if ((list = this->manager->ike_sa_table[this->row]) != NULL &&
504 list->get_count(list))
505 {
506 this->current = list->create_enumerator(list);
507 continue;
508 }
509 unlock_single_segment(this->manager, this->segment);
510 }
511 this->row += this->manager->segment_count;
512 }
513 this->segment++;
514 this->row = this->segment;
515 }
516 return FALSE;
517 }
518
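/**
 * Destroy the table enumerator, signaling the condvar of the current entry
 * and releasing the lock on the current segment, if any.
 */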
519 METHOD(enumerator_t, enumerator_destroy, void,
520 private_enumerator_t *this)
521 {
522 if (this->entry)
523 {
524 this->entry->condvar->signal(this->entry->condvar);
525 }
526 if (this->current)
527 {
528 this->current->destroy(this->current);
529 unlock_single_segment(this->manager, this->segment);
530 }
531 free(this);
532 }
533
534 /**
535 * Creates an enumerator to enumerate the entries in the hash table.
536 */
537 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
538 {
539 private_enumerator_t *enumerator;
540
541 INIT(enumerator,
542 .enumerator = {
543 .enumerate = (void*)_enumerate,
544 .destroy = _enumerator_destroy,
545 },
546 .manager = this,
547 );
548 return &enumerator->enumerator;
549 }
550
551 /**
552 * Put an entry into the hash table.
553 * Note: The caller has to unlock the returned segment.
554 */
555 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
556 {
557 linked_list_t *list;
558 u_int row, segment;
559
560 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
561 segment = row & this->segment_mask;
562
563 lock_single_segment(this, segment);
564 list = this->ike_sa_table[row];
565 if (!list)
566 {
567 list = this->ike_sa_table[row] = linked_list_create();
568 }
569 list->insert_last(list, entry);
570 this->segments[segment].count++;
571 return segment;
572 }
573
574 /**
575 * Remove an entry from the hash table.
576 * Note: The caller MUST have a lock on the segment of this entry.
577 */
578 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
579 {
580 linked_list_t *list;
581 u_int row, segment;
582
583 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
584 segment = row & this->segment_mask;
585 list = this->ike_sa_table[row];
586 if (list)
587 {
588 entry_t *current;
589 enumerator_t *enumerator;
590
591 enumerator = list->create_enumerator(list);
592 while (enumerator->enumerate(enumerator, &current))
593 {
594 if (current == entry)
595 {
596 list->remove_at(list, enumerator);
597 this->segments[segment].count--;
598 break;
599 }
600 }
601 enumerator->destroy(enumerator);
602 }
603 }
604
605 /**
606 * Remove the entry at the current enumerator position.
607 */
608 static void remove_entry_at(private_enumerator_t *this)
609 {
610 this->entry = NULL;
611 if (this->current)
612 {
613 linked_list_t *list = this->manager->ike_sa_table[this->row];
614 list->remove_at(list, this->current);
615 this->manager->segments[this->segment].count--;
616 }
617 }
618
619 /**
620 * Find an entry using the provided match function to compare the entries for
621 * equality.
622 */
623 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
624 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
625 linked_list_match_t match, void *p1, void *p2)
626 {
627 entry_t *current;
628 linked_list_t *list;
629 u_int row, seg;
630
631 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
632 seg = row & this->segment_mask;
633
634 lock_single_segment(this, seg);
635 list = this->ike_sa_table[row];
636 if (list)
637 {
638 if (list->find_first(list, match, (void**)&current, p1, p2) == SUCCESS)
639 {
640 *entry = current;
641 *segment = seg;
642 /* the locked segment has to be unlocked by the caller */
643 return SUCCESS;
644 }
645 }
646 unlock_single_segment(this, seg);
647 return NOT_FOUND;
648 }
649
650 /**
651 * Find an entry by ike_sa_id_t.
652 * Note: On SUCCESS, the caller has to unlock the segment.
653 */
654 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
655 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
656 {
657 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
658 (linked_list_match_t)entry_match_by_id, ike_sa_id, NULL);
659 }
660
661 /**
662 * Find an entry by IKE_SA pointer.
663 * Note: On SUCCESS, the caller has to unlock the segment.
664 */
665 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
666 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
667 {
668 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
669 (linked_list_match_t)entry_match_by_sa, ike_sa, NULL);
670 }
671
672 /**
673 * Wait until no other thread is using an IKE_SA, returns FALSE if the entry
674 * is not acquirable.
675 */
676 static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
677 u_int segment)
678 {
679 if (entry->driveout_new_threads)
680 {
681 /* we are not allowed to get this */
682 return FALSE;
683 }
684 while (entry->checked_out && !entry->driveout_waiting_threads)
685 {
686 /* so wait until we can get the SA for ourselves;
687 * we register ourselves as waiting. */
688 entry->waiting_threads++;
689 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
690 entry->waiting_threads--;
691 }
692 /* a deletion request prevents us from getting this SA, get the next one */
693 if (entry->driveout_waiting_threads)
694 {
695 /* we must signal here, others may be waiting on it, too */
696 entry->condvar->signal(entry->condvar);
697 return FALSE;
698 }
699 return TRUE;
700 }
701
702 /**
703 * Put a half-open SA into the hash table.
704 */
705 static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
706 {
707 half_open_t *half_open = NULL;
708 linked_list_t *list;
709 chunk_t addr;
710 u_int row, segment;
711 rwlock_t *lock;
712
713 addr = entry->other->get_address(entry->other);
714 row = chunk_hash(addr) & this->table_mask;
715 segment = row & this->segment_mask;
716 lock = this->half_open_segments[segment].lock;
717 lock->write_lock(lock);
718 list = this->half_open_table[row];
719 if (list)
720 {
721 half_open_t *current;
722
723 if (list->find_first(list, (linked_list_match_t)half_open_match,
724 (void**)&current, &addr) == SUCCESS)
725 {
726 half_open = current;
727 half_open->count++;
728 this->half_open_segments[segment].count++;
729 }
730 }
731 else
732 {
733 list = this->half_open_table[row] = linked_list_create();
734 }
735
736 if (!half_open)
737 {
738 INIT(half_open,
739 .other = chunk_clone(addr),
740 .count = 1,
741 );
742 list->insert_last(list, half_open);
743 this->half_open_segments[segment].count++;
744 }
745 lock->unlock(lock);
746 }
747
748 /**
749 * Remove a half-open SA from the hash table.
750 */
751 static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
752 {
753 linked_list_t *list;
754 chunk_t addr;
755 u_int row, segment;
756 rwlock_t *lock;
757
758 addr = entry->other->get_address(entry->other);
759 row = chunk_hash(addr) & this->table_mask;
760 segment = row & this->segment_mask;
761 lock = this->half_open_segments[segment].lock;
762 lock->write_lock(lock);
763 list = this->half_open_table[row];
764 if (list)
765 {
766 half_open_t *current;
767 enumerator_t *enumerator;
768
769 enumerator = list->create_enumerator(list);
770 while (enumerator->enumerate(enumerator, &current))
771 {
772 if (half_open_match(current, &addr))
773 {
774 if (--current->count == 0)
775 {
776 list->remove_at(list, enumerator);
777 half_open_destroy(current);
778 }
779 this->half_open_segments[segment].count--;
780 break;
781 }
782 }
783 enumerator->destroy(enumerator);
784 }
785 lock->unlock(lock);
786 }
787
788 /**
789 * Put an SA between two peers into the hash table.
790 */
791 static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
792 {
793 connected_peers_t *connected_peers = NULL;
794 chunk_t my_id, other_id;
795 linked_list_t *list;
796 u_int row, segment;
797 rwlock_t *lock;
798
799 my_id = entry->my_id->get_encoding(entry->my_id);
800 other_id = entry->other_id->get_encoding(entry->other_id);
801 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
802 segment = row & this->segment_mask;
803 lock = this->connected_peers_segments[segment].lock;
804 lock->write_lock(lock);
805 list = this->connected_peers_table[row];
806 if (list)
807 {
808 connected_peers_t *current;
809
810 if (list->find_first(list, (linked_list_match_t)connected_peers_match,
811 (void**)&current, entry->my_id, entry->other_id,
812 (uintptr_t)entry->other->get_family(entry->other)) == SUCCESS)
813 {
814 connected_peers = current;
815 if (connected_peers->sas->find_first(connected_peers->sas,
816 (linked_list_match_t)entry->ike_sa_id->equals,
817 NULL, entry->ike_sa_id) == SUCCESS)
818 {
819 lock->unlock(lock);
820 return;
821 }
822 }
823 }
824 else
825 {
826 list = this->connected_peers_table[row] = linked_list_create();
827 }
828
829 if (!connected_peers)
830 {
831 INIT(connected_peers,
832 .my_id = entry->my_id->clone(entry->my_id),
833 .other_id = entry->other_id->clone(entry->other_id),
834 .family = entry->other->get_family(entry->other),
835 .sas = linked_list_create(),
836 );
837 list->insert_last(list, connected_peers);
838 }
839 connected_peers->sas->insert_last(connected_peers->sas,
840 entry->ike_sa_id->clone(entry->ike_sa_id));
841 this->connected_peers_segments[segment].count++;
842 lock->unlock(lock);
843 }
844
845 /**
846 * Remove an SA between two peers from the hash table.
847 */
848 static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
849 {
850 chunk_t my_id, other_id;
851 linked_list_t *list;
852 u_int row, segment;
853 rwlock_t *lock;
854
855 my_id = entry->my_id->get_encoding(entry->my_id);
856 other_id = entry->other_id->get_encoding(entry->other_id);
857 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
858 segment = row & this->segment_mask;
859
860 lock = this->connected_peers_segments[segment].lock;
861 lock->write_lock(lock);
862 list = this->connected_peers_table[row];
863 if (list)
864 {
865 connected_peers_t *current;
866 enumerator_t *enumerator;
867
868 enumerator = list->create_enumerator(list);
869 while (enumerator->enumerate(enumerator, &current))
870 {
871 if (connected_peers_match(current, entry->my_id, entry->other_id,
872 (uintptr_t)entry->other->get_family(entry->other)))
873 {
874 ike_sa_id_t *ike_sa_id;
875 enumerator_t *inner;
876
877 inner = current->sas->create_enumerator(current->sas);
878 while (inner->enumerate(inner, &ike_sa_id))
879 {
880 if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
881 {
882 current->sas->remove_at(current->sas, inner);
883 ike_sa_id->destroy(ike_sa_id);
884 this->connected_peers_segments[segment].count--;
885 break;
886 }
887 }
888 inner->destroy(inner);
889 if (current->sas->get_count(current->sas) == 0)
890 {
891 list->remove_at(list, enumerator);
892 connected_peers_destroy(current);
893 }
894 break;
895 }
896 }
897 enumerator->destroy(enumerator);
898 }
899 lock->unlock(lock);
900 }
901
902 /**
903 * Check if we have already created an IKE_SA based on the initial IKE message
904 * with the given hash. If not, the hash is stored.
905 *
906 * @returns TRUE if the message with the given hash was seen before
907 */
908 static bool check_and_put_init_hash(private_ike_sa_manager_t *this,
909 chunk_t init_hash)
910 {
911 chunk_t *clone;
912 linked_list_t *list;
913 u_int row, segment;
914 mutex_t *mutex;
915
916 row = chunk_hash(init_hash) & this->table_mask;
917 segment = row & this->segment_mask;
918 mutex = this->init_hashes_segments[segment].mutex;
919 mutex->lock(mutex);
920 list = this->init_hashes_table[row];
921 if (list)
922 {
923 chunk_t *current;
924
925 if (list->find_first(list, (linked_list_match_t)chunk_equals_ptr,
926 (void**)&current, &init_hash) == SUCCESS)
927 {
928 mutex->unlock(mutex);
929 return TRUE;
930 }
931 }
932 else
933 {
934 list = this->init_hashes_table[row] = linked_list_create();
935 }
936
937 INIT(clone,
938 .len = init_hash.len,
939 .ptr = malloc(init_hash.len),
940 );
941 memcpy(clone->ptr, init_hash.ptr, clone->len);
942 list->insert_last(list, clone);
943
944 mutex->unlock(mutex);
945 return FALSE;
946 }
947
948 /**
949 * Remove the hash of an initial IKE message from the cache.
950 */
951 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
952 {
953 linked_list_t *list;
954 u_int row, segment;
955 mutex_t *mutex;
956
957 row = chunk_hash(init_hash) & this->table_mask;
958 segment = row & this->segment_mask;
959 mutex = this->init_hashes_segments[segment].mutex;
960 mutex->lock(mutex);
961 list = this->init_hashes_table[row];
962 if (list)
963 {
964 enumerator_t *enumerator;
965 chunk_t *current;
966
967 enumerator = list->create_enumerator(list);
968 while (enumerator->enumerate(enumerator, &current))
969 {
970 if (chunk_equals_ptr(current, &init_hash))
971 {
972 list->remove_at(list, enumerator);
973 chunk_free(current);
974 free(current);
975 break;
976 }
977 }
978 enumerator->destroy(enumerator);
979 }
980 mutex->unlock(mutex);
981 }
982
983 /**
984 * Get a random SPI for new IKE_SAs, or 0 if the RNG is gone (after flush())
985 */
986 static u_int64_t get_spi(private_ike_sa_manager_t *this)
987 {
988 u_int64_t spi = 0;
989
990 if (this->rng)
991 {
992 this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi);
993 }
994 return spi;
995 }
996
997 METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
998 private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
999 {
1000 ike_sa_t *ike_sa = NULL;
1001 entry_t *entry;
1002 u_int segment;
1003
1004 DBG2(DBG_MGR, "checkout IKE_SA");
1005
1006 if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
1007 {
1008 if (wait_for_entry(this, entry, segment))
1009 {
1010 entry->checked_out = TRUE;
1011 ike_sa = entry->ike_sa;
1012 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1013 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1014 }
1015 unlock_single_segment(this, segment);
1016 }
1017 charon->bus->set_sa(charon->bus, ike_sa);
1018 return ike_sa;
1019 }
1020
1021 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1022 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1023 {
1024 ike_sa_id_t *ike_sa_id;
1025 ike_sa_t *ike_sa;
1026 u_int8_t ike_version;
1027
1028 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1029
1030 if (initiator)
1031 {
1032 ike_sa_id = ike_sa_id_create(ike_version, get_spi(this), 0, TRUE);
1033 }
1034 else
1035 {
1036 ike_sa_id = ike_sa_id_create(ike_version, 0, get_spi(this), FALSE);
1037 }
1038 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1039 ike_sa_id->destroy(ike_sa_id);
1040
1041 if (ike_sa)
1042 {
1043 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1044 ike_sa->get_unique_id(ike_sa));
1045 }
1046 return ike_sa;
1047 }
1048
1049 METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
1050 private_ike_sa_manager_t* this, message_t *message)
1051 {
1052 u_int segment;
1053 entry_t *entry;
1054 ike_sa_t *ike_sa = NULL;
1055 ike_sa_id_t *id;
1056 ike_version_t ike_version;
1057 bool is_init = FALSE;
1058
1059 id = message->get_ike_sa_id(message);
1060 /* clone the IKE_SA ID so we can modify the initiator flag */
1061 id = id->clone(id);
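/* the initiator flag in the message header reflects the sender's point of
 * view, switch it to get the flag from our perspective */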
1062 id->switch_initiator(id);
1063
1064 DBG2(DBG_MGR, "checkout IKE_SA by message");
1065
1066 if (id->get_responder_spi(id) == 0)
1067 {
1068 if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
1069 {
1070 if (message->get_exchange_type(message) == IKE_SA_INIT &&
1071 message->get_request(message))
1072 {
1073 ike_version = IKEV2;
1074 is_init = TRUE;
1075 }
1076 }
1077 else
1078 {
1079 if (message->get_exchange_type(message) == ID_PROT ||
1080 message->get_exchange_type(message) == AGGRESSIVE)
1081 {
1082 ike_version = IKEV1;
1083 is_init = TRUE;
1084 if (id->is_initiator(id))
1085 { /* not set in IKEv1, switch back before applying to new SA */
1086 id->switch_initiator(id);
1087 }
1088 }
1089 }
1090 }
1091
1092 if (is_init && this->hasher)
1093 { /* initial request. checking for the hasher prevents crashes once
1094 * flush() has been called */
1095 chunk_t hash;
1096
1097 this->hasher->allocate_hash(this->hasher,
1098 message->get_packet_data(message), &hash);
1099
1100 /* ensure this is not a retransmit of an already handled init message */
1101 if (check_and_put_init_hash(this, hash))
1102 {
1103 chunk_free(&hash);
1104 id->destroy(id);
1105 DBG1(DBG_MGR, "ignoring %s, already processing",
1106 ike_version == IKEV2 ? "IKE_SA_INIT" : "initial IKE message");
1107 return NULL;
1108 }
1109
1110 /* no IKE_SA yet, create a new one */
1111 id->set_responder_spi(id, get_spi(this));
1112 ike_sa = ike_sa_create(id, FALSE, ike_version);
1113 if (ike_sa)
1114 {
1115 entry = entry_create();
1116 entry->ike_sa = ike_sa;
1117 entry->ike_sa_id = id->clone(id);
1118
1119 segment = put_entry(this, entry);
1120 entry->checked_out = TRUE;
1121 unlock_single_segment(this, segment);
1122
1123 entry->message_id = message->get_message_id(message);
1124 entry->init_hash = hash;
1125
1126 DBG2(DBG_MGR, "created IKE_SA %s[%u]",
1127 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1128 }
1129 else
1130 {
1131 remove_init_hash(this, hash);
1132 chunk_free(&hash);
1133 DBG1(DBG_MGR, "ignoring message, no such IKE_SA");
1134 }
1135 id->destroy(id);
1136 charon->bus->set_sa(charon->bus, ike_sa);
1137 return ike_sa;
1138 }
1139
1140 if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
1141 {
1142 /* only check out in IKEv2 if we are not already processing it */
1143 if (message->get_request(message) &&
1144 message->get_message_id(message) == entry->message_id)
1145 {
1146 DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
1147 entry->message_id);
1148 }
1149 else if (wait_for_entry(this, entry, segment))
1150 {
1151 ike_sa_id_t *ike_id;
1152
1153 ike_id = entry->ike_sa->get_id(entry->ike_sa);
1154 entry->checked_out = TRUE;
1155 entry->message_id = message->get_message_id(message);
1156 if (ike_id->get_responder_spi(ike_id) == 0)
1157 {
1158 ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
1159 }
1160 ike_sa = entry->ike_sa;
1161 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1162 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1163 }
1164 unlock_single_segment(this, segment);
1165 }
1166 id->destroy(id);
1167 charon->bus->set_sa(charon->bus, ike_sa);
1168 return ike_sa;
1169 }
1170
1171 METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
1172 private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
1173 {
1174 enumerator_t *enumerator;
1175 entry_t *entry;
1176 ike_sa_t *ike_sa = NULL;
1177 peer_cfg_t *current_peer;
1178 ike_cfg_t *current_ike;
1179 u_int segment;
1180
1181 DBG2(DBG_MGR, "checkout IKE_SA by config");
1182
1183 if (!this->reuse_ikesa)
1184 { /* IKE_SA reuse disabled by config */
1185 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1186 charon->bus->set_sa(charon->bus, ike_sa);
1187 return ike_sa;
1188 }
1189
1190 enumerator = create_table_enumerator(this);
1191 while (enumerator->enumerate(enumerator, &entry, &segment))
1192 {
1193 if (!wait_for_entry(this, entry, segment))
1194 {
1195 continue;
1196 }
1197 if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
1198 { /* skip IKE_SAs which are not usable */
1199 continue;
1200 }
1201
1202 current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
1203 if (current_peer && current_peer->equals(current_peer, peer_cfg))
1204 {
1205 current_ike = current_peer->get_ike_cfg(current_peer);
1206 if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
1207 {
1208 entry->checked_out = TRUE;
1209 ike_sa = entry->ike_sa;
1210 DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
1211 ike_sa->get_unique_id(ike_sa),
1212 current_peer->get_name(current_peer));
1213 break;
1214 }
1215 }
1216 }
1217 enumerator->destroy(enumerator);
1218
1219 if (!ike_sa)
1220 { /* no IKE_SA using such a config, hand out a new one */
1221 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1222 }
1223 charon->bus->set_sa(charon->bus, ike_sa);
1224 return ike_sa;
1225 }
1226
1227 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1228 private_ike_sa_manager_t *this, u_int32_t id, bool child)
1229 {
1230 enumerator_t *enumerator, *children;
1231 entry_t *entry;
1232 ike_sa_t *ike_sa = NULL;
1233 child_sa_t *child_sa;
1234 u_int segment;
1235
1236 DBG2(DBG_MGR, "checkout IKE_SA by ID");
1237
1238 enumerator = create_table_enumerator(this);
1239 while (enumerator->enumerate(enumerator, &entry, &segment))
1240 {
1241 if (wait_for_entry(this, entry, segment))
1242 {
1243 /* look for a child with such a reqid ... */
1244 if (child)
1245 {
1246 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1247 while (children->enumerate(children, (void**)&child_sa))
1248 {
1249 if (child_sa->get_reqid(child_sa) == id)
1250 {
1251 ike_sa = entry->ike_sa;
1252 break;
1253 }
1254 }
1255 children->destroy(children);
1256 }
1257 else /* ... or for an IKE_SA with such a unique id */
1258 {
1259 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1260 {
1261 ike_sa = entry->ike_sa;
1262 }
1263 }
1264 /* got one, return */
1265 if (ike_sa)
1266 {
1267 entry->checked_out = TRUE;
1268 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1269 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1270 break;
1271 }
1272 }
1273 }
1274 enumerator->destroy(enumerator);
1275
1276 charon->bus->set_sa(charon->bus, ike_sa);
1277 return ike_sa;
1278 }
1279
1280 METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1281 private_ike_sa_manager_t *this, char *name, bool child)
1282 {
1283 enumerator_t *enumerator, *children;
1284 entry_t *entry;
1285 ike_sa_t *ike_sa = NULL;
1286 child_sa_t *child_sa;
1287 u_int segment;
1288
1289 enumerator = create_table_enumerator(this);
1290 while (enumerator->enumerate(enumerator, &entry, &segment))
1291 {
1292 if (wait_for_entry(this, entry, segment))
1293 {
1294 /* look for a child with such a policy name ... */
1295 if (child)
1296 {
1297 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1298 while (children->enumerate(children, (void**)&child_sa))
1299 {
1300 if (streq(child_sa->get_name(child_sa), name))
1301 {
1302 ike_sa = entry->ike_sa;
1303 break;
1304 }
1305 }
1306 children->destroy(children);
1307 }
1308 else /* ... or for an IKE_SA with such a connection name */
1309 {
1310 if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1311 {
1312 ike_sa = entry->ike_sa;
1313 }
1314 }
1315 /* got one, return */
1316 if (ike_sa)
1317 {
1318 entry->checked_out = TRUE;
1319 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1320 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1321 break;
1322 }
1323 }
1324 }
1325 enumerator->destroy(enumerator);
1326
1327 charon->bus->set_sa(charon->bus, ike_sa);
1328 return ike_sa;
1329 }
1330
1331 /**
1332 * enumerator filter function, waiting variant
1333 */
1334 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1335 entry_t **in, ike_sa_t **out, u_int *segment)
1336 {
1337 if (wait_for_entry(this, *in, *segment))
1338 {
1339 *out = (*in)->ike_sa;
1340 charon->bus->set_sa(charon->bus, *out);
1341 return TRUE;
1342 }
1343 return FALSE;
1344 }
1345
1346 /**
1347 * enumerator filter function, skipping variant
1348 */
1349 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1350 entry_t **in, ike_sa_t **out, u_int *segment)
1351 {
1352 if (!(*in)->driveout_new_threads &&
1353 !(*in)->driveout_waiting_threads &&
1354 !(*in)->checked_out)
1355 {
1356 *out = (*in)->ike_sa;
1357 charon->bus->set_sa(charon->bus, *out);
1358 return TRUE;
1359 }
1360 return FALSE;
1361 }
1362
1363 /**
1364 * Reset the thread's current IKE_SA after enumeration
1365 */
1366 static void reset_sa(void *data)
1367 {
1368 charon->bus->set_sa(charon->bus, NULL);
1369 }
1370
1371 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1372 private_ike_sa_manager_t* this, bool wait)
1373 {
1374 return enumerator_create_filter(create_table_enumerator(this),
1375 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1376 this, reset_sa);
1377 }
1378
1379 METHOD(ike_sa_manager_t, checkin, void,
1380 private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1381 {
1382 /* to check the SA back in, we look for the pointer of the ike_sa
1383 * in all entries.
1384 * The lookup is done by our local SPI (the initiator SPI for IKEv1), so even
1385 * if the other SPI has changed (e.g. on reception of an IKE_SA_INIT response)
1386 * the lookup still works, but updating the stored SPI MAY be necessary...
1387 */
1388 entry_t *entry;
1389 ike_sa_id_t *ike_sa_id;
1390 host_t *other;
1391 identification_t *my_id, *other_id;
1392 u_int segment;
1393
1394 ike_sa_id = ike_sa->get_id(ike_sa);
1395 my_id = ike_sa->get_my_id(ike_sa);
1396 other_id = ike_sa->get_other_id(ike_sa);
1397 other = ike_sa->get_other_host(ike_sa);
1398
1399 DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1400 ike_sa->get_unique_id(ike_sa));
1401
1402 /* look for the entry */
1403 if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1404 {
1405 /* ike_sa_id must be updated */
1406 entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
1407 /* signal waiting threads */
1408 entry->checked_out = FALSE;
1409 entry->message_id = -1;
1410 /* check if this SA is half-open */
1411 if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
1412 {
1413 /* not half open anymore */
1414 entry->half_open = FALSE;
1415 remove_half_open(this, entry);
1416 }
1417 else if (entry->half_open && !other->ip_equals(other, entry->other))
1418 {
1419 /* the other host's IP has changed, we must update the hash table */
1420 remove_half_open(this, entry);
1421 DESTROY_IF(entry->other);
1422 entry->other = other->clone(other);
1423 put_half_open(this, entry);
1424 }
1425 else if (!entry->half_open &&
1426 !entry->ike_sa_id->is_initiator(entry->ike_sa_id) &&
1427 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
1428 {
1429 /* this is a new half-open SA */
1430 entry->half_open = TRUE;
1431 entry->other = other->clone(other);
1432 put_half_open(this, entry);
1433 }
1434 DBG2(DBG_MGR, "check-in of IKE_SA successful.");
1435 entry->condvar->signal(entry->condvar);
1436 }
1437 else
1438 {
1439 entry = entry_create();
1440 entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
1441 entry->ike_sa = ike_sa;
1442 segment = put_entry(this, entry);
1443 }
1444
1445 /* apply identities for duplicate test */
1446 if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
1447 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
1448 entry->my_id == NULL && entry->other_id == NULL)
1449 {
1450 if (ike_sa->get_version(ike_sa) == IKEV1)
1451 {
1452 /* If authenticated and received INITIAL_CONTACT,
1453 * delete any existing IKE_SAs with that peer. */
1454 if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
1455 {
1456 this->public.check_uniqueness(&this->public, ike_sa, TRUE);
1457 ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
1458 }
1459 }
1460
1461 entry->my_id = my_id->clone(my_id);
1462 entry->other_id = other_id->clone(other_id);
1463 if (!entry->other)
1464 {
1465 entry->other = other->clone(other);
1466 }
1467 put_connected_peers(this, entry);
1468 }
1469
1470 unlock_single_segment(this, segment);
1471
1472 charon->bus->set_sa(charon->bus, NULL);
1473 }
1474
1475 METHOD(ike_sa_manager_t, checkin_and_destroy, void,
1476 private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1477 {
1478 /* deletion is a bit complex, we must ensure that no thread is waiting for
1479 * this SA.
1480 * We mark the entry, signal the condvar until all waiting threads are gone,
1481 * and only then remove the SA from the table.
1482 */
1483 entry_t *entry;
1484 ike_sa_id_t *ike_sa_id;
1485 u_int segment;
1486
1487 ike_sa_id = ike_sa->get_id(ike_sa);
1488
1489 DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1490 ike_sa->get_unique_id(ike_sa));
1491
1492 if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1493 {
1494 /* drive out waiting threads, as we are in a hurry */
1495 entry->driveout_waiting_threads = TRUE;
1496 /* mark it, so no new threads can get this entry */
1497 entry->driveout_new_threads = TRUE;
1498 /* wait until all workers have done their work */
1499 while (entry->waiting_threads)
1500 {
1501 /* wake up all */
1502 entry->condvar->broadcast(entry->condvar);
1503 /* they will wake us again when their work is done */
1504 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
1505 }
1506 remove_entry(this, entry);
1507 unlock_single_segment(this, segment);
1508
1509 if (entry->half_open)
1510 {
1511 remove_half_open(this, entry);
1512 }
1513 if (entry->my_id && entry->other_id)
1514 {
1515 remove_connected_peers(this, entry);
1516 }
1517 if (entry->init_hash.ptr)
1518 {
1519 remove_init_hash(this, entry->init_hash);
1520 }
1521
1522 entry_destroy(entry);
1523
1524 DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
1525 }
1526 else
1527 {
1528 DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
1529 ike_sa->destroy(ike_sa);
1530 }
1531 charon->bus->set_sa(charon->bus, NULL);
1532 }
1533
1534 /**
1535 * Cleanup function for create_id_enumerator
1536 */
1537 static void id_enumerator_cleanup(linked_list_t *ids)
1538 {
1539 ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
1540 }
1541
1542 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
1543 private_ike_sa_manager_t *this, identification_t *me,
1544 identification_t *other, int family)
1545 {
1546 linked_list_t *list, *ids = NULL;
1547 connected_peers_t *current;
1548 u_int row, segment;
1549 rwlock_t *lock;
1550
1551 row = chunk_hash_inc(other->get_encoding(other),
1552 chunk_hash(me->get_encoding(me))) & this->table_mask;
1553 segment = row & this->segment_mask;
1554
1555 lock = this->connected_peers_segments[segment & this->segment_mask].lock;
1556 lock->read_lock(lock);
1557 list = this->connected_peers_table[row];
1558 if (list)
1559 {
1560 if (list->find_first(list, (linked_list_match_t)connected_peers_match,
1561 (void**)&current, me, other, (uintptr_t)family) == SUCCESS)
1562 {
1563 ids = current->sas->clone_offset(current->sas,
1564 offsetof(ike_sa_id_t, clone));
1565 }
1566 }
1567 lock->unlock(lock);
1568
1569 if (!ids)
1570 {
1571 return enumerator_create_empty();
1572 }
1573 return enumerator_create_cleaner(ids->create_enumerator(ids),
1574 (void*)id_enumerator_cleanup, ids);
1575 }
1576
1577 METHOD(ike_sa_manager_t, check_uniqueness, bool,
1578 private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
1579 {
1580 bool cancel = FALSE;
1581 peer_cfg_t *peer_cfg;
1582 unique_policy_t policy;
1583 enumerator_t *enumerator;
1584 ike_sa_id_t *id = NULL;
1585 identification_t *me, *other;
1586 host_t *other_host;
1587
1588 peer_cfg = ike_sa->get_peer_cfg(ike_sa);
1589 policy = peer_cfg->get_unique_policy(peer_cfg);
1590 if (policy == UNIQUE_NO && !force_replace)
1591 {
1592 return FALSE;
1593 }
1594 me = ike_sa->get_my_id(ike_sa);
1595 other = ike_sa->get_other_id(ike_sa);
1596 other_host = ike_sa->get_other_host(ike_sa);
1597
1598 enumerator = create_id_enumerator(this, me, other,
1599 other_host->get_family(other_host));
1600 while (enumerator->enumerate(enumerator, &id))
1601 {
1602 status_t status = SUCCESS;
1603 ike_sa_t *duplicate;
1604
1605 duplicate = checkout(this, id);
1606 if (!duplicate)
1607 {
1608 continue;
1609 }
1610 if (force_replace)
1611 {
1612 DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
1613 "received INITIAL_CONTACT", other);
1614 checkin_and_destroy(this, duplicate);
1615 continue;
1616 }
1617 peer_cfg = duplicate->get_peer_cfg(duplicate);
1618 if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
1619 {
1620 switch (duplicate->get_state(duplicate))
1621 {
1622 case IKE_ESTABLISHED:
1623 case IKE_REKEYING:
1624 switch (policy)
1625 {
1626 case UNIQUE_REPLACE:
1627 DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer "
1628 "'%Y' due to uniqueness policy", other);
1629 status = duplicate->delete(duplicate);
1630 break;
1631 case UNIQUE_KEEP:
1632 cancel = TRUE;
1633 /* we keep the first IKE_SA and delete all
1634 * other duplicates that might exist */
1635 policy = UNIQUE_REPLACE;
1636 break;
1637 default:
1638 break;
1639 }
1640 break;
1641 default:
1642 break;
1643 }
1644 }
1645 if (status == DESTROY_ME)
1646 {
1647 checkin_and_destroy(this, duplicate);
1648 }
1649 else
1650 {
1651 checkin(this, duplicate);
1652 }
1653 }
1654 enumerator->destroy(enumerator);
1655 /* reset thread's current IKE_SA after checkin */
1656 charon->bus->set_sa(charon->bus, ike_sa);
1657 return cancel;
1658 }
1659
1660 METHOD(ike_sa_manager_t, has_contact, bool,
1661 private_ike_sa_manager_t *this, identification_t *me,
1662 identification_t *other, int family)
1663 {
1664 linked_list_t *list;
1665 u_int row, segment;
1666 rwlock_t *lock;
1667 bool found = FALSE;
1668
1669 row = chunk_hash_inc(other->get_encoding(other),
1670 chunk_hash(me->get_encoding(me))) & this->table_mask;
1671 segment = row & this->segment_mask;
1672 lock = this->connected_peers_segments[segment & this->segment_mask].lock;
1673 lock->read_lock(lock);
1674 list = this->connected_peers_table[row];
1675 if (list)
1676 {
1677 if (list->find_first(list, (linked_list_match_t)connected_peers_match,
1678 NULL, me, other, (uintptr_t)family) == SUCCESS)
1679 {
1680 found = TRUE;
1681 }
1682 }
1683 lock->unlock(lock);
1684
1685 return found;
1686 }
1687
1688 METHOD(ike_sa_manager_t, get_count, u_int,
1689 private_ike_sa_manager_t *this)
1690 {
1691 u_int segment, count = 0;
1692 mutex_t *mutex;
1693
1694 for (segment = 0; segment < this->segment_count; segment++)
1695 {
1696 mutex = this->segments[segment & this->segment_mask].mutex;
1697 mutex->lock(mutex);
1698 count += this->segments[segment].count;
1699 mutex->unlock(mutex);
1700 }
1701 return count;
1702 }
1703
1704 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
1705 private_ike_sa_manager_t *this, host_t *ip)
1706 {
1707 linked_list_t *list;
1708 u_int segment, row;
1709 rwlock_t *lock;
1710 chunk_t addr;
1711 u_int count = 0;
1712
1713 if (ip)
1714 {
1715 addr = ip->get_address(ip);
1716 row = chunk_hash(addr) & this->table_mask;
1717 segment = row & this->segment_mask;
1718 lock = this->half_open_segments[segment & this->segment_mask].lock;
1719 lock->read_lock(lock);
1720 if ((list = this->half_open_table[row]) != NULL)
1721 {
1722 half_open_t *current;
1723
1724 if (list->find_first(list, (linked_list_match_t)half_open_match,
1725 (void**)&current, &addr) == SUCCESS)
1726 {
1727 count = current->count;
1728 }
1729 }
1730 lock->unlock(lock);
1731 }
1732 else
1733 {
1734 for (segment = 0; segment < this->segment_count; segment++)
1735 {
1736 lock = this->half_open_segments[segment & this->segment_mask].lock;
1737 lock->read_lock(lock);
1738 count += this->half_open_segments[segment].count;
1739 lock->unlock(lock);
1740 }
1741 }
1742 return count;
1743 }
1744
1745 METHOD(ike_sa_manager_t, flush, void,
1746 private_ike_sa_manager_t *this)
1747 {
1748 /* destroy all list entries */
1749 enumerator_t *enumerator;
1750 entry_t *entry;
1751 u_int segment;
1752
1753 lock_all_segments(this);
1754 DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
1755 /* Step 1: drive out all waiting threads */
1756 DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
1757 enumerator = create_table_enumerator(this);
1758 while (enumerator->enumerate(enumerator, &entry, &segment))
1759 {
1760 /* do not accept new threads, drive out waiting threads */
1761 entry->driveout_new_threads = TRUE;
1762 entry->driveout_waiting_threads = TRUE;
1763 }
1764 enumerator->destroy(enumerator);
1765 DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
1766 /* Step 2: wait until all are gone */
1767 enumerator = create_table_enumerator(this);
1768 while (enumerator->enumerate(enumerator, &entry, &segment))
1769 {
1770 while (entry->waiting_threads || entry->checked_out)
1771 {
1772 /* wake up all */
1773 entry->condvar->broadcast(entry->condvar);
1774 /* go sleeping until they are gone */
1775 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
1776 }
1777 }
1778 enumerator->destroy(enumerator);
1779 DBG2(DBG_MGR, "delete all IKE_SA's");
1780 /* Step 3: initiate deletion of all IKE_SAs */
1781 enumerator = create_table_enumerator(this);
1782 while (enumerator->enumerate(enumerator, &entry, &segment))
1783 {
1784 charon->bus->set_sa(charon->bus, entry->ike_sa);
1785 if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
1786 { /* as the delete never gets processed, fire down events */
1787 switch (entry->ike_sa->get_state(entry->ike_sa))
1788 {
1789 case IKE_ESTABLISHED:
1790 case IKE_REKEYING:
1791 case IKE_DELETING:
1792 charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
1793 break;
1794 default:
1795 break;
1796 }
1797 }
1798 entry->ike_sa->delete(entry->ike_sa);
1799 }
1800 enumerator->destroy(enumerator);
1801
1802 DBG2(DBG_MGR, "destroy all entries");
1803 /* Step 4: destroy all entries */
1804 enumerator = create_table_enumerator(this);
1805 while (enumerator->enumerate(enumerator, &entry, &segment))
1806 {
1807 charon->bus->set_sa(charon->bus, entry->ike_sa);
1808 if (entry->half_open)
1809 {
1810 remove_half_open(this, entry);
1811 }
1812 if (entry->my_id && entry->other_id)
1813 {
1814 remove_connected_peers(this, entry);
1815 }
1816 if (entry->init_hash.ptr)
1817 {
1818 remove_init_hash(this, entry->init_hash);
1819 }
1820 remove_entry_at((private_enumerator_t*)enumerator);
1821 entry_destroy(entry);
1822 }
1823 enumerator->destroy(enumerator);
1824 charon->bus->set_sa(charon->bus, NULL);
1825 unlock_all_segments(this);
1826
1827 this->rng->destroy(this->rng);
1828 this->rng = NULL;
1829 this->hasher->destroy(this->hasher);
1830 this->hasher = NULL;
1831 }
1832
1833 METHOD(ike_sa_manager_t, destroy, void,
1834 private_ike_sa_manager_t *this)
1835 {
1836 u_int i;
1837
1838 for (i = 0; i < this->table_size; i++)
1839 {
1840 DESTROY_IF(this->ike_sa_table[i]);
1841 DESTROY_IF(this->half_open_table[i]);
1842 DESTROY_IF(this->connected_peers_table[i]);
1843 DESTROY_IF(this->init_hashes_table[i]);
1844 }
1845 free(this->ike_sa_table);
1846 free(this->half_open_table);
1847 free(this->connected_peers_table);
1848 free(this->init_hashes_table);
1849 for (i = 0; i < this->segment_count; i++)
1850 {
1851 this->segments[i].mutex->destroy(this->segments[i].mutex);
1852 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
1853 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
1854 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
1855 }
1856 free(this->segments);
1857 free(this->half_open_segments);
1858 free(this->connected_peers_segments);
1859 free(this->init_hashes_segments);
1860
1861 free(this);
1862 }
1863
1864 /**
1865 * This function returns the smallest power of two that is equal to or
1866 * greater than the given number. It works by setting all bits to the right
1867 * of the most significant 1 bit and then incrementing the result so that it
1868 * rolls over to the next power of two. Note: returns 0 for n == 0
1869 */
1870 static u_int get_nearest_powerof2(u_int n)
1871 {
1872 u_int i;
1873
1874 --n;
1875 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
1876 {
1877 n |= n >> i;
1878 }
1879 return ++n;
1880 }
1881
1882 /*
1883 * Described in header.
1884 */
1885 ike_sa_manager_t *ike_sa_manager_create()
1886 {
1887 private_ike_sa_manager_t *this;
1888 u_int i;
1889
1890 INIT(this,
1891 .public = {
1892 .checkout = _checkout,
1893 .checkout_new = _checkout_new,
1894 .checkout_by_message = _checkout_by_message,
1895 .checkout_by_config = _checkout_by_config,
1896 .checkout_by_id = _checkout_by_id,
1897 .checkout_by_name = _checkout_by_name,
1898 .check_uniqueness = _check_uniqueness,
1899 .has_contact = _has_contact,
1900 .create_enumerator = _create_enumerator,
1901 .create_id_enumerator = _create_id_enumerator,
1902 .checkin = _checkin,
1903 .checkin_and_destroy = _checkin_and_destroy,
1904 .get_count = _get_count,
1905 .get_half_open_count = _get_half_open_count,
1906 .flush = _flush,
1907 .destroy = _destroy,
1908 },
1909 );
1910
1911 this->hasher = lib->crypto->create_hasher(lib->crypto, HASH_PREFERRED);
1912 if (this->hasher == NULL)
1913 {
1914 DBG1(DBG_MGR, "manager initialization failed, no hasher supported");
1915 free(this);
1916 return NULL;
1917 }
1918 this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
1919 if (this->rng == NULL)
1920 {
1921 DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
1922 this->hasher->destroy(this->hasher);
1923 free(this);
1924 return NULL;
1925 }
1926
1927 this->table_size = get_nearest_powerof2(lib->settings->get_int(lib->settings,
1928 "charon.ikesa_table_size", DEFAULT_HASHTABLE_SIZE));
1929 this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
1930 this->table_mask = this->table_size - 1;
1931
1932 this->segment_count = get_nearest_powerof2(lib->settings->get_int(lib->settings,
1933 "charon.ikesa_table_segments", DEFAULT_SEGMENT_COUNT));
1934 this->segment_count = max(1, min(this->segment_count, this->table_size));
1935 this->segment_mask = this->segment_count - 1;
1936
1937 this->ike_sa_table = calloc(this->table_size, sizeof(linked_list_t*));
1938 this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
1939 for (i = 0; i < this->segment_count; i++)
1940 {
1941 this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
1942 this->segments[i].count = 0;
1943 }
1944
1945 /* we use the same table parameters for the table to track half-open SAs */
1946 this->half_open_table = calloc(this->table_size, sizeof(linked_list_t*));
1947 this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
1948 for (i = 0; i < this->segment_count; i++)
1949 {
1950 this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
1951 this->half_open_segments[i].count = 0;
1952 }
1953
1954 /* also for the hash table used for duplicate tests */
1955 this->connected_peers_table = calloc(this->table_size, sizeof(linked_list_t*));
1956 this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
1957 for (i = 0; i < this->segment_count; i++)
1958 {
1959 this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
1960 this->connected_peers_segments[i].count = 0;
1961 }
1962
1963 /* and again for the table of hashes of seen initial IKE messages */
1964 this->init_hashes_table = calloc(this->table_size, sizeof(linked_list_t*));
1965 this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
1966 for (i = 0; i < this->segment_count; i++)
1967 {
1968 this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
1969 this->init_hashes_segments[i].count = 0;
1970 }
1971
1972 this->reuse_ikesa = lib->settings->get_bool(lib->settings,
1973 "charon.reuse_ikesa", TRUE);
1974 return &this->public;
1975 }