Don't use linked_list_t for buckets in main IKE_SA hash table.
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2012 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <utils/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31
32 /* the default size of the hash table (MUST be a power of 2) */
33 #define DEFAULT_HASHTABLE_SIZE 1
34
35 /* the maximum size of the hash table (MUST be a power of 2) */
36 #define MAX_HASHTABLE_SIZE (1 << 30)
37
38 /* the default number of segments (MUST be a power of 2) */
39 #define DEFAULT_SEGMENT_COUNT 1
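/* Both table and segment sizes must be powers of two because rows and
 * segments are selected by masking with (size - 1) rather than a modulo;
 * user-supplied values are rounded up with get_nearest_powerof2() in
 * ike_sa_manager_create(). For example, with a table size of 8 a hash of
 * 13 maps to row 13 & 7 = 5. */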
40
41 typedef struct entry_t entry_t;
42
43 /**
44 * An entry in the hash table, contains IKE_SA, locking and lookup data.
45 */
46 struct entry_t {
47
48 /**
49 * Number of threads waiting for this ike_sa_t object.
50 */
51 int waiting_threads;
52
53 /**
54 * Condvar where threads can wait until ike_sa_t object is free for use again.
55 */
56 condvar_t *condvar;
57
58 /**
59 * Is this ike_sa currently checked out?
60 */
61 bool checked_out;
62
63 /**
64 * Does this SA drive out new threads?
65 */
66 bool driveout_new_threads;
67
68 /**
69 * Does this SA drive out waiting threads?
70 */
71 bool driveout_waiting_threads;
72
73 /**
74 * Identification of an IKE_SA (SPIs).
75 */
76 ike_sa_id_t *ike_sa_id;
77
78 /**
79 * The contained ike_sa_t object.
80 */
81 ike_sa_t *ike_sa;
82
83 /**
84 * hash of the IKE_SA_INIT message, used to detect retransmissions
85 */
86 chunk_t init_hash;
87
88 /**
89 * remote host address, required for DoS detection and duplicate
90 * checking (host with same my_id and other_id is *not* considered
91 * a duplicate if the address family differs)
92 */
93 host_t *other;
94
95 /**
96 * As responder: Is this SA half-open?
97 */
98 bool half_open;
99
100 /**
101 * own identity, required for duplicate checking
102 */
103 identification_t *my_id;
104
105 /**
106 * remote identity, required for duplicate checking
107 */
108 identification_t *other_id;
109
110 /**
111 * message ID currently processing, if any
112 */
113 u_int32_t message_id;
114 };
115
116 /**
117 * Implementation of entry_t.destroy.
118 */
119 static status_t entry_destroy(entry_t *this)
120 {
121 /* also destroy IKE SA */
122 this->ike_sa->destroy(this->ike_sa);
123 this->ike_sa_id->destroy(this->ike_sa_id);
124 chunk_free(&this->init_hash);
125 DESTROY_IF(this->other);
126 DESTROY_IF(this->my_id);
127 DESTROY_IF(this->other_id);
128 this->condvar->destroy(this->condvar);
129 free(this);
130 return SUCCESS;
131 }
132
133 /**
134 * Creates a new entry for the ike_sa_t list.
135 */
136 static entry_t *entry_create()
137 {
138 entry_t *this = malloc_thing(entry_t);
139
140 this->waiting_threads = 0;
141 this->condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
142
143 /* we set the checked_out flag when we actually hand it out */
144 this->checked_out = FALSE;
145 this->driveout_new_threads = FALSE;
146 this->driveout_waiting_threads = FALSE;
147 this->message_id = -1;
148 this->init_hash = chunk_empty;
149 this->other = NULL;
150 this->half_open = FALSE;
151 this->my_id = NULL;
152 this->other_id = NULL;
153 this->ike_sa_id = NULL;
154 this->ike_sa = NULL;
155
156 return this;
157 }
158
159 /**
160 * Function that matches entry_t objects by ike_sa_id_t.
161 */
162 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
163 {
164 if (id->equals(id, entry->ike_sa_id))
165 {
166 return TRUE;
167 }
168 if ((id->get_responder_spi(id) == 0 ||
169 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
170 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
171 {
172 /* this is TRUE for IKE_SAs that we initiated but for which we have not yet received a response */
173 return TRUE;
174 }
175 return FALSE;
176 }
177
178 /**
179 * Function that matches entry_t objects by ike_sa_t pointers.
180 */
181 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
182 {
183 return entry->ike_sa == ike_sa;
184 }
185
186 /**
187 * Hash function for ike_sa_id_t objects.
188 */
189 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
190 {
191 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6); they just have to be
192 * locally unique, so we use our randomly allocated SPI whether we are
193 * initiator or responder to ensure a good distribution. The latter is not
194 * possible for IKEv1, as we can't tell from the IKE header whether we are
195 * the original initiator or not. But since RFC 2408, section 2.5.3 proposes
196 * that SPIs (cookies) be allocated near randomly (we allocate them randomly
197 * anyway), it seems safe to always use the initiator SPI. */
198 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
199 ike_sa_id->is_initiator(ike_sa_id))
200 {
201 return ike_sa_id->get_initiator_spi(ike_sa_id);
202 }
203 return ike_sa_id->get_responder_spi(ike_sa_id);
204 }
205
206 typedef struct half_open_t half_open_t;
207
208 /**
209 * Struct to manage half-open IKE_SAs per peer.
210 */
211 struct half_open_t {
212 /** chunk of remote host address */
213 chunk_t other;
214
215 /** the number of half-open IKE_SAs with that host */
216 u_int count;
217 };
218
219 /**
220 * Destroys a half_open_t object.
221 */
222 static void half_open_destroy(half_open_t *this)
223 {
224 chunk_free(&this->other);
225 free(this);
226 }
227
228 /**
229 * Function that matches half_open_t objects by the given IP address chunk.
230 */
231 static bool half_open_match(half_open_t *half_open, chunk_t *addr)
232 {
233 return chunk_equals(*addr, half_open->other);
234 }
235
236 typedef struct connected_peers_t connected_peers_t;
237
238 struct connected_peers_t {
239 /** own identity */
240 identification_t *my_id;
241
242 /** remote identity */
243 identification_t *other_id;
244
245 /** ip address family of peer */
246 int family;
247
248 /** list of ike_sa_id_t objects of IKE_SAs between the two identities */
249 linked_list_t *sas;
250 };
251
252 static void connected_peers_destroy(connected_peers_t *this)
253 {
254 this->my_id->destroy(this->my_id);
255 this->other_id->destroy(this->other_id);
256 this->sas->destroy(this->sas);
257 free(this);
258 }
259
260 /**
261 * Function that matches connected_peers_t objects by the given ids.
262 */
263 static bool connected_peers_match(connected_peers_t *connected_peers,
264 identification_t *my_id, identification_t *other_id,
265 uintptr_t family)
266 {
267 return my_id->equals(my_id, connected_peers->my_id) &&
268 other_id->equals(other_id, connected_peers->other_id) &&
269 (!family || family == connected_peers->family);
270 }
271
272 typedef struct segment_t segment_t;
273
274 /**
275 * Struct to manage segments of the hash table.
276 */
277 struct segment_t {
278 /** mutex to access a segment exclusively */
279 mutex_t *mutex;
280
281 /** the number of entries in this segment */
282 u_int count;
283 };
284
285 typedef struct shareable_segment_t shareable_segment_t;
286
287 /**
288 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
289 */
290 struct shareable_segment_t {
291 /** rwlock to access a segment non-/exclusively */
292 rwlock_t *lock;
293
294 /** the number of entries in this segment - in case of the "half-open table"
295 * it's the sum of all half_open_t.count in a segment. */
296 u_int count;
297 };
298
299 typedef struct table_item_t table_item_t;
300
301 /**
302 * Instead of using linked_list_t for each bucket we store the data in our own
303 * list to save memory.
304 */
305 struct table_item_t {
306 /** data of this item */
307 void *value;
308
309 /** next item in the overflow list */
310 table_item_t *next;
311 };
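/* Colliding entries in a bucket are chained through the next pointer:
 * put_entry() prepends new items at the front of a bucket, while
 * remove_entry() and remove_entry_at() unlink and free them again. */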
312
313 typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;
314
315 /**
316 * Additional private members of ike_sa_manager_t.
317 */
318 struct private_ike_sa_manager_t {
319 /**
320 * Public interface of ike_sa_manager_t.
321 */
322 ike_sa_manager_t public;
323
324 /**
325 * Hash table with entries for the ike_sa_t objects.
326 */
327 table_item_t **ike_sa_table;
328
329 /**
330 * The size of the hash table.
331 */
332 u_int table_size;
333
334 /**
335 * Mask to map the hashes to table rows.
336 */
337 u_int table_mask;
338
339 /**
340 * Segments of the hash table.
341 */
342 segment_t *segments;
343
344 /**
345 * The number of segments.
346 */
347 u_int segment_count;
348
349 /**
350 * Mask to map a table row to a segment.
351 */
352 u_int segment_mask;
353
354 /**
355 * Hash table with half_open_t objects.
356 */
357 linked_list_t **half_open_table;
358
359 /**
360 * Segments of the "half-open" hash table.
361 */
362 shareable_segment_t *half_open_segments;
363
364 /**
365 * Hash table with connected_peers_t objects.
366 */
367 linked_list_t **connected_peers_table;
368
369 /**
370 * Segments of the "connected peers" hash table.
371 */
372 shareable_segment_t *connected_peers_segments;
373
374 /**
375 * Hash table with chunk_t objects.
376 */
377 linked_list_t **init_hashes_table;
378
379 /**
380 * Segments of the "hashes" hash table.
381 */
382 segment_t *init_hashes_segments;
383
384 /**
385 * RNG to get random SPIs for our side
386 */
387 rng_t *rng;
388
389 /**
390 * SHA1 hasher for IKE_SA_INIT retransmit detection
391 */
392 hasher_t *hasher;
393
394 /**
395 * reuse existing IKE_SAs in checkout_by_config
396 */
397 bool reuse_ikesa;
398 };
399
400 /**
401 * Acquire a lock to access the segment of the table row with the given index.
402 * It also works with the segment index directly.
403 */
404 static inline void lock_single_segment(private_ike_sa_manager_t *this,
405 u_int index)
406 {
407 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
408 lock->lock(lock);
409 }
410
411 /**
412 * Release the lock required to access the segment of the table row with the given index.
413 * It also works with the segment index directly.
414 */
415 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
416 u_int index)
417 {
418 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
419 lock->unlock(lock);
420 }
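/* Note that segment_mask is segment_count - 1 and segment indexes are always
 * smaller than segment_count, so masking a segment index leaves it unchanged;
 * that is why the two helpers above accept either a row or a segment index. */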
421
422 /**
423 * Lock all segments
424 */
425 static void lock_all_segments(private_ike_sa_manager_t *this)
426 {
427 u_int i;
428
429 for (i = 0; i < this->segment_count; i++)
430 {
431 this->segments[i].mutex->lock(this->segments[i].mutex);
432 }
433 }
434
435 /**
436 * Unlock all segments
437 */
438 static void unlock_all_segments(private_ike_sa_manager_t *this)
439 {
440 u_int i;
441
442 for (i = 0; i < this->segment_count; i++)
443 {
444 this->segments[i].mutex->unlock(this->segments[i].mutex);
445 }
446 }
447
448 typedef struct private_enumerator_t private_enumerator_t;
449
450 /**
451 * hash table enumerator implementation
452 */
453 struct private_enumerator_t {
454
455 /**
456 * implements enumerator interface
457 */
458 enumerator_t enumerator;
459
460 /**
461 * associated ike_sa_manager_t
462 */
463 private_ike_sa_manager_t *manager;
464
465 /**
466 * current segment index
467 */
468 u_int segment;
469
470 /**
471 * currently enumerating entry
472 */
473 entry_t *entry;
474
475 /**
476 * current table row index
477 */
478 u_int row;
479
480 /**
481 * current table item
482 */
483 table_item_t *current;
484
485 /**
486 * previous table item
487 */
488 table_item_t *prev;
489 };
490
491 METHOD(enumerator_t, enumerate, bool,
492 private_enumerator_t *this, entry_t **entry, u_int *segment)
493 {
494 if (this->entry)
495 {
496 this->entry->condvar->signal(this->entry->condvar);
497 this->entry = NULL;
498 }
499 while (this->segment < this->manager->segment_count)
500 {
501 while (this->row < this->manager->table_size)
502 {
503 this->prev = this->current;
504 if (this->current)
505 {
506 this->current = this->current->next;
507 }
508 else
509 {
510 lock_single_segment(this->manager, this->segment);
511 this->current = this->manager->ike_sa_table[this->row];
512 }
513 if (this->current)
514 {
515 *entry = this->entry = this->current->value;
516 *segment = this->segment;
517 return TRUE;
518 }
519 unlock_single_segment(this->manager, this->segment);
520 this->row += this->manager->segment_count;
521 }
522 this->segment++;
523 this->row = this->segment;
524 }
525 return FALSE;
526 }
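/* The enumerator walks the table segment by segment: within segment s it
 * visits rows s, s + segment_count, s + 2 * segment_count, ..., so only one
 * segment lock is held at any time. The lock on the current segment is kept
 * while an entry is handed out and released again on the next call or in
 * enumerator_destroy(). */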
527
528 METHOD(enumerator_t, enumerator_destroy, void,
529 private_enumerator_t *this)
530 {
531 if (this->entry)
532 {
533 this->entry->condvar->signal(this->entry->condvar);
534 }
535 if (this->current)
536 {
537 unlock_single_segment(this->manager, this->segment);
538 }
539 free(this);
540 }
541
542 /**
543 * Creates an enumerator to enumerate the entries in the hash table.
544 */
545 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
546 {
547 private_enumerator_t *enumerator;
548
549 INIT(enumerator,
550 .enumerator = {
551 .enumerate = (void*)_enumerate,
552 .destroy = _enumerator_destroy,
553 },
554 .manager = this,
555 );
556 return &enumerator->enumerator;
557 }
558
559 /**
560 * Put an entry into the hash table.
561 * Note: The caller has to unlock the returned segment.
562 */
563 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
564 {
565 table_item_t *current, *item;
566 u_int row, segment;
567
568 INIT(item,
569 .value = entry,
570 );
571
572 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
573 segment = row & this->segment_mask;
574
575 lock_single_segment(this, segment);
576 current = this->ike_sa_table[row];
577 if (current)
578 { /* insert at the front of current bucket */
579 item->next = current;
580 }
581 this->ike_sa_table[row] = item;
582 this->segments[segment].count++;
583 return segment;
584 }
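/* The segment returned by put_entry() is still locked; this allows callers
 * such as checkout_by_message() and checkin() to update the entry (e.g. set
 * checked_out) before any other thread can find it in the table. */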
585
586 /**
587 * Remove an entry from the hash table.
588 * Note: The caller MUST have a lock on the segment of this entry.
589 */
590 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
591 {
592 table_item_t *item, *prev = NULL;
593 u_int row, segment;
594
595 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
596 segment = row & this->segment_mask;
597 item = this->ike_sa_table[row];
598 while (item)
599 {
600 if (item->value == entry)
601 {
602 if (prev)
603 {
604 prev->next = item->next;
605 }
606 else
607 {
608 this->ike_sa_table[row] = item->next;
609 }
610 this->segments[segment].count--;
611 free(item);
612 break;
613 }
614 prev = item;
615 item = item->next;
616 }
617 }
618
619 /**
620 * Remove the entry at the current enumerator position.
621 */
622 static void remove_entry_at(private_enumerator_t *this)
623 {
624 this->entry = NULL;
625 if (this->current)
626 {
627 table_item_t *current = this->current;
628
629 this->manager->segments[this->segment].count--;
630 this->current = this->prev;
631
632 if (this->prev)
633 {
634 this->prev->next = current->next;
635 }
636 else
637 {
638 this->manager->ike_sa_table[this->row] = current->next;
639 unlock_single_segment(this->manager, this->segment);
640 }
641 free(current);
642 }
643 }
644
645 /**
646 * Find an entry using the provided match function to compare the entries for
647 * equality.
648 */
649 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
650 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
651 linked_list_match_t match, void *p1, void *p2)
652 {
653 table_item_t *item;
654 u_int row, seg;
655
656 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
657 seg = row & this->segment_mask;
658
659 lock_single_segment(this, seg);
660 item = this->ike_sa_table[row];
661 while (item)
662 {
663 if (match(item->value, p1, p2))
664 {
665 *entry = item->value;
666 *segment = seg;
667 /* the locked segment has to be unlocked by the caller */
668 return SUCCESS;
669 }
670 item = item->next;
671 }
672 unlock_single_segment(this, seg);
673 return NOT_FOUND;
674 }
675
676 /**
677 * Find an entry by ike_sa_id_t.
678 * Note: On SUCCESS, the caller has to unlock the segment.
679 */
680 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
681 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
682 {
683 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
684 (linked_list_match_t)entry_match_by_id, ike_sa_id, NULL);
685 }
686
687 /**
688 * Find an entry by IKE_SA pointer.
689 * Note: On SUCCESS, the caller has to unlock the segment.
690 */
691 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
692 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
693 {
694 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
695 (linked_list_match_t)entry_match_by_sa, ike_sa, NULL);
696 }
697
698 /**
699 * Wait until no other thread is using an IKE_SA; returns FALSE if the entry
700 * is not acquirable.
701 */
702 static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
703 u_int segment)
704 {
705 if (entry->driveout_new_threads)
706 {
707 /* we are not allowed to get this */
708 return FALSE;
709 }
710 while (entry->checked_out && !entry->driveout_waiting_threads)
711 {
712 /* so wait until we can get it for ourselves.
713 * we register ourselves as waiting. */
714 entry->waiting_threads++;
715 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
716 entry->waiting_threads--;
717 }
718 /* hm, a deletion request forbids us from getting this SA, get the next one */
719 if (entry->driveout_waiting_threads)
720 {
721 /* we must signal here, others may be waiting on it, too */
722 entry->condvar->signal(entry->condvar);
723 return FALSE;
724 }
725 return TRUE;
726 }
727
728 /**
729 * Put a half-open SA into the hash table.
730 */
731 static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
732 {
733 half_open_t *half_open = NULL;
734 linked_list_t *list;
735 chunk_t addr;
736 u_int row, segment;
737 rwlock_t *lock;
738
739 addr = entry->other->get_address(entry->other);
740 row = chunk_hash(addr) & this->table_mask;
741 segment = row & this->segment_mask;
742 lock = this->half_open_segments[segment].lock;
743 lock->write_lock(lock);
744 list = this->half_open_table[row];
745 if (list)
746 {
747 half_open_t *current;
748
749 if (list->find_first(list, (linked_list_match_t)half_open_match,
750 (void**)&current, &addr) == SUCCESS)
751 {
752 half_open = current;
753 half_open->count++;
754 this->half_open_segments[segment].count++;
755 }
756 }
757 else
758 {
759 list = this->half_open_table[row] = linked_list_create();
760 }
761
762 if (!half_open)
763 {
764 INIT(half_open,
765 .other = chunk_clone(addr),
766 .count = 1,
767 );
768 list->insert_last(list, half_open);
769 this->half_open_segments[segment].count++;
770 }
771 lock->unlock(lock);
772 }
773
774 /**
775 * Remove a half-open SA from the hash table.
776 */
777 static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
778 {
779 linked_list_t *list;
780 chunk_t addr;
781 u_int row, segment;
782 rwlock_t *lock;
783
784 addr = entry->other->get_address(entry->other);
785 row = chunk_hash(addr) & this->table_mask;
786 segment = row & this->segment_mask;
787 lock = this->half_open_segments[segment].lock;
788 lock->write_lock(lock);
789 list = this->half_open_table[row];
790 if (list)
791 {
792 half_open_t *current;
793 enumerator_t *enumerator;
794
795 enumerator = list->create_enumerator(list);
796 while (enumerator->enumerate(enumerator, &current))
797 {
798 if (half_open_match(current, &addr))
799 {
800 if (--current->count == 0)
801 {
802 list->remove_at(list, enumerator);
803 half_open_destroy(current);
804 }
805 this->half_open_segments[segment].count--;
806 break;
807 }
808 }
809 enumerator->destroy(enumerator);
810 }
811 lock->unlock(lock);
812 }
813
814 /**
815 * Put an SA between two peers into the hash table.
816 */
817 static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
818 {
819 connected_peers_t *connected_peers = NULL;
820 chunk_t my_id, other_id;
821 linked_list_t *list;
822 u_int row, segment;
823 rwlock_t *lock;
824
825 my_id = entry->my_id->get_encoding(entry->my_id);
826 other_id = entry->other_id->get_encoding(entry->other_id);
827 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
828 segment = row & this->segment_mask;
829 lock = this->connected_peers_segments[segment].lock;
830 lock->write_lock(lock);
831 list = this->connected_peers_table[row];
832 if (list)
833 {
834 connected_peers_t *current;
835
836 if (list->find_first(list, (linked_list_match_t)connected_peers_match,
837 (void**)&current, entry->my_id, entry->other_id,
838 (uintptr_t)entry->other->get_family(entry->other)) == SUCCESS)
839 {
840 connected_peers = current;
841 if (connected_peers->sas->find_first(connected_peers->sas,
842 (linked_list_match_t)entry->ike_sa_id->equals,
843 NULL, entry->ike_sa_id) == SUCCESS)
844 {
845 lock->unlock(lock);
846 return;
847 }
848 }
849 }
850 else
851 {
852 list = this->connected_peers_table[row] = linked_list_create();
853 }
854
855 if (!connected_peers)
856 {
857 INIT(connected_peers,
858 .my_id = entry->my_id->clone(entry->my_id),
859 .other_id = entry->other_id->clone(entry->other_id),
860 .family = entry->other->get_family(entry->other),
861 .sas = linked_list_create(),
862 );
863 list->insert_last(list, connected_peers);
864 }
865 connected_peers->sas->insert_last(connected_peers->sas,
866 entry->ike_sa_id->clone(entry->ike_sa_id));
867 this->connected_peers_segments[segment].count++;
868 lock->unlock(lock);
869 }
870
871 /**
872 * Remove an SA between two peers from the hash table.
873 */
874 static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
875 {
876 chunk_t my_id, other_id;
877 linked_list_t *list;
878 u_int row, segment;
879 rwlock_t *lock;
880
881 my_id = entry->my_id->get_encoding(entry->my_id);
882 other_id = entry->other_id->get_encoding(entry->other_id);
883 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
884 segment = row & this->segment_mask;
885
886 lock = this->connected_peers_segments[segment].lock;
887 lock->write_lock(lock);
888 list = this->connected_peers_table[row];
889 if (list)
890 {
891 connected_peers_t *current;
892 enumerator_t *enumerator;
893
894 enumerator = list->create_enumerator(list);
895 while (enumerator->enumerate(enumerator, &current))
896 {
897 if (connected_peers_match(current, entry->my_id, entry->other_id,
898 (uintptr_t)entry->other->get_family(entry->other)))
899 {
900 ike_sa_id_t *ike_sa_id;
901 enumerator_t *inner;
902
903 inner = current->sas->create_enumerator(current->sas);
904 while (inner->enumerate(inner, &ike_sa_id))
905 {
906 if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
907 {
908 current->sas->remove_at(current->sas, inner);
909 ike_sa_id->destroy(ike_sa_id);
910 this->connected_peers_segments[segment].count--;
911 break;
912 }
913 }
914 inner->destroy(inner);
915 if (current->sas->get_count(current->sas) == 0)
916 {
917 list->remove_at(list, enumerator);
918 connected_peers_destroy(current);
919 }
920 break;
921 }
922 }
923 enumerator->destroy(enumerator);
924 }
925 lock->unlock(lock);
926 }
927
928 /**
929 * Check if we have already created an IKE_SA based on the initial IKE message
930 * with the given hash.
931 * If not, the hash is stored; the hash data is not(!) cloned.
932 *
933 * @returns TRUE if the message with the given hash was seen before
934 */
935 static bool check_and_put_init_hash(private_ike_sa_manager_t *this,
936 chunk_t init_hash)
937 {
938 chunk_t *clone;
939 linked_list_t *list;
940 u_int row, segment;
941 mutex_t *mutex;
942 chunk_t *chunk;
943
944 row = chunk_hash(init_hash) & this->table_mask;
945 segment = row & this->segment_mask;
946 mutex = this->init_hashes_segments[segment].mutex;
947 mutex->lock(mutex);
948 list = this->init_hashes_table[row];
949 if (list)
950 {
951 chunk_t *current;
952
953 if (list->find_first(list, (linked_list_match_t)chunk_equals_ptr,
954 (void**)&current, &init_hash) == SUCCESS)
955 {
956 mutex->unlock(mutex);
957 return TRUE;
958 }
959 }
960 else
961 {
962 list = this->init_hashes_table[row] = linked_list_create();
963 }
964
965 INIT(chunk,
966 .len = init_hash.len,
967 .ptr = init_hash.ptr,
968 );
969 list->insert_last(list, chunk);
970 mutex->unlock(mutex);
971 return FALSE;
972 }
973
974 /**
975 * Remove the hash of an initial IKE message from the cache.
976 */
977 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
978 {
979 linked_list_t *list;
980 u_int row, segment;
981 mutex_t *mutex;
982
983 row = chunk_hash(init_hash) & this->table_mask;
984 segment = row & this->segment_mask;
985 mutex = this->init_hashes_segments[segment].mutex;
986 mutex->lock(mutex);
987 list = this->init_hashes_table[row];
988 if (list)
989 {
990 enumerator_t *enumerator;
991 chunk_t *current;
992
993 enumerator = list->create_enumerator(list);
994 while (enumerator->enumerate(enumerator, &current))
995 {
996 if (chunk_equals_ptr(current, &init_hash))
997 {
998 list->remove_at(list, enumerator);
999 free(current);
1000 break;
1001 }
1002 }
1003 enumerator->destroy(enumerator);
1004 }
1005 mutex->unlock(mutex);
1006 }
1007
1008 /**
1009 * Get a random SPI for new IKE_SAs
1010 */
1011 static u_int64_t get_spi(private_ike_sa_manager_t *this)
1012 {
1013 u_int64_t spi = 0;
1014
1015 if (this->rng)
1016 {
1017 this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi);
1018 }
1019 return spi;
1020 }
1021
1022 METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
1023 private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
1024 {
1025 ike_sa_t *ike_sa = NULL;
1026 entry_t *entry;
1027 u_int segment;
1028
1029 DBG2(DBG_MGR, "checkout IKE_SA");
1030
1031 if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
1032 {
1033 if (wait_for_entry(this, entry, segment))
1034 {
1035 entry->checked_out = TRUE;
1036 ike_sa = entry->ike_sa;
1037 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1038 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1039 }
1040 unlock_single_segment(this, segment);
1041 }
1042 charon->bus->set_sa(charon->bus, ike_sa);
1043 return ike_sa;
1044 }
1045
1046 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1047 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1048 {
1049 ike_sa_id_t *ike_sa_id;
1050 ike_sa_t *ike_sa;
1051 u_int8_t ike_version;
1052
1053 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1054
1055 if (initiator)
1056 {
1057 ike_sa_id = ike_sa_id_create(ike_version, get_spi(this), 0, TRUE);
1058 }
1059 else
1060 {
1061 ike_sa_id = ike_sa_id_create(ike_version, 0, get_spi(this), FALSE);
1062 }
1063 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1064 ike_sa_id->destroy(ike_sa_id);
1065
1066 if (ike_sa)
1067 {
1068 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1069 ike_sa->get_unique_id(ike_sa));
1070 }
1071 return ike_sa;
1072 }
1073
1074 METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
1075 private_ike_sa_manager_t* this, message_t *message)
1076 {
1077 u_int segment;
1078 entry_t *entry;
1079 ike_sa_t *ike_sa = NULL;
1080 ike_sa_id_t *id;
1081 ike_version_t ike_version;
1082 bool is_init = FALSE;
1083
1084 id = message->get_ike_sa_id(message);
1085 /* clone the IKE_SA ID so we can modify the initiator flag */
1086 id = id->clone(id);
1087 id->switch_initiator(id);
1088
1089 DBG2(DBG_MGR, "checkout IKE_SA by message");
1090
1091 if (id->get_responder_spi(id) == 0)
1092 {
1093 if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
1094 {
1095 if (message->get_exchange_type(message) == IKE_SA_INIT &&
1096 message->get_request(message))
1097 {
1098 ike_version = IKEV2;
1099 is_init = TRUE;
1100 }
1101 }
1102 else
1103 {
1104 if (message->get_exchange_type(message) == ID_PROT ||
1105 message->get_exchange_type(message) == AGGRESSIVE)
1106 {
1107 ike_version = IKEV1;
1108 is_init = TRUE;
1109 if (id->is_initiator(id))
1110 { /* not set in IKEv1, switch back before applying to new SA */
1111 id->switch_initiator(id);
1112 }
1113 }
1114 }
1115 }
1116
1117 if (is_init && this->hasher)
1118 { /* initial request. checking for the hasher prevents crashes once
1119 * flush() has been called */
1120 chunk_t hash;
1121
1122 this->hasher->allocate_hash(this->hasher,
1123 message->get_packet_data(message), &hash);
1124
1125 /* ensure this is not a retransmit of an already handled init message */
1126 if (check_and_put_init_hash(this, hash))
1127 {
1128 chunk_free(&hash);
1129 id->destroy(id);
1130 DBG1(DBG_MGR, "ignoring %s, already processing",
1131 ike_version == IKEV2 ? "IKE_SA_INIT" : "initial IKE message");
1132 return NULL;
1133 }
1134
1135 /* no IKE_SA yet, create a new one */
1136 id->set_responder_spi(id, get_spi(this));
1137 ike_sa = ike_sa_create(id, FALSE, ike_version);
1138 if (ike_sa)
1139 {
1140 entry = entry_create();
1141 entry->ike_sa = ike_sa;
1142 entry->ike_sa_id = id->clone(id);
1143
1144 segment = put_entry(this, entry);
1145 entry->checked_out = TRUE;
1146 unlock_single_segment(this, segment);
1147
1148 entry->message_id = message->get_message_id(message);
1149 entry->init_hash = hash;
1150
1151 DBG2(DBG_MGR, "created IKE_SA %s[%u]",
1152 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1153 }
1154 else
1155 {
1156 remove_init_hash(this, hash);
1157 chunk_free(&hash);
1158 DBG1(DBG_MGR, "ignoring message, no such IKE_SA");
1159 }
1160 id->destroy(id);
1161 charon->bus->set_sa(charon->bus, ike_sa);
1162 return ike_sa;
1163 }
1164
1165 if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
1166 {
1167 /* only check out in IKEv2 if we are not already processing it */
1168 if (message->get_request(message) &&
1169 message->get_message_id(message) == entry->message_id)
1170 {
1171 DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
1172 entry->message_id);
1173 }
1174 else if (wait_for_entry(this, entry, segment))
1175 {
1176 ike_sa_id_t *ike_id;
1177
1178 ike_id = entry->ike_sa->get_id(entry->ike_sa);
1179 entry->checked_out = TRUE;
1180 entry->message_id = message->get_message_id(message);
1181 if (ike_id->get_responder_spi(ike_id) == 0)
1182 {
1183 ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
1184 }
1185 ike_sa = entry->ike_sa;
1186 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1187 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1188 }
1189 unlock_single_segment(this, segment);
1190 }
1191 id->destroy(id);
1192 charon->bus->set_sa(charon->bus, ike_sa);
1193 return ike_sa;
1194 }
1195
1196 METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
1197 private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
1198 {
1199 enumerator_t *enumerator;
1200 entry_t *entry;
1201 ike_sa_t *ike_sa = NULL;
1202 peer_cfg_t *current_peer;
1203 ike_cfg_t *current_ike;
1204 u_int segment;
1205
1206 DBG2(DBG_MGR, "checkout IKE_SA by config");
1207
1208 if (!this->reuse_ikesa)
1209 { /* IKE_SA reuse disabled by config */
1210 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1211 charon->bus->set_sa(charon->bus, ike_sa);
1212 return ike_sa;
1213 }
1214
1215 enumerator = create_table_enumerator(this);
1216 while (enumerator->enumerate(enumerator, &entry, &segment))
1217 {
1218 if (!wait_for_entry(this, entry, segment))
1219 {
1220 continue;
1221 }
1222 if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
1223 { /* skip IKE_SAs which are not usable */
1224 continue;
1225 }
1226
1227 current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
1228 if (current_peer && current_peer->equals(current_peer, peer_cfg))
1229 {
1230 current_ike = current_peer->get_ike_cfg(current_peer);
1231 if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
1232 {
1233 entry->checked_out = TRUE;
1234 ike_sa = entry->ike_sa;
1235 DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
1236 ike_sa->get_unique_id(ike_sa),
1237 current_peer->get_name(current_peer));
1238 break;
1239 }
1240 }
1241 }
1242 enumerator->destroy(enumerator);
1243
1244 if (!ike_sa)
1245 { /* no IKE_SA using such a config, hand out a new one */
1246 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1247 }
1248 charon->bus->set_sa(charon->bus, ike_sa);
1249 return ike_sa;
1250 }
1251
1252 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1253 private_ike_sa_manager_t *this, u_int32_t id, bool child)
1254 {
1255 enumerator_t *enumerator, *children;
1256 entry_t *entry;
1257 ike_sa_t *ike_sa = NULL;
1258 child_sa_t *child_sa;
1259 u_int segment;
1260
1261 DBG2(DBG_MGR, "checkout IKE_SA by ID");
1262
1263 enumerator = create_table_enumerator(this);
1264 while (enumerator->enumerate(enumerator, &entry, &segment))
1265 {
1266 if (wait_for_entry(this, entry, segment))
1267 {
1268 /* look for a child with such a reqid ... */
1269 if (child)
1270 {
1271 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1272 while (children->enumerate(children, (void**)&child_sa))
1273 {
1274 if (child_sa->get_reqid(child_sa) == id)
1275 {
1276 ike_sa = entry->ike_sa;
1277 break;
1278 }
1279 }
1280 children->destroy(children);
1281 }
1282 else /* ... or for an IKE_SA with such a unique id */
1283 {
1284 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1285 {
1286 ike_sa = entry->ike_sa;
1287 }
1288 }
1289 /* got one, return */
1290 if (ike_sa)
1291 {
1292 entry->checked_out = TRUE;
1293 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1294 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1295 break;
1296 }
1297 }
1298 }
1299 enumerator->destroy(enumerator);
1300
1301 charon->bus->set_sa(charon->bus, ike_sa);
1302 return ike_sa;
1303 }
1304
1305 METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1306 private_ike_sa_manager_t *this, char *name, bool child)
1307 {
1308 enumerator_t *enumerator, *children;
1309 entry_t *entry;
1310 ike_sa_t *ike_sa = NULL;
1311 child_sa_t *child_sa;
1312 u_int segment;
1313
1314 enumerator = create_table_enumerator(this);
1315 while (enumerator->enumerate(enumerator, &entry, &segment))
1316 {
1317 if (wait_for_entry(this, entry, segment))
1318 {
1319 /* look for a child with such a policy name ... */
1320 if (child)
1321 {
1322 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1323 while (children->enumerate(children, (void**)&child_sa))
1324 {
1325 if (streq(child_sa->get_name(child_sa), name))
1326 {
1327 ike_sa = entry->ike_sa;
1328 break;
1329 }
1330 }
1331 children->destroy(children);
1332 }
1333 else /* ... or for an IKE_SA with such a connection name */
1334 {
1335 if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1336 {
1337 ike_sa = entry->ike_sa;
1338 }
1339 }
1340 /* got one, return */
1341 if (ike_sa)
1342 {
1343 entry->checked_out = TRUE;
1344 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1345 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1346 break;
1347 }
1348 }
1349 }
1350 enumerator->destroy(enumerator);
1351
1352 charon->bus->set_sa(charon->bus, ike_sa);
1353 return ike_sa;
1354 }
1355
1356 /**
1357 * enumerator filter function, waiting variant
1358 */
1359 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1360 entry_t **in, ike_sa_t **out, u_int *segment)
1361 {
1362 if (wait_for_entry(this, *in, *segment))
1363 {
1364 *out = (*in)->ike_sa;
1365 charon->bus->set_sa(charon->bus, *out);
1366 return TRUE;
1367 }
1368 return FALSE;
1369 }
1370
1371 /**
1372 * enumerator filter function, skipping variant
1373 */
1374 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1375 entry_t **in, ike_sa_t **out, u_int *segment)
1376 {
1377 if (!(*in)->driveout_new_threads &&
1378 !(*in)->driveout_waiting_threads &&
1379 !(*in)->checked_out)
1380 {
1381 *out = (*in)->ike_sa;
1382 charon->bus->set_sa(charon->bus, *out);
1383 return TRUE;
1384 }
1385 return FALSE;
1386 }
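/* In contrast to the waiting variant above, this filter never blocks: entries
 * that are checked out or flagged for driveout are simply skipped. */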
1387
1388 /**
1389 * Reset the thread's current SA after enumeration
1390 */
1391 static void reset_sa(void *data)
1392 {
1393 charon->bus->set_sa(charon->bus, NULL);
1394 }
1395
1396 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1397 private_ike_sa_manager_t* this, bool wait)
1398 {
1399 return enumerator_create_filter(create_table_enumerator(this),
1400 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1401 this, reset_sa);
1402 }
1403
1404 METHOD(ike_sa_manager_t, checkin, void,
1405 private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1406 {
1407 /* to check the SA back in, we look for the pointer of the ike_sa
1408 * in all entries.
1409 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
1410 * on reception of an IKE_SA_INIT response) the lookup will work, but
1411 * updating of the SPI MAY be necessary...
1412 */
1413 entry_t *entry;
1414 ike_sa_id_t *ike_sa_id;
1415 host_t *other;
1416 identification_t *my_id, *other_id;
1417 u_int segment;
1418
1419 ike_sa_id = ike_sa->get_id(ike_sa);
1420 my_id = ike_sa->get_my_id(ike_sa);
1421 other_id = ike_sa->get_other_id(ike_sa);
1422 other = ike_sa->get_other_host(ike_sa);
1423
1424 DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1425 ike_sa->get_unique_id(ike_sa));
1426
1427 /* look for the entry */
1428 if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1429 {
1430 /* ike_sa_id must be updated */
1431 entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
1432 /* signal waiting threads */
1433 entry->checked_out = FALSE;
1434 entry->message_id = -1;
1435 /* check if this SA is half-open */
1436 if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
1437 {
1438 /* not half open anymore */
1439 entry->half_open = FALSE;
1440 remove_half_open(this, entry);
1441 }
1442 else if (entry->half_open && !other->ip_equals(other, entry->other))
1443 {
1444 /* the other host's IP has changed, we must update the hash table */
1445 remove_half_open(this, entry);
1446 DESTROY_IF(entry->other);
1447 entry->other = other->clone(other);
1448 put_half_open(this, entry);
1449 }
1450 else if (!entry->half_open &&
1451 !entry->ike_sa_id->is_initiator(entry->ike_sa_id) &&
1452 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
1453 {
1454 /* this is a new half-open SA */
1455 entry->half_open = TRUE;
1456 entry->other = other->clone(other);
1457 put_half_open(this, entry);
1458 }
1459 DBG2(DBG_MGR, "check-in of IKE_SA successful.");
1460 entry->condvar->signal(entry->condvar);
1461 }
1462 else
1463 {
1464 entry = entry_create();
1465 entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
1466 entry->ike_sa = ike_sa;
1467 segment = put_entry(this, entry);
1468 }
1469
1470 /* apply identities for duplicate test */
1471 if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
1472 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
1473 entry->my_id == NULL && entry->other_id == NULL)
1474 {
1475 if (ike_sa->get_version(ike_sa) == IKEV1)
1476 {
1477 /* If authenticated and received INITIAL_CONTACT,
1478 * delete any existing IKE_SAs with that peer. */
1479 if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
1480 {
1481 this->public.check_uniqueness(&this->public, ike_sa, TRUE);
1482 ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
1483 }
1484 }
1485
1486 entry->my_id = my_id->clone(my_id);
1487 entry->other_id = other_id->clone(other_id);
1488 if (!entry->other)
1489 {
1490 entry->other = other->clone(other);
1491 }
1492 put_connected_peers(this, entry);
1493 }
1494
1495 unlock_single_segment(this, segment);
1496
1497 charon->bus->set_sa(charon->bus, NULL);
1498 }
1499
1500 METHOD(ike_sa_manager_t, checkin_and_destroy, void,
1501 private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1502 {
1503 /* deletion is a bit complex; we must ensure that no thread is waiting for
1504 * this SA.
1505 * We take this SA from the table, and start signaling while threads
1506 * are in the condvar.
1507 */
1508 entry_t *entry;
1509 ike_sa_id_t *ike_sa_id;
1510 u_int segment;
1511
1512 ike_sa_id = ike_sa->get_id(ike_sa);
1513
1514 DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1515 ike_sa->get_unique_id(ike_sa));
1516
1517 if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1518 {
1519 if (entry->driveout_waiting_threads && entry->driveout_new_threads)
1520 { /* it looks like flush() has been called and the SA is being deleted
1521 * anyway, just check it in */
1522 DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
1523 entry->checked_out = FALSE;
1524 entry->condvar->broadcast(entry->condvar);
1525 unlock_single_segment(this, segment);
1526 return;
1527 }
1528
1529 /* drive out waiting threads, as we are in a hurry */
1530 entry->driveout_waiting_threads = TRUE;
1531 /* mark it, so no new threads can get this entry */
1532 entry->driveout_new_threads = TRUE;
1533 /* wait until all workers have done their work */
1534 while (entry->waiting_threads)
1535 {
1536 /* wake up all */
1537 entry->condvar->broadcast(entry->condvar);
1538 /* they will wake us again when their work is done */
1539 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
1540 }
1541 remove_entry(this, entry);
1542 unlock_single_segment(this, segment);
1543
1544 if (entry->half_open)
1545 {
1546 remove_half_open(this, entry);
1547 }
1548 if (entry->my_id && entry->other_id)
1549 {
1550 remove_connected_peers(this, entry);
1551 }
1552 if (entry->init_hash.ptr)
1553 {
1554 remove_init_hash(this, entry->init_hash);
1555 }
1556
1557 entry_destroy(entry);
1558
1559 DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
1560 }
1561 else
1562 {
1563 DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
1564 ike_sa->destroy(ike_sa);
1565 }
1566 charon->bus->set_sa(charon->bus, NULL);
1567 }
1568
1569 /**
1570 * Cleanup function for create_id_enumerator
1571 */
1572 static void id_enumerator_cleanup(linked_list_t *ids)
1573 {
1574 ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
1575 }
1576
1577 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
1578 private_ike_sa_manager_t *this, identification_t *me,
1579 identification_t *other, int family)
1580 {
1581 linked_list_t *list, *ids = NULL;
1582 connected_peers_t *current;
1583 u_int row, segment;
1584 rwlock_t *lock;
1585
1586 row = chunk_hash_inc(other->get_encoding(other),
1587 chunk_hash(me->get_encoding(me))) & this->table_mask;
1588 segment = row & this->segment_mask;
1589
1590 lock = this->connected_peers_segments[segment & this->segment_mask].lock;
1591 lock->read_lock(lock);
1592 list = this->connected_peers_table[row];
1593 if (list)
1594 {
1595 if (list->find_first(list, (linked_list_match_t)connected_peers_match,
1596 (void**)&current, me, other, (uintptr_t)family) == SUCCESS)
1597 {
1598 ids = current->sas->clone_offset(current->sas,
1599 offsetof(ike_sa_id_t, clone));
1600 }
1601 }
1602 lock->unlock(lock);
1603
1604 if (!ids)
1605 {
1606 return enumerator_create_empty();
1607 }
1608 return enumerator_create_cleaner(ids->create_enumerator(ids),
1609 (void*)id_enumerator_cleanup, ids);
1610 }
1611
1612 METHOD(ike_sa_manager_t, check_uniqueness, bool,
1613 private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
1614 {
1615 bool cancel = FALSE;
1616 peer_cfg_t *peer_cfg;
1617 unique_policy_t policy;
1618 enumerator_t *enumerator;
1619 ike_sa_id_t *id = NULL;
1620 identification_t *me, *other;
1621 host_t *other_host;
1622
1623 peer_cfg = ike_sa->get_peer_cfg(ike_sa);
1624 policy = peer_cfg->get_unique_policy(peer_cfg);
1625 if (policy == UNIQUE_NO && !force_replace)
1626 {
1627 return FALSE;
1628 }
1629 me = ike_sa->get_my_id(ike_sa);
1630 other = ike_sa->get_other_id(ike_sa);
1631 other_host = ike_sa->get_other_host(ike_sa);
1632
1633 enumerator = create_id_enumerator(this, me, other,
1634 other_host->get_family(other_host));
1635 while (enumerator->enumerate(enumerator, &id))
1636 {
1637 status_t status = SUCCESS;
1638 ike_sa_t *duplicate;
1639
1640 duplicate = checkout(this, id);
1641 if (!duplicate)
1642 {
1643 continue;
1644 }
1645 if (force_replace)
1646 {
1647 DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
1648 "received INITIAL_CONTACT", other);
1649 checkin_and_destroy(this, duplicate);
1650 continue;
1651 }
1652 peer_cfg = duplicate->get_peer_cfg(duplicate);
1653 if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
1654 {
1655 switch (duplicate->get_state(duplicate))
1656 {
1657 case IKE_ESTABLISHED:
1658 case IKE_REKEYING:
1659 switch (policy)
1660 {
1661 case UNIQUE_REPLACE:
1662 DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer "
1663 "'%Y' due to uniqueness policy", other);
1664 status = duplicate->delete(duplicate);
1665 break;
1666 case UNIQUE_KEEP:
1667 cancel = TRUE;
1668 /* we keep the first IKE_SA and delete all
1669 * other duplicates that might exist */
1670 policy = UNIQUE_REPLACE;
1671 break;
1672 default:
1673 break;
1674 }
1675 break;
1676 default:
1677 break;
1678 }
1679 }
1680 if (status == DESTROY_ME)
1681 {
1682 checkin_and_destroy(this, duplicate);
1683 }
1684 else
1685 {
1686 checkin(this, duplicate);
1687 }
1688 }
1689 enumerator->destroy(enumerator);
1690 /* reset thread's current IKE_SA after checkin */
1691 charon->bus->set_sa(charon->bus, ike_sa);
1692 return cancel;
1693 }
1694
1695 METHOD(ike_sa_manager_t, has_contact, bool,
1696 private_ike_sa_manager_t *this, identification_t *me,
1697 identification_t *other, int family)
1698 {
1699 linked_list_t *list;
1700 u_int row, segment;
1701 rwlock_t *lock;
1702 bool found = FALSE;
1703
1704 row = chunk_hash_inc(other->get_encoding(other),
1705 chunk_hash(me->get_encoding(me))) & this->table_mask;
1706 segment = row & this->segment_mask;
1707 lock = this->connected_peers_segments[segment & this->segment_mask].lock;
1708 lock->read_lock(lock);
1709 list = this->connected_peers_table[row];
1710 if (list)
1711 {
1712 if (list->find_first(list, (linked_list_match_t)connected_peers_match,
1713 NULL, me, other, family) == SUCCESS)
1714 {
1715 found = TRUE;
1716 }
1717 }
1718 lock->unlock(lock);
1719
1720 return found;
1721 }
1722
1723 METHOD(ike_sa_manager_t, get_count, u_int,
1724 private_ike_sa_manager_t *this)
1725 {
1726 u_int segment, count = 0;
1727 mutex_t *mutex;
1728
1729 for (segment = 0; segment < this->segment_count; segment++)
1730 {
1731 mutex = this->segments[segment & this->segment_mask].mutex;
1732 mutex->lock(mutex);
1733 count += this->segments[segment].count;
1734 mutex->unlock(mutex);
1735 }
1736 return count;
1737 }
1738
1739 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
1740 private_ike_sa_manager_t *this, host_t *ip)
1741 {
1742 linked_list_t *list;
1743 u_int segment, row;
1744 rwlock_t *lock;
1745 chunk_t addr;
1746 u_int count = 0;
1747
1748 if (ip)
1749 {
1750 addr = ip->get_address(ip);
1751 row = chunk_hash(addr) & this->table_mask;
1752 segment = row & this->segment_mask;
1753 lock = this->half_open_segments[segment & this->segment_mask].lock;
1754 lock->read_lock(lock);
1755 if ((list = this->half_open_table[row]) != NULL)
1756 {
1757 half_open_t *current;
1758
1759 if (list->find_first(list, (linked_list_match_t)half_open_match,
1760 (void**)&current, &addr) == SUCCESS)
1761 {
1762 count = current->count;
1763 }
1764 }
1765 lock->unlock(lock);
1766 }
1767 else
1768 {
1769 for (segment = 0; segment < this->segment_count; segment++)
1770 {
1771 lock = this->half_open_segments[segment & this->segment_mask].lock;
1772 lock->read_lock(lock);
1773 count += this->half_open_segments[segment].count;
1774 lock->unlock(lock);
1775 }
1776 }
1777 return count;
1778 }
1779
1780 METHOD(ike_sa_manager_t, flush, void,
1781 private_ike_sa_manager_t *this)
1782 {
1783 /* destroy all list entries */
1784 enumerator_t *enumerator;
1785 entry_t *entry;
1786 u_int segment;
1787
1788 lock_all_segments(this);
1789 DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
1790 /* Step 1: drive out all waiting threads */
1791 DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
1792 enumerator = create_table_enumerator(this);
1793 while (enumerator->enumerate(enumerator, &entry, &segment))
1794 {
1795 /* do not accept new threads, drive out waiting threads */
1796 entry->driveout_new_threads = TRUE;
1797 entry->driveout_waiting_threads = TRUE;
1798 }
1799 enumerator->destroy(enumerator);
1800 DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
1801 /* Step 2: wait until all are gone */
1802 enumerator = create_table_enumerator(this);
1803 while (enumerator->enumerate(enumerator, &entry, &segment))
1804 {
1805 while (entry->waiting_threads || entry->checked_out)
1806 {
1807 /* wake up all */
1808 entry->condvar->broadcast(entry->condvar);
1809 /* go sleeping until they are gone */
1810 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
1811 }
1812 }
1813 enumerator->destroy(enumerator);
1814 DBG2(DBG_MGR, "delete all IKE_SA's");
1815 /* Step 3: initiate deletion of all IKE_SAs */
1816 enumerator = create_table_enumerator(this);
1817 while (enumerator->enumerate(enumerator, &entry, &segment))
1818 {
1819 charon->bus->set_sa(charon->bus, entry->ike_sa);
1820 if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
1821 { /* as the delete never gets processed, fire down events */
1822 switch (entry->ike_sa->get_state(entry->ike_sa))
1823 {
1824 case IKE_ESTABLISHED:
1825 case IKE_REKEYING:
1826 case IKE_DELETING:
1827 charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
1828 break;
1829 default:
1830 break;
1831 }
1832 }
1833 entry->ike_sa->delete(entry->ike_sa);
1834 }
1835 enumerator->destroy(enumerator);
1836
1837 DBG2(DBG_MGR, "destroy all entries");
1838 /* Step 4: destroy all entries */
1839 enumerator = create_table_enumerator(this);
1840 while (enumerator->enumerate(enumerator, &entry, &segment))
1841 {
1842 charon->bus->set_sa(charon->bus, entry->ike_sa);
1843 if (entry->half_open)
1844 {
1845 remove_half_open(this, entry);
1846 }
1847 if (entry->my_id && entry->other_id)
1848 {
1849 remove_connected_peers(this, entry);
1850 }
1851 if (entry->init_hash.ptr)
1852 {
1853 remove_init_hash(this, entry->init_hash);
1854 }
1855 remove_entry_at((private_enumerator_t*)enumerator);
1856 entry_destroy(entry);
1857 }
1858 enumerator->destroy(enumerator);
1859 charon->bus->set_sa(charon->bus, NULL);
1860 unlock_all_segments(this);
1861
1862 this->rng->destroy(this->rng);
1863 this->rng = NULL;
1864 this->hasher->destroy(this->hasher);
1865 this->hasher = NULL;
1866 }
1867
1868 METHOD(ike_sa_manager_t, destroy, void,
1869 private_ike_sa_manager_t *this)
1870 {
1871 u_int i;
1872
1873 for (i = 0; i < this->table_size; i++)
1874 {
1875 DESTROY_IF(this->half_open_table[i]);
1876 DESTROY_IF(this->connected_peers_table[i]);
1877 DESTROY_IF(this->init_hashes_table[i]);
1878 }
1879 free(this->ike_sa_table);
1880 free(this->half_open_table);
1881 free(this->connected_peers_table);
1882 free(this->init_hashes_table);
1883 for (i = 0; i < this->segment_count; i++)
1884 {
1885 this->segments[i].mutex->destroy(this->segments[i].mutex);
1886 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
1887 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
1888 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
1889 }
1890 free(this->segments);
1891 free(this->half_open_segments);
1892 free(this->connected_peers_segments);
1893 free(this->init_hashes_segments);
1894
1895 free(this);
1896 }
1897
1898 /**
1899 * This function returns the nearest power of two that is greater than or
1900 * equal to the given number. It works by setting all bits on the right-hand
1901 * side of the most significant 1 to 1 and then incrementing the whole number
1902 * so it rolls over to the nearest power of two. Note: returns 0 for n == 0
1903 */
1904 static u_int get_nearest_powerof2(u_int n)
1905 {
1906 u_int i;
1907
1908 --n;
1909 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
1910 {
1911 n |= n >> i;
1912 }
1913 return ++n;
1914 }
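/* For example, 5 (0101b) is decremented to 4 (0100b), the shifts fold it to
 * 7 (0111b) and the final increment yields 8; values that already are powers
 * of two, such as 8, are returned unchanged. */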
1915
1916 /*
1917 * Described in header.
1918 */
1919 ike_sa_manager_t *ike_sa_manager_create()
1920 {
1921 private_ike_sa_manager_t *this;
1922 u_int i;
1923
1924 INIT(this,
1925 .public = {
1926 .checkout = _checkout,
1927 .checkout_new = _checkout_new,
1928 .checkout_by_message = _checkout_by_message,
1929 .checkout_by_config = _checkout_by_config,
1930 .checkout_by_id = _checkout_by_id,
1931 .checkout_by_name = _checkout_by_name,
1932 .check_uniqueness = _check_uniqueness,
1933 .has_contact = _has_contact,
1934 .create_enumerator = _create_enumerator,
1935 .create_id_enumerator = _create_id_enumerator,
1936 .checkin = _checkin,
1937 .checkin_and_destroy = _checkin_and_destroy,
1938 .get_count = _get_count,
1939 .get_half_open_count = _get_half_open_count,
1940 .flush = _flush,
1941 .destroy = _destroy,
1942 },
1943 );
1944
1945 this->hasher = lib->crypto->create_hasher(lib->crypto, HASH_PREFERRED);
1946 if (this->hasher == NULL)
1947 {
1948 DBG1(DBG_MGR, "manager initialization failed, no hasher supported");
1949 free(this);
1950 return NULL;
1951 }
1952 this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
1953 if (this->rng == NULL)
1954 {
1955 DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
1956 this->hasher->destroy(this->hasher);
1957 free(this);
1958 return NULL;
1959 }
1960
1961 this->table_size = get_nearest_powerof2(lib->settings->get_int(lib->settings,
1962 "charon.ikesa_table_size", DEFAULT_HASHTABLE_SIZE));
1963 this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
1964 this->table_mask = this->table_size - 1;
1965
1966 this->segment_count = get_nearest_powerof2(lib->settings->get_int(lib->settings,
1967 "charon.ikesa_table_segments", DEFAULT_SEGMENT_COUNT));
1968 this->segment_count = max(1, min(this->segment_count, this->table_size));
1969 this->segment_mask = this->segment_count - 1;
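/* As segment_count is clamped to at most table_size and both are powers of
 * two, row & segment_mask distributes the table rows evenly over the
 * segments, e.g. with a table size of 8 and 4 segments rows 0..7 map to
 * segments 0, 1, 2, 3, 0, 1, 2, 3. */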
1970
1971 this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
1972 this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
1973 for (i = 0; i < this->segment_count; i++)
1974 {
1975 this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
1976 this->segments[i].count = 0;
1977 }
1978
1979 /* we use the same table parameters for the table to track half-open SAs */
1980 this->half_open_table = calloc(this->table_size, sizeof(linked_list_t*));
1981 this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
1982 for (i = 0; i < this->segment_count; i++)
1983 {
1984 this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
1985 this->half_open_segments[i].count = 0;
1986 }
1987
1988 /* also for the hash table used for duplicate tests */
1989 this->connected_peers_table = calloc(this->table_size, sizeof(linked_list_t*));
1990 this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
1991 for (i = 0; i < this->segment_count; i++)
1992 {
1993 this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
1994 this->connected_peers_segments[i].count = 0;
1995 }
1996
1997 /* and again for the table of hashes of seen initial IKE messages */
1998 this->init_hashes_table = calloc(this->table_size, sizeof(linked_list_t*));
1999 this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
2000 for (i = 0; i < this->segment_count; i++)
2001 {
2002 this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2003 this->init_hashes_segments[i].count = 0;
2004 }
2005
2006 this->reuse_ikesa = lib->settings->get_bool(lib->settings,
2007 "charon.reuse_ikesa", TRUE);
2008 return &this->public;
2009 }