When detecting a duplicate IKEv1 SA, adopt children, as it might be a rekeying
strongswan.git: src/libcharon/sa/ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2012 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31
32 /* the default size of the hash table (MUST be a power of 2) */
33 #define DEFAULT_HASHTABLE_SIZE 1
34
35 /* the maximum size of the hash table (MUST be a power of 2) */
36 #define MAX_HASHTABLE_SIZE (1 << 30)
37
38 /* the default number of segments (MUST be a power of 2) */
39 #define DEFAULT_SEGMENT_COUNT 1
40
41 typedef struct entry_t entry_t;
42
43 /**
44 * An entry in the linked list; contains the IKE_SA, locking and lookup data.
45 */
46 struct entry_t {
47
48 /**
49 * Number of threads waiting for this ike_sa_t object.
50 */
51 int waiting_threads;
52
53 /**
54 * Condvar where threads can wait until ike_sa_t object is free for use again.
55 */
56 condvar_t *condvar;
57
58 /**
59 * Is this ike_sa currently checked out?
60 */
61 bool checked_out;
62
63 /**
64 * Does this SA drives out new threads?
65 */
66 bool driveout_new_threads;
67
68 /**
69 * Does this SA drives out waiting threads?
70 */
71 bool driveout_waiting_threads;
72
73 /**
74 * Identification of an IKE_SA (SPIs).
75 */
76 ike_sa_id_t *ike_sa_id;
77
78 /**
79 * The contained ike_sa_t object.
80 */
81 ike_sa_t *ike_sa;
82
83 /**
84 * hash of the IKE_SA_INIT message, used to detect retransmissions
85 */
86 chunk_t init_hash;
87
88 /**
89 * remote host address, required for DoS detection and duplicate
90 * checking (host with same my_id and other_id is *not* considered
91 * a duplicate if the address family differs)
92 */
93 host_t *other;
94
95 /**
96 * As responder: Is this SA half-open?
97 */
98 bool half_open;
99
100 /**
101 * own identity, required for duplicate checking
102 */
103 identification_t *my_id;
104
105 /**
106 * remote identity, required for duplicate checking
107 */
108 identification_t *other_id;
109
110 /**
111 * message ID currently processing, if any
112 */
113 u_int32_t message_id;
114 };
115
116 /**
117 * Implementation of entry_t.destroy.
118 */
119 static status_t entry_destroy(entry_t *this)
120 {
121 /* also destroy IKE SA */
122 this->ike_sa->destroy(this->ike_sa);
123 this->ike_sa_id->destroy(this->ike_sa_id);
124 chunk_free(&this->init_hash);
125 DESTROY_IF(this->other);
126 DESTROY_IF(this->my_id);
127 DESTROY_IF(this->other_id);
128 this->condvar->destroy(this->condvar);
129 free(this);
130 return SUCCESS;
131 }
132
133 /**
134 * Creates a new entry for the ike_sa_t list.
135 */
136 static entry_t *entry_create()
137 {
138 entry_t *this = malloc_thing(entry_t);
139
140 this->waiting_threads = 0;
141 this->condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
142
143 /* we set checkout flag when we really give it out */
144 this->checked_out = FALSE;
145 this->driveout_new_threads = FALSE;
146 this->driveout_waiting_threads = FALSE;
147 this->message_id = -1;
148 this->init_hash = chunk_empty;
149 this->other = NULL;
150 this->half_open = FALSE;
151 this->my_id = NULL;
152 this->other_id = NULL;
153 this->ike_sa_id = NULL;
154 this->ike_sa = NULL;
155
156 return this;
157 }
158
159 /**
160 * Function that matches entry_t objects by ike_sa_id_t.
161 */
162 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
163 {
164 if (id->equals(id, entry->ike_sa_id))
165 {
166 return TRUE;
167 }
168 if ((id->get_responder_spi(id) == 0 ||
169 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
170 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
171 {
172 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
173 return TRUE;
174 }
175 return FALSE;
176 }
177
178 /**
179 * Function that matches entry_t objects by ike_sa_t pointers.
180 */
181 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
182 {
183 return entry->ike_sa == ike_sa;
184 }
185
186 /**
187 * Hash function for ike_sa_id_t objects.
188 */
189 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
190 {
191 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
192 * locally unique, so we use our randomly allocated SPI whether we are
193 * initiator or responder to ensure a good distribution. The latter is not
194 * possible for IKEv1 as we don't know whether we are original initiator or
195 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
196 * SPIs (Cookies) to be allocated near random (we allocate them randomly
197 * anyway) it seems safe to always use the initiator SPI. */
198 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
199 ike_sa_id->is_initiator(ike_sa_id))
200 {
201 return ike_sa_id->get_initiator_spi(ike_sa_id);
202 }
203 return ike_sa_id->get_responder_spi(ike_sa_id);
204 }
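
/* For illustration, the hash computed above is mapped to a table row and its
 * lock segment with the masks set up in ike_sa_manager_create(), e.g. as done
 * in put_entry():
 *
 *     row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
 *     segment = row & this->segment_mask;
 *     lock_single_segment(this, segment);
 *
 * Both masks are a power of two minus one, so the AND is a cheap modulo. */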
205
206 typedef struct half_open_t half_open_t;
207
208 /**
209 * Struct to manage half-open IKE_SAs per peer.
210 */
211 struct half_open_t {
212 /** chunk of remote host address */
213 chunk_t other;
214
215 /** the number of half-open IKE_SAs with that host */
216 u_int count;
217 };
218
219 /**
220 * Destroys a half_open_t object.
221 */
222 static void half_open_destroy(half_open_t *this)
223 {
224 chunk_free(&this->other);
225 free(this);
226 }
227
228 typedef struct connected_peers_t connected_peers_t;
229
230 struct connected_peers_t {
231 /** own identity */
232 identification_t *my_id;
233
234 /** remote identity */
235 identification_t *other_id;
236
237 /** ip address family of peer */
238 int family;
239
240 /** list of ike_sa_id_t objects of IKE_SAs between the two identities */
241 linked_list_t *sas;
242 };
243
244 static void connected_peers_destroy(connected_peers_t *this)
245 {
246 this->my_id->destroy(this->my_id);
247 this->other_id->destroy(this->other_id);
248 this->sas->destroy(this->sas);
249 free(this);
250 }
251
252 /**
253 * Function that matches connected_peers_t objects by the given ids.
254 */
255 static inline bool connected_peers_match(connected_peers_t *connected_peers,
256 identification_t *my_id, identification_t *other_id,
257 int family)
258 {
259 return my_id->equals(my_id, connected_peers->my_id) &&
260 other_id->equals(other_id, connected_peers->other_id) &&
261 (!family || family == connected_peers->family);
262 }
263
264 typedef struct init_hash_t init_hash_t;
265
266 struct init_hash_t {
267 /** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
268 chunk_t hash;
269
270 /** our SPI allocated for the IKE_SA based on this message */
271 u_int64_t our_spi;
272 };
273
274 typedef struct segment_t segment_t;
275
276 /**
277 * Struct to manage segments of the hash table.
278 */
279 struct segment_t {
280 /** mutex to access a segment exclusively */
281 mutex_t *mutex;
282
283 /** the number of entries in this segment */
284 u_int count;
285 };
286
287 typedef struct shareable_segment_t shareable_segment_t;
288
289 /**
290 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
291 */
292 struct shareable_segment_t {
293 /** rwlock to access a segment non-/exclusively */
294 rwlock_t *lock;
295
296 /** the number of entries in this segment - in case of the "half-open table"
297 * it's the sum of all half_open_t.count in a segment. */
298 u_int count;
299 };
300
301 typedef struct table_item_t table_item_t;
302
303 /**
304 * Instead of using linked_list_t for each bucket we store the data in our own
305 * list to save memory.
306 */
307 struct table_item_t {
308 /** data of this item */
309 void *value;
310
311 /** next item in the overflow list */
312 table_item_t *next;
313 };
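
/* For illustration, a bucket's overflow list is walked as in
 * get_entry_by_match_function() below, with new items inserted at the front
 * of the list (see put_entry()):
 *
 *     item = this->ike_sa_table[row];
 *     while (item)
 *     {
 *         entry_t *entry = item->value;
 *         ...
 *         item = item->next;
 *     }
 */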
314
315 typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;
316
317 /**
318 * Additional private members of ike_sa_manager_t.
319 */
320 struct private_ike_sa_manager_t {
321 /**
322 * Public interface of ike_sa_manager_t.
323 */
324 ike_sa_manager_t public;
325
326 /**
327 * Hash table with entries for the ike_sa_t objects.
328 */
329 table_item_t **ike_sa_table;
330
331 /**
332 * The size of the hash table.
333 */
334 u_int table_size;
335
336 /**
337 * Mask to map the hashes to table rows.
338 */
339 u_int table_mask;
340
341 /**
342 * Segments of the hash table.
343 */
344 segment_t *segments;
345
346 /**
347 * The number of segments.
348 */
349 u_int segment_count;
350
351 /**
352 * Mask to map a table row to a segment.
353 */
354 u_int segment_mask;
355
356 /**
357 * Hash table with half_open_t objects.
358 */
359 table_item_t **half_open_table;
360
361 /**
362 * Segments of the "half-open" hash table.
363 */
364 shareable_segment_t *half_open_segments;
365
366 /**
367 * Hash table with connected_peers_t objects.
368 */
369 table_item_t **connected_peers_table;
370
371 /**
372 * Segments of the "connected peers" hash table.
373 */
374 shareable_segment_t *connected_peers_segments;
375
376 /**
377 * Hash table with init_hash_t objects.
378 */
379 table_item_t **init_hashes_table;
380
381 /**
382 * Segments of the "hashes" hash table.
383 */
384 segment_t *init_hashes_segments;
385
386 /**
387 * RNG to get random SPIs for our side
388 */
389 rng_t *rng;
390
391 /**
392 * SHA1 hasher for IKE_SA_INIT retransmit detection
393 */
394 hasher_t *hasher;
395
396 /**
397 * reuse existing IKE_SAs in checkout_by_config
398 */
399 bool reuse_ikesa;
400
401 /**
402 * Configured IKE_SA limit, if any
403 */
404 u_int ikesa_limit;
405 };
406
407 /**
408 * Acquire a lock to access the segment of the table row with the given index.
409 * It also works with the segment index directly.
410 */
411 static inline void lock_single_segment(private_ike_sa_manager_t *this,
412 u_int index)
413 {
414 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
415 lock->lock(lock);
416 }
417
418 /**
419 * Release the lock required to access the segment of the table row with the given index.
420 * It also works with the segment index directly.
421 */
422 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
423 u_int index)
424 {
425 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
426 lock->unlock(lock);
427 }
428
429 /**
430 * Lock all segments
431 */
432 static void lock_all_segments(private_ike_sa_manager_t *this)
433 {
434 u_int i;
435
436 for (i = 0; i < this->segment_count; i++)
437 {
438 this->segments[i].mutex->lock(this->segments[i].mutex);
439 }
440 }
441
442 /**
443 * Unlock all segments
444 */
445 static void unlock_all_segments(private_ike_sa_manager_t *this)
446 {
447 u_int i;
448
449 for (i = 0; i < this->segment_count; i++)
450 {
451 this->segments[i].mutex->unlock(this->segments[i].mutex);
452 }
453 }
454
455 typedef struct private_enumerator_t private_enumerator_t;
456
457 /**
458 * hash table enumerator implementation
459 */
460 struct private_enumerator_t {
461
462 /**
463 * implements enumerator interface
464 */
465 enumerator_t enumerator;
466
467 /**
468 * associated ike_sa_manager_t
469 */
470 private_ike_sa_manager_t *manager;
471
472 /**
473 * current segment index
474 */
475 u_int segment;
476
477 /**
478 * currently enumerating entry
479 */
480 entry_t *entry;
481
482 /**
483 * current table row index
484 */
485 u_int row;
486
487 /**
488 * current table item
489 */
490 table_item_t *current;
491
492 /**
493 * previous table item
494 */
495 table_item_t *prev;
496 };
497
498 METHOD(enumerator_t, enumerate, bool,
499 private_enumerator_t *this, entry_t **entry, u_int *segment)
500 {
501 if (this->entry)
502 {
503 this->entry->condvar->signal(this->entry->condvar);
504 this->entry = NULL;
505 }
506 while (this->segment < this->manager->segment_count)
507 {
508 while (this->row < this->manager->table_size)
509 {
510 this->prev = this->current;
511 if (this->current)
512 {
513 this->current = this->current->next;
514 }
515 else
516 {
517 lock_single_segment(this->manager, this->segment);
518 this->current = this->manager->ike_sa_table[this->row];
519 }
520 if (this->current)
521 {
522 *entry = this->entry = this->current->value;
523 *segment = this->segment;
524 return TRUE;
525 }
526 unlock_single_segment(this->manager, this->segment);
527 this->row += this->manager->segment_count;
528 }
529 this->segment++;
530 this->row = this->segment;
531 }
532 return FALSE;
533 }
534
535 METHOD(enumerator_t, enumerator_destroy, void,
536 private_enumerator_t *this)
537 {
538 if (this->entry)
539 {
540 this->entry->condvar->signal(this->entry->condvar);
541 }
542 if (this->current)
543 {
544 unlock_single_segment(this->manager, this->segment);
545 }
546 free(this);
547 }
548
549 /**
550 * Creates an enumerator to enumerate the entries in the hash table.
551 */
552 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
553 {
554 private_enumerator_t *enumerator;
555
556 INIT(enumerator,
557 .enumerator = {
558 .enumerate = (void*)_enumerate,
559 .destroy = _enumerator_destroy,
560 },
561 .manager = this,
562 );
563 return &enumerator->enumerator;
564 }
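
/* Typical enumeration pattern, as used by checkout_by_config() and flush()
 * below. The segment of the returned entry stays locked while the caller
 * works on it, and the entry's condvar is signalled on the next call or on
 * destroy():
 *
 *     enumerator = create_table_enumerator(this);
 *     while (enumerator->enumerate(enumerator, &entry, &segment))
 *     {
 *         if (!wait_for_entry(this, entry, segment))
 *         {
 *             continue;
 *         }
 *         ...
 *     }
 *     enumerator->destroy(enumerator);
 */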
565
566 /**
567 * Put an entry into the hash table.
568 * Note: The caller has to unlock the returned segment.
569 */
570 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
571 {
572 table_item_t *current, *item;
573 u_int row, segment;
574
575 INIT(item,
576 .value = entry,
577 );
578
579 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
580 segment = row & this->segment_mask;
581
582 lock_single_segment(this, segment);
583 current = this->ike_sa_table[row];
584 if (current)
585 { /* insert at the front of current bucket */
586 item->next = current;
587 }
588 this->ike_sa_table[row] = item;
589 this->segments[segment].count++;
590 return segment;
591 }
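
/* For illustration, the segment lock handed back by put_entry() is released
 * by the caller once the new entry is set up, e.g. in checkin():
 *
 *     entry = entry_create();
 *     entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
 *     entry->ike_sa = ike_sa;
 *     segment = put_entry(this, entry);
 *     ...
 *     unlock_single_segment(this, segment);
 */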
592
593 /**
594 * Remove an entry from the hash table.
595 * Note: The caller MUST have a lock on the segment of this entry.
596 */
597 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
598 {
599 table_item_t *item, *prev = NULL;
600 u_int row, segment;
601
602 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
603 segment = row & this->segment_mask;
604 item = this->ike_sa_table[row];
605 while (item)
606 {
607 if (item->value == entry)
608 {
609 if (prev)
610 {
611 prev->next = item->next;
612 }
613 else
614 {
615 this->ike_sa_table[row] = item->next;
616 }
617 this->segments[segment].count--;
618 free(item);
619 break;
620 }
621 prev = item;
622 item = item->next;
623 }
624 }
625
626 /**
627 * Remove the entry at the current enumerator position.
628 */
629 static void remove_entry_at(private_enumerator_t *this)
630 {
631 this->entry = NULL;
632 if (this->current)
633 {
634 table_item_t *current = this->current;
635
636 this->manager->segments[this->segment].count--;
637 this->current = this->prev;
638
639 if (this->prev)
640 {
641 this->prev->next = current->next;
642 }
643 else
644 {
645 this->manager->ike_sa_table[this->row] = current->next;
646 unlock_single_segment(this->manager, this->segment);
647 }
648 free(current);
649 }
650 }
651
652 /**
653 * Find an entry using the provided match function to compare the entries for
654 * equality.
655 */
656 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
657 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
658 linked_list_match_t match, void *param)
659 {
660 table_item_t *item;
661 u_int row, seg;
662
663 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
664 seg = row & this->segment_mask;
665
666 lock_single_segment(this, seg);
667 item = this->ike_sa_table[row];
668 while (item)
669 {
670 if (match(item->value, param))
671 {
672 *entry = item->value;
673 *segment = seg;
674 /* the locked segment has to be unlocked by the caller */
675 return SUCCESS;
676 }
677 item = item->next;
678 }
679 unlock_single_segment(this, seg);
680 return NOT_FOUND;
681 }
682
683 /**
684 * Find an entry by ike_sa_id_t.
685 * Note: On SUCCESS, the caller has to unlock the segment.
686 */
687 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
688 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
689 {
690 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
691 (linked_list_match_t)entry_match_by_id, ike_sa_id);
692 }
693
694 /**
695 * Find an entry by IKE_SA pointer.
696 * Note: On SUCCESS, the caller has to unlock the segment.
697 */
698 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
699 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
700 {
701 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
702 (linked_list_match_t)entry_match_by_sa, ike_sa);
703 }
704
705 /**
706 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
707 * acquirable.
708 */
709 static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
710 u_int segment)
711 {
712 if (entry->driveout_new_threads)
713 {
714 /* we are not allowed to get this */
715 return FALSE;
716 }
717 while (entry->checked_out && !entry->driveout_waiting_threads)
718 {
719 /* wait until we can get it for ourselves,
720 * registering ourselves as waiting. */
721 entry->waiting_threads++;
722 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
723 entry->waiting_threads--;
724 }
725 /* hm, a deletion request prevents us from getting this SA, get the next one */
726 if (entry->driveout_waiting_threads)
727 {
728 /* we must signal here, others may be waiting on it, too */
729 entry->condvar->signal(entry->condvar);
730 return FALSE;
731 }
732 return TRUE;
733 }
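
/* For illustration, checkout() below combines the lookup helpers with
 * wait_for_entry() while the segment is locked:
 *
 *     if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
 *     {
 *         if (wait_for_entry(this, entry, segment))
 *         {
 *             entry->checked_out = TRUE;
 *             ike_sa = entry->ike_sa;
 *         }
 *         unlock_single_segment(this, segment);
 *     }
 */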
734
735 /**
736 * Put a half-open SA into the hash table.
737 */
738 static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
739 {
740 table_item_t *item;
741 u_int row, segment;
742 rwlock_t *lock;
743 half_open_t *half_open;
744 chunk_t addr;
745
746 addr = entry->other->get_address(entry->other);
747 row = chunk_hash(addr) & this->table_mask;
748 segment = row & this->segment_mask;
749 lock = this->half_open_segments[segment].lock;
750 lock->write_lock(lock);
751 item = this->half_open_table[row];
752 while (item)
753 {
754 half_open = item->value;
755
756 if (chunk_equals(addr, half_open->other))
757 {
758 half_open->count++;
759 break;
760 }
761 item = item->next;
762 }
763
764 if (!item)
765 {
766 INIT(half_open,
767 .other = chunk_clone(addr),
768 .count = 1,
769 );
770 INIT(item,
771 .value = half_open,
772 .next = this->half_open_table[row],
773 );
774 this->half_open_table[row] = item;
775 }
776 this->half_open_segments[segment].count++;
777 lock->unlock(lock);
778 }
779
780 /**
781 * Remove a half-open SA from the hash table.
782 */
783 static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
784 {
785 table_item_t *item, *prev = NULL;
786 u_int row, segment;
787 rwlock_t *lock;
788 chunk_t addr;
789
790 addr = entry->other->get_address(entry->other);
791 row = chunk_hash(addr) & this->table_mask;
792 segment = row & this->segment_mask;
793 lock = this->half_open_segments[segment].lock;
794 lock->write_lock(lock);
795 item = this->half_open_table[row];
796 while (item)
797 {
798 half_open_t *half_open = item->value;
799
800 if (chunk_equals(addr, half_open->other))
801 {
802 if (--half_open->count == 0)
803 {
804 if (prev)
805 {
806 prev->next = item->next;
807 }
808 else
809 {
810 this->half_open_table[row] = item->next;
811 }
812 half_open_destroy(half_open);
813 free(item);
814 }
815 this->half_open_segments[segment].count--;
816 break;
817 }
818 prev = item;
819 item = item->next;
820 }
821 lock->unlock(lock);
822 }
823
824 /**
825 * Put an SA between two peers into the hash table.
826 */
827 static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
828 {
829 table_item_t *item;
830 u_int row, segment;
831 rwlock_t *lock;
832 connected_peers_t *connected_peers;
833 chunk_t my_id, other_id;
834 int family;
835
836 my_id = entry->my_id->get_encoding(entry->my_id);
837 other_id = entry->other_id->get_encoding(entry->other_id);
838 family = entry->other->get_family(entry->other);
839 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
840 segment = row & this->segment_mask;
841 lock = this->connected_peers_segments[segment].lock;
842 lock->write_lock(lock);
843 item = this->connected_peers_table[row];
844 while (item)
845 {
846 connected_peers = item->value;
847
848 if (connected_peers_match(connected_peers, entry->my_id,
849 entry->other_id, family))
850 {
851 if (connected_peers->sas->find_first(connected_peers->sas,
852 (linked_list_match_t)entry->ike_sa_id->equals,
853 NULL, entry->ike_sa_id) == SUCCESS)
854 {
855 lock->unlock(lock);
856 return;
857 }
858 break;
859 }
860 item = item->next;
861 }
862
863 if (!item)
864 {
865 INIT(connected_peers,
866 .my_id = entry->my_id->clone(entry->my_id),
867 .other_id = entry->other_id->clone(entry->other_id),
868 .family = family,
869 .sas = linked_list_create(),
870 );
871 INIT(item,
872 .value = connected_peers,
873 .next = this->connected_peers_table[row],
874 );
875 this->connected_peers_table[row] = item;
876 }
877 connected_peers->sas->insert_last(connected_peers->sas,
878 entry->ike_sa_id->clone(entry->ike_sa_id));
879 this->connected_peers_segments[segment].count++;
880 lock->unlock(lock);
881 }
882
883 /**
884 * Remove an SA between two peers from the hash table.
885 */
886 static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
887 {
888 table_item_t *item, *prev = NULL;
889 u_int row, segment;
890 rwlock_t *lock;
891 chunk_t my_id, other_id;
892 int family;
893
894 my_id = entry->my_id->get_encoding(entry->my_id);
895 other_id = entry->other_id->get_encoding(entry->other_id);
896 family = entry->other->get_family(entry->other);
897
898 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
899 segment = row & this->segment_mask;
900
901 lock = this->connected_peers_segments[segment].lock;
902 lock->write_lock(lock);
903 item = this->connected_peers_table[row];
904 while (item)
905 {
906 connected_peers_t *current = item->value;
907
908 if (connected_peers_match(current, entry->my_id, entry->other_id,
909 family))
910 {
911 enumerator_t *enumerator;
912 ike_sa_id_t *ike_sa_id;
913
914 enumerator = current->sas->create_enumerator(current->sas);
915 while (enumerator->enumerate(enumerator, &ike_sa_id))
916 {
917 if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
918 {
919 current->sas->remove_at(current->sas, enumerator);
920 ike_sa_id->destroy(ike_sa_id);
921 this->connected_peers_segments[segment].count--;
922 break;
923 }
924 }
925 enumerator->destroy(enumerator);
926 if (current->sas->get_count(current->sas) == 0)
927 {
928 if (prev)
929 {
930 prev->next = item->next;
931 }
932 else
933 {
934 this->connected_peers_table[row] = item->next;
935 }
936 connected_peers_destroy(current);
937 free(item);
938 }
939 break;
940 }
941 prev = item;
942 item = item->next;
943 }
944 lock->unlock(lock);
945 }
946
947 /**
948 * Get a random SPI for new IKE_SAs
949 */
950 static u_int64_t get_spi(private_ike_sa_manager_t *this)
951 {
952 u_int64_t spi;
953
954 if (this->rng &&
955 this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
956 {
957 return spi;
958 }
959 return 0;
960 }
961
962 /**
963 * Calculate the hash of the initial IKE message. Memory for the hash is
964 * allocated on success.
965 *
966 * @returns TRUE on success
967 */
968 static bool get_init_hash(private_ike_sa_manager_t *this, message_t *message,
969 chunk_t *hash)
970 {
971 host_t *src;
972
973 if (!this->hasher)
974 { /* this might be the case when flush() has been called */
975 return FALSE;
976 }
977 if (message->get_first_payload_type(message) == FRAGMENT_V1)
978 { /* only hash the source IP, port and SPI for fragmented init messages */
979 u_int16_t port;
980 u_int64_t spi;
981
982 src = message->get_source(message);
983 if (!this->hasher->allocate_hash(this->hasher,
984 src->get_address(src), NULL))
985 {
986 return FALSE;
987 }
988 port = src->get_port(src);
989 if (!this->hasher->allocate_hash(this->hasher,
990 chunk_from_thing(port), NULL))
991 {
992 return FALSE;
993 }
994 spi = message->get_initiator_spi(message);
995 return this->hasher->allocate_hash(this->hasher,
996 chunk_from_thing(spi), hash);
997 }
998 if (message->get_exchange_type(message) == ID_PROT)
999 { /* include the source for Main Mode as the hash will be the same if
1000 * SPIs are reused by two initiators that use the same proposal */
1001 src = message->get_source(message);
1002
1003 if (!this->hasher->allocate_hash(this->hasher,
1004 src->get_address(src), NULL))
1005 {
1006 return FALSE;
1007 }
1008 }
1009 return this->hasher->allocate_hash(this->hasher,
1010 message->get_packet_data(message), hash);
1011 }
1012
1013 /**
1014 * Check if we already have created an IKE_SA based on the initial IKE message
1015 * with the given hash.
1016 * If not the hash is stored, the hash data is not(!) cloned.
1017 *
1018 * Also, the local SPI is returned. In case of a retransmit this is already
1019 * stored together with the hash, otherwise it is newly allocated and should
1020 * be used to create the IKE_SA.
1021 *
1022 * @returns ALREADY_DONE if the message with the given hash has been seen before
1023 * NOT_FOUND if the message hash was not found
1024 * FAILED if the SPI allocation failed
1025 */
1026 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1027 chunk_t init_hash, u_int64_t *our_spi)
1028 {
1029 table_item_t *item;
1030 u_int row, segment;
1031 mutex_t *mutex;
1032 init_hash_t *init;
1033 u_int64_t spi;
1034
1035 row = chunk_hash(init_hash) & this->table_mask;
1036 segment = row & this->segment_mask;
1037 mutex = this->init_hashes_segments[segment].mutex;
1038 mutex->lock(mutex);
1039 item = this->init_hashes_table[row];
1040 while (item)
1041 {
1042 init_hash_t *current = item->value;
1043
1044 if (chunk_equals(init_hash, current->hash))
1045 {
1046 *our_spi = current->our_spi;
1047 mutex->unlock(mutex);
1048 return ALREADY_DONE;
1049 }
1050 item = item->next;
1051 }
1052
1053 spi = get_spi(this);
1054 if (!spi)
1055 {
mutex->unlock(mutex);
1056 return FAILED;
1057 }
1058
1059 INIT(init,
1060 .hash = {
1061 .len = init_hash.len,
1062 .ptr = init_hash.ptr,
1063 },
1064 .our_spi = spi,
1065 );
1066 INIT(item,
1067 .value = init,
1068 .next = this->init_hashes_table[row],
1069 );
1070 this->init_hashes_table[row] = item;
1071 *our_spi = init->our_spi;
1072 mutex->unlock(mutex);
1073 return NOT_FOUND;
1074 }
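
/* For illustration, checkout_by_message() below dispatches on the documented
 * return values roughly like this:
 *
 *     switch (check_and_put_init_hash(this, hash, &our_spi))
 *     {
 *         case NOT_FOUND:
 *             // first time we see this init message, create a new IKE_SA
 *             ...
 *         case FAILED:
 *             // we failed to allocate an SPI, ignore the message
 *             ...
 *         case ALREADY_DONE:
 *         default:
 *             // a retransmit, fall through to the regular lookup by SPI
 *             break;
 *     }
 */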
1075
1076 /**
1077 * Remove the hash of an initial IKE message from the cache.
1078 */
1079 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1080 {
1081 table_item_t *item, *prev = NULL;
1082 u_int row, segment;
1083 mutex_t *mutex;
1084
1085 row = chunk_hash(init_hash) & this->table_mask;
1086 segment = row & this->segment_mask;
1087 mutex = this->init_hashes_segments[segment].mutex;
1088 mutex->lock(mutex);
1089 item = this->init_hashes_table[row];
1090 while (item)
1091 {
1092 init_hash_t *current = item->value;
1093
1094 if (chunk_equals(init_hash, current->hash))
1095 {
1096 if (prev)
1097 {
1098 prev->next = item->next;
1099 }
1100 else
1101 {
1102 this->init_hashes_table[row] = item->next;
1103 }
1104 free(current);
1105 free(item);
1106 break;
1107 }
1108 prev = item;
1109 item = item->next;
1110 }
1111 mutex->unlock(mutex);
1112 }
1113
1114 METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
1115 private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
1116 {
1117 ike_sa_t *ike_sa = NULL;
1118 entry_t *entry;
1119 u_int segment;
1120
1121 DBG2(DBG_MGR, "checkout IKE_SA");
1122
1123 if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
1124 {
1125 if (wait_for_entry(this, entry, segment))
1126 {
1127 entry->checked_out = TRUE;
1128 ike_sa = entry->ike_sa;
1129 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1130 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1131 }
1132 unlock_single_segment(this, segment);
1133 }
1134 charon->bus->set_sa(charon->bus, ike_sa);
1135 return ike_sa;
1136 }
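
/* A checked-out IKE_SA is exclusively owned by the calling thread and must be
 * returned with checkin() or checkin_and_destroy(), as check_uniqueness()
 * below does with duplicates it has checked out:
 *
 *     duplicate = checkout(this, id);
 *     if (duplicate)
 *     {
 *         ...
 *         checkin(this, duplicate);
 *     }
 */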
1137
1138 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1139 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1140 {
1141 ike_sa_id_t *ike_sa_id;
1142 ike_sa_t *ike_sa;
1143 u_int8_t ike_version;
1144 u_int64_t spi;
1145
1146 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1147
1148 spi = get_spi(this);
1149 if (!spi)
1150 {
1151 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1152 return NULL;
1153 }
1154
1155 if (initiator)
1156 {
1157 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1158 }
1159 else
1160 {
1161 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1162 }
1163 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1164 ike_sa_id->destroy(ike_sa_id);
1165
1166 if (ike_sa)
1167 {
1168 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1169 ike_sa->get_unique_id(ike_sa));
1170 }
1171 return ike_sa;
1172 }
1173
1174 METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
1175 private_ike_sa_manager_t* this, message_t *message)
1176 {
1177 u_int segment;
1178 entry_t *entry;
1179 ike_sa_t *ike_sa = NULL;
1180 ike_sa_id_t *id;
1181 ike_version_t ike_version;
1182 bool is_init = FALSE;
1183
1184 id = message->get_ike_sa_id(message);
1185 /* clone the IKE_SA ID so we can modify the initiator flag */
1186 id = id->clone(id);
1187 id->switch_initiator(id);
1188
1189 DBG2(DBG_MGR, "checkout IKE_SA by message");
1190
1191 if (id->get_responder_spi(id) == 0)
1192 {
1193 if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
1194 {
1195 if (message->get_exchange_type(message) == IKE_SA_INIT &&
1196 message->get_request(message))
1197 {
1198 ike_version = IKEV2;
1199 is_init = TRUE;
1200 }
1201 }
1202 else
1203 {
1204 if (message->get_exchange_type(message) == ID_PROT ||
1205 message->get_exchange_type(message) == AGGRESSIVE)
1206 {
1207 ike_version = IKEV1;
1208 is_init = TRUE;
1209 if (id->is_initiator(id))
1210 { /* not set in IKEv1, switch back before applying to new SA */
1211 id->switch_initiator(id);
1212 }
1213 }
1214 }
1215 }
1216
1217 if (is_init)
1218 {
1219 u_int64_t our_spi;
1220 chunk_t hash;
1221
1222 if (!get_init_hash(this, message, &hash))
1223 {
1224 DBG1(DBG_MGR, "ignoring message, failed to hash message");
1225 id->destroy(id);
1226 return NULL;
1227 }
1228
1229 /* ensure this is not a retransmit of an already handled init message */
1230 switch (check_and_put_init_hash(this, hash, &our_spi))
1231 {
1232 case NOT_FOUND:
1233 { /* we've not seen this packet yet, create a new IKE_SA */
1234 if (!this->ikesa_limit ||
1235 this->public.get_count(&this->public) < this->ikesa_limit)
1236 {
1237 id->set_responder_spi(id, our_spi);
1238 ike_sa = ike_sa_create(id, FALSE, ike_version);
1239 if (ike_sa)
1240 {
1241 entry = entry_create();
1242 entry->ike_sa = ike_sa;
1243 entry->ike_sa_id = id;
1244
1245 segment = put_entry(this, entry);
1246 entry->checked_out = TRUE;
1247 unlock_single_segment(this, segment);
1248
1249 entry->message_id = message->get_message_id(message);
1250 entry->init_hash = hash;
1251
1252 DBG2(DBG_MGR, "created IKE_SA %s[%u]",
1253 ike_sa->get_name(ike_sa),
1254 ike_sa->get_unique_id(ike_sa));
1255
1256 charon->bus->set_sa(charon->bus, ike_sa);
1257 return ike_sa;
1258 }
1259 else
1260 {
1261 DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
1262 }
1263 }
1264 else
1265 {
1266 DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
1267 exchange_type_names, message->get_exchange_type(message),
1268 this->ikesa_limit);
1269 }
1270 remove_init_hash(this, hash);
1271 chunk_free(&hash);
1272 id->destroy(id);
1273 return NULL;
1274 }
1275 case FAILED:
1276 { /* we failed to allocate an SPI */
1277 chunk_free(&hash);
1278 id->destroy(id);
1279 DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
1280 return NULL;
1281 }
1282 case ALREADY_DONE:
1283 default:
1284 break;
1285 }
1286 /* it looks like we already handled this init message to some degree */
1287 id->set_responder_spi(id, our_spi);
1288 chunk_free(&hash);
1289 }
1290
1291 if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
1292 {
1293 /* only check out in IKEv2 if we are not already processing it */
1294 if (message->get_request(message) &&
1295 message->get_message_id(message) == entry->message_id)
1296 {
1297 DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
1298 entry->message_id);
1299 }
1300 else if (wait_for_entry(this, entry, segment))
1301 {
1302 ike_sa_id_t *ike_id;
1303
1304 ike_id = entry->ike_sa->get_id(entry->ike_sa);
1305 entry->checked_out = TRUE;
1306 if (message->get_first_payload_type(message) != FRAGMENT_V1)
1307 {
1308 entry->message_id = message->get_message_id(message);
1309 }
1310 if (ike_id->get_responder_spi(ike_id) == 0)
1311 {
1312 ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
1313 }
1314 ike_sa = entry->ike_sa;
1315 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1316 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1317 }
1318 unlock_single_segment(this, segment);
1319 }
1320 else
1321 {
1322 charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
1323 }
1324 id->destroy(id);
1325 charon->bus->set_sa(charon->bus, ike_sa);
1326 return ike_sa;
1327 }
1328
1329 METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
1330 private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
1331 {
1332 enumerator_t *enumerator;
1333 entry_t *entry;
1334 ike_sa_t *ike_sa = NULL;
1335 peer_cfg_t *current_peer;
1336 ike_cfg_t *current_ike;
1337 u_int segment;
1338
1339 DBG2(DBG_MGR, "checkout IKE_SA by config");
1340
1341 if (!this->reuse_ikesa)
1342 { /* IKE_SA reuse disabled by config */
1343 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1344 charon->bus->set_sa(charon->bus, ike_sa);
1345 return ike_sa;
1346 }
1347
1348 enumerator = create_table_enumerator(this);
1349 while (enumerator->enumerate(enumerator, &entry, &segment))
1350 {
1351 if (!wait_for_entry(this, entry, segment))
1352 {
1353 continue;
1354 }
1355 if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
1356 { /* skip IKE_SAs which are not usable */
1357 continue;
1358 }
1359
1360 current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
1361 if (current_peer && current_peer->equals(current_peer, peer_cfg))
1362 {
1363 current_ike = current_peer->get_ike_cfg(current_peer);
1364 if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
1365 {
1366 entry->checked_out = TRUE;
1367 ike_sa = entry->ike_sa;
1368 DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
1369 ike_sa->get_unique_id(ike_sa),
1370 current_peer->get_name(current_peer));
1371 break;
1372 }
1373 }
1374 }
1375 enumerator->destroy(enumerator);
1376
1377 if (!ike_sa)
1378 { /* no IKE_SA using such a config, hand out a new */
1379 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1380 }
1381 charon->bus->set_sa(charon->bus, ike_sa);
1382 return ike_sa;
1383 }
1384
1385 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1386 private_ike_sa_manager_t *this, u_int32_t id, bool child)
1387 {
1388 enumerator_t *enumerator, *children;
1389 entry_t *entry;
1390 ike_sa_t *ike_sa = NULL;
1391 child_sa_t *child_sa;
1392 u_int segment;
1393
1394 DBG2(DBG_MGR, "checkout IKE_SA by ID");
1395
1396 enumerator = create_table_enumerator(this);
1397 while (enumerator->enumerate(enumerator, &entry, &segment))
1398 {
1399 if (wait_for_entry(this, entry, segment))
1400 {
1401 /* look for a child with such a reqid ... */
1402 if (child)
1403 {
1404 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1405 while (children->enumerate(children, (void**)&child_sa))
1406 {
1407 if (child_sa->get_reqid(child_sa) == id)
1408 {
1409 ike_sa = entry->ike_sa;
1410 break;
1411 }
1412 }
1413 children->destroy(children);
1414 }
1415 else /* ... or for an IKE_SA with such a unique id */
1416 {
1417 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1418 {
1419 ike_sa = entry->ike_sa;
1420 }
1421 }
1422 /* got one, return */
1423 if (ike_sa)
1424 {
1425 entry->checked_out = TRUE;
1426 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1427 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1428 break;
1429 }
1430 }
1431 }
1432 enumerator->destroy(enumerator);
1433
1434 charon->bus->set_sa(charon->bus, ike_sa);
1435 return ike_sa;
1436 }
1437
1438 METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1439 private_ike_sa_manager_t *this, char *name, bool child)
1440 {
1441 enumerator_t *enumerator, *children;
1442 entry_t *entry;
1443 ike_sa_t *ike_sa = NULL;
1444 child_sa_t *child_sa;
1445 u_int segment;
1446
1447 enumerator = create_table_enumerator(this);
1448 while (enumerator->enumerate(enumerator, &entry, &segment))
1449 {
1450 if (wait_for_entry(this, entry, segment))
1451 {
1452 /* look for a child with such a policy name ... */
1453 if (child)
1454 {
1455 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1456 while (children->enumerate(children, (void**)&child_sa))
1457 {
1458 if (streq(child_sa->get_name(child_sa), name))
1459 {
1460 ike_sa = entry->ike_sa;
1461 break;
1462 }
1463 }
1464 children->destroy(children);
1465 }
1466 else /* ... or for an IKE_SA with such a connection name */
1467 {
1468 if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1469 {
1470 ike_sa = entry->ike_sa;
1471 }
1472 }
1473 /* got one, return */
1474 if (ike_sa)
1475 {
1476 entry->checked_out = TRUE;
1477 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1478 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1479 break;
1480 }
1481 }
1482 }
1483 enumerator->destroy(enumerator);
1484
1485 charon->bus->set_sa(charon->bus, ike_sa);
1486 return ike_sa;
1487 }
1488
1489 /**
1490 * enumerator filter function, waiting variant
1491 */
1492 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1493 entry_t **in, ike_sa_t **out, u_int *segment)
1494 {
1495 if (wait_for_entry(this, *in, *segment))
1496 {
1497 *out = (*in)->ike_sa;
1498 charon->bus->set_sa(charon->bus, *out);
1499 return TRUE;
1500 }
1501 return FALSE;
1502 }
1503
1504 /**
1505 * enumerator filter function, skipping variant
1506 */
1507 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1508 entry_t **in, ike_sa_t **out, u_int *segment)
1509 {
1510 if (!(*in)->driveout_new_threads &&
1511 !(*in)->driveout_waiting_threads &&
1512 !(*in)->checked_out)
1513 {
1514 *out = (*in)->ike_sa;
1515 charon->bus->set_sa(charon->bus, *out);
1516 return TRUE;
1517 }
1518 return FALSE;
1519 }
1520
1521 /**
1522 * Reset threads SA after enumeration
1523 */
1524 static void reset_sa(void *data)
1525 {
1526 charon->bus->set_sa(charon->bus, NULL);
1527 }
1528
1529 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1530 private_ike_sa_manager_t* this, bool wait)
1531 {
1532 return enumerator_create_filter(create_table_enumerator(this),
1533 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1534 this, reset_sa);
1535 }
1536
1537 METHOD(ike_sa_manager_t, checkin, void,
1538 private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1539 {
1540 /* to check the SA back in, we look for the pointer of the ike_sa
1541 * in all entries.
1542 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
1543 * on reception of an IKE_SA_INIT response) the lookup will work but
1544 * updating of the SPI MAY be necessary...
1545 */
1546 entry_t *entry;
1547 ike_sa_id_t *ike_sa_id;
1548 host_t *other;
1549 identification_t *my_id, *other_id;
1550 u_int segment;
1551
1552 ike_sa_id = ike_sa->get_id(ike_sa);
1553 my_id = ike_sa->get_my_id(ike_sa);
1554 other_id = ike_sa->get_other_eap_id(ike_sa);
1555 other = ike_sa->get_other_host(ike_sa);
1556
1557 DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1558 ike_sa->get_unique_id(ike_sa));
1559
1560 /* look for the entry */
1561 if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1562 {
1563 /* ike_sa_id must be updated */
1564 entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
1565 /* signal waiting threads */
1566 entry->checked_out = FALSE;
1567 entry->message_id = -1;
1568 /* check if this SA is half-open */
1569 if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
1570 {
1571 /* not half open anymore */
1572 entry->half_open = FALSE;
1573 remove_half_open(this, entry);
1574 }
1575 else if (entry->half_open && !other->ip_equals(other, entry->other))
1576 {
1577 /* the other host's IP has changed, we must update the hash table */
1578 remove_half_open(this, entry);
1579 DESTROY_IF(entry->other);
1580 entry->other = other->clone(other);
1581 put_half_open(this, entry);
1582 }
1583 else if (!entry->half_open &&
1584 !entry->ike_sa_id->is_initiator(entry->ike_sa_id) &&
1585 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
1586 {
1587 /* this is a new half-open SA */
1588 entry->half_open = TRUE;
1589 entry->other = other->clone(other);
1590 put_half_open(this, entry);
1591 }
1592 DBG2(DBG_MGR, "check-in of IKE_SA successful.");
1593 entry->condvar->signal(entry->condvar);
1594 }
1595 else
1596 {
1597 entry = entry_create();
1598 entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
1599 entry->ike_sa = ike_sa;
1600 segment = put_entry(this, entry);
1601 }
1602
1603 /* apply identities for duplicate test */
1604 if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
1605 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
1606 entry->my_id == NULL && entry->other_id == NULL)
1607 {
1608 if (ike_sa->get_version(ike_sa) == IKEV1)
1609 {
1610 /* If authenticated and received INITIAL_CONTACT,
1611 * delete any existing IKE_SAs with that peer. */
1612 if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
1613 {
1614 this->public.check_uniqueness(&this->public, ike_sa, TRUE);
1615 ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
1616 }
1617 }
1618
1619 entry->my_id = my_id->clone(my_id);
1620 entry->other_id = other_id->clone(other_id);
1621 if (!entry->other)
1622 {
1623 entry->other = other->clone(other);
1624 }
1625 put_connected_peers(this, entry);
1626 }
1627
1628 unlock_single_segment(this, segment);
1629
1630 charon->bus->set_sa(charon->bus, NULL);
1631 }
1632
1633 METHOD(ike_sa_manager_t, checkin_and_destroy, void,
1634 private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1635 {
1636 /* deletion is a bit complex; we must ensure that no thread is waiting for
1637 * this SA.
1638 * We take this SA from the table, and start signaling while threads
1639 * are in the condvar.
1640 */
1641 entry_t *entry;
1642 ike_sa_id_t *ike_sa_id;
1643 u_int segment;
1644
1645 ike_sa_id = ike_sa->get_id(ike_sa);
1646
1647 DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1648 ike_sa->get_unique_id(ike_sa));
1649
1650 if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1651 {
1652 if (entry->driveout_waiting_threads && entry->driveout_new_threads)
1653 { /* it looks like flush() has been called and the SA is being deleted
1654 * anyway, just check it in */
1655 DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
1656 entry->checked_out = FALSE;
1657 entry->condvar->broadcast(entry->condvar);
1658 unlock_single_segment(this, segment);
1659 return;
1660 }
1661
1662 /* drive out waiting threads, as we are in a hurry */
1663 entry->driveout_waiting_threads = TRUE;
1664 /* mark it, so no new threads can get this entry */
1665 entry->driveout_new_threads = TRUE;
1666 /* wait until all workers have done their work */
1667 while (entry->waiting_threads)
1668 {
1669 /* wake up all */
1670 entry->condvar->broadcast(entry->condvar);
1671 /* they will wake us again when their work is done */
1672 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
1673 }
1674 remove_entry(this, entry);
1675 unlock_single_segment(this, segment);
1676
1677 if (entry->half_open)
1678 {
1679 remove_half_open(this, entry);
1680 }
1681 if (entry->my_id && entry->other_id)
1682 {
1683 remove_connected_peers(this, entry);
1684 }
1685 if (entry->init_hash.ptr)
1686 {
1687 remove_init_hash(this, entry->init_hash);
1688 }
1689
1690 entry_destroy(entry);
1691
1692 DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
1693 }
1694 else
1695 {
1696 DBG1(DBG_MGR, "tried to check-in and delete nonexistent IKE_SA");
1697 ike_sa->destroy(ike_sa);
1698 }
1699 charon->bus->set_sa(charon->bus, NULL);
1700 }
1701
1702 /**
1703 * Cleanup function for create_id_enumerator
1704 */
1705 static void id_enumerator_cleanup(linked_list_t *ids)
1706 {
1707 ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
1708 }
1709
1710 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
1711 private_ike_sa_manager_t *this, identification_t *me,
1712 identification_t *other, int family)
1713 {
1714 table_item_t *item;
1715 u_int row, segment;
1716 rwlock_t *lock;
1717 linked_list_t *ids = NULL;
1718
1719 row = chunk_hash_inc(other->get_encoding(other),
1720 chunk_hash(me->get_encoding(me))) & this->table_mask;
1721 segment = row & this->segment_mask;
1722
1723 lock = this->connected_peers_segments[segment].lock;
1724 lock->read_lock(lock);
1725 item = this->connected_peers_table[row];
1726 while (item)
1727 {
1728 connected_peers_t *current = item->value;
1729
1730 if (connected_peers_match(current, me, other, family))
1731 {
1732 ids = current->sas->clone_offset(current->sas,
1733 offsetof(ike_sa_id_t, clone));
1734 break;
1735 }
1736 item = item->next;
1737 }
1738 lock->unlock(lock);
1739
1740 if (!ids)
1741 {
1742 return enumerator_create_empty();
1743 }
1744 return enumerator_create_cleaner(ids->create_enumerator(ids),
1745 (void*)id_enumerator_cleanup, ids);
1746 }
1747
1748 /**
1749 * Move all CHILD_SAs from old to new
1750 */
1751 static void adopt_children(ike_sa_t *old, ike_sa_t *new)
1752 {
1753 enumerator_t *enumerator;
1754 child_sa_t *child_sa;
1755
1756 enumerator = old->create_child_sa_enumerator(old);
1757 while (enumerator->enumerate(enumerator, &child_sa))
1758 {
1759 old->remove_child_sa(old, enumerator);
1760 new->add_child_sa(new, child_sa);
1761 }
1762 enumerator->destroy(enumerator);
1763 }
1764
1765 METHOD(ike_sa_manager_t, check_uniqueness, bool,
1766 private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
1767 {
1768 bool cancel = FALSE;
1769 peer_cfg_t *peer_cfg;
1770 unique_policy_t policy;
1771 enumerator_t *enumerator;
1772 ike_sa_id_t *id = NULL;
1773 identification_t *me, *other;
1774 host_t *other_host;
1775
1776 peer_cfg = ike_sa->get_peer_cfg(ike_sa);
1777 policy = peer_cfg->get_unique_policy(peer_cfg);
1778 if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
1779 {
1780 return FALSE;
1781 }
1782 me = ike_sa->get_my_id(ike_sa);
1783 other = ike_sa->get_other_eap_id(ike_sa);
1784 other_host = ike_sa->get_other_host(ike_sa);
1785
1786 enumerator = create_id_enumerator(this, me, other,
1787 other_host->get_family(other_host));
1788 while (enumerator->enumerate(enumerator, &id))
1789 {
1790 status_t status = SUCCESS;
1791 ike_sa_t *duplicate;
1792
1793 duplicate = checkout(this, id);
1794 if (!duplicate)
1795 {
1796 continue;
1797 }
1798 if (force_replace)
1799 {
1800 DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
1801 "received INITIAL_CONTACT", other);
1802 checkin_and_destroy(this, duplicate);
1803 continue;
1804 }
1805 peer_cfg = duplicate->get_peer_cfg(duplicate);
1806 if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
1807 {
1808 switch (duplicate->get_state(duplicate))
1809 {
1810 case IKE_ESTABLISHED:
1811 case IKE_REKEYING:
1812 switch (policy)
1813 {
1814 case UNIQUE_REPLACE:
1815 charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);
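							/* an IKEv1 peer may rekey the IKE_SA by simply
							 * setting up a new one, so adopt the CHILD_SAs
							 * of the duplicate instead of destroying them
							 * with it */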
1816 if (duplicate->get_version(duplicate) == IKEV1)
1817 {
1818 adopt_children(duplicate, ike_sa);
1819 }
1820 DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer "
1821 "'%Y' due to uniqueness policy", other);
1822 status = duplicate->delete(duplicate);
1823 break;
1824 case UNIQUE_KEEP:
1825 cancel = TRUE;
1826 /* we keep the first IKE_SA and delete all
1827 * other duplicates that might exist */
1828 policy = UNIQUE_REPLACE;
1829 break;
1830 default:
1831 break;
1832 }
1833 break;
1834 default:
1835 break;
1836 }
1837 }
1838 if (status == DESTROY_ME)
1839 {
1840 checkin_and_destroy(this, duplicate);
1841 }
1842 else
1843 {
1844 checkin(this, duplicate);
1845 }
1846 }
1847 enumerator->destroy(enumerator);
1848 /* reset thread's current IKE_SA after checkin */
1849 charon->bus->set_sa(charon->bus, ike_sa);
1850 return cancel;
1851 }
1852
1853 METHOD(ike_sa_manager_t, has_contact, bool,
1854 private_ike_sa_manager_t *this, identification_t *me,
1855 identification_t *other, int family)
1856 {
1857 table_item_t *item;
1858 u_int row, segment;
1859 rwlock_t *lock;
1860 bool found = FALSE;
1861
1862 row = chunk_hash_inc(other->get_encoding(other),
1863 chunk_hash(me->get_encoding(me))) & this->table_mask;
1864 segment = row & this->segment_mask;
1865 lock = this->connected_peers_segments[segment].lock;
1866 lock->read_lock(lock);
1867 item = this->connected_peers_table[row];
1868 while (item)
1869 {
1870 if (connected_peers_match(item->value, me, other, family))
1871 {
1872 found = TRUE;
1873 break;
1874 }
1875 item = item->next;
1876 }
1877 lock->unlock(lock);
1878
1879 return found;
1880 }
1881
1882 METHOD(ike_sa_manager_t, get_count, u_int,
1883 private_ike_sa_manager_t *this)
1884 {
1885 u_int segment, count = 0;
1886 mutex_t *mutex;
1887
1888 for (segment = 0; segment < this->segment_count; segment++)
1889 {
1890 mutex = this->segments[segment & this->segment_mask].mutex;
1891 mutex->lock(mutex);
1892 count += this->segments[segment].count;
1893 mutex->unlock(mutex);
1894 }
1895 return count;
1896 }
1897
1898 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
1899 private_ike_sa_manager_t *this, host_t *ip)
1900 {
1901 table_item_t *item;
1902 u_int row, segment;
1903 rwlock_t *lock;
1904 chunk_t addr;
1905 u_int count = 0;
1906
1907 if (ip)
1908 {
1909 addr = ip->get_address(ip);
1910 row = chunk_hash(addr) & this->table_mask;
1911 segment = row & this->segment_mask;
1912 lock = this->half_open_segments[segment].lock;
1913 lock->read_lock(lock);
1914 item = this->half_open_table[row];
1915 while (item)
1916 {
1917 half_open_t *half_open = item->value;
1918
1919 if (chunk_equals(addr, half_open->other))
1920 {
1921 count = half_open->count;
1922 break;
1923 }
1924 item = item->next;
1925 }
1926 lock->unlock(lock);
1927 }
1928 else
1929 {
1930 for (segment = 0; segment < this->segment_count; segment++)
1931 {
1932 lock = this->half_open_segments[segment].lock;
1933 lock->read_lock(lock);
1934 count += this->half_open_segments[segment].count;
1935 lock->unlock(lock);
1936 }
1937 }
1938 return count;
1939 }
1940
1941 METHOD(ike_sa_manager_t, flush, void,
1942 private_ike_sa_manager_t *this)
1943 {
1944 /* destroy all list entries */
1945 enumerator_t *enumerator;
1946 entry_t *entry;
1947 u_int segment;
1948
1949 lock_all_segments(this);
1950 DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SAs");
1951 /* Step 1: drive out all waiting threads */
1952 DBG2(DBG_MGR, "set driveout flags for all stored IKE_SAs");
1953 enumerator = create_table_enumerator(this);
1954 while (enumerator->enumerate(enumerator, &entry, &segment))
1955 {
1956 /* do not accept new threads, drive out waiting threads */
1957 entry->driveout_new_threads = TRUE;
1958 entry->driveout_waiting_threads = TRUE;
1959 }
1960 enumerator->destroy(enumerator);
1961 DBG2(DBG_MGR, "wait for all threads to leave IKE_SAs");
1962 /* Step 2: wait until all are gone */
1963 enumerator = create_table_enumerator(this);
1964 while (enumerator->enumerate(enumerator, &entry, &segment))
1965 {
1966 while (entry->waiting_threads || entry->checked_out)
1967 {
1968 /* wake up all */
1969 entry->condvar->broadcast(entry->condvar);
1970 /* go sleeping until they are gone */
1971 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
1972 }
1973 }
1974 enumerator->destroy(enumerator);
1975 DBG2(DBG_MGR, "delete all IKE_SAs");
1976 /* Step 3: initiate deletion of all IKE_SAs */
1977 enumerator = create_table_enumerator(this);
1978 while (enumerator->enumerate(enumerator, &entry, &segment))
1979 {
1980 charon->bus->set_sa(charon->bus, entry->ike_sa);
1981 if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
1982 { /* as the delete never gets processed, fire down events */
1983 switch (entry->ike_sa->get_state(entry->ike_sa))
1984 {
1985 case IKE_ESTABLISHED:
1986 case IKE_REKEYING:
1987 case IKE_DELETING:
1988 charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
1989 break;
1990 default:
1991 break;
1992 }
1993 }
1994 entry->ike_sa->delete(entry->ike_sa);
1995 }
1996 enumerator->destroy(enumerator);
1997
1998 DBG2(DBG_MGR, "destroy all entries");
1999 /* Step 4: destroy all entries */
2000 enumerator = create_table_enumerator(this);
2001 while (enumerator->enumerate(enumerator, &entry, &segment))
2002 {
2003 charon->bus->set_sa(charon->bus, entry->ike_sa);
2004 if (entry->half_open)
2005 {
2006 remove_half_open(this, entry);
2007 }
2008 if (entry->my_id && entry->other_id)
2009 {
2010 remove_connected_peers(this, entry);
2011 }
2012 if (entry->init_hash.ptr)
2013 {
2014 remove_init_hash(this, entry->init_hash);
2015 }
2016 remove_entry_at((private_enumerator_t*)enumerator);
2017 entry_destroy(entry);
2018 }
2019 enumerator->destroy(enumerator);
2020 charon->bus->set_sa(charon->bus, NULL);
2021 unlock_all_segments(this);
2022
2023 this->rng->destroy(this->rng);
2024 this->rng = NULL;
2025 this->hasher->destroy(this->hasher);
2026 this->hasher = NULL;
2027 }
2028
2029 METHOD(ike_sa_manager_t, destroy, void,
2030 private_ike_sa_manager_t *this)
2031 {
2032 u_int i;
2033
2034 /* these are already cleared in flush() above */
2035 free(this->ike_sa_table);
2036 free(this->half_open_table);
2037 free(this->connected_peers_table);
2038 free(this->init_hashes_table);
2039 for (i = 0; i < this->segment_count; i++)
2040 {
2041 this->segments[i].mutex->destroy(this->segments[i].mutex);
2042 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
2043 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
2044 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
2045 }
2046 free(this->segments);
2047 free(this->half_open_segments);
2048 free(this->connected_peers_segments);
2049 free(this->init_hashes_segments);
2050
2051 free(this);
2052 }
2053
2054 /**
2055 * This function returns the smallest power of two that is greater than or
2056 * equal to the given number. It works by setting all bits to the right of
2057 * the most significant 1 and then incrementing the number, so it rolls
2058 * over to the next power of two. Note: returns 0 for n == 0
2059 */
2060 static u_int get_nearest_powerof2(u_int n)
2061 {
2062 u_int i;
2063
2064 --n;
2065 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2066 {
2067 n |= n >> i;
2068 }
2069 return ++n;
2070 }
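
/* Worked example: for n = 1000 the loop spreads the most significant bit,
 * 999 = 0x3e7 -> 0x3ff, and the final increment yields 0x400 = 1024. A value
 * that already is a power of two is returned unchanged, e.g.
 * get_nearest_powerof2(8) == 8. */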
2071
2072 /*
2073 * Described in header.
2074 */
2075 ike_sa_manager_t *ike_sa_manager_create()
2076 {
2077 private_ike_sa_manager_t *this;
2078 u_int i;
2079
2080 INIT(this,
2081 .public = {
2082 .checkout = _checkout,
2083 .checkout_new = _checkout_new,
2084 .checkout_by_message = _checkout_by_message,
2085 .checkout_by_config = _checkout_by_config,
2086 .checkout_by_id = _checkout_by_id,
2087 .checkout_by_name = _checkout_by_name,
2088 .check_uniqueness = _check_uniqueness,
2089 .has_contact = _has_contact,
2090 .create_enumerator = _create_enumerator,
2091 .create_id_enumerator = _create_id_enumerator,
2092 .checkin = _checkin,
2093 .checkin_and_destroy = _checkin_and_destroy,
2094 .get_count = _get_count,
2095 .get_half_open_count = _get_half_open_count,
2096 .flush = _flush,
2097 .destroy = _destroy,
2098 },
2099 );
2100
2101 this->hasher = lib->crypto->create_hasher(lib->crypto, HASH_PREFERRED);
2102 if (this->hasher == NULL)
2103 {
2104 DBG1(DBG_MGR, "manager initialization failed, no hasher supported");
2105 free(this);
2106 return NULL;
2107 }
2108 this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
2109 if (this->rng == NULL)
2110 {
2111 DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
2112 this->hasher->destroy(this->hasher);
2113 free(this);
2114 return NULL;
2115 }
2116
2117 this->ikesa_limit = lib->settings->get_int(lib->settings,
2118 "%s.ikesa_limit", 0, charon->name);
2119
2120 this->table_size = get_nearest_powerof2(lib->settings->get_int(
2121 lib->settings, "%s.ikesa_table_size",
2122 DEFAULT_HASHTABLE_SIZE, charon->name));
2123 this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
2124 this->table_mask = this->table_size - 1;
2125
2126 this->segment_count = get_nearest_powerof2(lib->settings->get_int(
2127 lib->settings, "%s.ikesa_table_segments",
2128 DEFAULT_SEGMENT_COUNT, charon->name));
2129 this->segment_count = max(1, min(this->segment_count, this->table_size));
2130 this->segment_mask = this->segment_count - 1;
2131
2132 this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
2133 this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
2134 for (i = 0; i < this->segment_count; i++)
2135 {
2136 this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2137 this->segments[i].count = 0;
2138 }
2139
2140 /* we use the same table parameters for the table to track half-open SAs */
2141 this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
2142 this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2143 for (i = 0; i < this->segment_count; i++)
2144 {
2145 this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2146 this->half_open_segments[i].count = 0;
2147 }
2148
2149 /* also for the hash table used for duplicate tests */
2150 this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
2151 this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2152 for (i = 0; i < this->segment_count; i++)
2153 {
2154 this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2155 this->connected_peers_segments[i].count = 0;
2156 }
2157
2158 /* and again for the table of hashes of seen initial IKE messages */
2159 this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
2160 this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
2161 for (i = 0; i < this->segment_count; i++)
2162 {
2163 this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2164 this->init_hashes_segments[i].count = 0;
2165 }
2166
2167 this->reuse_ikesa = lib->settings->get_bool(lib->settings,
2168 "%s.reuse_ikesa", TRUE, charon->name);
2169 return &this->public;
2170 }
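
/* The tuning knobs read above map to strongswan.conf entries, assuming the
 * daemon name is "charon" (the %s in the setting keys); the values below are
 * just example figures:
 *
 *     charon {
 *         ikesa_table_size = 4096
 *         ikesa_table_segments = 8
 *         ikesa_limit = 0
 *         reuse_ikesa = yes
 *     }
 *
 * Table size and segment count are rounded up to powers of two as shown
 * above, and the segment count is capped at the table size. */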