Properly handle retransmitted initial IKE messages.
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2012 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <utils/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31
32 /* the default size of the hash table (MUST be a power of 2) */
33 #define DEFAULT_HASHTABLE_SIZE 1
34
35 /* the maximum size of the hash table (MUST be a power of 2) */
36 #define MAX_HASHTABLE_SIZE (1 << 30)
37
38 /* the default number of segments (MUST be a power of 2) */
39 #define DEFAULT_SEGMENT_COUNT 1
40
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Is this ike_sa currently checked out?
	 */
	bool checked_out;

	/**
	 * Does this SA drive out new threads? (refuse any new checkout attempt)
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads? (wake and reject threads
	 * already blocked on the condvar)
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * Hash of the IKE_SA_INIT (or initial phase1) message, used to detect
	 * retransmissions. Owned by the entry, freed in entry_destroy().
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * Message ID currently being processed, if any (-1 if none).
	 */
	u_int32_t message_id;
};
115
116 /**
117 * Implementation of entry_t.destroy.
118 */
119 static status_t entry_destroy(entry_t *this)
120 {
121 /* also destroy IKE SA */
122 this->ike_sa->destroy(this->ike_sa);
123 this->ike_sa_id->destroy(this->ike_sa_id);
124 chunk_free(&this->init_hash);
125 DESTROY_IF(this->other);
126 DESTROY_IF(this->my_id);
127 DESTROY_IF(this->other_id);
128 this->condvar->destroy(this->condvar);
129 free(this);
130 return SUCCESS;
131 }
132
133 /**
134 * Creates a new entry for the ike_sa_t list.
135 */
136 static entry_t *entry_create()
137 {
138 entry_t *this = malloc_thing(entry_t);
139
140 this->waiting_threads = 0;
141 this->condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
142
143 /* we set checkout flag when we really give it out */
144 this->checked_out = FALSE;
145 this->driveout_new_threads = FALSE;
146 this->driveout_waiting_threads = FALSE;
147 this->message_id = -1;
148 this->init_hash = chunk_empty;
149 this->other = NULL;
150 this->half_open = FALSE;
151 this->my_id = NULL;
152 this->other_id = NULL;
153 this->ike_sa_id = NULL;
154 this->ike_sa = NULL;
155
156 return this;
157 }
158
159 /**
160 * Function that matches entry_t objects by ike_sa_id_t.
161 */
162 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
163 {
164 if (id->equals(id, entry->ike_sa_id))
165 {
166 return TRUE;
167 }
168 if ((id->get_responder_spi(id) == 0 ||
169 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
170 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
171 {
172 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
173 return TRUE;
174 }
175 return FALSE;
176 }
177
178 /**
179 * Function that matches entry_t objects by ike_sa_t pointers.
180 */
181 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
182 {
183 return entry->ike_sa == ike_sa;
184 }
185
186 /**
187 * Hash function for ike_sa_id_t objects.
188 */
189 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
190 {
191 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
192 * locally unique, so we use our randomly allocated SPI whether we are
193 * initiator or responder to ensure a good distribution. The latter is not
194 * possible for IKEv1 as we don't know whether we are original initiator or
195 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
196 * SPIs (Cookies) to be allocated near random (we allocate them randomly
197 * anyway) it seems safe to always use the initiator SPI. */
198 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
199 ike_sa_id->is_initiator(ike_sa_id))
200 {
201 return ike_sa_id->get_initiator_spi(ike_sa_id);
202 }
203 return ike_sa_id->get_responder_spi(ike_sa_id);
204 }
205
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer.
 */
struct half_open_t {
	/** chunk of remote host address (cloned, owned by this struct) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;
};
218
/**
 * Destroys a half_open_t object, releasing the cloned address chunk.
 */
static void half_open_destroy(half_open_t *this)
{
	chunk_free(&this->other);
	free(this);
}
227
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct to track all IKE_SAs established between one pair of identities,
 * used for duplicate checking.
 */
struct connected_peers_t {
	/** own identity (cloned, owned by this struct) */
	identification_t *my_id;

	/** remote identity (cloned, owned by this struct) */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
243
244 static void connected_peers_destroy(connected_peers_t *this)
245 {
246 this->my_id->destroy(this->my_id);
247 this->other_id->destroy(this->other_id);
248 this->sas->destroy(this->sas);
249 free(this);
250 }
251
252 /**
253 * Function that matches connected_peers_t objects by the given ids.
254 */
255 static inline bool connected_peers_match(connected_peers_t *connected_peers,
256 identification_t *my_id, identification_t *other_id,
257 int family)
258 {
259 return my_id->equals(my_id, connected_peers->my_id) &&
260 other_id->equals(other_id, connected_peers->other_id) &&
261 (!family || family == connected_peers->family);
262 }
263
typedef struct init_hash_t init_hash_t;

/**
 * Cached hash of an initial IKE message, used to detect retransmits and to
 * return the same responder SPI for them.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};
273
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table. Each segment covers the table
 * rows with (row & segment_mask) equal to its index.
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment */
	u_int count;
};
286
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
300
typedef struct table_item_t table_item_t;

/**
 * Singly-linked bucket node. Instead of using linked_list_t for each bucket
 * we store the data in our own list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list (NULL terminates the bucket) */
	table_item_t *next;
};
314
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table (a power of 2).
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows (table_size - 1).
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table.
	 */
	segment_t *segments;

	/**
	 * The number of segments (a power of 2).
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment (segment_count - 1).
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects, keyed by remote address.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Hash table with connected_peers_t objects, keyed by the identity pair.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects for retransmit detection.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side (NULL once flush() was called)
	 */
	rng_t *rng;

	/**
	 * SHA1 hasher for IKE_SA_INIT retransmit detection
	 * (NULL once flush() was called)
	 */
	hasher_t *hasher;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;
};
401
402 /**
403 * Acquire a lock to access the segment of the table row with the given index.
404 * It also works with the segment index directly.
405 */
406 static inline void lock_single_segment(private_ike_sa_manager_t *this,
407 u_int index)
408 {
409 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
410 lock->lock(lock);
411 }
412
413 /**
414 * Release the lock required to access the segment of the table row with the given index.
415 * It also works with the segment index directly.
416 */
417 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
418 u_int index)
419 {
420 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
421 lock->unlock(lock);
422 }
423
424 /**
425 * Lock all segments
426 */
427 static void lock_all_segments(private_ike_sa_manager_t *this)
428 {
429 u_int i;
430
431 for (i = 0; i < this->segment_count; i++)
432 {
433 this->segments[i].mutex->lock(this->segments[i].mutex);
434 }
435 }
436
437 /**
438 * Unlock all segments
439 */
440 static void unlock_all_segments(private_ike_sa_manager_t *this)
441 {
442 u_int i;
443
444 for (i = 0; i < this->segment_count; i++)
445 {
446 this->segments[i].mutex->unlock(this->segments[i].mutex);
447 }
448 }
449
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry (its condvar is signaled when advancing)
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item; while non-NULL the current segment's mutex is held
	 */
	table_item_t *current;

	/**
	 * previous table item (predecessor of current in the bucket, if any)
	 */
	table_item_t *prev;
};
492
/**
 * Enumerate all entries in the IKE_SA table, segment by segment.
 *
 * The segment mutex is acquired when entering a row and is kept held while
 * an entry is handed out (released again once a bucket is exhausted), so
 * callers may safely use the yielded entry until the next enumerate() call.
 * Rows belonging to one segment are visited by stepping the row index by
 * segment_count, matching the row-to-segment mapping (row & segment_mask).
 */
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	if (this->entry)
	{
		/* wake up threads waiting for the previously yielded entry */
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{	/* entering a new row: lock its segment first */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				/* hand out the entry with the segment still locked */
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
529
/**
 * Destroy the table enumerator, waking any waiters on the current entry and
 * releasing the segment lock still held for it (see enumerate() above).
 */
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
	}
	if (this->current)
	{
		/* a non-NULL current item implies we still hold the segment lock */
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
543
/**
 * Creates an enumerator to enumerate the entries in the hash table.
 * Row/segment/item state starts zeroed via INIT, so enumeration begins at
 * segment 0, row 0.
 */
static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
{
	private_enumerator_t *enumerator;

	INIT(enumerator,
		.enumerator = {
			.enumerate = (void*)_enumerate,
			.destroy = _enumerator_destroy,
		},
		.manager = this,
	);
	return &enumerator->enumerator;
}
560
561 /**
562 * Put an entry into the hash table.
563 * Note: The caller has to unlock the returned segment.
564 */
565 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
566 {
567 table_item_t *current, *item;
568 u_int row, segment;
569
570 INIT(item,
571 .value = entry,
572 );
573
574 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
575 segment = row & this->segment_mask;
576
577 lock_single_segment(this, segment);
578 current = this->ike_sa_table[row];
579 if (current)
580 { /* insert at the front of current bucket */
581 item->next = current;
582 }
583 this->ike_sa_table[row] = item;
584 this->segments[segment].count++;
585 return segment;
586 }
587
588 /**
589 * Remove an entry from the hash table.
590 * Note: The caller MUST have a lock on the segment of this entry.
591 */
592 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
593 {
594 table_item_t *item, *prev = NULL;
595 u_int row, segment;
596
597 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
598 segment = row & this->segment_mask;
599 item = this->ike_sa_table[row];
600 while (item)
601 {
602 if (item->value == entry)
603 {
604 if (prev)
605 {
606 prev->next = item->next;
607 }
608 else
609 {
610 this->ike_sa_table[row] = item->next;
611 }
612 this->segments[segment].count--;
613 free(item);
614 break;
615 }
616 prev = item;
617 item = item->next;
618 }
619 }
620
/**
 * Remove the entry at the current enumerator position.
 *
 * Must only be called while the enumerator has an entry checked out (the
 * segment lock is then held, see enumerate()).
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			/* removed the bucket head: current is now NULL, so the next
			 * enumerate() call will re-lock the segment before touching the
			 * row again - release the lock here to keep it balanced */
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
646
647 /**
648 * Find an entry using the provided match function to compare the entries for
649 * equality.
650 */
651 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
652 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
653 linked_list_match_t match, void *p1, void *p2)
654 {
655 table_item_t *item;
656 u_int row, seg;
657
658 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
659 seg = row & this->segment_mask;
660
661 lock_single_segment(this, seg);
662 item = this->ike_sa_table[row];
663 while (item)
664 {
665 if (match(item->value, p1, p2))
666 {
667 *entry = item->value;
668 *segment = seg;
669 /* the locked segment has to be unlocked by the caller */
670 return SUCCESS;
671 }
672 item = item->next;
673 }
674 unlock_single_segment(this, seg);
675 return NOT_FOUND;
676 }
677
678 /**
679 * Find an entry by ike_sa_id_t.
680 * Note: On SUCCESS, the caller has to unlock the segment.
681 */
682 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
683 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
684 {
685 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
686 (linked_list_match_t)entry_match_by_id, ike_sa_id, NULL);
687 }
688
689 /**
690 * Find an entry by IKE_SA pointer.
691 * Note: On SUCCESS, the caller has to unlock the segment.
692 */
693 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
694 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
695 {
696 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
697 (linked_list_match_t)entry_match_by_sa, ike_sa, NULL);
698 }
699
700 /**
701 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
702 * acquirable.
703 */
704 static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
705 u_int segment)
706 {
707 if (entry->driveout_new_threads)
708 {
709 /* we are not allowed to get this */
710 return FALSE;
711 }
712 while (entry->checked_out && !entry->driveout_waiting_threads)
713 {
714 /* so wait until we can get it for us.
715 * we register us as waiting. */
716 entry->waiting_threads++;
717 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
718 entry->waiting_threads--;
719 }
720 /* hm, a deletion request forbids us to get this SA, get next one */
721 if (entry->driveout_waiting_threads)
722 {
723 /* we must signal here, others may be waiting on it, too */
724 entry->condvar->signal(entry->condvar);
725 return FALSE;
726 }
727 return TRUE;
728 }
729
730 /**
731 * Put a half-open SA into the hash table.
732 */
733 static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
734 {
735 table_item_t *item;
736 u_int row, segment;
737 rwlock_t *lock;
738 half_open_t *half_open;
739 chunk_t addr;
740
741 addr = entry->other->get_address(entry->other);
742 row = chunk_hash(addr) & this->table_mask;
743 segment = row & this->segment_mask;
744 lock = this->half_open_segments[segment].lock;
745 lock->write_lock(lock);
746 item = this->half_open_table[row];
747 while (item)
748 {
749 half_open = item->value;
750
751 if (chunk_equals(addr, half_open->other))
752 {
753 half_open->count++;
754 break;
755 }
756 item = item->next;
757 }
758
759 if (!item)
760 {
761 INIT(half_open,
762 .other = chunk_clone(addr),
763 .count = 1,
764 );
765 INIT(item,
766 .value = half_open,
767 .next = this->half_open_table[row],
768 );
769 this->half_open_table[row] = item;
770 }
771 this->half_open_segments[segment].count++;
772 lock->unlock(lock);
773 }
774
775 /**
776 * Remove a half-open SA from the hash table.
777 */
778 static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
779 {
780 table_item_t *item, *prev = NULL;
781 u_int row, segment;
782 rwlock_t *lock;
783 chunk_t addr;
784
785 addr = entry->other->get_address(entry->other);
786 row = chunk_hash(addr) & this->table_mask;
787 segment = row & this->segment_mask;
788 lock = this->half_open_segments[segment].lock;
789 lock->write_lock(lock);
790 item = this->half_open_table[row];
791 while (item)
792 {
793 half_open_t *half_open = item->value;
794
795 if (chunk_equals(addr, half_open->other))
796 {
797 if (--half_open->count == 0)
798 {
799 if (prev)
800 {
801 prev->next = item->next;
802 }
803 else
804 {
805 this->half_open_table[row] = item->next;
806 }
807 half_open_destroy(half_open);
808 free(item);
809 }
810 this->half_open_segments[segment].count--;
811 break;
812 }
813 prev = item;
814 item = item->next;
815 }
816 lock->unlock(lock);
817 }
818
819 /**
820 * Put an SA between two peers into the hash table.
821 */
822 static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
823 {
824 table_item_t *item;
825 u_int row, segment;
826 rwlock_t *lock;
827 connected_peers_t *connected_peers;
828 chunk_t my_id, other_id;
829 int family;
830
831 my_id = entry->my_id->get_encoding(entry->my_id);
832 other_id = entry->other_id->get_encoding(entry->other_id);
833 family = entry->other->get_family(entry->other);
834 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
835 segment = row & this->segment_mask;
836 lock = this->connected_peers_segments[segment].lock;
837 lock->write_lock(lock);
838 item = this->connected_peers_table[row];
839 while (item)
840 {
841 connected_peers = item->value;
842
843 if (connected_peers_match(connected_peers, entry->my_id,
844 entry->other_id, family))
845 {
846 if (connected_peers->sas->find_first(connected_peers->sas,
847 (linked_list_match_t)entry->ike_sa_id->equals,
848 NULL, entry->ike_sa_id) == SUCCESS)
849 {
850 lock->unlock(lock);
851 return;
852 }
853 break;
854 }
855 item = item->next;
856 }
857
858 if (!item)
859 {
860 INIT(connected_peers,
861 .my_id = entry->my_id->clone(entry->my_id),
862 .other_id = entry->other_id->clone(entry->other_id),
863 .family = family,
864 .sas = linked_list_create(),
865 );
866 INIT(item,
867 .value = connected_peers,
868 .next = this->connected_peers_table[row],
869 );
870 this->connected_peers_table[row] = item;
871 }
872 connected_peers->sas->insert_last(connected_peers->sas,
873 entry->ike_sa_id->clone(entry->ike_sa_id));
874 this->connected_peers_segments[segment].count++;
875 lock->unlock(lock);
876 }
877
878 /**
879 * Remove an SA between two peers from the hash table.
880 */
881 static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
882 {
883 table_item_t *item, *prev = NULL;
884 u_int row, segment;
885 rwlock_t *lock;
886 chunk_t my_id, other_id;
887 int family;
888
889 my_id = entry->my_id->get_encoding(entry->my_id);
890 other_id = entry->other_id->get_encoding(entry->other_id);
891 family = entry->other->get_family(entry->other);
892
893 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
894 segment = row & this->segment_mask;
895
896 lock = this->connected_peers_segments[segment].lock;
897 lock->write_lock(lock);
898 item = this->connected_peers_table[row];
899 while (item)
900 {
901 connected_peers_t *current = item->value;
902
903 if (connected_peers_match(current, entry->my_id, entry->other_id,
904 family))
905 {
906 enumerator_t *enumerator;
907 ike_sa_id_t *ike_sa_id;
908
909 enumerator = current->sas->create_enumerator(current->sas);
910 while (enumerator->enumerate(enumerator, &ike_sa_id))
911 {
912 if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
913 {
914 current->sas->remove_at(current->sas, enumerator);
915 ike_sa_id->destroy(ike_sa_id);
916 this->connected_peers_segments[segment].count--;
917 break;
918 }
919 }
920 enumerator->destroy(enumerator);
921 if (current->sas->get_count(current->sas) == 0)
922 {
923 if (prev)
924 {
925 prev->next = item->next;
926 }
927 else
928 {
929 this->connected_peers_table[row] = item->next;
930 }
931 connected_peers_destroy(current);
932 free(item);
933 }
934 break;
935 }
936 prev = item;
937 item = item->next;
938 }
939 lock->unlock(lock);
940 }
941
942 /**
943 * Get a random SPI for new IKE_SAs
944 */
945 static u_int64_t get_spi(private_ike_sa_manager_t *this)
946 {
947 u_int64_t spi = 0;
948
949 if (this->rng)
950 {
951 this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi);
952 }
953 return spi;
954 }
955
956 /**
957 * Check if we already have created an IKE_SA based on the initial IKE message
958 * with the given hash.
959 * If not the hash is stored, the hash data is not(!) cloned.
960 *
961 * Also, the local SPI is returned. In case of a retransmit this is already
962 * stored together with the hash, otherwise it is newly allocated and should
963 * be used to create the IKE_SA.
964 *
965 * @returns TRUE if the message with the given hash was seen before
966 */
967 static bool check_and_put_init_hash(private_ike_sa_manager_t *this,
968 chunk_t init_hash, u_int64_t *our_spi)
969 {
970 table_item_t *item;
971 u_int row, segment;
972 mutex_t *mutex;
973 init_hash_t *init;
974
975 row = chunk_hash(init_hash) & this->table_mask;
976 segment = row & this->segment_mask;
977 mutex = this->init_hashes_segments[segment].mutex;
978 mutex->lock(mutex);
979 item = this->init_hashes_table[row];
980 while (item)
981 {
982 init_hash_t *current = item->value;
983
984 if (chunk_equals(init_hash, current->hash))
985 {
986 *our_spi = current->our_spi;
987 mutex->unlock(mutex);
988 return TRUE;
989 }
990 item = item->next;
991 }
992
993 INIT(init,
994 .hash = {
995 .len = init_hash.len,
996 .ptr = init_hash.ptr,
997 },
998 .our_spi = get_spi(this),
999 );
1000 INIT(item,
1001 .value = init,
1002 .next = this->init_hashes_table[row],
1003 );
1004 this->init_hashes_table[row] = item;
1005 *our_spi = init->our_spi;
1006 mutex->unlock(mutex);
1007 return FALSE;
1008 }
1009
1010 /**
1011 * Remove the hash of an initial IKE message from the cache.
1012 */
1013 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1014 {
1015 table_item_t *item, *prev = NULL;
1016 u_int row, segment;
1017 mutex_t *mutex;
1018
1019 row = chunk_hash(init_hash) & this->table_mask;
1020 segment = row & this->segment_mask;
1021 mutex = this->init_hashes_segments[segment].mutex;
1022 mutex->lock(mutex);
1023 item = this->init_hashes_table[row];
1024 while (item)
1025 {
1026 init_hash_t *current = item->value;
1027
1028 if (chunk_equals(init_hash, current->hash))
1029 {
1030 if (prev)
1031 {
1032 prev->next = item->next;
1033 }
1034 else
1035 {
1036 this->init_hashes_table[row] = item->next;
1037 }
1038 free(current);
1039 free(item);
1040 break;
1041 }
1042 prev = item;
1043 item = item->next;
1044 }
1045 mutex->unlock(mutex);
1046 }
1047
/**
 * Check out an IKE_SA by its ID, blocking until no other thread uses it.
 * Returns NULL if the SA is unknown or is being driven out; the bus SA is
 * updated either way.
 */
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA");

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		if (wait_for_entry(this, entry, segment))
		{
			entry->checked_out = TRUE;
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		/* get_entry_by_id() left the segment locked on SUCCESS */
		unlock_single_segment(this, segment);
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1071
1072 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1073 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1074 {
1075 ike_sa_id_t *ike_sa_id;
1076 ike_sa_t *ike_sa;
1077 u_int8_t ike_version;
1078
1079 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1080
1081 if (initiator)
1082 {
1083 ike_sa_id = ike_sa_id_create(ike_version, get_spi(this), 0, TRUE);
1084 }
1085 else
1086 {
1087 ike_sa_id = ike_sa_id_create(ike_version, 0, get_spi(this), FALSE);
1088 }
1089 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1090 ike_sa_id->destroy(ike_sa_id);
1091
1092 if (ike_sa)
1093 {
1094 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1095 ike_sa->get_unique_id(ike_sa));
1096 }
1097 return ike_sa;
1098 }
1099
/**
 * Check out the IKE_SA a received message belongs to.
 *
 * For initial messages (IKEv2 IKE_SA_INIT request, IKEv1 ID_PROT/AGGRESSIVE
 * with zero responder SPI) the packet hash is checked against the init-hash
 * cache: a first-seen message creates a fresh IKE_SA, a retransmit is mapped
 * back to the SA created earlier via the cached responder SPI. All other
 * messages are matched by IKE_SA ID; a request carrying the message ID
 * currently being processed is ignored.
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout IKE_SA by message");

	if (id->get_responder_spi(id) == 0)
	{
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init && this->hasher)
	{	/* initial request. checking for the hasher prevents crashes once
		 * flush() has been called */
		u_int64_t our_spi;
		chunk_t hash;

		this->hasher->allocate_hash(this->hasher,
									message->get_packet_data(message), &hash);

		/* ensure this is not a retransmit of an already handled init message */
		if (!check_and_put_init_hash(this, hash, &our_spi))
		{	/* we've not seen this packet yet, create a new IKE_SA */
			id->set_responder_spi(id, our_spi);
			ike_sa = ike_sa_create(id, FALSE, ike_version);
			if (ike_sa)
			{
				entry = entry_create();
				entry->ike_sa = ike_sa;
				entry->ike_sa_id = id->clone(id);

				segment = put_entry(this, entry);
				entry->checked_out = TRUE;
				unlock_single_segment(this, segment);

				entry->message_id = message->get_message_id(message);
				/* the entry adopts the hash data stored in the init cache */
				entry->init_hash = hash;

				DBG2(DBG_MGR, "created IKE_SA %s[%u]",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
			}
			else
			{
				/* SA creation failed, drop the cached hash again */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				DBG1(DBG_MGR, "ignoring message, no such IKE_SA");
			}
			id->destroy(id);
			charon->bus->set_sa(charon->bus, ike_sa);
			return ike_sa;
		}
		/* it looks like we already handled this init message to some degree,
		 * use the responder SPI allocated back then to find the SA below */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out in IKEv2 if we are not already processing it */
		if (message->get_request(message) &&
			message->get_message_id(message) == entry->message_id)
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->message_id);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = TRUE;
			entry->message_id = message->get_message_id(message);
			if (ike_id->get_responder_spi(ike_id) == 0)
			{	/* apply the responder SPI from the retransmitted message */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	id->destroy(id);
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1218
/**
 * Check out an existing IKE_SA matching the given peer config (if reuse is
 * enabled and a usable SA with equal peer and IKE config exists), otherwise
 * hand out a newly created one.
 */
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa)
	{	/* IKE_SA reuse disabled by config */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		return ike_sa;
	}

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (!wait_for_entry(this, entry, segment))
		{
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
		{	/* skip IKE_SAs which are not usable */
			continue;
		}

		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				/* break with the entry checked out; enumerator_destroy()
				 * below releases the segment lock and signals waiters */
				entry->checked_out = TRUE;
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1274
1275 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1276 private_ike_sa_manager_t *this, u_int32_t id, bool child)
1277 {
1278 enumerator_t *enumerator, *children;
1279 entry_t *entry;
1280 ike_sa_t *ike_sa = NULL;
1281 child_sa_t *child_sa;
1282 u_int segment;
1283
1284 DBG2(DBG_MGR, "checkout IKE_SA by ID");
1285
1286 enumerator = create_table_enumerator(this);
1287 while (enumerator->enumerate(enumerator, &entry, &segment))
1288 {
1289 if (wait_for_entry(this, entry, segment))
1290 {
1291 /* look for a child with such a reqid ... */
1292 if (child)
1293 {
1294 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1295 while (children->enumerate(children, (void**)&child_sa))
1296 {
1297 if (child_sa->get_reqid(child_sa) == id)
1298 {
1299 ike_sa = entry->ike_sa;
1300 break;
1301 }
1302 }
1303 children->destroy(children);
1304 }
1305 else /* ... or for a IKE_SA with such a unique id */
1306 {
1307 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1308 {
1309 ike_sa = entry->ike_sa;
1310 }
1311 }
1312 /* got one, return */
1313 if (ike_sa)
1314 {
1315 entry->checked_out = TRUE;
1316 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1317 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1318 break;
1319 }
1320 }
1321 }
1322 enumerator->destroy(enumerator);
1323
1324 charon->bus->set_sa(charon->bus, ike_sa);
1325 return ike_sa;
1326 }
1327
1328 METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1329 private_ike_sa_manager_t *this, char *name, bool child)
1330 {
1331 enumerator_t *enumerator, *children;
1332 entry_t *entry;
1333 ike_sa_t *ike_sa = NULL;
1334 child_sa_t *child_sa;
1335 u_int segment;
1336
1337 enumerator = create_table_enumerator(this);
1338 while (enumerator->enumerate(enumerator, &entry, &segment))
1339 {
1340 if (wait_for_entry(this, entry, segment))
1341 {
1342 /* look for a child with such a policy name ... */
1343 if (child)
1344 {
1345 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1346 while (children->enumerate(children, (void**)&child_sa))
1347 {
1348 if (streq(child_sa->get_name(child_sa), name))
1349 {
1350 ike_sa = entry->ike_sa;
1351 break;
1352 }
1353 }
1354 children->destroy(children);
1355 }
1356 else /* ... or for a IKE_SA with such a connection name */
1357 {
1358 if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1359 {
1360 ike_sa = entry->ike_sa;
1361 }
1362 }
1363 /* got one, return */
1364 if (ike_sa)
1365 {
1366 entry->checked_out = TRUE;
1367 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1368 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1369 break;
1370 }
1371 }
1372 }
1373 enumerator->destroy(enumerator);
1374
1375 charon->bus->set_sa(charon->bus, ike_sa);
1376 return ike_sa;
1377 }
1378
1379 /**
1380 * enumerator filter function, waiting variant
1381 */
1382 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1383 entry_t **in, ike_sa_t **out, u_int *segment)
1384 {
1385 if (wait_for_entry(this, *in, *segment))
1386 {
1387 *out = (*in)->ike_sa;
1388 charon->bus->set_sa(charon->bus, *out);
1389 return TRUE;
1390 }
1391 return FALSE;
1392 }
1393
1394 /**
1395 * enumerator filter function, skipping variant
1396 */
1397 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1398 entry_t **in, ike_sa_t **out, u_int *segment)
1399 {
1400 if (!(*in)->driveout_new_threads &&
1401 !(*in)->driveout_waiting_threads &&
1402 !(*in)->checked_out)
1403 {
1404 *out = (*in)->ike_sa;
1405 charon->bus->set_sa(charon->bus, *out);
1406 return TRUE;
1407 }
1408 return FALSE;
1409 }
1410
/**
 * Reset threads SA after enumeration, used as cleanup callback of the
 * filtered enumerator created in create_enumerator().
 *
 * @param data	unused callback argument
 */
static void reset_sa(void *data)
{
	/* clear the per-thread IKE_SA that the filter functions set on the bus */
	charon->bus->set_sa(charon->bus, NULL);
}
1418
METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
	private_ike_sa_manager_t* this, bool wait)
{
	/* wrap the raw table enumerator in a filter: the "wait" variant blocks
	 * until busy entries are released, the "skip" variant ignores them.
	 * reset_sa() clears the thread's bus SA when enumeration ends. */
	return enumerator_create_filter(create_table_enumerator(this),
		wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
		this, reset_sa);
}
1426
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry; on SUCCESS the segment is locked */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated, the SPIs may have changed while the
		 * SA was checked out */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = FALSE;
		entry->message_id = -1;
		/* check if this SA is half-open and keep the half-open table in sync */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 !entry->ike_sa_id->is_initiator(entry->ike_sa_id) &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA (we are the responder and the SA
			 * is still connecting) */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		DBG2(DBG_MGR, "check-in of IKE_SA successful.");
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* no entry found for this SA, create and insert a new one;
		 * put_entry() returns with the segment locked */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		segment = put_entry(this, entry);
	}

	/* apply identities for duplicate test, but only once per entry */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	/* reset the current thread's SA on the bus */
	charon->bus->set_sa(charon->bus, NULL);
}
1522
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
			entry->checked_out = FALSE;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		/* entry is removed from the table before the lock is released, so
		 * no other thread should be able to reach it anymore */
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up the auxiliary tables still referencing this entry */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	/* reset the current thread's SA on the bus */
	charon->bus->set_sa(charon->bus, NULL);
}
1591
/**
 * Cleanup function for create_id_enumerator: destroys the cloned list
 * of ike_sa_id_t objects together with the contained IDs.
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1599
METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
	private_ike_sa_manager_t *this, identification_t *me,
	identification_t *other, int family)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	linked_list_t *ids = NULL;

	/* hash both identities to locate the connected_peers bucket */
	row = chunk_hash_inc(other->get_encoding(other),
						 chunk_hash(me->get_encoding(me))) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->read_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, me, other, family))
		{
			/* clone the ID list so it can be enumerated after the
			 * read lock is released */
			ids = current->sas->clone_offset(current->sas,
											 offsetof(ike_sa_id_t, clone));
			break;
		}
		item = item->next;
	}
	lock->unlock(lock);

	if (!ids)
	{
		return enumerator_create_empty();
	}
	/* the cleaner destroys the cloned list and the contained IDs */
	return enumerator_create_cleaner(ids->create_enumerator(ids),
									 (void*)id_enumerator_cleanup, ids);
}
1637
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NO && !force_replace)
	{	/* uniqueness checking disabled, nothing to do */
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	/* enumerate all SAs between the same pair of identities */
	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{	/* SA is gone or unavailable, skip it */
			continue;
		}
		if (force_replace)
		{	/* INITIAL_CONTACT handling: destroy the duplicate regardless
			 * of the configured uniqueness policy */
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer "
								 "'%Y' due to uniqueness policy", other);
							status = duplicate->delete(duplicate);
							break;
						case UNIQUE_KEEP:
							/* cancel the new SA instead of the existing one */
							cancel = TRUE;
							/* we keep the first IKE_SA and delete all
							 * other duplicates that might exist */
							policy = UNIQUE_REPLACE;
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
1720
1721 METHOD(ike_sa_manager_t, has_contact, bool,
1722 private_ike_sa_manager_t *this, identification_t *me,
1723 identification_t *other, int family)
1724 {
1725 table_item_t *item;
1726 u_int row, segment;
1727 rwlock_t *lock;
1728 bool found = FALSE;
1729
1730 row = chunk_hash_inc(other->get_encoding(other),
1731 chunk_hash(me->get_encoding(me))) & this->table_mask;
1732 segment = row & this->segment_mask;
1733 lock = this->connected_peers_segments[segment].lock;
1734 lock->read_lock(lock);
1735 item = this->connected_peers_table[row];
1736 while (item)
1737 {
1738 if (connected_peers_match(item->value, me, other, family))
1739 {
1740 found = TRUE;
1741 break;
1742 }
1743 item = item->next;
1744 }
1745 lock->unlock(lock);
1746
1747 return found;
1748 }
1749
1750 METHOD(ike_sa_manager_t, get_count, u_int,
1751 private_ike_sa_manager_t *this)
1752 {
1753 u_int segment, count = 0;
1754 mutex_t *mutex;
1755
1756 for (segment = 0; segment < this->segment_count; segment++)
1757 {
1758 mutex = this->segments[segment & this->segment_mask].mutex;
1759 mutex->lock(mutex);
1760 count += this->segments[segment].count;
1761 mutex->unlock(mutex);
1762 }
1763 return count;
1764 }
1765
1766 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
1767 private_ike_sa_manager_t *this, host_t *ip)
1768 {
1769 table_item_t *item;
1770 u_int row, segment;
1771 rwlock_t *lock;
1772 chunk_t addr;
1773 u_int count = 0;
1774
1775 if (ip)
1776 {
1777 addr = ip->get_address(ip);
1778 row = chunk_hash(addr) & this->table_mask;
1779 segment = row & this->segment_mask;
1780 lock = this->half_open_segments[segment].lock;
1781 lock->read_lock(lock);
1782 item = this->half_open_table[row];
1783 while (item)
1784 {
1785 half_open_t *half_open = item->value;
1786
1787 if (chunk_equals(addr, half_open->other))
1788 {
1789 count = half_open->count;
1790 break;
1791 }
1792 }
1793 lock->unlock(lock);
1794 }
1795 else
1796 {
1797 for (segment = 0; segment < this->segment_count; segment++)
1798 {
1799 lock = this->half_open_segments[segment].lock;
1800 lock->read_lock(lock);
1801 count += this->half_open_segments[segment].count;
1802 lock->unlock(lock);
1803 }
1804 }
1805 return count;
1806 }
1807
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries; runs in four phases with all segment
	 * locks held so no SA can be checked out concurrently */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries and purge the auxiliary tables */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* release crypto helpers here already, destroy() only frees the tables */
	this->rng->destroy(this->rng);
	this->rng = NULL;
	this->hasher->destroy(this->hasher);
	this->hasher = NULL;
}
1895
1896 METHOD(ike_sa_manager_t, destroy, void,
1897 private_ike_sa_manager_t *this)
1898 {
1899 u_int i;
1900
1901 /* these are already cleared in flush() above */
1902 free(this->ike_sa_table);
1903 free(this->half_open_table);
1904 free(this->connected_peers_table);
1905 free(this->init_hashes_table);
1906 for (i = 0; i < this->segment_count; i++)
1907 {
1908 this->segments[i].mutex->destroy(this->segments[i].mutex);
1909 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
1910 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
1911 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
1912 }
1913 free(this->segments);
1914 free(this->half_open_segments);
1915 free(this->connected_peers_segments);
1916 free(this->init_hashes_segments);
1917
1918 free(this);
1919 }
1920
/**
 * This function returns the next-highest power of two for the given number.
 * It works by smearing the most significant set bit into every lower bit
 * position, then incrementing the result so it rolls over to the nearest
 * power of two. Note: returns 0 for n == 0
 */
static u_int get_nearest_powerof2(u_int n)
{
	u_int shift = 1;

	n--;
	while (shift < sizeof(u_int) * 8)
	{
		n |= n >> shift;
		shift <<= 1;
	}
	return n + 1;
}
1938
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.destroy = _destroy,
		},
	);

	/* the hasher is used to hash initial IKE messages, the RNG to allocate
	 * SPIs; both are mandatory for operation */
	this->hasher = lib->crypto->create_hasher(lib->crypto, HASH_PREFERRED);
	if (this->hasher == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no hasher supported");
		free(this);
		return NULL;
	}
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		this->hasher->destroy(this->hasher);
		free(this);
		return NULL;
	}

	/* round the configured table size up to a power of two and clamp it,
	 * so masking with table_size - 1 can replace modulo for row lookup */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(lib->settings,
						"charon.ikesa_table_size", DEFAULT_HASHTABLE_SIZE));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	/* same for the segment count, which must not exceed the table size */
	this->segment_count = get_nearest_powerof2(lib->settings->get_int(lib->settings,
						"charon.ikesa_table_segments", DEFAULT_SEGMENT_COUNT));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->segments[i].count = 0;
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->half_open_segments[i].count = 0;
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->connected_peers_segments[i].count = 0;
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->init_hashes_segments[i].count = 0;
	}

	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"charon.reuse_ikesa", TRUE);
	return &this->public;
}