ike-sa-manager: Safely access the RNG instance with an rwlock
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2015 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31 #include <processing/jobs/delete_ike_sa_job.h>
32
33 /* the default size of the hash table (MUST be a power of 2) */
34 #define DEFAULT_HASHTABLE_SIZE 1
35
36 /* the maximum size of the hash table (MUST be a power of 2) */
37 #define MAX_HASHTABLE_SIZE (1 << 30)
38
39 /* the default number of segments (MUST be a power of 2) */
40 #define DEFAULT_SEGMENT_COUNT 1
41
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Is this ike_sa currently checked out (in exclusive use by a thread)?
	 */
	bool checked_out;

	/**
	 * Does this SA drive out new threads? If TRUE, threads trying to
	 * acquire the entry are rejected immediately.
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads? If TRUE, threads already
	 * blocked on the condvar give up once woken.
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 * (stored unsigned, so -1 is represented as 0xFFFFFFFF)
	 */
	u_int32_t processing;
};
116
117 /**
118 * Implementation of entry_t.destroy.
119 */
120 static status_t entry_destroy(entry_t *this)
121 {
122 /* also destroy IKE SA */
123 this->ike_sa->destroy(this->ike_sa);
124 this->ike_sa_id->destroy(this->ike_sa_id);
125 chunk_free(&this->init_hash);
126 DESTROY_IF(this->other);
127 DESTROY_IF(this->my_id);
128 DESTROY_IF(this->other_id);
129 this->condvar->destroy(this->condvar);
130 free(this);
131 return SUCCESS;
132 }
133
134 /**
135 * Creates a new entry for the ike_sa_t list.
136 */
137 static entry_t *entry_create()
138 {
139 entry_t *this;
140
141 INIT(this,
142 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
143 .processing = -1,
144 );
145
146 return this;
147 }
148
149 /**
150 * Function that matches entry_t objects by ike_sa_id_t.
151 */
152 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
153 {
154 if (id->equals(id, entry->ike_sa_id))
155 {
156 return TRUE;
157 }
158 if ((id->get_responder_spi(id) == 0 ||
159 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
160 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
161 {
162 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
163 return TRUE;
164 }
165 return FALSE;
166 }
167
168 /**
169 * Function that matches entry_t objects by ike_sa_t pointers.
170 */
171 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
172 {
173 return entry->ike_sa == ike_sa;
174 }
175
176 /**
177 * Hash function for ike_sa_id_t objects.
178 */
179 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
180 {
181 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
182 * locally unique, so we use our randomly allocated SPI whether we are
183 * initiator or responder to ensure a good distribution. The latter is not
184 * possible for IKEv1 as we don't know whether we are original initiator or
185 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
186 * SPIs (Cookies) to be allocated near random (we allocate them randomly
187 * anyway) it seems safe to always use the initiator SPI. */
188 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
189 ike_sa_id->is_initiator(ike_sa_id))
190 {
191 return ike_sa_id->get_initiator_spi(ike_sa_id);
192 }
193 return ike_sa_id->get_responder_spi(ike_sa_id);
194 }
195
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer.
 */
struct half_open_t {
	/** chunk of the remote host address (owned; cloned on insertion) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;
};
208
209 /**
210 * Destroys a half_open_t object.
211 */
212 static void half_open_destroy(half_open_t *this)
213 {
214 chunk_free(&this->other);
215 free(this);
216 }
217
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct to track the IKE_SAs established between a pair of identities,
 * used for duplicate checking.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
233
234 static void connected_peers_destroy(connected_peers_t *this)
235 {
236 this->my_id->destroy(this->my_id);
237 this->other_id->destroy(this->other_id);
238 this->sas->destroy(this->sas);
239 free(this);
240 }
241
242 /**
243 * Function that matches connected_peers_t objects by the given ids.
244 */
245 static inline bool connected_peers_match(connected_peers_t *connected_peers,
246 identification_t *my_id, identification_t *other_id,
247 int family)
248 {
249 return my_id->equals(my_id, connected_peers->my_id) &&
250 other_id->equals(other_id, connected_peers->other_id) &&
251 (!family || family == connected_peers->family);
252 }
253
typedef struct init_hash_t init_hash_t;

/**
 * Struct mapping the hash of an initial IKE message to the local SPI that
 * was allocated for it, used to detect retransmissions.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};
263
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table.
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment (modified with mutex held) */
	u_int count;
};
276
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
290
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list, NULL at the tail */
	table_item_t *next;
};
304
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table.
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows.
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table (rows are assigned to segments
	 * round-robin via segment_mask).
	 */
	segment_t *segments;

	/**
	 * The number of segments.
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment.
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs, maintained with ref_get/ref_put.
	 */
	refcount_t half_open_count;

	/**
	 * Hash table with connected_peers_t objects.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side, may be NULL; only access
	 * while holding rng_lock
	 */
	rng_t *rng;

	/**
	 * Lock to access the RNG instance
	 */
	rwlock_t *rng_lock;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, if any (0 means no limit)
	 */
	u_int ikesa_limit;
};
401
402 /**
403 * Acquire a lock to access the segment of the table row with the given index.
404 * It also works with the segment index directly.
405 */
406 static inline void lock_single_segment(private_ike_sa_manager_t *this,
407 u_int index)
408 {
409 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
410 lock->lock(lock);
411 }
412
413 /**
414 * Release the lock required to access the segment of the table row with the given index.
415 * It also works with the segment index directly.
416 */
417 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
418 u_int index)
419 {
420 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
421 lock->unlock(lock);
422 }
423
424 /**
425 * Lock all segments
426 */
427 static void lock_all_segments(private_ike_sa_manager_t *this)
428 {
429 u_int i;
430
431 for (i = 0; i < this->segment_count; i++)
432 {
433 this->segments[i].mutex->lock(this->segments[i].mutex);
434 }
435 }
436
437 /**
438 * Unlock all segments
439 */
440 static void unlock_all_segments(private_ike_sa_manager_t *this)
441 {
442 u_int i;
443
444 for (i = 0; i < this->segment_count; i++)
445 {
446 this->segments[i].mutex->unlock(this->segments[i].mutex);
447 }
448 }
449
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry (its condvar is signaled when the
	 * enumerator advances or is destroyed)
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item; while set, the current segment's mutex is held
	 */
	table_item_t *current;

	/**
	 * previous table item
	 */
	table_item_t *prev;
};
492
/**
 * Enumerate entries, locking each row's segment while an entry is exposed.
 */
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	if (this->entry)
	{
		/* wake any threads waiting on the previously enumerated entry */
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{
				/* entering a new row: lock its segment; the lock is held
				 * for as long as this->current is set */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			/* rows of one segment are interleaved, advance by segment count */
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
529
530 METHOD(enumerator_t, enumerator_destroy, void,
531 private_enumerator_t *this)
532 {
533 if (this->entry)
534 {
535 this->entry->condvar->signal(this->entry->condvar);
536 }
537 if (this->current)
538 {
539 unlock_single_segment(this->manager, this->segment);
540 }
541 free(this);
542 }
543
544 /**
545 * Creates an enumerator to enumerate the entries in the hash table.
546 */
547 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
548 {
549 private_enumerator_t *enumerator;
550
551 INIT(enumerator,
552 .enumerator = {
553 .enumerate = (void*)_enumerate,
554 .destroy = _enumerator_destroy,
555 },
556 .manager = this,
557 );
558 return &enumerator->enumerator;
559 }
560
561 /**
562 * Put an entry into the hash table.
563 * Note: The caller has to unlock the returned segment.
564 */
565 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
566 {
567 table_item_t *current, *item;
568 u_int row, segment;
569
570 INIT(item,
571 .value = entry,
572 );
573
574 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
575 segment = row & this->segment_mask;
576
577 lock_single_segment(this, segment);
578 current = this->ike_sa_table[row];
579 if (current)
580 { /* insert at the front of current bucket */
581 item->next = current;
582 }
583 this->ike_sa_table[row] = item;
584 this->segments[segment].count++;
585 return segment;
586 }
587
588 /**
589 * Remove an entry from the hash table.
590 * Note: The caller MUST have a lock on the segment of this entry.
591 */
592 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
593 {
594 table_item_t *item, *prev = NULL;
595 u_int row, segment;
596
597 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
598 segment = row & this->segment_mask;
599 item = this->ike_sa_table[row];
600 while (item)
601 {
602 if (item->value == entry)
603 {
604 if (prev)
605 {
606 prev->next = item->next;
607 }
608 else
609 {
610 this->ike_sa_table[row] = item->next;
611 }
612 this->segments[segment].count--;
613 free(item);
614 break;
615 }
616 prev = item;
617 item = item->next;
618 }
619 }
620
/**
 * Remove the entry at the current enumerator position.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		/* step back so enumerate() continues from the predecessor */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			/* removed the row's head item: this->current is now NULL, and
			 * the enumerator holds the segment lock only while current is
			 * set (see enumerate()), so release it here */
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
646
647 /**
648 * Find an entry using the provided match function to compare the entries for
649 * equality.
650 */
651 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
652 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
653 linked_list_match_t match, void *param)
654 {
655 table_item_t *item;
656 u_int row, seg;
657
658 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
659 seg = row & this->segment_mask;
660
661 lock_single_segment(this, seg);
662 item = this->ike_sa_table[row];
663 while (item)
664 {
665 if (match(item->value, param))
666 {
667 *entry = item->value;
668 *segment = seg;
669 /* the locked segment has to be unlocked by the caller */
670 return SUCCESS;
671 }
672 item = item->next;
673 }
674 unlock_single_segment(this, seg);
675 return NOT_FOUND;
676 }
677
678 /**
679 * Find an entry by ike_sa_id_t.
680 * Note: On SUCCESS, the caller has to unlock the segment.
681 */
682 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
683 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
684 {
685 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
686 (linked_list_match_t)entry_match_by_id, ike_sa_id);
687 }
688
689 /**
690 * Find an entry by IKE_SA pointer.
691 * Note: On SUCCESS, the caller has to unlock the segment.
692 */
693 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
694 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
695 {
696 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
697 (linked_list_match_t)entry_match_by_sa, ike_sa);
698 }
699
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable. Must be called with the entry's segment mutex held; the
 * condvar wait releases and reacquires that mutex.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
729
/**
 * Put a half-open SA into the hash table, keyed by the remote address.
 * Increments the per-peer counter if the address is already present,
 * and always bumps the segment and global half-open counters.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	half_open_t *half_open;
	chunk_t addr;

	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{	/* already tracking this peer, just count one more */
			half_open->count++;
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first half-open SA for this peer, insert at the bucket head */
		INIT(half_open,
			.other = chunk_clone(addr),
			.count = 1,
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	this->half_open_segments[segment].count++;
	ref_get(&this->half_open_count);
	lock->unlock(lock);
}
775
/**
 * Remove a half-open SA from the hash table, dropping the per-peer object
 * once its counter reaches zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t addr;

	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (--half_open->count == 0)
			{	/* no more half-open SAs with this peer, unlink the item */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			ignore_result(ref_put(&this->half_open_count));
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
820
/**
 * Put an SA between two peers into the hash table, keyed by the hash of
 * both identities. The IKE_SA ID is stored (cloned) in the per-pair list
 * unless it is already present.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
						(linked_list_match_t)entry->ike_sa_id->equals,
						NULL, entry->ike_sa_id) == SUCCESS)
			{	/* this IKE_SA is already tracked for the pair, nothing to do */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first IKE_SA between these two identities */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
879
/**
 * Remove an SA between two peers from the hash table, destroying the
 * per-pair object once its SA list becomes empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* remove this IKE_SA's (cloned) ID from the pair's list */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no IKE_SAs left between these identities, drop the item */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
943
944 /**
945 * Get a random SPI for new IKE_SAs
946 */
947 static u_int64_t get_spi(private_ike_sa_manager_t *this)
948 {
949 u_int64_t spi;
950
951 this->rng_lock->read_lock(this->rng_lock);
952 if (!this->rng ||
953 !this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
954 {
955 spi = 0;
956 }
957 this->rng_lock->unlock(this->rng_lock);
958 return spi;
959 }
960
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * For IKEv1 fragments only source IP, port and initiator SPI are hashed;
 * for Main Mode the source address is mixed in before the packet data.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		u_int16_t port;
		u_int64_t spi;

		src = message->get_source(message);
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1001
1002 /**
1003 * Check if we already have created an IKE_SA based on the initial IKE message
1004 * with the given hash.
1005 * If not the hash is stored, the hash data is not(!) cloned.
1006 *
1007 * Also, the local SPI is returned. In case of a retransmit this is already
1008 * stored together with the hash, otherwise it is newly allocated and should
1009 * be used to create the IKE_SA.
1010 *
1011 * @returns ALREADY_DONE if the message with the given hash has been seen before
1012 * NOT_FOUND if the message hash was not found
1013 * FAILED if the SPI allocation failed
1014 */
1015 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1016 chunk_t init_hash, u_int64_t *our_spi)
1017 {
1018 table_item_t *item;
1019 u_int row, segment;
1020 mutex_t *mutex;
1021 init_hash_t *init;
1022 u_int64_t spi;
1023
1024 row = chunk_hash(init_hash) & this->table_mask;
1025 segment = row & this->segment_mask;
1026 mutex = this->init_hashes_segments[segment].mutex;
1027 mutex->lock(mutex);
1028 item = this->init_hashes_table[row];
1029 while (item)
1030 {
1031 init_hash_t *current = item->value;
1032
1033 if (chunk_equals(init_hash, current->hash))
1034 {
1035 *our_spi = current->our_spi;
1036 mutex->unlock(mutex);
1037 return ALREADY_DONE;
1038 }
1039 item = item->next;
1040 }
1041
1042 spi = get_spi(this);
1043 if (!spi)
1044 {
1045 return FAILED;
1046 }
1047
1048 INIT(init,
1049 .hash = {
1050 .len = init_hash.len,
1051 .ptr = init_hash.ptr,
1052 },
1053 .our_spi = spi,
1054 );
1055 INIT(item,
1056 .value = init,
1057 .next = this->init_hashes_table[row],
1058 );
1059 this->init_hashes_table[row] = item;
1060 *our_spi = init->our_spi;
1061 mutex->unlock(mutex);
1062 return NOT_FOUND;
1063 }
1064
1065 /**
1066 * Remove the hash of an initial IKE message from the cache.
1067 */
1068 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1069 {
1070 table_item_t *item, *prev = NULL;
1071 u_int row, segment;
1072 mutex_t *mutex;
1073
1074 row = chunk_hash(init_hash) & this->table_mask;
1075 segment = row & this->segment_mask;
1076 mutex = this->init_hashes_segments[segment].mutex;
1077 mutex->lock(mutex);
1078 item = this->init_hashes_table[row];
1079 while (item)
1080 {
1081 init_hash_t *current = item->value;
1082
1083 if (chunk_equals(init_hash, current->hash))
1084 {
1085 if (prev)
1086 {
1087 prev->next = item->next;
1088 }
1089 else
1090 {
1091 this->init_hashes_table[row] = item->next;
1092 }
1093 free(current);
1094 free(item);
1095 break;
1096 }
1097 prev = item;
1098 item = item->next;
1099 }
1100 mutex->unlock(mutex);
1101 }
1102
/**
 * Check out the IKE_SA with the given ID for exclusive use by this thread,
 * NULL if not found or not acquirable.
 */
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA");

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{	/* on SUCCESS the segment is returned locked, unlock it below */
		if (wait_for_entry(this, entry, segment))
		{
			entry->checked_out = TRUE;
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	/* publish the checked out IKE_SA (or NULL) on the bus */
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1126
1127 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1128 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1129 {
1130 ike_sa_id_t *ike_sa_id;
1131 ike_sa_t *ike_sa;
1132 u_int8_t ike_version;
1133 u_int64_t spi;
1134
1135 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1136
1137 spi = get_spi(this);
1138 if (!spi)
1139 {
1140 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1141 return NULL;
1142 }
1143
1144 if (initiator)
1145 {
1146 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1147 }
1148 else
1149 {
1150 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1151 }
1152 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1153 ike_sa_id->destroy(ike_sa_id);
1154
1155 if (ike_sa)
1156 {
1157 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1158 ike_sa->get_unique_id(ike_sa));
1159 }
1160 return ike_sa;
1161 }
1162
1163 /**
1164 * Get the message ID or message hash to detect early retransmissions
1165 */
1166 static u_int32_t get_message_id_or_hash(message_t *message)
1167 {
1168 /* Use the message ID, or the message hash in IKEv1 Main/Aggressive mode */
1169 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION &&
1170 message->get_message_id(message) == 0)
1171 {
1172 return chunk_hash(message->get_packet_data(message));
1173 }
1174 return message->get_message_id(message);
1175 }
1176
/**
 * Check out the IKE_SA a received message belongs to, creating a new one
 * for initial IKE_SA_INIT/Main/Aggressive Mode requests.
 *
 * For initial messages a hash of the message is stored to detect
 * retransmits; for such retransmits the SPI allocated earlier is reused.
 * Returns NULL (and does not set the bus SA) if the message is ignored.
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout IKE_SA by message");

	/* detect initial messages: no responder SPI and message ID zero */
	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}
	/* note: ike_version is only read when is_init is TRUE, and both are
	 * always set together above */

	if (is_init)
	{
		hasher_t *hasher;
		u_int64_t our_spi;
		chunk_t hash;

		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			return NULL;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						/* entry takes ownership of the cloned id */
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;

						/* put_entry() locks the segment, release it after
						 * marking the entry as checked out */
						segment = put_entry(this, entry);
						entry->checked_out = TRUE;
						unlock_single_segment(this, segment);

						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));

						charon->bus->set_sa(charon->bus, ike_sa);
						return ike_sa;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit: forget the stored init hash
				 * so a later attempt is not treated as a retransmit */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				return NULL;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				return NULL;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = TRUE;
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{	/* apply the responder SPI allocated for the retransmit */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1336
/**
 * Check out an IKE_SA usable for the given peer config. If reuse is enabled,
 * an existing IKE_SA with an equal peer and IKE config is handed out;
 * otherwise (or if none is found) a new IKE_SA is created.
 */
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa)
	{	/* IKE_SA reuse disabled by config, always hand out a new SA */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		return ike_sa;
	}

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (!wait_for_entry(this, entry, segment))
		{	/* entry is being removed or unusable, skip it */
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
		{	/* skip IKE_SAs which are not usable */
			continue;
		}

		/* both the peer config and the contained IKE config must match */
		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				entry->checked_out = TRUE;
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1392
1393 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1394 private_ike_sa_manager_t *this, u_int32_t id)
1395 {
1396 enumerator_t *enumerator;
1397 entry_t *entry;
1398 ike_sa_t *ike_sa = NULL;
1399 u_int segment;
1400
1401 DBG2(DBG_MGR, "checkout IKE_SA by ID %u", id);
1402
1403 enumerator = create_table_enumerator(this);
1404 while (enumerator->enumerate(enumerator, &entry, &segment))
1405 {
1406 if (wait_for_entry(this, entry, segment))
1407 {
1408 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1409 {
1410 ike_sa = entry->ike_sa;
1411 entry->checked_out = TRUE;
1412 break;
1413 }
1414 }
1415 }
1416 enumerator->destroy(enumerator);
1417
1418 if (ike_sa)
1419 {
1420 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1421 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1422 }
1423 charon->bus->set_sa(charon->bus, ike_sa);
1424 return ike_sa;
1425 }
1426
/**
 * Check out an IKE_SA by configuration name. If child is TRUE, the name is
 * matched against the names of the CHILD_SAs of each IKE_SA, otherwise
 * against the IKE_SA's own connection name.
 */
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
	private_ike_sa_manager_t *this, char *name, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a policy name ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (streq(child_sa->get_name(child_sa), name))
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a connection name */
			{
				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = TRUE;
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
		}
	}
	enumerator->destroy(enumerator);

	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1477
1478 /**
1479 * enumerator filter function, waiting variant
1480 */
1481 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1482 entry_t **in, ike_sa_t **out, u_int *segment)
1483 {
1484 if (wait_for_entry(this, *in, *segment))
1485 {
1486 *out = (*in)->ike_sa;
1487 charon->bus->set_sa(charon->bus, *out);
1488 return TRUE;
1489 }
1490 return FALSE;
1491 }
1492
1493 /**
1494 * enumerator filter function, skipping variant
1495 */
1496 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1497 entry_t **in, ike_sa_t **out, u_int *segment)
1498 {
1499 if (!(*in)->driveout_new_threads &&
1500 !(*in)->driveout_waiting_threads &&
1501 !(*in)->checked_out)
1502 {
1503 *out = (*in)->ike_sa;
1504 charon->bus->set_sa(charon->bus, *out);
1505 return TRUE;
1506 }
1507 return FALSE;
1508 }
1509
1510 /**
1511 * Reset threads SA after enumeration
1512 */
1513 static void reset_sa(void *data)
1514 {
1515 charon->bus->set_sa(charon->bus, NULL);
1516 }
1517
/**
 * Create an enumerator over all stored IKE_SAs. With wait = TRUE busy
 * entries are waited for, otherwise they are skipped. The function-pointer
 * casts adapt the filter signatures to the generic filter callback type.
 */
METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
	private_ike_sa_manager_t* this, bool wait)
{
	return enumerator_create_filter(create_table_enumerator(this),
			wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
			this, reset_sa);
}
1525
/**
 * Check an IKE_SA back into the manager, making it available to other
 * threads again. Updates the stored IKE_SA ID, maintains the half-open
 * table and registers established SAs in the connected-peers table used
 * for duplicate checking.
 */
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = FALSE;
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 !entry->ike_sa_id->is_initiator(entry->ike_sa_id) &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		DBG2(DBG_MGR, "check-in of IKE_SA successful.");
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* no entry found: the SA was never checked out from the manager,
		 * register it with a new entry */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		segment = put_entry(this, entry);
	}

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	/* segment is still locked here, either from get_entry_by_sa() or
	 * put_entry() above */
	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1621
/**
 * Check in and destroy an IKE_SA: the entry is removed from the table once
 * all threads waiting for it have been driven out, then the entry (and with
 * it the IKE_SA) is destroyed along with its half-open/connected-peers/
 * init-hash bookkeeping.
 */
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
			entry->checked_out = FALSE;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up the auxiliary hash tables the entry was registered in */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1690
1691 /**
1692 * Cleanup function for create_id_enumerator
1693 */
1694 static void id_enumerator_cleanup(linked_list_t *ids)
1695 {
1696 ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
1697 }
1698
/**
 * Create an enumerator over the IDs of all IKE_SAs between the given
 * identities (of the given address family). The matching IDs are cloned
 * under the connected-peers read lock, so the enumerator can be used
 * without holding any manager locks.
 */
METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
	private_ike_sa_manager_t *this, identification_t *me,
	identification_t *other, int family)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	linked_list_t *ids = NULL;

	/* hash both identities to find the connected-peers bucket */
	row = chunk_hash_inc(other->get_encoding(other),
						 chunk_hash(me->get_encoding(me))) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->read_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, me, other, family))
		{
			/* clone the IDs while holding the lock */
			ids = current->sas->clone_offset(current->sas,
											 offsetof(ike_sa_id_t, clone));
			break;
		}
		item = item->next;
	}
	lock->unlock(lock);

	if (!ids)
	{
		return enumerator_create_empty();
	}
	return enumerator_create_cleaner(ids->create_enumerator(ids),
									 (void*)id_enumerator_cleanup, ids);
}
1736
1737 /**
1738 * Move all CHILD_SAs and virtual IPs from old to new
1739 */
1740 static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
1741 {
1742 enumerator_t *enumerator;
1743 child_sa_t *child_sa;
1744 host_t *vip;
1745 int chcount = 0, vipcount = 0;
1746
1747 charon->bus->children_migrate(charon->bus, new->get_id(new),
1748 new->get_unique_id(new));
1749 enumerator = old->create_child_sa_enumerator(old);
1750 while (enumerator->enumerate(enumerator, &child_sa))
1751 {
1752 old->remove_child_sa(old, enumerator);
1753 new->add_child_sa(new, child_sa);
1754 chcount++;
1755 }
1756 enumerator->destroy(enumerator);
1757
1758 enumerator = old->create_virtual_ip_enumerator(old, FALSE);
1759 while (enumerator->enumerate(enumerator, &vip))
1760 {
1761 new->add_virtual_ip(new, FALSE, vip);
1762 vipcount++;
1763 }
1764 enumerator->destroy(enumerator);
1765 /* this does not release the addresses, which is good, but it does trigger
1766 * an assign_vips(FALSE) event... */
1767 old->clear_virtual_ips(old, FALSE);
1768 /* ...trigger the analogous event on the new SA */
1769 charon->bus->set_sa(charon->bus, new);
1770 charon->bus->assign_vips(charon->bus, new, TRUE);
1771 charon->bus->children_migrate(charon->bus, NULL, 0);
1772 charon->bus->set_sa(charon->bus, old);
1773
1774 if (chcount || vipcount)
1775 {
1776 DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
1777 "children and %d virtual IPs", chcount, vipcount);
1778 }
1779 }
1780
1781 /**
1782 * Delete an existing IKE_SA due to a unique replace policy
1783 */
1784 static status_t enforce_replace(private_ike_sa_manager_t *this,
1785 ike_sa_t *duplicate, ike_sa_t *new,
1786 identification_t *other, host_t *host)
1787 {
1788 charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);
1789
1790 if (host->equals(host, duplicate->get_other_host(duplicate)))
1791 {
1792 /* looks like a reauthentication attempt */
1793 if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
1794 new->get_version(new) == IKEV1)
1795 {
1796 /* IKEv1 implicitly takes over children, IKEv2 recreates them
1797 * explicitly. */
1798 adopt_children_and_vips(duplicate, new);
1799 }
1800 /* For IKEv1 we have to delay the delete for the old IKE_SA. Some
1801 * peers need to complete the new SA first, otherwise the quick modes
1802 * might get lost. For IKEv2 we do the same, as we want overlapping
1803 * CHILD_SAs to keep connectivity up. */
1804 lib->scheduler->schedule_job(lib->scheduler, (job_t*)
1805 delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
1806 return SUCCESS;
1807 }
1808 DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
1809 "uniqueness policy", other);
1810 return duplicate->delete(duplicate);
1811 }
1812
/**
 * Enforce the peer config's uniqueness policy against existing IKE_SAs
 * with the same identities. Depending on the policy, duplicates are
 * replaced, kept (possibly cancelling the new SA) or destroyed outright
 * (force_replace, e.g. on INITIAL_CONTACT).
 *
 * @return	TRUE if the caller should cancel establishing its new IKE_SA
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{	/* uniqueness not enforced for this config */
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		/* only enforce the policy against SAs with an equal peer config */
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
											duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
1900
1901 METHOD(ike_sa_manager_t, has_contact, bool,
1902 private_ike_sa_manager_t *this, identification_t *me,
1903 identification_t *other, int family)
1904 {
1905 table_item_t *item;
1906 u_int row, segment;
1907 rwlock_t *lock;
1908 bool found = FALSE;
1909
1910 row = chunk_hash_inc(other->get_encoding(other),
1911 chunk_hash(me->get_encoding(me))) & this->table_mask;
1912 segment = row & this->segment_mask;
1913 lock = this->connected_peers_segments[segment].lock;
1914 lock->read_lock(lock);
1915 item = this->connected_peers_table[row];
1916 while (item)
1917 {
1918 if (connected_peers_match(item->value, me, other, family))
1919 {
1920 found = TRUE;
1921 break;
1922 }
1923 item = item->next;
1924 }
1925 lock->unlock(lock);
1926
1927 return found;
1928 }
1929
1930 METHOD(ike_sa_manager_t, get_count, u_int,
1931 private_ike_sa_manager_t *this)
1932 {
1933 u_int segment, count = 0;
1934 mutex_t *mutex;
1935
1936 for (segment = 0; segment < this->segment_count; segment++)
1937 {
1938 mutex = this->segments[segment & this->segment_mask].mutex;
1939 mutex->lock(mutex);
1940 count += this->segments[segment].count;
1941 mutex->unlock(mutex);
1942 }
1943 return count;
1944 }
1945
/**
 * Get the number of half-open IKE_SAs. With an IP given, only SAs initiated
 * by that address are counted (looked up in the half-open hash table);
 * without one, the global half-open counter is returned.
 */
METHOD(ike_sa_manager_t, get_half_open_count, u_int,
	private_ike_sa_manager_t *this, host_t *ip)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t addr;
	u_int count = 0;

	if (ip)
	{
		addr = ip->get_address(ip);
		row = chunk_hash(addr) & this->table_mask;
		segment = row & this->segment_mask;
		lock = this->half_open_segments[segment].lock;
		lock->read_lock(lock);
		item = this->half_open_table[row];
		while (item)
		{
			half_open_t *half_open = item->value;

			if (chunk_equals(addr, half_open->other))
			{
				count = half_open->count;
				break;
			}
			item = item->next;
		}
		lock->unlock(lock);
	}
	else
	{
		/* global counter, maintained with atomic refcount helpers */
		count = (u_int)ref_cur(&this->half_open_count);
	}
	return count;
}
1982
/**
 * Flush the manager during shutdown: drive out all threads, delete all
 * IKE_SAs, destroy all entries and finally release the RNG (under the
 * rwlock, so concurrent SPI allocation can detect the shutdown).
 */
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* destroy the RNG under the write lock; other threads accessing it
	 * under the read lock must handle a NULL rng afterwards */
	this->rng_lock->write_lock(this->rng_lock);
	this->rng->destroy(this->rng);
	this->rng = NULL;
	this->rng_lock->unlock(this->rng_lock);
}
2070
/**
 * Destroy the manager. Expects flush() to have been called before, so only
 * the (now empty) tables, the per-segment locks and the rng lock remain to
 * be released.
 */
METHOD(ike_sa_manager_t, destroy, void,
	private_ike_sa_manager_t *this)
{
	u_int i;

	/* these are already cleared in flush() above */
	free(this->ike_sa_table);
	free(this->half_open_table);
	free(this->connected_peers_table);
	free(this->init_hashes_table);
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex->destroy(this->segments[i].mutex);
		this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
		this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
		this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
	}
	free(this->segments);
	free(this->half_open_segments);
	free(this->connected_peers_segments);
	free(this->init_hashes_segments);

	/* rng itself was already destroyed in flush() */
	this->rng_lock->destroy(this->rng_lock);
	free(this);
}
2096
2097 /**
2098 * This function returns the next-highest power of two for the given number.
2099 * The algorithm works by setting all bits on the right-hand side of the most
2100 * significant 1 to 1 and then increments the whole number so it rolls over
2101 * to the nearest power of two. Note: returns 0 for n == 0
2102 */
2103 static u_int get_nearest_powerof2(u_int n)
2104 {
2105 u_int i;
2106
2107 --n;
2108 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2109 {
2110 n |= n >> i;
2111 }
2112 return ++n;
2113 }
2114
2115 /*
2116 * Described in header.
2117 */
2118 ike_sa_manager_t *ike_sa_manager_create()
2119 {
2120 private_ike_sa_manager_t *this;
2121 u_int i;
2122
2123 INIT(this,
2124 .public = {
2125 .checkout = _checkout,
2126 .checkout_new = _checkout_new,
2127 .checkout_by_message = _checkout_by_message,
2128 .checkout_by_config = _checkout_by_config,
2129 .checkout_by_id = _checkout_by_id,
2130 .checkout_by_name = _checkout_by_name,
2131 .check_uniqueness = _check_uniqueness,
2132 .has_contact = _has_contact,
2133 .create_enumerator = _create_enumerator,
2134 .create_id_enumerator = _create_id_enumerator,
2135 .checkin = _checkin,
2136 .checkin_and_destroy = _checkin_and_destroy,
2137 .get_count = _get_count,
2138 .get_half_open_count = _get_half_open_count,
2139 .flush = _flush,
2140 .destroy = _destroy,
2141 },
2142 );
2143
2144 this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
2145 if (this->rng == NULL)
2146 {
2147 DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
2148 free(this);
2149 return NULL;
2150 }
2151 this->rng_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2152
2153 this->ikesa_limit = lib->settings->get_int(lib->settings,
2154 "%s.ikesa_limit", 0, lib->ns);
2155
2156 this->table_size = get_nearest_powerof2(lib->settings->get_int(
2157 lib->settings, "%s.ikesa_table_size",
2158 DEFAULT_HASHTABLE_SIZE, lib->ns));
2159 this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
2160 this->table_mask = this->table_size - 1;
2161
2162 this->segment_count = get_nearest_powerof2(lib->settings->get_int(
2163 lib->settings, "%s.ikesa_table_segments",
2164 DEFAULT_SEGMENT_COUNT, lib->ns));
2165 this->segment_count = max(1, min(this->segment_count, this->table_size));
2166 this->segment_mask = this->segment_count - 1;
2167
2168 this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
2169 this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
2170 for (i = 0; i < this->segment_count; i++)
2171 {
2172 this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2173 this->segments[i].count = 0;
2174 }
2175
2176 /* we use the same table parameters for the table to track half-open SAs */
2177 this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
2178 this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2179 for (i = 0; i < this->segment_count; i++)
2180 {
2181 this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2182 this->half_open_segments[i].count = 0;
2183 }
2184
2185 /* also for the hash table used for duplicate tests */
2186 this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
2187 this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2188 for (i = 0; i < this->segment_count; i++)
2189 {
2190 this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2191 this->connected_peers_segments[i].count = 0;
2192 }
2193
2194 /* and again for the table of hashes of seen initial IKE messages */
2195 this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
2196 this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
2197 for (i = 0; i < this->segment_count; i++)
2198 {
2199 this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2200 this->init_hashes_segments[i].count = 0;
2201 }
2202
2203 this->reuse_ikesa = lib->settings->get_bool(lib->settings,
2204 "%s.reuse_ikesa", TRUE, lib->ns);
2205 return &this->public;
2206 }