ike-sa-manager: Add a method to register/check out new IKE_SAs
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 *
5 * Copyright (C) 2008-2021 Tobias Brunner
6 * Copyright (C) 2005 Jan Hutter
7 * HSR Hochschule fuer Technik Rapperswil
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 */
19
20 #include <string.h>
21 #include <inttypes.h>
22
23 #include "ike_sa_manager.h"
24
25 #include <daemon.h>
26 #include <sa/ike_sa_id.h>
27 #include <bus/bus.h>
28 #include <threading/thread.h>
29 #include <threading/condvar.h>
30 #include <threading/mutex.h>
31 #include <threading/rwlock.h>
32 #include <collections/array.h>
33 #include <collections/linked_list.h>
34 #include <crypto/hashers/hasher.h>
35 #include <processing/jobs/delete_ike_sa_job.h>
36
37 /* the default size of the hash table (MUST be a power of 2) */
38 #define DEFAULT_HASHTABLE_SIZE 1
39
40 /* the maximum size of the hash table (MUST be a power of 2) */
41 #define MAX_HASHTABLE_SIZE (1 << 30)
42
43 /* the default number of segments (MUST be a power of 2) */
44 #define DEFAULT_SEGMENT_COUNT 1
45
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Thread by which this IKE_SA is currently checked out, if any
	 */
	thread_t *checked_out;

	/**
	 * Whether this SA drives out new threads, i.e. new checkout attempts
	 * fail instead of blocking on the condvar.
	 */
	bool driveout_new_threads;

	/**
	 * Whether this SA drives out threads already waiting on the condvar.
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 * (stored unsigned, so -1 wraps to 0xffffffff as the "none" marker)
	 */
	uint32_t processing;
};
120
121 /**
122 * Implementation of entry_t.destroy.
123 */
124 static status_t entry_destroy(entry_t *this)
125 {
126 /* also destroy IKE SA */
127 this->ike_sa->destroy(this->ike_sa);
128 this->ike_sa_id->destroy(this->ike_sa_id);
129 chunk_free(&this->init_hash);
130 DESTROY_IF(this->other);
131 DESTROY_IF(this->my_id);
132 DESTROY_IF(this->other_id);
133 this->condvar->destroy(this->condvar);
134 free(this);
135 return SUCCESS;
136 }
137
138 /**
139 * Creates a new entry for the ike_sa_t list.
140 */
141 static entry_t *entry_create()
142 {
143 entry_t *this;
144
145 INIT(this,
146 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
147 .processing = -1,
148 );
149
150 return this;
151 }
152
153 /**
154 * Function that matches entry_t objects by ike_sa_id_t.
155 */
156 static bool entry_match_by_id(entry_t *entry, void *arg)
157 {
158 ike_sa_id_t *id = arg;
159
160 if (id->equals(id, entry->ike_sa_id))
161 {
162 return TRUE;
163 }
164 if ((id->get_responder_spi(id) == 0 ||
165 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
166 (id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
167 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
168 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
169 {
170 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
171 return TRUE;
172 }
173 return FALSE;
174 }
175
176 /**
177 * Function that matches entry_t objects by ike_sa_t pointers.
178 */
179 static bool entry_match_by_sa(entry_t *entry, void *ike_sa)
180 {
181 return entry->ike_sa == ike_sa;
182 }
183
184 /**
185 * Hash function for ike_sa_id_t objects.
186 */
187 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
188 {
189 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
190 * locally unique, so we use our randomly allocated SPI whether we are
191 * initiator or responder to ensure a good distribution. The latter is not
192 * possible for IKEv1 as we don't know whether we are original initiator or
193 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
194 * SPIs (Cookies) to be allocated near random (we allocate them randomly
195 * anyway) it seems safe to always use the initiator SPI. */
196 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
197 ike_sa_id->is_initiator(ike_sa_id))
198 {
199 return ike_sa_id->get_initiator_spi(ike_sa_id);
200 }
201 return ike_sa_id->get_responder_spi(ike_sa_id);
202 }
203
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer (keyed by remote address).
 */
struct half_open_t {
	/** chunk of remote host address */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};
219
220 /**
221 * Destroys a half_open_t object.
222 */
/**
 * Destroys a half_open_t object, releasing the cloned address chunk.
 */
static void half_open_destroy(half_open_t *this)
{
	chunk_free(&this->other);
	free(this);
}
228
typedef struct connected_peers_t connected_peers_t;

/**
 * Tracks all IKE_SAs between one pair of identities, used for
 * duplicate/uniqueness checking.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
244
/**
 * Destroys a connected_peers_t object.
 * NOTE(review): the list is destroyed without destroying contained
 * ike_sa_id_t items — presumably callers drain it first; verify at call sites.
 */
static void connected_peers_destroy(connected_peers_t *this)
{
	this->my_id->destroy(this->my_id);
	this->other_id->destroy(this->other_id);
	this->sas->destroy(this->sas);
	free(this);
}
252
253 /**
254 * Function that matches connected_peers_t objects by the given ids.
255 */
256 static inline bool connected_peers_match(connected_peers_t *connected_peers,
257 identification_t *my_id, identification_t *other_id,
258 int family)
259 {
260 return my_id->equals(my_id, connected_peers->my_id) &&
261 other_id->equals(other_id, connected_peers->other_id) &&
262 (!family || family == connected_peers->family);
263 }
264
typedef struct init_hash_t init_hash_t;

/**
 * Maps the hash of an initial IKE message to the SPI we allocated for it,
 * used to detect retransmissions of that message.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	uint64_t our_spi;
};
274
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table (a segment covers every
 * segment_count-th row, allowing concurrent access to different segments).
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;
};
284
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
298
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked overflow list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list, NULL at the end of a bucket */
	table_item_t *next;
};
312
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table (a power of 2).
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows (table_size - 1).
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table.
	 */
	segment_t *segments;

	/**
	 * The number of segments (a power of 2).
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment (segment_count - 1).
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects, keyed by remote address.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs.
	 */
	refcount_t half_open_count;

	/**
	 * Total number of half-open IKE_SAs as responder.
	 */
	refcount_t half_open_count_responder;

	/**
	 * Total number of IKE_SAs registered with IKE_SA manager.
	 */
	refcount_t total_sa_count;

	/**
	 * Hash table with connected_peers_t objects, keyed by identity pair.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects, for retransmission detection.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * Configs for which an SA is currently being checked out.
	 */
	array_t *config_checkouts;

	/**
	 * Mutex to protect access to configs.
	 */
	mutex_t *config_mutex;

	/**
	 * Condvar to indicate changes in checkout configs.
	 */
	condvar_t *config_condvar;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * Registered callback for IKE SPIs
	 */
	struct {
		spi_cb_t cb;
		void *data;
	} spi_cb;

	/**
	 * Lock to access the RNG instance and the callback
	 */
	rwlock_t *spi_lock;

	/**
	 * Mask applied to local SPIs before mixing in the label
	 */
	uint64_t spi_mask;

	/**
	 * Label applied to local SPIs
	 */
	uint64_t spi_label;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, if any (0 means unlimited)
	 */
	u_int ikesa_limit;
};
452
453 /**
454 * Acquire a lock to access the segment of the table row with the given index.
455 * It also works with the segment index directly.
456 */
457 static inline void lock_single_segment(private_ike_sa_manager_t *this,
458 u_int index)
459 {
460 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
461 lock->lock(lock);
462 }
463
464 /**
465 * Release the lock required to access the segment of the table row with the given index.
466 * It also works with the segment index directly.
467 */
468 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
469 u_int index)
470 {
471 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
472 lock->unlock(lock);
473 }
474
475 /**
476 * Lock all segments
477 */
478 static void lock_all_segments(private_ike_sa_manager_t *this)
479 {
480 u_int i;
481
482 for (i = 0; i < this->segment_count; i++)
483 {
484 this->segments[i].mutex->lock(this->segments[i].mutex);
485 }
486 }
487
488 /**
489 * Unlock all segments
490 */
491 static void unlock_all_segments(private_ike_sa_manager_t *this)
492 {
493 u_int i;
494
495 for (i = 0; i < this->segment_count; i++)
496 {
497 this->segments[i].mutex->unlock(this->segments[i].mutex);
498 }
499 }
500
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry (its condvar is signaled when moving on)
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item (while set, the current segment's lock is held)
	 */
	table_item_t *current;

	/**
	 * previous table item (needed to unlink the current one)
	 */
	table_item_t *prev;
};
543
/**
 * Enumerate entries across all segments. The lock of the segment containing
 * the returned entry is held until the next call (or destroy), so the caller
 * may safely inspect/modify the entry in between.
 */
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, va_list args)
{
	entry_t **entry;
	u_int *segment;

	VA_ARGS_VGET(args, entry, segment);

	/* release the previously returned entry for other threads */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		/* rows are interleaved: row % segment_count == segment, so we step
		 * through the rows belonging to the current segment */
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{
				/* entering a new row: take the segment lock, it stays held
				 * while items of this row are enumerated */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			/* row exhausted, release the lock before skipping ahead */
			unlock_single_segment(this->manager, this->segment);
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
585
/**
 * Destroy the enumerator, waking any waiters on the last returned entry and
 * releasing the segment lock if one is still held mid-enumeration.
 */
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
	}
	if (this->current)
	{
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
599
600 /**
601 * Creates an enumerator to enumerate the entries in the hash table.
602 */
603 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
604 {
605 private_enumerator_t *enumerator;
606
607 INIT(enumerator,
608 .enumerator = {
609 .enumerate = enumerator_enumerate_default,
610 .venumerate = _enumerate,
611 .destroy = _enumerator_destroy,
612 },
613 .manager = this,
614 );
615 return &enumerator->enumerator;
616 }
617
618 /**
619 * Put an entry into the hash table.
620 * Note: The caller has to unlock the returned segment.
621 */
622 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
623 {
624 table_item_t *current, *item;
625 u_int row, segment;
626
627 INIT(item,
628 .value = entry,
629 );
630
631 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
632 segment = row & this->segment_mask;
633
634 lock_single_segment(this, segment);
635 current = this->ike_sa_table[row];
636 if (current)
637 { /* insert at the front of current bucket */
638 item->next = current;
639 }
640 this->ike_sa_table[row] = item;
641 ref_get(&this->total_sa_count);
642 return segment;
643 }
644
645 /**
646 * Remove an entry from the hash table.
647 * Note: The caller MUST have a lock on the segment of this entry.
648 */
649 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
650 {
651 table_item_t *item, *prev = NULL;
652 u_int row;
653
654 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
655 item = this->ike_sa_table[row];
656 while (item)
657 {
658 if (item->value == entry)
659 {
660 if (prev)
661 {
662 prev->next = item->next;
663 }
664 else
665 {
666 this->ike_sa_table[row] = item->next;
667 }
668 ignore_result(ref_put(&this->total_sa_count));
669 free(item);
670 break;
671 }
672 prev = item;
673 item = item->next;
674 }
675 }
676
677 /**
678 * Remove the entry at the current enumerator position.
679 */
/**
 * Remove the entry at the current enumerator position.
 * Rewinds the enumerator to the previous item so enumeration can continue;
 * if the removed item was the row head, the segment lock is released (the
 * enumerator will re-acquire it when it enters the next row).
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		ignore_result(ref_put(&this->manager->total_sa_count));
		/* step back so the next enumerate() advances past the removed item */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			/* removed the row head: unlink from the bucket and drop the
			 * segment lock held for this row */
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
702
703 /**
704 * Find an entry using the provided match function to compare the entries for
705 * equality.
706 */
707 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
708 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
709 bool (*match)(entry_t*,void*), void *param)
710 {
711 table_item_t *item;
712 u_int row, seg;
713
714 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
715 seg = row & this->segment_mask;
716
717 lock_single_segment(this, seg);
718 item = this->ike_sa_table[row];
719 while (item)
720 {
721 if (match(item->value, param))
722 {
723 *entry = item->value;
724 *segment = seg;
725 /* the locked segment has to be unlocked by the caller */
726 return SUCCESS;
727 }
728 item = item->next;
729 }
730 unlock_single_segment(this, seg);
731 return NOT_FOUND;
732 }
733
734 /**
735 * Find an entry by ike_sa_id_t.
736 * Note: On SUCCESS, the caller has to unlock the segment.
737 */
/**
 * Find an entry by ike_sa_id_t (also matches half-complete IDs, see
 * entry_match_by_id).
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
						ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
									   entry_match_by_id, ike_sa_id);
}
744
745 /**
746 * Find an entry by IKE_SA pointer.
747 * Note: On SUCCESS, the caller has to unlock the segment.
748 */
/**
 * Find an entry by IKE_SA pointer (the ID is only used to locate the bucket).
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
			ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
									   entry_match_by_sa, ike_sa);
}
755
756 /**
757 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
758 * acquirable.
759 */
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable. Must be called with the entry's segment mutex held; the condvar
 * wait releases/re-acquires that mutex.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
785
786 /**
787 * Put a half-open SA into the hash table.
788 */
/**
 * Put a half-open SA into the hash table, creating a per-address counter
 * object if this is the first half-open SA with that host. Also updates the
 * global half-open counters.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	/* look for an existing counter for this remote address */
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{
		/* first half-open SA with this host, insert a new counter object
		 * at the front of the bucket */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	/* track SAs we responded to separately, for DoS detection */
	if (!ike_id->is_initiator(ike_id))
	{
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
837
838 /**
839 * Remove a half-open SA from the hash table.
840 */
/**
 * Remove a half-open SA from the hash table, destroying the per-address
 * counter object when its count drops to zero. Also updates the global
 * half-open counters.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{
				/* last half-open SA with this host, unlink and destroy the
				 * counter object */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
889
890 /**
891 * Create an entry and put it into the hash table.
892 * Note: The caller has to unlock the segment.
893 */
894 static u_int create_and_put_entry(private_ike_sa_manager_t *this,
895 ike_sa_t *ike_sa, entry_t **entry)
896 {
897 ike_sa_id_t *ike_sa_id = ike_sa->get_id(ike_sa);
898 host_t *other = ike_sa->get_other_host(ike_sa);
899
900 *entry = entry_create();
901 (*entry)->ike_sa_id = ike_sa_id->clone(ike_sa_id);
902 (*entry)->ike_sa = ike_sa;
903
904 if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
905 {
906 (*entry)->half_open = TRUE;
907 (*entry)->other = other->clone(other);
908 put_half_open(this, *entry);
909 }
910 return put_entry(this, *entry);
911 }
912
913 CALLBACK(id_matches, bool,
914 ike_sa_id_t *a, va_list args)
915 {
916 ike_sa_id_t *b;
917
918 VA_ARGS_VGET(args, b);
919 return a->equals(a, b);
920 }
921
922 /**
923 * Put an SA between two peers into the hash table.
924 */
/**
 * Put an SA between two peers into the hash table, creating a
 * connected_peers_t object for the identity pair if needed. Does nothing if
 * the SA's ID is already registered for that pair.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	/* bucket is derived from both identity encodings */
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
												 id_matches, NULL, entry->ike_sa_id))
			{
				/* this SA is already registered for the pair */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{
		/* first SA between these identities, insert a new object at the
		 * front of the bucket */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
979
980 /**
981 * Remove an SA between two peers from the hash table.
982 */
/**
 * Remove an SA between two peers from the hash table, destroying the
 * connected_peers_t object once its SA list becomes empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	/* same bucket derivation as in put_connected_peers() */
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* remove this SA's ID from the pair's list */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{
				/* last SA between these identities, unlink and destroy */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
1043
1044 /**
1045 * Get a random SPI for new IKE_SAs
1046 */
1047 static uint64_t get_spi(private_ike_sa_manager_t *this)
1048 {
1049 uint64_t spi;
1050
1051 this->spi_lock->read_lock(this->spi_lock);
1052 if (this->spi_cb.cb)
1053 {
1054 spi = this->spi_cb.cb(this->spi_cb.data);
1055 }
1056 else if (!this->rng ||
1057 !this->rng->get_bytes(this->rng, sizeof(spi), (uint8_t*)&spi))
1058 {
1059 spi = 0;
1060 }
1061 this->spi_lock->unlock(this->spi_lock);
1062
1063 if (spi)
1064 {
1065 spi = (spi & ~this->spi_mask) | this->spi_label;
1066 }
1067 return spi;
1068 }
1069
1070 /**
1071 * Calculate the hash of the initial IKE message. Memory for the hash is
1072 * allocated on success.
1073 *
1074 * @returns TRUE on success
1075 */
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * @param hasher	hasher to use (its internal state accumulates the parts
 *					hashed with a NULL output until the final call)
 * @param message	initial IKE message to hash
 * @param hash		receives the allocated hash on success
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		uint16_t port;
		uint64_t spi;

		src = message->get_source(message);
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	/* default: hash the complete packet data */
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1110
1111 /**
1112 * Check if we already have created an IKE_SA based on the initial IKE message
1113 * with the given hash.
1114 * If not the hash is stored, the hash data is not(!) cloned.
1115 *
1116 * Also, the local SPI is returned. In case of a retransmit this is already
1117 * stored together with the hash, otherwise it is newly allocated and should
1118 * be used to create the IKE_SA.
1119 *
1120 * @returns ALREADY_DONE if the message with the given hash has been seen before
1121 * NOT_FOUND if the message hash was not found
1122 * FAILED if the SPI allocation failed
1123 */
1124 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1125 chunk_t init_hash, uint64_t *our_spi)
1126 {
1127 table_item_t *item;
1128 u_int row, segment;
1129 mutex_t *mutex;
1130 init_hash_t *init;
1131 uint64_t spi;
1132
1133 row = chunk_hash(init_hash) & this->table_mask;
1134 segment = row & this->segment_mask;
1135 mutex = this->init_hashes_segments[segment].mutex;
1136 mutex->lock(mutex);
1137 item = this->init_hashes_table[row];
1138 while (item)
1139 {
1140 init_hash_t *current = item->value;
1141
1142 if (chunk_equals(init_hash, current->hash))
1143 {
1144 *our_spi = current->our_spi;
1145 mutex->unlock(mutex);
1146 return ALREADY_DONE;
1147 }
1148 item = item->next;
1149 }
1150
1151 spi = get_spi(this);
1152 if (!spi)
1153 {
1154 return FAILED;
1155 }
1156
1157 INIT(init,
1158 .hash = {
1159 .len = init_hash.len,
1160 .ptr = init_hash.ptr,
1161 },
1162 .our_spi = spi,
1163 );
1164 INIT(item,
1165 .value = init,
1166 .next = this->init_hashes_table[row],
1167 );
1168 this->init_hashes_table[row] = item;
1169 *our_spi = init->our_spi;
1170 mutex->unlock(mutex);
1171 return NOT_FOUND;
1172 }
1173
1174 /**
1175 * Remove the hash of an initial IKE message from the cache.
1176 */
1177 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1178 {
1179 table_item_t *item, *prev = NULL;
1180 u_int row, segment;
1181 mutex_t *mutex;
1182
1183 row = chunk_hash(init_hash) & this->table_mask;
1184 segment = row & this->segment_mask;
1185 mutex = this->init_hashes_segments[segment].mutex;
1186 mutex->lock(mutex);
1187 item = this->init_hashes_table[row];
1188 while (item)
1189 {
1190 init_hash_t *current = item->value;
1191
1192 if (chunk_equals(init_hash, current->hash))
1193 {
1194 if (prev)
1195 {
1196 prev->next = item->next;
1197 }
1198 else
1199 {
1200 this->init_hashes_table[row] = item->next;
1201 }
1202 free(current);
1203 free(item);
1204 break;
1205 }
1206 prev = item;
1207 item = item->next;
1208 }
1209 mutex->unlock(mutex);
1210 }
1211
/**
 * Check out the IKE_SA with the given ID for exclusive use by this thread.
 * Blocks while another thread has it checked out; sets the bus SA context
 * to the result (or NULL on failure).
 */
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout %N SA with SPIs %.16"PRIx64"_i %.16"PRIx64"_r",
		 ike_version_names, ike_sa_id->get_ike_version(ike_sa_id),
		 be64toh(ike_sa_id->get_initiator_spi(ike_sa_id)),
		 be64toh(ike_sa_id->get_responder_spi(ike_sa_id)));

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		/* wait until no other thread uses the entry; fails if the SA is
		 * being driven out */
		if (wait_for_entry(this, entry, segment))
		{
			entry->checked_out = thread_current();
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	charon->bus->set_sa(charon->bus, ike_sa);

	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1243
1244 METHOD(ike_sa_manager_t, create_new, ike_sa_t*,
1245 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1246 {
1247 ike_sa_id_t *ike_sa_id;
1248 ike_sa_t *ike_sa;
1249 uint8_t ike_version;
1250 uint64_t spi;
1251
1252 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1253
1254 spi = get_spi(this);
1255 if (!spi)
1256 {
1257 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1258 return NULL;
1259 }
1260
1261 if (initiator)
1262 {
1263 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1264 }
1265 else
1266 {
1267 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1268 }
1269 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1270 ike_sa_id->destroy(ike_sa_id);
1271
1272 if (ike_sa)
1273 {
1274 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1275 ike_sa->get_unique_id(ike_sa));
1276 }
1277 return ike_sa;
1278 }
1279
METHOD(ike_sa_manager_t, checkout_new, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	u_int segment;
	entry_t *entry;

	/* register the SA with the manager; the helper returns with the entry's
	 * segment locked, so we can immediately mark it as checked out by the
	 * calling thread before releasing the lock */
	segment = create_and_put_entry(this, ike_sa, &entry);
	entry->checked_out = thread_current();
	unlock_single_segment(this, segment);
}
1290
1291 /**
1292 * Get the message ID or message hash to detect early retransmissions
1293 */
1294 static uint32_t get_message_id_or_hash(message_t *message)
1295 {
1296 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION)
1297 {
1298 /* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1299 * Mode, where all three messages use the same message ID */
1300 if (message->get_message_id(message) == 0 ||
1301 message->get_exchange_type(message) == QUICK_MODE)
1302 {
1303 return chunk_hash(message->get_packet_data(message));
1304 }
1305 }
1306 return message->get_message_id(message);
1307 }
1308
/**
 * Check out the IKE_SA a message belongs to, creating a new SA for initial
 * IKE_SA_INIT/ID_PROT/AGGRESSIVE requests.  Retransmits of already handled
 * init messages are detected via a hash of the message.
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout %N SA by message with SPIs %.16"PRIx64"_i "
		 "%.16"PRIx64"_r", ike_version_names, id->get_ike_version(id),
		 be64toh(id->get_initiator_spi(id)),
		 be64toh(id->get_responder_spi(id)));

	/* detect initial messages for not-yet-existing SAs: no responder SPI and
	 * message ID 0 */
	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		uint64_t our_spi;
		chunk_t hash;

		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			goto out;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						/* the entry takes ownership of both id and hash */
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;
						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						segment = put_entry(this, entry);
						entry->checked_out = thread_current();
						unlock_single_segment(this, segment);

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));
						goto out;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* SA not created: undo the init-hash registration */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				goto out;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				goto out;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = thread_current();
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		/* unknown SPIs, notify listeners (e.g. for spoofing detection) */
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);

out:
	charon->bus->set_sa(charon->bus, ike_sa);
	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1474
/**
 * Data used to track checkouts by config.
 *
 * Entries are kept in this->config_checkouts, protected by
 * this->config_mutex and signaled via this->config_condvar.
 */
typedef struct {
	/** The peer config for which an IKE_SA is being checked out. */
	peer_cfg_t *cfg;
	/** Number of threads checking out SAs for the same config. */
	int threads;
	/** A thread is currently creating/finding an SA for this config. */
	bool working;
} config_entry_t;
1486
1487 METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
1488 private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
1489 {
1490 enumerator_t *enumerator;
1491 entry_t *entry;
1492 ike_sa_t *ike_sa = NULL;
1493 peer_cfg_t *current_peer;
1494 ike_cfg_t *current_ike;
1495 config_entry_t *config_entry, *found = NULL;
1496 u_int segment;
1497 int i;
1498
1499 DBG2(DBG_MGR, "checkout IKE_SA by config");
1500
1501 if (!this->reuse_ikesa && peer_cfg->get_ike_version(peer_cfg) != IKEV1)
1502 { /* IKE_SA reuse disabled by config (not possible for IKEv1) */
1503 ike_sa = create_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1504 ike_sa->set_peer_cfg(ike_sa, peer_cfg);
1505 checkout_new(this, ike_sa);
1506 charon->bus->set_sa(charon->bus, ike_sa);
1507 goto out;
1508 }
1509
1510 this->config_mutex->lock(this->config_mutex);
1511 for (i = 0; i < array_count(this->config_checkouts); i++)
1512 {
1513 array_get(this->config_checkouts, i, &config_entry);
1514 if (config_entry->cfg->equals(config_entry->cfg, peer_cfg))
1515 {
1516 current_ike = config_entry->cfg->get_ike_cfg(config_entry->cfg);
1517 if (current_ike->equals(current_ike,
1518 peer_cfg->get_ike_cfg(peer_cfg)))
1519 {
1520 found = config_entry;
1521 break;
1522 }
1523 }
1524 }
1525 if (!found)
1526 {
1527 INIT(found,
1528 .cfg = peer_cfg->get_ref(peer_cfg),
1529 );
1530 array_insert_create(&this->config_checkouts, ARRAY_TAIL, found);
1531 }
1532 found->threads++;
1533 while (found->working)
1534 {
1535 this->config_condvar->wait(this->config_condvar, this->config_mutex);
1536 }
1537 found->working = TRUE;
1538 this->config_mutex->unlock(this->config_mutex);
1539
1540 enumerator = create_table_enumerator(this);
1541 while (enumerator->enumerate(enumerator, &entry, &segment))
1542 {
1543 if (!wait_for_entry(this, entry, segment))
1544 {
1545 continue;
1546 }
1547 if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING ||
1548 entry->ike_sa->get_state(entry->ike_sa) == IKE_REKEYED)
1549 { /* skip IKE_SAs which are not usable, wake other waiting threads */
1550 entry->condvar->signal(entry->condvar);
1551 continue;
1552 }
1553
1554 current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
1555 if (current_peer && current_peer->equals(current_peer, peer_cfg))
1556 {
1557 current_ike = current_peer->get_ike_cfg(current_peer);
1558 if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
1559 {
1560 entry->checked_out = thread_current();
1561 ike_sa = entry->ike_sa;
1562 DBG2(DBG_MGR, "found existing IKE_SA %u with config '%s'",
1563 ike_sa->get_unique_id(ike_sa),
1564 current_peer->get_name(current_peer));
1565 break;
1566 }
1567 }
1568 /* other threads might be waiting for this entry */
1569 entry->condvar->signal(entry->condvar);
1570 }
1571 enumerator->destroy(enumerator);
1572
1573 if (!ike_sa)
1574 {
1575 ike_sa = create_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1576 ike_sa->set_peer_cfg(ike_sa, peer_cfg);
1577 checkout_new(this, ike_sa);
1578 }
1579 charon->bus->set_sa(charon->bus, ike_sa);
1580
1581 this->config_mutex->lock(this->config_mutex);
1582 found->working = FALSE;
1583 found->threads--;
1584 if (!found->threads)
1585 {
1586 for (i = 0; i < array_count(this->config_checkouts); i++)
1587 {
1588 array_get(this->config_checkouts, i, &config_entry);
1589 if (config_entry == found)
1590 {
1591 array_remove(this->config_checkouts, i, NULL);
1592 found->cfg->destroy(found->cfg);
1593 free(found);
1594 break;
1595 }
1596 }
1597 }
1598 this->config_condvar->signal(this->config_condvar);
1599 this->config_mutex->unlock(this->config_mutex);
1600
1601 out:
1602 if (!ike_sa)
1603 {
1604 DBG2(DBG_MGR, "IKE_SA checkout not successful");
1605 }
1606 return ike_sa;
1607 }
1608
1609 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1610 private_ike_sa_manager_t *this, uint32_t id)
1611 {
1612 enumerator_t *enumerator;
1613 entry_t *entry;
1614 ike_sa_t *ike_sa = NULL;
1615 u_int segment;
1616
1617 DBG2(DBG_MGR, "checkout IKE_SA by unique ID %u", id);
1618
1619 enumerator = create_table_enumerator(this);
1620 while (enumerator->enumerate(enumerator, &entry, &segment))
1621 {
1622 if (wait_for_entry(this, entry, segment))
1623 {
1624 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1625 {
1626 ike_sa = entry->ike_sa;
1627 entry->checked_out = thread_current();
1628 break;
1629 }
1630 /* other threads might be waiting for this entry */
1631 entry->condvar->signal(entry->condvar);
1632 }
1633 }
1634 enumerator->destroy(enumerator);
1635
1636 if (ike_sa)
1637 {
1638 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1639 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1640 }
1641 else
1642 {
1643 DBG2(DBG_MGR, "IKE_SA checkout not successful");
1644 }
1645 charon->bus->set_sa(charon->bus, ike_sa);
1646 return ike_sa;
1647 }
1648
1649 METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1650 private_ike_sa_manager_t *this, char *name, bool child)
1651 {
1652 enumerator_t *enumerator, *children;
1653 entry_t *entry;
1654 ike_sa_t *ike_sa = NULL;
1655 child_sa_t *child_sa;
1656 u_int segment;
1657
1658 DBG2(DBG_MGR, "checkout IKE_SA by%s name '%s'", child ? " child" : "", name);
1659
1660 enumerator = create_table_enumerator(this);
1661 while (enumerator->enumerate(enumerator, &entry, &segment))
1662 {
1663 if (wait_for_entry(this, entry, segment))
1664 {
1665 /* look for a child with such a policy name ... */
1666 if (child)
1667 {
1668 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1669 while (children->enumerate(children, (void**)&child_sa))
1670 {
1671 if (streq(child_sa->get_name(child_sa), name))
1672 {
1673 ike_sa = entry->ike_sa;
1674 break;
1675 }
1676 }
1677 children->destroy(children);
1678 }
1679 else /* ... or for a IKE_SA with such a connection name */
1680 {
1681 if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1682 {
1683 ike_sa = entry->ike_sa;
1684 }
1685 }
1686 /* got one, return */
1687 if (ike_sa)
1688 {
1689 entry->checked_out = thread_current();
1690 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1691 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1692 break;
1693 }
1694 /* other threads might be waiting for this entry */
1695 entry->condvar->signal(entry->condvar);
1696 }
1697 }
1698 enumerator->destroy(enumerator);
1699
1700 charon->bus->set_sa(charon->bus, ike_sa);
1701
1702 if (!ike_sa)
1703 {
1704 DBG2(DBG_MGR, "IKE_SA checkout not successful");
1705 }
1706 return ike_sa;
1707 }
1708
/**
 * Replace the initiator SPI of a checked-out, connecting IKE_SA with a newly
 * allocated one, keeping the entry in the same hash table row/segment.
 */
METHOD(ike_sa_manager_t, new_initiator_spi, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	ike_sa_state_t state;
	ike_sa_id_t *ike_sa_id;
	entry_t *entry;
	u_int segment;
	uint64_t new_spi, spi;

	/* only allowed while still connecting */
	state = ike_sa->get_state(ike_sa);
	if (state != IKE_CONNECTING)
	{
		DBG1(DBG_MGR, "unable to change initiator SPI for IKE_SA in state "
			 "%N", ike_sa_state_names, state);
		return FALSE;
	}

	ike_sa_id = ike_sa->get_id(ike_sa);
	if (!ike_sa_id->is_initiator(ike_sa_id))
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA as responder");
		return FALSE;
	}

	/* the caller must have this SA checked out (it is the thread's current
	 * SA on the bus) */
	if (ike_sa != charon->bus->get_sa(charon->bus))
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA not checked "
			 "out by current thread");
		return FALSE;
	}

	new_spi = get_spi(this);
	if (!new_spi)
	{
		DBG1(DBG_MGR, "unable to allocate new initiator SPI for IKE_SA");
		return FALSE;
	}

	/* locks the entry's segment on success */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, no need for a new SPI */
			DBG2(DBG_MGR, "ignored change of initiator SPI during shutdown");
			unlock_single_segment(this, segment);
			return FALSE;
		}
	}
	else
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA, not found");
		return FALSE;
	}

	/* the hashtable row and segment are determined by the local SPI as
	 * initiator, so if we change it the row and segment derived from it might
	 * change as well. This could be a problem for threads waiting for the
	 * entry (in particular those enumerating entries to check them out by
	 * unique ID or name). In order to avoid having to drive them out and thus
	 * preventing them from checking out the entry (even though the ID or name
	 * will not change and enumerating it is also fine), we mask the new SPI and
	 * merge it with the old SPI so the entry ends up in the same row/segment.
	 * Since SPIs are 64-bit and the number of rows/segments is usually
	 * relatively low this should not be a problem. */
	spi = ike_sa_id->get_initiator_spi(ike_sa_id);
	new_spi = (spi & (uint64_t)this->table_mask) |
			  (new_spi & ~(uint64_t)this->table_mask);

	DBG2(DBG_MGR, "change initiator SPI of IKE_SA %s[%u] from %.16"PRIx64" to "
		 "%.16"PRIx64, ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa),
		 be64toh(spi), be64toh(new_spi));

	/* update both the SA's ID and the entry's lookup key */
	ike_sa_id->set_initiator_spi(ike_sa_id, new_spi);
	entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa_id);

	entry->condvar->signal(entry->condvar);
	unlock_single_segment(this, segment);
	return TRUE;
}
1788
1789 CALLBACK(enumerator_filter_wait, bool,
1790 private_ike_sa_manager_t *this, enumerator_t *orig, va_list args)
1791 {
1792 entry_t *entry;
1793 u_int segment;
1794 ike_sa_t **out;
1795
1796 VA_ARGS_VGET(args, out);
1797
1798 while (orig->enumerate(orig, &entry, &segment))
1799 {
1800 if (wait_for_entry(this, entry, segment))
1801 {
1802 *out = entry->ike_sa;
1803 charon->bus->set_sa(charon->bus, *out);
1804 return TRUE;
1805 }
1806 }
1807 return FALSE;
1808 }
1809
1810 CALLBACK(enumerator_filter_skip, bool,
1811 private_ike_sa_manager_t *this, enumerator_t *orig, va_list args)
1812 {
1813 entry_t *entry;
1814 u_int segment;
1815 ike_sa_t **out;
1816
1817 VA_ARGS_VGET(args, out);
1818
1819 while (orig->enumerate(orig, &entry, &segment))
1820 {
1821 if (!entry->driveout_new_threads &&
1822 !entry->driveout_waiting_threads &&
1823 !entry->checked_out)
1824 {
1825 *out = entry->ike_sa;
1826 charon->bus->set_sa(charon->bus, *out);
1827 return TRUE;
1828 }
1829 }
1830 return FALSE;
1831 }
1832
CALLBACK(reset_sa, void,
	void *data)
{
	/* enumerator cleanup callback: clear the thread's current IKE_SA
	 * on the bus once enumeration is done */
	charon->bus->set_sa(charon->bus, NULL);
}
1838
1839 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1840 private_ike_sa_manager_t* this, bool wait)
1841 {
1842 return enumerator_create_filter(create_table_enumerator(this),
1843 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1844 this, reset_sa);
1845 }
1846
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry; locks the segment on success */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = NULL;
		entry->processing = -1;
		/* check if this SA is half-open and update the half-open table to
		 * match the SA's current state/remote address */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* not found: register the checked-in SA as a new entry; the segment
		 * is returned locked */
		segment = create_and_put_entry(this, ike_sa, &entry);
	}
	DBG2(DBG_MGR, "checkin of IKE_SA successful");

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_uniqueness() will try to check out this SA */
				entry->checked_out = thread_current();
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = NULL;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	/* clear the thread's current SA on the bus */
	charon->bus->set_sa(charon->bus, NULL);
}
1957
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* locks the entry's segment on success */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored checkin and destroy of IKE_SA during shutdown");
			entry->checked_out = NULL;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up the auxiliary tables referencing this entry */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		/* destroys the entry and with it the IKE_SA */
		entry_destroy(entry);

		DBG2(DBG_MGR, "checkin and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to checkin and delete nonexistent IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
2026
/**
 * Cleanup function for create_id_enumerator: destroys the list of
 * cloned IKE_SA IDs together with its elements.
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
2034
2035 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
2036 private_ike_sa_manager_t *this, identification_t *me,
2037 identification_t *other, int family)
2038 {
2039 table_item_t *item;
2040 u_int row, segment;
2041 rwlock_t *lock;
2042 linked_list_t *ids = NULL;
2043
2044 row = chunk_hash_inc(other->get_encoding(other),
2045 chunk_hash(me->get_encoding(me))) & this->table_mask;
2046 segment = row & this->segment_mask;
2047
2048 lock = this->connected_peers_segments[segment].lock;
2049 lock->read_lock(lock);
2050 item = this->connected_peers_table[row];
2051 while (item)
2052 {
2053 connected_peers_t *current = item->value;
2054
2055 if (connected_peers_match(current, me, other, family))
2056 {
2057 ids = current->sas->clone_offset(current->sas,
2058 offsetof(ike_sa_id_t, clone));
2059 break;
2060 }
2061 item = item->next;
2062 }
2063 lock->unlock(lock);
2064
2065 if (!ids)
2066 {
2067 return enumerator_create_empty();
2068 }
2069 return enumerator_create_cleaner(ids->create_enumerator(ids),
2070 (void*)id_enumerator_cleanup, ids);
2071 }
2072
/**
 * Move all CHILD_SAs and virtual IPs from old to new
 */
static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
{
	enumerator_t *enumerator;
	child_sa_t *child_sa;
	host_t *vip;
	int chcount = 0, vipcount = 0;

	/* announce the migration target before moving anything */
	charon->bus->children_migrate(charon->bus, new->get_id(new),
								  new->get_unique_id(new));
	enumerator = old->create_child_sa_enumerator(old);
	while (enumerator->enumerate(enumerator, &child_sa))
	{
		old->remove_child_sa(old, enumerator);
		new->add_child_sa(new, child_sa);
		chcount++;
	}
	enumerator->destroy(enumerator);

	/* also move any queued CHILD_SA-creating tasks */
	new->adopt_child_tasks(new, old);

	enumerator = old->create_virtual_ip_enumerator(old, FALSE);
	while (enumerator->enumerate(enumerator, &vip))
	{
		new->add_virtual_ip(new, FALSE, vip);
		vipcount++;
	}
	enumerator->destroy(enumerator);
	/* this does not release the addresses, which is good, but it does trigger
	 * an assign_vips(FALSE) event... */
	old->clear_virtual_ips(old, FALSE);
	/* ...trigger the analogous event on the new SA */
	charon->bus->set_sa(charon->bus, new);
	charon->bus->assign_vips(charon->bus, new, TRUE);
	charon->bus->children_migrate(charon->bus, NULL, 0);
	/* restore the old SA as the thread's current SA */
	charon->bus->set_sa(charon->bus, old);

	if (chcount || vipcount)
	{
		DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
			 "children and %d virtual IPs", chcount, vipcount);
	}
}
2118
/**
 * Delete an existing IKE_SA due to a unique replace policy
 *
 * @return	SUCCESS if a delayed delete was scheduled, otherwise the
 *			result of duplicate->delete() (DESTROY_ME lets the caller
 *			destroy the duplicate immediately)
 */
static status_t enforce_replace(private_ike_sa_manager_t *this,
								ike_sa_t *duplicate, ike_sa_t *new,
								identification_t *other, host_t *host)
{
	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);

	if (host->equals(host, duplicate->get_other_host(duplicate)))
	{
		/* looks like a reauthentication attempt */
		if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
			new->get_version(new) == IKEV1)
		{
			/* IKEv1 implicitly takes over children, IKEv2 recreates them
			 * explicitly. */
			adopt_children_and_vips(duplicate, new);
		}
		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
		 * peers need to complete the new SA first, otherwise the quick modes
		 * might get lost. For IKEv2 we do the same, as we want overlapping
		 * CHILD_SAs to keep connectivity up. */
		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
		DBG1(DBG_IKE, "schedule delete of duplicate IKE_SA for peer '%Y' due "
			 "to uniqueness policy and suspected reauthentication", other);
		return SUCCESS;
	}
	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
		 "uniqueness policy", other);
	return duplicate->delete(duplicate, FALSE);
}
2152
/**
 * Enforce the peer config's uniqueness policy against existing SAs with the
 * same identities.  Returns TRUE if the new SA itself should be abandoned
 * (UNIQUE_KEEP with an established duplicate).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	/* enumerate IDs of SAs with the same local/remote identities */
	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
										duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
2240
2241 METHOD(ike_sa_manager_t, has_contact, bool,
2242 private_ike_sa_manager_t *this, identification_t *me,
2243 identification_t *other, int family)
2244 {
2245 table_item_t *item;
2246 u_int row, segment;
2247 rwlock_t *lock;
2248 bool found = FALSE;
2249
2250 row = chunk_hash_inc(other->get_encoding(other),
2251 chunk_hash(me->get_encoding(me))) & this->table_mask;
2252 segment = row & this->segment_mask;
2253 lock = this->connected_peers_segments[segment].lock;
2254 lock->read_lock(lock);
2255 item = this->connected_peers_table[row];
2256 while (item)
2257 {
2258 if (connected_peers_match(item->value, me, other, family))
2259 {
2260 found = TRUE;
2261 break;
2262 }
2263 item = item->next;
2264 }
2265 lock->unlock(lock);
2266
2267 return found;
2268 }
2269
METHOD(ike_sa_manager_t, get_count, u_int,
	private_ike_sa_manager_t *this)
{
	/* total number of registered IKE_SAs, maintained as a refcount-style
	 * counter so no table lock is needed here */
	return (u_int)ref_cur(&this->total_sa_count);
}
2275
2276 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
2277 private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
2278 {
2279 table_item_t *item;
2280 u_int row, segment;
2281 rwlock_t *lock;
2282 chunk_t addr;
2283 u_int count = 0;
2284
2285 if (ip)
2286 {
2287 addr = ip->get_address(ip);
2288 row = chunk_hash(addr) & this->table_mask;
2289 segment = row & this->segment_mask;
2290 lock = this->half_open_segments[segment].lock;
2291 lock->read_lock(lock);
2292 item = this->half_open_table[row];
2293 while (item)
2294 {
2295 half_open_t *half_open = item->value;
2296
2297 if (chunk_equals(addr, half_open->other))
2298 {
2299 count = responder_only ? half_open->count_responder
2300 : half_open->count;
2301 break;
2302 }
2303 item = item->next;
2304 }
2305 lock->unlock(lock);
2306 }
2307 else
2308 {
2309 count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
2310 : (u_int)ref_cur(&this->half_open_count);
2311 }
2312 return count;
2313 }
2314
2315 METHOD(ike_sa_manager_t, set_spi_cb, void,
2316 private_ike_sa_manager_t *this, spi_cb_t callback, void *data)
2317 {
2318 this->spi_lock->write_lock(this->spi_lock);
2319 this->spi_cb.cb = callback;
2320 this->spi_cb.data = data;
2321 this->spi_lock->unlock(this->spi_lock);
2322 }
2323
2324 /**
2325 * Destroy all entries
2326 */
2327 static void destroy_all_entries(private_ike_sa_manager_t *this)
2328 {
2329 enumerator_t *enumerator;
2330 entry_t *entry;
2331 u_int segment;
2332
2333 enumerator = create_table_enumerator(this);
2334 while (enumerator->enumerate(enumerator, &entry, &segment))
2335 {
2336 charon->bus->set_sa(charon->bus, entry->ike_sa);
2337 if (entry->half_open)
2338 {
2339 remove_half_open(this, entry);
2340 }
2341 if (entry->my_id && entry->other_id)
2342 {
2343 remove_connected_peers(this, entry);
2344 }
2345 if (entry->init_hash.ptr)
2346 {
2347 remove_init_hash(this, entry->init_hash);
2348 }
2349 remove_entry_at((private_enumerator_t*)enumerator);
2350 entry_destroy(entry);
2351 }
2352 enumerator->destroy(enumerator);
2353 charon->bus->set_sa(charon->bus, NULL);
2354 }
2355
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	/* Tear down all managed IKE_SAs in four passes.  All segments stay
	 * locked for the whole procedure so no new entries can be added. */
	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone; waiting on the condvar
			 * presumably releases the segment mutex so drivees can
			 * check in/out — NOTE(review): confirm in condvar_t docs */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* set the current SA on the bus so listeners see the deletion
		 * in the context of the right IKE_SA */
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		entry->ike_sa->delete(entry->ike_sa, TRUE);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	destroy_all_entries(this);
	unlock_all_segments(this);

	/* disable SPI generation: drop the RNG and the installed callback so
	 * no new SPIs (and thus SAs) can be created after the flush */
	this->spi_lock->write_lock(this->spi_lock);
	DESTROY_IF(this->rng);
	this->rng = NULL;
	this->spi_cb.cb = NULL;
	this->spi_cb.data = NULL;
	this->spi_lock->unlock(this->spi_lock);
}
2411
2412 METHOD(ike_sa_manager_t, destroy, void,
2413 private_ike_sa_manager_t *this)
2414 {
2415 u_int i;
2416
2417 /* in case new SAs were checked in after flush() was called */
2418 lock_all_segments(this);
2419 destroy_all_entries(this);
2420 unlock_all_segments(this);
2421
2422 free(this->ike_sa_table);
2423 free(this->half_open_table);
2424 free(this->connected_peers_table);
2425 free(this->init_hashes_table);
2426 for (i = 0; i < this->segment_count; i++)
2427 {
2428 this->segments[i].mutex->destroy(this->segments[i].mutex);
2429 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
2430 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
2431 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
2432 }
2433 free(this->segments);
2434 free(this->half_open_segments);
2435 free(this->connected_peers_segments);
2436 free(this->init_hashes_segments);
2437
2438 array_destroy(this->config_checkouts);
2439 this->config_mutex->destroy(this->config_mutex);
2440 this->config_condvar->destroy(this->config_condvar);
2441
2442 this->spi_lock->destroy(this->spi_lock);
2443 free(this);
2444 }
2445
2446 /**
2447 * This function returns the next-highest power of two for the given number.
2448 * The algorithm works by setting all bits on the right-hand side of the most
2449 * significant 1 to 1 and then increments the whole number so it rolls over
2450 * to the nearest power of two. Note: returns 0 for n == 0
2451 */
2452 static u_int get_nearest_powerof2(u_int n)
2453 {
2454 u_int i;
2455
2456 --n;
2457 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2458 {
2459 n |= n >> i;
2460 }
2461 return ++n;
2462 }
2463
2464 /*
2465 * Described in header.
2466 */
2467 ike_sa_manager_t *ike_sa_manager_create()
2468 {
2469 private_ike_sa_manager_t *this;
2470 char *spi_val;
2471 u_int i;
2472
2473 INIT(this,
2474 .public = {
2475 .create_new = _create_new,
2476 .checkout_new = _checkout_new,
2477 .checkout = _checkout,
2478 .checkout_by_message = _checkout_by_message,
2479 .checkout_by_config = _checkout_by_config,
2480 .checkout_by_id = _checkout_by_id,
2481 .checkout_by_name = _checkout_by_name,
2482 .new_initiator_spi = _new_initiator_spi,
2483 .check_uniqueness = _check_uniqueness,
2484 .has_contact = _has_contact,
2485 .create_enumerator = _create_enumerator,
2486 .create_id_enumerator = _create_id_enumerator,
2487 .checkin = _checkin,
2488 .checkin_and_destroy = _checkin_and_destroy,
2489 .get_count = _get_count,
2490 .get_half_open_count = _get_half_open_count,
2491 .flush = _flush,
2492 .set_spi_cb = _set_spi_cb,
2493 .destroy = _destroy,
2494 },
2495 );
2496
2497 this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
2498 if (this->rng == NULL)
2499 {
2500 DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
2501 free(this);
2502 return NULL;
2503 }
2504 this->spi_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2505 spi_val = lib->settings->get_str(lib->settings, "%s.spi_mask", NULL,
2506 lib->ns);
2507 this->spi_mask = settings_value_as_uint64(spi_val, 0);
2508 spi_val = lib->settings->get_str(lib->settings, "%s.spi_label", NULL,
2509 lib->ns);
2510 this->spi_label = settings_value_as_uint64(spi_val, 0);
2511 if (this->spi_mask || this->spi_label)
2512 {
2513 DBG1(DBG_IKE, "using SPI label 0x%.16"PRIx64" and mask 0x%.16"PRIx64,
2514 this->spi_label, this->spi_mask);
2515 /* the allocated SPI is assumed to be in network order */
2516 this->spi_mask = htobe64(this->spi_mask);
2517 this->spi_label = htobe64(this->spi_label);
2518 }
2519
2520 this->ikesa_limit = lib->settings->get_int(lib->settings,
2521 "%s.ikesa_limit", 0, lib->ns);
2522
2523 this->table_size = get_nearest_powerof2(lib->settings->get_int(
2524 lib->settings, "%s.ikesa_table_size",
2525 DEFAULT_HASHTABLE_SIZE, lib->ns));
2526 this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
2527 this->table_mask = this->table_size - 1;
2528
2529 this->segment_count = get_nearest_powerof2(lib->settings->get_int(
2530 lib->settings, "%s.ikesa_table_segments",
2531 DEFAULT_SEGMENT_COUNT, lib->ns));
2532 this->segment_count = max(1, min(this->segment_count, this->table_size));
2533 this->segment_mask = this->segment_count - 1;
2534
2535 this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
2536 this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
2537 for (i = 0; i < this->segment_count; i++)
2538 {
2539 this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2540 }
2541
2542 /* we use the same table parameters for the table to track half-open SAs */
2543 this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
2544 this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2545 for (i = 0; i < this->segment_count; i++)
2546 {
2547 this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2548 }
2549
2550 /* also for the hash table used for duplicate tests */
2551 this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
2552 this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2553 for (i = 0; i < this->segment_count; i++)
2554 {
2555 this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2556 }
2557
2558 /* and again for the table of hashes of seen initial IKE messages */
2559 this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
2560 this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
2561 for (i = 0; i < this->segment_count; i++)
2562 {
2563 this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2564 }
2565
2566 this->config_mutex = mutex_create(MUTEX_TYPE_DEFAULT);
2567 this->config_condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
2568
2569 this->reuse_ikesa = lib->settings->get_bool(lib->settings,
2570 "%s.reuse_ikesa", TRUE, lib->ns);
2571 return &this->public;
2572 }