b7df1d809b319d92aae5f53202438a408c4749ac
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 *
5 * Copyright (C) 2008-2021 Tobias Brunner
6 * Copyright (C) 2005 Jan Hutter
7 * HSR Hochschule fuer Technik Rapperswil
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 */
19
20 #include <string.h>
21 #include <inttypes.h>
22
23 #include "ike_sa_manager.h"
24
25 #include <daemon.h>
26 #include <sa/ike_sa_id.h>
27 #include <bus/bus.h>
28 #include <threading/thread.h>
29 #include <threading/condvar.h>
30 #include <threading/mutex.h>
31 #include <threading/rwlock.h>
32 #include <collections/array.h>
33 #include <collections/linked_list.h>
34 #include <crypto/hashers/hasher.h>
35 #include <processing/jobs/delete_ike_sa_job.h>
36
37 /* the default size of the hash table (MUST be a power of 2) */
38 #define DEFAULT_HASHTABLE_SIZE 1
39
40 /* the maximum size of the hash table (MUST be a power of 2) */
41 #define MAX_HASHTABLE_SIZE (1 << 30)
42
43 /* the default number of segments (MUST be a power of 2) */
44 #define DEFAULT_SEGMENT_COUNT 1
45
46 typedef struct entry_t entry_t;
47
48 /**
49 * An entry in the linked list, contains IKE_SA, locking and lookup data.
50 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Thread by which this IKE_SA is currently checked out, if any
	 */
	thread_t *checked_out;

	/**
	 * Does this SA drive out new threads?
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads?
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 * (field is unsigned, so -1 is stored as UINT32_MAX)
	 */
	uint32_t processing;
};
120
121 /**
122 * Implementation of entry_t.destroy.
123 */
/**
 * Destroy an entry and everything it owns, including the contained IKE_SA.
 */
static status_t entry_destroy(entry_t *this)
{
	/* also destroy IKE SA */
	this->ike_sa->destroy(this->ike_sa);
	this->ike_sa_id->destroy(this->ike_sa_id);
	chunk_free(&this->init_hash);
	/* other/my_id/other_id are only set on established or half-open SAs */
	DESTROY_IF(this->other);
	DESTROY_IF(this->my_id);
	DESTROY_IF(this->other_id);
	this->condvar->destroy(this->condvar);
	free(this);
	return SUCCESS;
}
137
138 /**
139 * Creates a new entry for the ike_sa_t list.
140 */
141 static entry_t *entry_create()
142 {
143 entry_t *this;
144
145 INIT(this,
146 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
147 .processing = -1,
148 );
149
150 return this;
151 }
152
153 /**
154 * Function that matches entry_t objects by ike_sa_id_t.
155 */
156 static bool entry_match_by_id(entry_t *entry, void *arg)
157 {
158 ike_sa_id_t *id = arg;
159
160 if (id->equals(id, entry->ike_sa_id))
161 {
162 return TRUE;
163 }
164 if ((id->get_responder_spi(id) == 0 ||
165 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
166 (id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
167 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
168 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
169 {
170 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
171 return TRUE;
172 }
173 return FALSE;
174 }
175
176 /**
177 * Function that matches entry_t objects by ike_sa_t pointers.
178 */
179 static bool entry_match_by_sa(entry_t *entry, void *ike_sa)
180 {
181 return entry->ike_sa == ike_sa;
182 }
183
184 /**
185 * Hash function for ike_sa_id_t objects.
186 */
187 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
188 {
189 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
190 * locally unique, so we use our randomly allocated SPI whether we are
191 * initiator or responder to ensure a good distribution. The latter is not
192 * possible for IKEv1 as we don't know whether we are original initiator or
193 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
194 * SPIs (Cookies) to be allocated near random (we allocate them randomly
195 * anyway) it seems safe to always use the initiator SPI. */
196 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
197 ike_sa_id->is_initiator(ike_sa_id))
198 {
199 return ike_sa_id->get_initiator_spi(ike_sa_id);
200 }
201 return ike_sa_id->get_responder_spi(ike_sa_id);
202 }
203
204 typedef struct half_open_t half_open_t;
205
206 /**
207 * Struct to manage half-open IKE_SAs per peer.
208 */
209 struct half_open_t {
210 /** chunk of remote host address */
211 chunk_t other;
212
213 /** the number of half-open IKE_SAs with that host */
214 u_int count;
215
216 /** the number of half-open IKE_SAs we responded to with that host */
217 u_int count_responder;
218 };
219
220 /**
221 * Destroys a half_open_t object.
222 */
/**
 * Destroys a half_open_t object, releasing the cloned address chunk.
 */
static void half_open_destroy(half_open_t *this)
{
	chunk_free(&this->other);
	free(this);
}
228
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct to track established IKE_SAs between a pair of identities,
 * used for duplicate checking ("uniqueness" policies).
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
244
/**
 * Destroys a connected_peers_t object.  The contained list is expected to
 * be empty at this point (IDs are removed in remove_connected_peers()).
 */
static void connected_peers_destroy(connected_peers_t *this)
{
	this->my_id->destroy(this->my_id);
	this->other_id->destroy(this->other_id);
	this->sas->destroy(this->sas);
	free(this);
}
252
253 /**
254 * Function that matches connected_peers_t objects by the given ids.
255 */
256 static inline bool connected_peers_match(connected_peers_t *connected_peers,
257 identification_t *my_id, identification_t *other_id,
258 int family)
259 {
260 return my_id->equals(my_id, connected_peers->my_id) &&
261 other_id->equals(other_id, connected_peers->other_id) &&
262 (!family || family == connected_peers->family);
263 }
264
265 typedef struct init_hash_t init_hash_t;
266
267 struct init_hash_t {
268 /** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
269 chunk_t hash;
270
271 /** our SPI allocated for the IKE_SA based on this message */
272 uint64_t our_spi;
273 };
274
275 typedef struct segment_t segment_t;
276
277 /**
278 * Struct to manage segments of the hash table.
279 */
280 struct segment_t {
281 /** mutex to access a segment exclusively */
282 mutex_t *mutex;
283 };
284
285 typedef struct shareable_segment_t shareable_segment_t;
286
287 /**
288 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
289 */
290 struct shareable_segment_t {
291 /** rwlock to access a segment non-/exclusively */
292 rwlock_t *lock;
293
294 /** the number of entries in this segment - in case of the "half-open table"
295 * it's the sum of all half_open_t.count in a segment. */
296 u_int count;
297 };
298
299 typedef struct table_item_t table_item_t;
300
301 /**
302 * Instead of using linked_list_t for each bucket we store the data in our own
303 * list to save memory.
304 */
305 struct table_item_t {
306 /** data of this item */
307 void *value;
308
309 /** next item in the overflow list */
310 table_item_t *next;
311 };
312
313 typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;
314
315 /**
316 * Additional private members of ike_sa_manager_t.
317 */
318 struct private_ike_sa_manager_t {
319 /**
320 * Public interface of ike_sa_manager_t.
321 */
322 ike_sa_manager_t public;
323
324 /**
325 * Hash table with entries for the ike_sa_t objects.
326 */
327 table_item_t **ike_sa_table;
328
329 /**
330 * The size of the hash table.
331 */
332 u_int table_size;
333
334 /**
335 * Mask to map the hashes to table rows.
336 */
337 u_int table_mask;
338
339 /**
340 * Segments of the hash table.
341 */
342 segment_t *segments;
343
344 /**
345 * The number of segments.
346 */
347 u_int segment_count;
348
349 /**
350 * Mask to map a table row to a segment.
351 */
352 u_int segment_mask;
353
354 /**
355 * Hash table with half_open_t objects.
356 */
357 table_item_t **half_open_table;
358
359 /**
360 * Segments of the "half-open" hash table.
361 */
362 shareable_segment_t *half_open_segments;
363
364 /**
365 * Total number of half-open IKE_SAs.
366 */
367 refcount_t half_open_count;
368
369 /**
370 * Total number of half-open IKE_SAs as responder.
371 */
372 refcount_t half_open_count_responder;
373
374 /**
375 * Total number of IKE_SAs registered with IKE_SA manager.
376 */
377 refcount_t total_sa_count;
378
379 /**
380 * Hash table with connected_peers_t objects.
381 */
382 table_item_t **connected_peers_table;
383
384 /**
385 * Segments of the "connected peers" hash table.
386 */
387 shareable_segment_t *connected_peers_segments;
388
389 /**
390 * Hash table with init_hash_t objects.
391 */
392 table_item_t **init_hashes_table;
393
394 /**
395 * Segments of the "hashes" hash table.
396 */
397 segment_t *init_hashes_segments;
398
399 /**
400 * Configs for which an SA is currently being checked out.
401 */
402 array_t *config_checkouts;
403
404 /**
405 * Mutex to protect access to configs.
406 */
407 mutex_t *config_mutex;
408
409 /**
410 * Condvar to indicate changes in checkout configs.
411 */
412 condvar_t *config_condvar;
413
414 /**
415 * RNG to get random SPIs for our side
416 */
417 rng_t *rng;
418
419 /**
420 * Registered callback for IKE SPIs
421 */
422 struct {
423 spi_cb_t cb;
424 void *data;
425 } spi_cb;
426
427 /**
428 * Lock to access the RNG instance and the callback
429 */
430 rwlock_t *spi_lock;
431
432 /**
433 * Mask applied to local SPIs before mixing in the label
434 */
435 uint64_t spi_mask;
436
437 /**
438 * Label applied to local SPIs
439 */
440 uint64_t spi_label;
441
442 /**
443 * reuse existing IKE_SAs in checkout_by_config
444 */
445 bool reuse_ikesa;
446
447 /**
448 * Configured IKE_SA limit, if any
449 */
450 u_int ikesa_limit;
451 };
452
453 /**
454 * Acquire a lock to access the segment of the table row with the given index.
455 * It also works with the segment index directly.
456 */
457 static inline void lock_single_segment(private_ike_sa_manager_t *this,
458 u_int index)
459 {
460 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
461 lock->lock(lock);
462 }
463
464 /**
465 * Release the lock required to access the segment of the table row with the given index.
466 * It also works with the segment index directly.
467 */
468 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
469 u_int index)
470 {
471 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
472 lock->unlock(lock);
473 }
474
475 /**
476 * Lock all segments
477 */
478 static void lock_all_segments(private_ike_sa_manager_t *this)
479 {
480 u_int i;
481
482 for (i = 0; i < this->segment_count; i++)
483 {
484 this->segments[i].mutex->lock(this->segments[i].mutex);
485 }
486 }
487
488 /**
489 * Unlock all segments
490 */
491 static void unlock_all_segments(private_ike_sa_manager_t *this)
492 {
493 u_int i;
494
495 for (i = 0; i < this->segment_count; i++)
496 {
497 this->segments[i].mutex->unlock(this->segments[i].mutex);
498 }
499 }
500
501 typedef struct private_enumerator_t private_enumerator_t;
502
503 /**
504 * hash table enumerator implementation
505 */
506 struct private_enumerator_t {
507
508 /**
509 * implements enumerator interface
510 */
511 enumerator_t enumerator;
512
513 /**
514 * associated ike_sa_manager_t
515 */
516 private_ike_sa_manager_t *manager;
517
518 /**
519 * current segment index
520 */
521 u_int segment;
522
523 /**
524 * currently enumerating entry
525 */
526 entry_t *entry;
527
528 /**
529 * current table row index
530 */
531 u_int row;
532
533 /**
534 * current table item
535 */
536 table_item_t *current;
537
538 /**
539 * previous table item
540 */
541 table_item_t *prev;
542 };
543
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, va_list args)
{
	entry_t **entry;
	u_int *segment;

	VA_ARGS_VGET(args, entry, segment);

	/* signal threads that may be waiting on the previously enumerated entry */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	/* rows are interleaved over segments: segment s owns rows s, s+count,
	 * s+2*count, ..., so a segment lock is held while walking its rows */
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{	/* entering a new row: acquire the segment lock first */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{	/* return with the segment still locked; released either on
				 * the next enumerate() call or in destroy() */
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
585
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	/* wake up threads that may be waiting on the last enumerated entry */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
	}
	/* a non-NULL current means enumerate() left a segment locked */
	if (this->current)
	{
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
599
600 /**
601 * Creates an enumerator to enumerate the entries in the hash table.
602 */
/**
 * Creates an enumerator to enumerate the entries in the hash table.
 *
 * segment/row/current start at zero/NULL, so enumeration begins with
 * segment 0, row 0 on the first enumerate() call.
 */
static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
{
	private_enumerator_t *enumerator;

	INIT(enumerator,
		.enumerator = {
			.enumerate = enumerator_enumerate_default,
			.venumerate = _enumerate,
			.destroy = _enumerator_destroy,
		},
		.manager = this,
	);
	return &enumerator->enumerator;
}
617
618 /**
619 * Put an entry into the hash table.
620 * Note: The caller has to unlock the returned segment.
621 */
/**
 * Put an entry into the hash table.
 * Note: The caller has to unlock the returned segment.
 */
static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *current, *item;
	u_int row, segment;

	INIT(item,
		.value = entry,
	);

	row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
	segment = row & this->segment_mask;

	/* segment stays locked on return so the caller can safely use the entry */
	lock_single_segment(this, segment);
	current = this->ike_sa_table[row];
	if (current)
	{ /* insert at the front of current bucket */
		item->next = current;
	}
	this->ike_sa_table[row] = item;
	ref_get(&this->total_sa_count);
	return segment;
}
644
645 /**
646 * Remove an entry from the hash table.
647 * Note: The caller MUST have a lock on the segment of this entry.
648 */
/**
 * Remove an entry from the hash table.
 * Note: The caller MUST have a lock on the segment of this entry.
 *
 * Frees only the table item, not the entry itself.
 */
static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row;

	row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
	item = this->ike_sa_table[row];
	while (item)
	{
		if (item->value == entry)
		{	/* unlink the item from the bucket's overflow list */
			if (prev)
			{
				prev->next = item->next;
			}
			else
			{
				this->ike_sa_table[row] = item->next;
			}
			ignore_result(ref_put(&this->total_sa_count));
			free(item);
			break;
		}
		prev = item;
		item = item->next;
	}
}
676
677 /**
678 * Remove the entry at the current enumerator position.
679 */
/**
 * Remove the entry at the current enumerator position.
 *
 * Frees only the table item; the entry itself remains the caller's
 * responsibility.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		ignore_result(ref_put(&this->manager->total_sa_count));
		/* step the enumerator back so the next enumerate() continues with
		 * the removed item's successor */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{	/* head of the row removed: current becomes NULL, so the next
			 * enumerate() call will re-lock the segment - release it here */
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
702
703 /**
704 * Find an entry using the provided match function to compare the entries for
705 * equality.
706 */
/**
 * Find an entry using the provided match function to compare the entries for
 * equality.
 *
 * The ike_sa_id is only used to select the hash table row; the actual
 * comparison is delegated to the match callback with the given param.
 */
static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
					ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
					bool (*match)(entry_t*,void*), void *param)
{
	table_item_t *item;
	u_int row, seg;

	row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
	seg = row & this->segment_mask;

	lock_single_segment(this, seg);
	item = this->ike_sa_table[row];
	while (item)
	{
		if (match(item->value, param))
		{
			*entry = item->value;
			*segment = seg;
			/* the locked segment has to be unlocked by the caller */
			return SUCCESS;
		}
		item = item->next;
	}
	unlock_single_segment(this, seg);
	return NOT_FOUND;
}
733
734 /**
735 * Find an entry by ike_sa_id_t.
736 * Note: On SUCCESS, the caller has to unlock the segment.
737 */
/**
 * Find an entry by ike_sa_id_t.
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
						ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
	/* matches exact SPIs as well as half-completed initiations */
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
									   entry_match_by_id, ike_sa_id);
}
744
745 /**
746 * Find an entry by IKE_SA pointer.
747 * Note: On SUCCESS, the caller has to unlock the segment.
748 */
/**
 * Find an entry by IKE_SA pointer.
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
			ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry,
			u_int *segment)
{
	/* the id is only used to locate the hash table row */
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
									   entry_match_by_sa, ike_sa);
}
755
756 /**
757 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
758 * acquirable.
759 */
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 *
 * Must be called with the entry's segment mutex held; the condvar wait
 * releases and re-acquires that mutex.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
785
786 /**
787 * Put a half-open SA into the hash table.
788 */
/**
 * Put a half-open SA into the hash table.
 *
 * Buckets the SA by remote address and bumps the global and per-peer
 * half-open counters used for DoS detection.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	/* look for an existing counter object for this remote address */
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first half-open SA with this peer, create a counter object */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	/* track SAs we responded to separately */
	if (!ike_id->is_initiator(ike_id))
	{
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
837
838 /**
839 * Remove a half-open SA from the hash table.
840 */
/**
 * Remove a half-open SA from the hash table.
 *
 * Decrements the counters maintained by put_half_open() and destroys the
 * per-peer counter object once its count drops to zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{	/* no more half-open SAs with this peer, unlink and destroy */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
889
890 /**
891 * Create an entry and put it into the hash table.
892 * Note: The caller has to unlock the segment.
893 */
/**
 * Create an entry and put it into the hash table.
 * Note: The caller has to unlock the segment.
 *
 * Takes ownership of ike_sa and returns the new entry via *entry.
 */
static u_int create_and_put_entry(private_ike_sa_manager_t *this,
								  ike_sa_t *ike_sa, entry_t **entry)
{
	ike_sa_id_t *ike_sa_id = ike_sa->get_id(ike_sa);
	host_t *other = ike_sa->get_other_host(ike_sa);

	*entry = entry_create();
	(*entry)->ike_sa_id = ike_sa_id->clone(ike_sa_id);
	(*entry)->ike_sa = ike_sa;

	if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
	{	/* SA is not yet established, track it as half-open */
		(*entry)->half_open = TRUE;
		(*entry)->other = other->clone(other);
		put_half_open(this, *entry);
	}
	return put_entry(this, *entry);
}
912
/**
 * Callback to find an ike_sa_id_t in a list that equals the given id.
 */
CALLBACK(id_matches, bool,
	ike_sa_id_t *a, va_list args)
{
	ike_sa_id_t *b;

	VA_ARGS_VGET(args, b);
	return a->equals(a, b);
}
921
922 /**
923 * Put an SA between two peers into the hash table.
924 */
/**
 * Put an SA between two peers into the hash table.
 *
 * Entries are keyed by the two identities (and address family); the
 * IKE_SA's id is appended to the per-peer list unless already present.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
												 id_matches, NULL, entry->ike_sa_id))
			{	/* this IKE_SA is already registered, nothing to do */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first IKE_SA between these identities, create a new record */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
979
980 /**
981 * Remove an SA between two peers from the hash table.
982 */
/**
 * Remove an SA between two peers from the hash table.
 *
 * Removes the IKE_SA's id from the per-peer list and destroys the record
 * once the list is empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* find and remove this IKE_SA's id from the peer record */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no more SAs between these identities, unlink and destroy */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
1043
1044 /**
1045 * Get a random SPI for new IKE_SAs
1046 */
/**
 * Get a random SPI for new IKE_SAs.
 *
 * @return		a non-zero SPI, or 0 if allocation failed
 */
static uint64_t get_spi(private_ike_sa_manager_t *this)
{
	uint64_t spi;

	this->spi_lock->read_lock(this->spi_lock);
	if (this->spi_cb.cb)
	{	/* a registered callback takes precedence over the RNG */
		spi = this->spi_cb.cb(this->spi_cb.data);
	}
	else if (!this->rng ||
			 !this->rng->get_bytes(this->rng, sizeof(spi), (uint8_t*)&spi))
	{	/* 0 signals failure to callers */
		spi = 0;
	}
	this->spi_lock->unlock(this->spi_lock);

	if (spi)
	{	/* apply the configured mask/label to tag SPIs allocated by us */
		spi = (spi & ~this->spi_mask) | this->spi_label;
	}
	return spi;
}
1069
1070 /**
1071 * Calculate the hash of the initial IKE message. Memory for the hash is
1072 * allocated on success.
1073 *
1074 * @returns TRUE on success
1075 */
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		uint16_t port;
		uint64_t spi;

		src = message->get_source(message);
		/* NULL as last argument just feeds data into the hasher state */
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1110
1111 /**
1112 * Check if we already have created an IKE_SA based on the initial IKE message
1113 * with the given hash.
1114 * If not the hash is stored, the hash data is not(!) cloned.
1115 *
1116 * Also, the local SPI is returned. In case of a retransmit this is already
1117 * stored together with the hash, otherwise it is newly allocated and should
1118 * be used to create the IKE_SA.
1119 *
1120 * @returns ALREADY_DONE if the message with the given hash has been seen before
1121 * NOT_FOUND if the message hash was not found
1122 * FAILED if the SPI allocation failed
1123 */
1124 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1125 chunk_t init_hash, uint64_t *our_spi)
1126 {
1127 table_item_t *item;
1128 u_int row, segment;
1129 mutex_t *mutex;
1130 init_hash_t *init;
1131 uint64_t spi;
1132
1133 row = chunk_hash(init_hash) & this->table_mask;
1134 segment = row & this->segment_mask;
1135 mutex = this->init_hashes_segments[segment].mutex;
1136 mutex->lock(mutex);
1137 item = this->init_hashes_table[row];
1138 while (item)
1139 {
1140 init_hash_t *current = item->value;
1141
1142 if (chunk_equals(init_hash, current->hash))
1143 {
1144 *our_spi = current->our_spi;
1145 mutex->unlock(mutex);
1146 return ALREADY_DONE;
1147 }
1148 item = item->next;
1149 }
1150
1151 spi = get_spi(this);
1152 if (!spi)
1153 {
1154 return FAILED;
1155 }
1156
1157 INIT(init,
1158 .hash = {
1159 .len = init_hash.len,
1160 .ptr = init_hash.ptr,
1161 },
1162 .our_spi = spi,
1163 );
1164 INIT(item,
1165 .value = init,
1166 .next = this->init_hashes_table[row],
1167 );
1168 this->init_hashes_table[row] = item;
1169 *our_spi = init->our_spi;
1170 mutex->unlock(mutex);
1171 return NOT_FOUND;
1172 }
1173
1174 /**
1175 * Remove the hash of an initial IKE message from the cache.
1176 */
/**
 * Remove the hash of an initial IKE message from the cache.
 *
 * Frees only the init_hash_t record, not the hash data it references.
 */
static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	mutex_t *mutex;

	row = chunk_hash(init_hash) & this->table_mask;
	segment = row & this->segment_mask;
	mutex = this->init_hashes_segments[segment].mutex;
	mutex->lock(mutex);
	item = this->init_hashes_table[row];
	while (item)
	{
		init_hash_t *current = item->value;

		if (chunk_equals(init_hash, current->hash))
		{	/* unlink the item from the bucket's overflow list */
			if (prev)
			{
				prev->next = item->next;
			}
			else
			{
				this->init_hashes_table[row] = item->next;
			}
			free(current);
			free(item);
			break;
		}
		prev = item;
		item = item->next;
	}
	mutex->unlock(mutex);
}
1211
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout %N SA with SPIs %.16"PRIx64"_i %.16"PRIx64"_r",
		 ike_version_names, ike_sa_id->get_ike_version(ike_sa_id),
		 be64toh(ike_sa_id->get_initiator_spi(ike_sa_id)),
		 be64toh(ike_sa_id->get_responder_spi(ike_sa_id)));

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		/* block until no other thread has the SA checked out */
		if (wait_for_entry(this, entry, segment))
		{	/* mark the SA as checked out by the current thread */
			entry->checked_out = thread_current();
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	/* associate the SA (or NULL) with this thread on the bus for logging */
	charon->bus->set_sa(charon->bus, ike_sa);

	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1243
METHOD(ike_sa_manager_t, create_new, ike_sa_t*,
	private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
{
	ike_sa_id_t *ike_sa_id;
	ike_sa_t *ike_sa;
	uint8_t ike_version;
	uint64_t spi;

	ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;

	spi = get_spi(this);
	if (!spi)
	{
		DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
		return NULL;
	}

	/* our SPI goes in the initiator or responder slot, depending on role */
	if (initiator)
	{
		ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
	}
	else
	{
		ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
	}
	ike_sa = ike_sa_create(ike_sa_id, initiator, version);
	ike_sa_id->destroy(ike_sa_id);

	if (ike_sa)
	{
		DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
			 ike_sa->get_unique_id(ike_sa));
	}
	return ike_sa;
}
1279
1280 /**
1281 * Get the message ID or message hash to detect early retransmissions
1282 */
1283 static uint32_t get_message_id_or_hash(message_t *message)
1284 {
1285 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION)
1286 {
1287 /* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1288 * Mode, where all three messages use the same message ID */
1289 if (message->get_message_id(message) == 0 ||
1290 message->get_exchange_type(message) == QUICK_MODE)
1291 {
1292 return chunk_hash(message->get_packet_data(message));
1293 }
1294 }
1295 return message->get_message_id(message);
1296 }
1297
/**
 * Check out the IKE_SA a received message belongs to.  For initial
 * IKE_SA_INIT/Phase 1 requests a new IKE_SA is created; a hash over the
 * message data is tracked to detect retransmissions of such messages.
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout %N SA by message with SPIs %.16"PRIx64"_i "
		 "%.16"PRIx64"_r", ike_version_names, id->get_ike_version(id),
		 be64toh(id->get_initiator_spi(id)),
		 be64toh(id->get_responder_spi(id)));

	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		/* no responder SPI assigned yet: check whether this is an initial
		 * request that should trigger the creation of a new IKE_SA */
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		uint64_t our_spi;
		chunk_t hash;

		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			goto out;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						/* the entry adopts the cloned ID and the init hash */
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;
						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						segment = put_entry(this, entry);
						entry->checked_out = thread_current();
						unlock_single_segment(this, segment);

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));
						goto out;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit: forget the init hash again */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				goto out;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				goto out;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = thread_current();
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{	/* apply the responder SPI we learned above */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);

out:
	charon->bus->set_sa(charon->bus, ike_sa);
	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1463
/**
 * Data used to track checkouts by config, so concurrent checkouts for the
 * same config are serialized and don't create duplicate IKE_SAs.
 */
typedef struct {
	/** The peer config for which an IKE_SA is being checked out (owns a ref). */
	peer_cfg_t *cfg;
	/** Number of threads checking out SAs for the same config. */
	int threads;
	/** A thread is currently creating/finding an SA for this config;
	 * other threads wait on the config condvar until it is done. */
	bool working;
} config_entry_t;
1475
1476 METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
1477 private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
1478 {
1479 enumerator_t *enumerator;
1480 entry_t *entry;
1481 ike_sa_t *ike_sa = NULL;
1482 peer_cfg_t *current_peer;
1483 ike_cfg_t *current_ike;
1484 config_entry_t *config_entry, *found = NULL;
1485 u_int segment;
1486 int i;
1487
1488 DBG2(DBG_MGR, "checkout IKE_SA by config");
1489
1490 if (!this->reuse_ikesa && peer_cfg->get_ike_version(peer_cfg) != IKEV1)
1491 { /* IKE_SA reuse disabled by config (not possible for IKEv1) */
1492 ike_sa = create_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1493 ike_sa->set_peer_cfg(ike_sa, peer_cfg);
1494
1495 segment = create_and_put_entry(this, ike_sa, &entry);
1496 entry->checked_out = thread_current();
1497 unlock_single_segment(this, segment);
1498 charon->bus->set_sa(charon->bus, ike_sa);
1499 goto out;
1500 }
1501
1502 this->config_mutex->lock(this->config_mutex);
1503 for (i = 0; i < array_count(this->config_checkouts); i++)
1504 {
1505 array_get(this->config_checkouts, i, &config_entry);
1506 if (config_entry->cfg->equals(config_entry->cfg, peer_cfg))
1507 {
1508 current_ike = config_entry->cfg->get_ike_cfg(config_entry->cfg);
1509 if (current_ike->equals(current_ike,
1510 peer_cfg->get_ike_cfg(peer_cfg)))
1511 {
1512 found = config_entry;
1513 break;
1514 }
1515 }
1516 }
1517 if (!found)
1518 {
1519 INIT(found,
1520 .cfg = peer_cfg->get_ref(peer_cfg),
1521 );
1522 array_insert_create(&this->config_checkouts, ARRAY_TAIL, found);
1523 }
1524 found->threads++;
1525 while (found->working)
1526 {
1527 this->config_condvar->wait(this->config_condvar, this->config_mutex);
1528 }
1529 found->working = TRUE;
1530 this->config_mutex->unlock(this->config_mutex);
1531
1532 enumerator = create_table_enumerator(this);
1533 while (enumerator->enumerate(enumerator, &entry, &segment))
1534 {
1535 if (!wait_for_entry(this, entry, segment))
1536 {
1537 continue;
1538 }
1539 if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING ||
1540 entry->ike_sa->get_state(entry->ike_sa) == IKE_REKEYED)
1541 { /* skip IKE_SAs which are not usable, wake other waiting threads */
1542 entry->condvar->signal(entry->condvar);
1543 continue;
1544 }
1545
1546 current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
1547 if (current_peer && current_peer->equals(current_peer, peer_cfg))
1548 {
1549 current_ike = current_peer->get_ike_cfg(current_peer);
1550 if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
1551 {
1552 entry->checked_out = thread_current();
1553 ike_sa = entry->ike_sa;
1554 DBG2(DBG_MGR, "found existing IKE_SA %u with config '%s'",
1555 ike_sa->get_unique_id(ike_sa),
1556 current_peer->get_name(current_peer));
1557 break;
1558 }
1559 }
1560 /* other threads might be waiting for this entry */
1561 entry->condvar->signal(entry->condvar);
1562 }
1563 enumerator->destroy(enumerator);
1564
1565 if (!ike_sa)
1566 {
1567 ike_sa = create_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1568 ike_sa->set_peer_cfg(ike_sa, peer_cfg);
1569
1570 segment = create_and_put_entry(this, ike_sa, &entry);
1571 entry->checked_out = thread_current();
1572 unlock_single_segment(this, segment);
1573 }
1574 charon->bus->set_sa(charon->bus, ike_sa);
1575
1576 this->config_mutex->lock(this->config_mutex);
1577 found->working = FALSE;
1578 found->threads--;
1579 if (!found->threads)
1580 {
1581 for (i = 0; i < array_count(this->config_checkouts); i++)
1582 {
1583 array_get(this->config_checkouts, i, &config_entry);
1584 if (config_entry == found)
1585 {
1586 array_remove(this->config_checkouts, i, NULL);
1587 found->cfg->destroy(found->cfg);
1588 free(found);
1589 break;
1590 }
1591 }
1592 }
1593 this->config_condvar->signal(this->config_condvar);
1594 this->config_mutex->unlock(this->config_mutex);
1595
1596 out:
1597 if (!ike_sa)
1598 {
1599 DBG2(DBG_MGR, "IKE_SA checkout not successful");
1600 }
1601 return ike_sa;
1602 }
1603
1604 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1605 private_ike_sa_manager_t *this, uint32_t id)
1606 {
1607 enumerator_t *enumerator;
1608 entry_t *entry;
1609 ike_sa_t *ike_sa = NULL;
1610 u_int segment;
1611
1612 DBG2(DBG_MGR, "checkout IKE_SA by unique ID %u", id);
1613
1614 enumerator = create_table_enumerator(this);
1615 while (enumerator->enumerate(enumerator, &entry, &segment))
1616 {
1617 if (wait_for_entry(this, entry, segment))
1618 {
1619 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1620 {
1621 ike_sa = entry->ike_sa;
1622 entry->checked_out = thread_current();
1623 break;
1624 }
1625 /* other threads might be waiting for this entry */
1626 entry->condvar->signal(entry->condvar);
1627 }
1628 }
1629 enumerator->destroy(enumerator);
1630
1631 if (ike_sa)
1632 {
1633 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1634 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1635 }
1636 else
1637 {
1638 DBG2(DBG_MGR, "IKE_SA checkout not successful");
1639 }
1640 charon->bus->set_sa(charon->bus, ike_sa);
1641 return ike_sa;
1642 }
1643
1644 METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1645 private_ike_sa_manager_t *this, char *name, bool child)
1646 {
1647 enumerator_t *enumerator, *children;
1648 entry_t *entry;
1649 ike_sa_t *ike_sa = NULL;
1650 child_sa_t *child_sa;
1651 u_int segment;
1652
1653 DBG2(DBG_MGR, "checkout IKE_SA by%s name '%s'", child ? " child" : "", name);
1654
1655 enumerator = create_table_enumerator(this);
1656 while (enumerator->enumerate(enumerator, &entry, &segment))
1657 {
1658 if (wait_for_entry(this, entry, segment))
1659 {
1660 /* look for a child with such a policy name ... */
1661 if (child)
1662 {
1663 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1664 while (children->enumerate(children, (void**)&child_sa))
1665 {
1666 if (streq(child_sa->get_name(child_sa), name))
1667 {
1668 ike_sa = entry->ike_sa;
1669 break;
1670 }
1671 }
1672 children->destroy(children);
1673 }
1674 else /* ... or for a IKE_SA with such a connection name */
1675 {
1676 if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1677 {
1678 ike_sa = entry->ike_sa;
1679 }
1680 }
1681 /* got one, return */
1682 if (ike_sa)
1683 {
1684 entry->checked_out = thread_current();
1685 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1686 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1687 break;
1688 }
1689 /* other threads might be waiting for this entry */
1690 entry->condvar->signal(entry->condvar);
1691 }
1692 }
1693 enumerator->destroy(enumerator);
1694
1695 charon->bus->set_sa(charon->bus, ike_sa);
1696
1697 if (!ike_sa)
1698 {
1699 DBG2(DBG_MGR, "IKE_SA checkout not successful");
1700 }
1701 return ike_sa;
1702 }
1703
/**
 * Assign a new initiator SPI to an IKE_SA that is in state IKE_CONNECTING,
 * owned by this thread (checked out via the bus) and registered as
 * initiator.  The new SPI is masked so the entry stays in the same hash
 * table row/segment (see comment below).
 */
METHOD(ike_sa_manager_t, new_initiator_spi, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	ike_sa_state_t state;
	ike_sa_id_t *ike_sa_id;
	entry_t *entry;
	u_int segment;
	uint64_t new_spi, spi;

	state = ike_sa->get_state(ike_sa);
	if (state != IKE_CONNECTING)
	{
		DBG1(DBG_MGR, "unable to change initiator SPI for IKE_SA in state "
			 "%N", ike_sa_state_names, state);
		return FALSE;
	}

	ike_sa_id = ike_sa->get_id(ike_sa);
	if (!ike_sa_id->is_initiator(ike_sa_id))
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA as responder");
		return FALSE;
	}

	/* only the thread that checked the SA out may change its SPI */
	if (ike_sa != charon->bus->get_sa(charon->bus))
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA not checked "
			 "out by current thread");
		return FALSE;
	}

	new_spi = get_spi(this);
	if (!new_spi)
	{
		DBG1(DBG_MGR, "unable to allocate new initiator SPI for IKE_SA");
		return FALSE;
	}

	/* NOTE: on success the segment stays locked until the end of the method */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, no need for a new SPI */
			DBG2(DBG_MGR, "ignored change of initiator SPI during shutdown");
			unlock_single_segment(this, segment);
			return FALSE;
		}
	}
	else
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA, not found");
		return FALSE;
	}

	/* the hashtable row and segment are determined by the local SPI as
	 * initiator, so if we change it the row and segment derived from it might
	 * change as well. This could be a problem for threads waiting for the
	 * entry (in particular those enumerating entries to check them out by
	 * unique ID or name). In order to avoid having to drive them out and thus
	 * preventing them from checking out the entry (even though the ID or name
	 * will not change and enumerating it is also fine), we mask the new SPI and
	 * merge it with the old SPI so the entry ends up in the same row/segment.
	 * Since SPIs are 64-bit and the number of rows/segments is usually
	 * relatively low this should not be a problem. */
	spi = ike_sa_id->get_initiator_spi(ike_sa_id);
	new_spi = (spi & (uint64_t)this->table_mask) |
			  (new_spi & ~(uint64_t)this->table_mask);

	DBG2(DBG_MGR, "change initiator SPI of IKE_SA %s[%u] from %.16"PRIx64" to "
		 "%.16"PRIx64, ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa),
		 be64toh(spi), be64toh(new_spi));

	ike_sa_id->set_initiator_spi(ike_sa_id, new_spi);
	entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa_id);

	entry->condvar->signal(entry->condvar);
	unlock_single_segment(this, segment);
	return TRUE;
}
1783
1784 CALLBACK(enumerator_filter_wait, bool,
1785 private_ike_sa_manager_t *this, enumerator_t *orig, va_list args)
1786 {
1787 entry_t *entry;
1788 u_int segment;
1789 ike_sa_t **out;
1790
1791 VA_ARGS_VGET(args, out);
1792
1793 while (orig->enumerate(orig, &entry, &segment))
1794 {
1795 if (wait_for_entry(this, entry, segment))
1796 {
1797 *out = entry->ike_sa;
1798 charon->bus->set_sa(charon->bus, *out);
1799 return TRUE;
1800 }
1801 }
1802 return FALSE;
1803 }
1804
1805 CALLBACK(enumerator_filter_skip, bool,
1806 private_ike_sa_manager_t *this, enumerator_t *orig, va_list args)
1807 {
1808 entry_t *entry;
1809 u_int segment;
1810 ike_sa_t **out;
1811
1812 VA_ARGS_VGET(args, out);
1813
1814 while (orig->enumerate(orig, &entry, &segment))
1815 {
1816 if (!entry->driveout_new_threads &&
1817 !entry->driveout_waiting_threads &&
1818 !entry->checked_out)
1819 {
1820 *out = entry->ike_sa;
1821 charon->bus->set_sa(charon->bus, *out);
1822 return TRUE;
1823 }
1824 }
1825 return FALSE;
1826 }
1827
1828 CALLBACK(reset_sa, void,
1829 void *data)
1830 {
1831 charon->bus->set_sa(charon->bus, NULL);
1832 }
1833
1834 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1835 private_ike_sa_manager_t* this, bool wait)
1836 {
1837 return enumerator_create_filter(create_table_enumerator(this),
1838 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1839 this, reset_sa);
1840 }
1841
/**
 * Check an IKE_SA back into the manager, updating the stored ID, the
 * half-open table and the connected-peers table as required, and waking
 * threads waiting for the entry.
 */
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = NULL;
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* no entry found: this SA is checked in for the first time */
		segment = create_and_put_entry(this, ike_sa, &entry);
	}
	DBG2(DBG_MGR, "checkin of IKE_SA successful");

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_uniqueness() will try to check out this SA */
				entry->checked_out = thread_current();
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = NULL;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		/* register the identities for later duplicate/uniqueness checks */
		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1952
/**
 * Check in and destroy an IKE_SA: drives out any threads waiting for the
 * entry, removes it from all tables and destroys it.
 */
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored checkin and destroy of IKE_SA during shutdown");
			entry->checked_out = NULL;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		/* take the entry out of the main table before releasing the segment */
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up the auxiliary tables referencing this entry */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "checkin and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to checkin and delete nonexistent IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
2021
2022 /**
2023 * Cleanup function for create_id_enumerator
2024 */
2025 static void id_enumerator_cleanup(linked_list_t *ids)
2026 {
2027 ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
2028 }
2029
2030 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
2031 private_ike_sa_manager_t *this, identification_t *me,
2032 identification_t *other, int family)
2033 {
2034 table_item_t *item;
2035 u_int row, segment;
2036 rwlock_t *lock;
2037 linked_list_t *ids = NULL;
2038
2039 row = chunk_hash_inc(other->get_encoding(other),
2040 chunk_hash(me->get_encoding(me))) & this->table_mask;
2041 segment = row & this->segment_mask;
2042
2043 lock = this->connected_peers_segments[segment].lock;
2044 lock->read_lock(lock);
2045 item = this->connected_peers_table[row];
2046 while (item)
2047 {
2048 connected_peers_t *current = item->value;
2049
2050 if (connected_peers_match(current, me, other, family))
2051 {
2052 ids = current->sas->clone_offset(current->sas,
2053 offsetof(ike_sa_id_t, clone));
2054 break;
2055 }
2056 item = item->next;
2057 }
2058 lock->unlock(lock);
2059
2060 if (!ids)
2061 {
2062 return enumerator_create_empty();
2063 }
2064 return enumerator_create_cleaner(ids->create_enumerator(ids),
2065 (void*)id_enumerator_cleanup, ids);
2066 }
2067
2068 /**
2069 * Move all CHILD_SAs and virtual IPs from old to new
2070 */
2071 static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
2072 {
2073 enumerator_t *enumerator;
2074 child_sa_t *child_sa;
2075 host_t *vip;
2076 int chcount = 0, vipcount = 0;
2077
2078 charon->bus->children_migrate(charon->bus, new->get_id(new),
2079 new->get_unique_id(new));
2080 enumerator = old->create_child_sa_enumerator(old);
2081 while (enumerator->enumerate(enumerator, &child_sa))
2082 {
2083 old->remove_child_sa(old, enumerator);
2084 new->add_child_sa(new, child_sa);
2085 chcount++;
2086 }
2087 enumerator->destroy(enumerator);
2088
2089 new->adopt_child_tasks(new, old);
2090
2091 enumerator = old->create_virtual_ip_enumerator(old, FALSE);
2092 while (enumerator->enumerate(enumerator, &vip))
2093 {
2094 new->add_virtual_ip(new, FALSE, vip);
2095 vipcount++;
2096 }
2097 enumerator->destroy(enumerator);
2098 /* this does not release the addresses, which is good, but it does trigger
2099 * an assign_vips(FALSE) event... */
2100 old->clear_virtual_ips(old, FALSE);
2101 /* ...trigger the analogous event on the new SA */
2102 charon->bus->set_sa(charon->bus, new);
2103 charon->bus->assign_vips(charon->bus, new, TRUE);
2104 charon->bus->children_migrate(charon->bus, NULL, 0);
2105 charon->bus->set_sa(charon->bus, old);
2106
2107 if (chcount || vipcount)
2108 {
2109 DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
2110 "children and %d virtual IPs", chcount, vipcount);
2111 }
2112 }
2113
2114 /**
2115 * Delete an existing IKE_SA due to a unique replace policy
2116 */
2117 static status_t enforce_replace(private_ike_sa_manager_t *this,
2118 ike_sa_t *duplicate, ike_sa_t *new,
2119 identification_t *other, host_t *host)
2120 {
2121 charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);
2122
2123 if (host->equals(host, duplicate->get_other_host(duplicate)))
2124 {
2125 /* looks like a reauthentication attempt */
2126 if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
2127 new->get_version(new) == IKEV1)
2128 {
2129 /* IKEv1 implicitly takes over children, IKEv2 recreates them
2130 * explicitly. */
2131 adopt_children_and_vips(duplicate, new);
2132 }
2133 /* For IKEv1 we have to delay the delete for the old IKE_SA. Some
2134 * peers need to complete the new SA first, otherwise the quick modes
2135 * might get lost. For IKEv2 we do the same, as we want overlapping
2136 * CHILD_SAs to keep connectivity up. */
2137 lib->scheduler->schedule_job(lib->scheduler, (job_t*)
2138 delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
2139 DBG1(DBG_IKE, "schedule delete of duplicate IKE_SA for peer '%Y' due "
2140 "to uniqueness policy and suspected reauthentication", other);
2141 return SUCCESS;
2142 }
2143 DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
2144 "uniqueness policy", other);
2145 return duplicate->delete(duplicate, FALSE);
2146 }
2147
/**
 * Enforce the peer config's uniqueness policy against existing IKE_SAs
 * with the same identities.  Returns TRUE if the new SA should be
 * cancelled (UNIQUE_KEEP with an existing SA at a different address).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{	/* uniqueness not enforced for this config */
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	/* iterate over all IKE_SAs registered with the same identity pair */
	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{	/* e.g. due to an INITIAL_CONTACT notify: destroy unconditionally */
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
										duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
2235
2236 METHOD(ike_sa_manager_t, has_contact, bool,
2237 private_ike_sa_manager_t *this, identification_t *me,
2238 identification_t *other, int family)
2239 {
2240 table_item_t *item;
2241 u_int row, segment;
2242 rwlock_t *lock;
2243 bool found = FALSE;
2244
2245 row = chunk_hash_inc(other->get_encoding(other),
2246 chunk_hash(me->get_encoding(me))) & this->table_mask;
2247 segment = row & this->segment_mask;
2248 lock = this->connected_peers_segments[segment].lock;
2249 lock->read_lock(lock);
2250 item = this->connected_peers_table[row];
2251 while (item)
2252 {
2253 if (connected_peers_match(item->value, me, other, family))
2254 {
2255 found = TRUE;
2256 break;
2257 }
2258 item = item->next;
2259 }
2260 lock->unlock(lock);
2261
2262 return found;
2263 }
2264
/**
 * Return the total number of currently managed IKE_SAs, tracked via an
 * atomic reference counter.
 */
METHOD(ike_sa_manager_t, get_count, u_int,
	private_ike_sa_manager_t *this)
{
	return (u_int)ref_cur(&this->total_sa_count);
}
2270
2271 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
2272 private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
2273 {
2274 table_item_t *item;
2275 u_int row, segment;
2276 rwlock_t *lock;
2277 chunk_t addr;
2278 u_int count = 0;
2279
2280 if (ip)
2281 {
2282 addr = ip->get_address(ip);
2283 row = chunk_hash(addr) & this->table_mask;
2284 segment = row & this->segment_mask;
2285 lock = this->half_open_segments[segment].lock;
2286 lock->read_lock(lock);
2287 item = this->half_open_table[row];
2288 while (item)
2289 {
2290 half_open_t *half_open = item->value;
2291
2292 if (chunk_equals(addr, half_open->other))
2293 {
2294 count = responder_only ? half_open->count_responder
2295 : half_open->count;
2296 break;
2297 }
2298 item = item->next;
2299 }
2300 lock->unlock(lock);
2301 }
2302 else
2303 {
2304 count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
2305 : (u_int)ref_cur(&this->half_open_count);
2306 }
2307 return count;
2308 }
2309
METHOD(ike_sa_manager_t, set_spi_cb, void,
	private_ike_sa_manager_t *this, spi_cb_t callback, void *data)
{
	/* install (or clear) the external SPI allocation callback; spi_lock
	 * guards spi_cb against concurrent readers */
	this->spi_lock->write_lock(this->spi_lock);
	this->spi_cb.cb = callback;
	this->spi_cb.data = data;
	this->spi_lock->unlock(this->spi_lock);
}
2318
/**
 * Destroy all remaining entries in the IKE_SA table.
 *
 * The caller must hold all segment locks (both call sites, flush() and
 * destroy(), invoke this between lock_all_segments()/unlock_all_segments()).
 * For each entry, the auxiliary lookup state (half-open tracking, connected
 * peers, init hash) is removed before the entry itself is destroyed.
 */
static void destroy_all_entries(private_ike_sa_manager_t *this)
{
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* publish the IKE_SA on the bus so listeners see it while it is
		 * being torn down */
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		/* unlink from the table before destroying, so the enumerator
		 * stays consistent */
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	/* reset the thread's current IKE_SA */
	charon->bus->set_sa(charon->bus, NULL);
}
2350
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	/* all segment locks are held for the whole shutdown sequence; the
	 * condvar waits below temporarily release the per-segment mutex */
	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		/* delete() with force=TRUE, as no peer interaction is expected
		 * during shutdown */
		entry->ike_sa->delete(entry->ike_sa, TRUE);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	destroy_all_entries(this);
	unlock_all_segments(this);

	/* disable SPI generation: drop the RNG and clear the callback so no
	 * new SPIs can be allocated after the flush */
	this->spi_lock->write_lock(this->spi_lock);
	DESTROY_IF(this->rng);
	this->rng = NULL;
	this->spi_cb.cb = NULL;
	this->spi_cb.data = NULL;
	this->spi_lock->unlock(this->spi_lock);
}
2406
METHOD(ike_sa_manager_t, destroy, void,
	private_ike_sa_manager_t *this)
{
	u_int i;

	/* in case new SAs were checked in after flush() was called */
	lock_all_segments(this);
	destroy_all_entries(this);
	unlock_all_segments(this);

	/* release the four hash tables and their per-segment locks */
	free(this->ike_sa_table);
	free(this->half_open_table);
	free(this->connected_peers_table);
	free(this->init_hashes_table);
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex->destroy(this->segments[i].mutex);
		this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
		this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
		this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
	}
	free(this->segments);
	free(this->half_open_segments);
	free(this->connected_peers_segments);
	free(this->init_hashes_segments);

	array_destroy(this->config_checkouts);
	this->config_mutex->destroy(this->config_mutex);
	this->config_condvar->destroy(this->config_condvar);

	/* rng and spi_cb were already cleared in flush() */
	this->spi_lock->destroy(this->spi_lock);
	free(this);
}
2440
2441 /**
2442 * This function returns the next-highest power of two for the given number.
2443 * The algorithm works by setting all bits on the right-hand side of the most
2444 * significant 1 to 1 and then increments the whole number so it rolls over
2445 * to the nearest power of two. Note: returns 0 for n == 0
2446 */
2447 static u_int get_nearest_powerof2(u_int n)
2448 {
2449 u_int i;
2450
2451 --n;
2452 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2453 {
2454 n |= n >> i;
2455 }
2456 return ++n;
2457 }
2458
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	char *spi_val;
	u_int i;

	INIT(this,
		.public = {
			.create_new = _create_new,
			.checkout = _checkout,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.new_initiator_spi = _new_initiator_spi,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.set_spi_cb = _set_spi_cb,
			.destroy = _destroy,
		},
	);

	/* a weak RNG is sufficient here, SPIs only need to be unique, not
	 * unpredictable */
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		free(this);
		return NULL;
	}
	this->spi_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	/* optional mask/label applied to generated SPIs (e.g. to tag SPIs of
	 * a particular daemon instance) */
	spi_val = lib->settings->get_str(lib->settings, "%s.spi_mask", NULL,
									 lib->ns);
	this->spi_mask = settings_value_as_uint64(spi_val, 0);
	spi_val = lib->settings->get_str(lib->settings, "%s.spi_label", NULL,
									 lib->ns);
	this->spi_label = settings_value_as_uint64(spi_val, 0);
	if (this->spi_mask || this->spi_label)
	{
		DBG1(DBG_IKE, "using SPI label 0x%.16"PRIx64" and mask 0x%.16"PRIx64,
			 this->spi_label, this->spi_mask);
		/* the allocated SPI is assumed to be in network order */
		this->spi_mask = htobe64(this->spi_mask);
		this->spi_label = htobe64(this->spi_label);
	}

	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* table size is rounded up to a power of two so a bitmask can replace
	 * the modulo when mapping hashes to rows */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_size",
											DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	/* segments partition the table for finer-grained locking; never more
	 * segments than rows */
	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
										lib->settings, "%s.ikesa_table_segments",
										DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	}

	this->config_mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	this->config_condvar = condvar_create(CONDVAR_TYPE_DEFAULT);

	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}