Merge branch 'android-fixes'
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 *
5 * Copyright (C) 2008-2018 Tobias Brunner
6 * Copyright (C) 2005 Jan Hutter
7 * HSR Hochschule fuer Technik Rapperswil
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 */
19
20 #include <string.h>
21 #include <inttypes.h>
22
23 #include "ike_sa_manager.h"
24
25 #include <daemon.h>
26 #include <sa/ike_sa_id.h>
27 #include <bus/bus.h>
28 #include <threading/thread.h>
29 #include <threading/condvar.h>
30 #include <threading/mutex.h>
31 #include <threading/rwlock.h>
32 #include <collections/linked_list.h>
33 #include <crypto/hashers/hasher.h>
34 #include <processing/jobs/delete_ike_sa_job.h>
35
36 /* the default size of the hash table (MUST be a power of 2) */
37 #define DEFAULT_HASHTABLE_SIZE 1
38
39 /* the maximum size of the hash table (MUST be a power of 2) */
40 #define MAX_HASHTABLE_SIZE (1 << 30)
41
42 /* the default number of segments (MUST be a power of 2) */
43 #define DEFAULT_SEGMENT_COUNT 1
44
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Thread by which this IKE_SA is currently checked out, if any
	 */
	thread_t *checked_out;

	/**
	 * Does this SA drive out new threads?
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads?
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 */
	uint32_t processing;
};
119
120 /**
121 * Implementation of entry_t.destroy.
122 */
123 static status_t entry_destroy(entry_t *this)
124 {
125 /* also destroy IKE SA */
126 this->ike_sa->destroy(this->ike_sa);
127 this->ike_sa_id->destroy(this->ike_sa_id);
128 chunk_free(&this->init_hash);
129 DESTROY_IF(this->other);
130 DESTROY_IF(this->my_id);
131 DESTROY_IF(this->other_id);
132 this->condvar->destroy(this->condvar);
133 free(this);
134 return SUCCESS;
135 }
136
137 /**
138 * Creates a new entry for the ike_sa_t list.
139 */
140 static entry_t *entry_create()
141 {
142 entry_t *this;
143
144 INIT(this,
145 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
146 .processing = -1,
147 );
148
149 return this;
150 }
151
152 /**
153 * Function that matches entry_t objects by ike_sa_id_t.
154 */
155 static bool entry_match_by_id(entry_t *entry, void *arg)
156 {
157 ike_sa_id_t *id = arg;
158
159 if (id->equals(id, entry->ike_sa_id))
160 {
161 return TRUE;
162 }
163 if ((id->get_responder_spi(id) == 0 ||
164 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
165 (id->get_ike_version(id) == IKEV1_MAJOR_VERSION ||
166 id->is_initiator(id) == entry->ike_sa_id->is_initiator(entry->ike_sa_id)) &&
167 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
168 {
169 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
170 return TRUE;
171 }
172 return FALSE;
173 }
174
175 /**
176 * Function that matches entry_t objects by ike_sa_t pointers.
177 */
178 static bool entry_match_by_sa(entry_t *entry, void *ike_sa)
179 {
180 return entry->ike_sa == ike_sa;
181 }
182
183 /**
184 * Hash function for ike_sa_id_t objects.
185 */
186 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
187 {
188 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
189 * locally unique, so we use our randomly allocated SPI whether we are
190 * initiator or responder to ensure a good distribution. The latter is not
191 * possible for IKEv1 as we don't know whether we are original initiator or
192 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
193 * SPIs (Cookies) to be allocated near random (we allocate them randomly
194 * anyway) it seems safe to always use the initiator SPI. */
195 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
196 ike_sa_id->is_initiator(ike_sa_id))
197 {
198 return ike_sa_id->get_initiator_spi(ike_sa_id);
199 }
200 return ike_sa_id->get_responder_spi(ike_sa_id);
201 }
202
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer.
 */
struct half_open_t {
	/** chunk of remote host address */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;

	/** the number of half-open IKE_SAs we responded to with that host */
	u_int count_responder;
};
218
219 /**
220 * Destroys a half_open_t object.
221 */
222 static void half_open_destroy(half_open_t *this)
223 {
224 chunk_free(&this->other);
225 free(this);
226 }
227
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct to manage the list of IKE_SAs between a pair of identities.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
243
244 static void connected_peers_destroy(connected_peers_t *this)
245 {
246 this->my_id->destroy(this->my_id);
247 this->other_id->destroy(this->other_id);
248 this->sas->destroy(this->sas);
249 free(this);
250 }
251
252 /**
253 * Function that matches connected_peers_t objects by the given ids.
254 */
255 static inline bool connected_peers_match(connected_peers_t *connected_peers,
256 identification_t *my_id, identification_t *other_id,
257 int family)
258 {
259 return my_id->equals(my_id, connected_peers->my_id) &&
260 other_id->equals(other_id, connected_peers->other_id) &&
261 (!family || family == connected_peers->family);
262 }
263
typedef struct init_hash_t init_hash_t;

/**
 * Record of an initial IKE message already seen, used to detect retransmits.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	uint64_t our_spi;
};
273
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table.
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;
};

typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};

typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list */
	table_item_t *next;
};
311
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table.
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows.
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table, each with its own mutex.
	 */
	segment_t *segments;

	/**
	 * The number of segments.
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment.
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects, keyed by remote host address.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs.
	 */
	refcount_t half_open_count;

	/**
	 * Total number of half-open IKE_SAs as responder.
	 */
	refcount_t half_open_count_responder;

	/**
	 * Total number of IKE_SAs registered with IKE_SA manager.
	 */
	refcount_t total_sa_count;

	/**
	 * Hash table with connected_peers_t objects, keyed by both identities.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects to detect retransmitted initial
	 * IKE messages.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * Registered callback for IKE SPIs; if set, it is used instead of the RNG
	 */
	struct {
		spi_cb_t cb;
		void *data;
	} spi_cb;

	/**
	 * Lock to access the RNG instance and the callback
	 */
	rwlock_t *spi_lock;

	/**
	 * Mask applied to local SPIs before mixing in the label
	 */
	uint64_t spi_mask;

	/**
	 * Label applied to local SPIs
	 */
	uint64_t spi_label;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, if any (0 means no limit)
	 */
	u_int ikesa_limit;
};
436
437 /**
438 * Acquire a lock to access the segment of the table row with the given index.
439 * It also works with the segment index directly.
440 */
441 static inline void lock_single_segment(private_ike_sa_manager_t *this,
442 u_int index)
443 {
444 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
445 lock->lock(lock);
446 }
447
448 /**
449 * Release the lock required to access the segment of the table row with the given index.
450 * It also works with the segment index directly.
451 */
452 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
453 u_int index)
454 {
455 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
456 lock->unlock(lock);
457 }
458
459 /**
460 * Lock all segments
461 */
462 static void lock_all_segments(private_ike_sa_manager_t *this)
463 {
464 u_int i;
465
466 for (i = 0; i < this->segment_count; i++)
467 {
468 this->segments[i].mutex->lock(this->segments[i].mutex);
469 }
470 }
471
472 /**
473 * Unlock all segments
474 */
475 static void unlock_all_segments(private_ike_sa_manager_t *this)
476 {
477 u_int i;
478
479 for (i = 0; i < this->segment_count; i++)
480 {
481 this->segments[i].mutex->unlock(this->segments[i].mutex);
482 }
483 }
484
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry (its condvar is signaled on advance/destroy)
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item (non-NULL implies the segment lock is held)
	 */
	table_item_t *current;

	/**
	 * previous table item (used to unlink items via remove_entry_at)
	 */
	table_item_t *prev;
};
527
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, va_list args)
{
	entry_t **entry;
	u_int *segment;

	VA_ARGS_VGET(args, entry, segment);

	/* on reentry, wake up threads that may wait on the previously
	 * enumerated entry before we move past it */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	/* rows are visited per segment: for segment s the rows s,
	 * s + segment_count, s + 2*segment_count, ... are traversed while
	 * holding that segment's lock only */
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{	/* start of a new row: take the segment lock first */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				/* the segment lock stays held while the caller uses the
				 * entry; it is released on the next enumerate()/destroy() */
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			/* row exhausted, release the lock before advancing */
			unlock_single_segment(this->manager, this->segment);
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
569
570 METHOD(enumerator_t, enumerator_destroy, void,
571 private_enumerator_t *this)
572 {
573 if (this->entry)
574 {
575 this->entry->condvar->signal(this->entry->condvar);
576 }
577 if (this->current)
578 {
579 unlock_single_segment(this->manager, this->segment);
580 }
581 free(this);
582 }
583
584 /**
585 * Creates an enumerator to enumerate the entries in the hash table.
586 */
587 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
588 {
589 private_enumerator_t *enumerator;
590
591 INIT(enumerator,
592 .enumerator = {
593 .enumerate = enumerator_enumerate_default,
594 .venumerate = _enumerate,
595 .destroy = _enumerator_destroy,
596 },
597 .manager = this,
598 );
599 return &enumerator->enumerator;
600 }
601
602 /**
603 * Put an entry into the hash table.
604 * Note: The caller has to unlock the returned segment.
605 */
606 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
607 {
608 table_item_t *current, *item;
609 u_int row, segment;
610
611 INIT(item,
612 .value = entry,
613 );
614
615 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
616 segment = row & this->segment_mask;
617
618 lock_single_segment(this, segment);
619 current = this->ike_sa_table[row];
620 if (current)
621 { /* insert at the front of current bucket */
622 item->next = current;
623 }
624 this->ike_sa_table[row] = item;
625 ref_get(&this->total_sa_count);
626 return segment;
627 }
628
629 /**
630 * Remove an entry from the hash table.
631 * Note: The caller MUST have a lock on the segment of this entry.
632 */
633 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
634 {
635 table_item_t *item, *prev = NULL;
636 u_int row;
637
638 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
639 item = this->ike_sa_table[row];
640 while (item)
641 {
642 if (item->value == entry)
643 {
644 if (prev)
645 {
646 prev->next = item->next;
647 }
648 else
649 {
650 this->ike_sa_table[row] = item->next;
651 }
652 ignore_result(ref_put(&this->total_sa_count));
653 free(item);
654 break;
655 }
656 prev = item;
657 item = item->next;
658 }
659 }
660
/**
 * Remove the entry at the current enumerator position.
 * If the removed item was the first in its row (no previous item), the
 * segment lock is released here, because enumerate() re-locks it when it
 * starts a row with a NULL current item.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		ignore_result(ref_put(&this->manager->total_sa_count));
		/* step back so enumerate() continues with current->next */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
686
687 /**
688 * Find an entry using the provided match function to compare the entries for
689 * equality.
690 */
691 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
692 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
693 bool (*match)(entry_t*,void*), void *param)
694 {
695 table_item_t *item;
696 u_int row, seg;
697
698 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
699 seg = row & this->segment_mask;
700
701 lock_single_segment(this, seg);
702 item = this->ike_sa_table[row];
703 while (item)
704 {
705 if (match(item->value, param))
706 {
707 *entry = item->value;
708 *segment = seg;
709 /* the locked segment has to be unlocked by the caller */
710 return SUCCESS;
711 }
712 item = item->next;
713 }
714 unlock_single_segment(this, seg);
715 return NOT_FOUND;
716 }
717
718 /**
719 * Find an entry by ike_sa_id_t.
720 * Note: On SUCCESS, the caller has to unlock the segment.
721 */
722 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
723 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
724 {
725 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
726 entry_match_by_id, ike_sa_id);
727 }
728
729 /**
730 * Find an entry by IKE_SA pointer.
731 * Note: On SUCCESS, the caller has to unlock the segment.
732 */
733 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
734 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
735 {
736 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
737 entry_match_by_sa, ike_sa);
738 }
739
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 * Called with the entry's segment mutex held; the condvar wait releases and
 * reacquires that mutex.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
769
/**
 * Put a half-open SA into the hash table, keyed by the remote address.
 * Increments both the per-peer and the global half-open counters; the
 * responder counters are additionally incremented for SAs we did not
 * initiate.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	half_open_t *half_open;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	/* look for an existing entry for this peer address */
	item = this->half_open_table[row];
	while (item)
	{
		half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first half-open SA for this address, create a new entry */
		INIT(half_open,
			.other = chunk_clone(addr),
		);
		INIT(item,
			.value = half_open,
			.next = this->half_open_table[row],
		);
		this->half_open_table[row] = item;
	}
	half_open->count++;
	ref_get(&this->half_open_count);
	if (!ike_id->is_initiator(ike_id))
	{
		half_open->count_responder++;
		ref_get(&this->half_open_count_responder);
	}
	this->half_open_segments[segment].count++;
	lock->unlock(lock);
}
821
/**
 * Remove a half-open SA from the hash table, decrementing the counters
 * incremented by put_half_open(). The per-peer record is destroyed once its
 * count drops to zero.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	ike_sa_id_t *ike_id;
	chunk_t addr;

	ike_id = entry->ike_sa_id;
	addr = entry->other->get_address(entry->other);
	row = chunk_hash(addr) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->half_open_segments[segment].lock;
	lock->write_lock(lock);
	item = this->half_open_table[row];
	while (item)
	{
		half_open_t *half_open = item->value;

		if (chunk_equals(addr, half_open->other))
		{
			if (!ike_id->is_initiator(ike_id))
			{
				half_open->count_responder--;
				ignore_result(ref_put(&this->half_open_count_responder));
			}
			ignore_result(ref_put(&this->half_open_count));
			if (--half_open->count == 0)
			{	/* last half-open SA for this peer, unlink and destroy */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->half_open_table[row] = item->next;
				}
				half_open_destroy(half_open);
				free(item);
			}
			this->half_open_segments[segment].count--;
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
873
874 CALLBACK(id_matches, bool,
875 ike_sa_id_t *a, va_list args)
876 {
877 ike_sa_id_t *b;
878
879 VA_ARGS_VGET(args, b);
880 return a->equals(a, b);
881 }
882
/**
 * Put an SA between two peers into the hash table, keyed by both identities.
 * The IKE_SA ID is cloned into the per-peer-pair list; if it is already
 * listed, nothing is changed.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
												 id_matches, NULL, entry->ike_sa_id))
			{	/* this IKE_SA is already registered for this peer pair */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first SA between these two identities, create a new record */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
940
/**
 * Remove an SA between two peers from the hash table. The per-peer-pair
 * record is destroyed once its SA list becomes empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* remove this entry's IKE_SA ID from the list, if present */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no more SAs between these identities, drop the record */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
1004
1005 /**
1006 * Get a random SPI for new IKE_SAs
1007 */
1008 static uint64_t get_spi(private_ike_sa_manager_t *this)
1009 {
1010 uint64_t spi;
1011
1012 this->spi_lock->read_lock(this->spi_lock);
1013 if (this->spi_cb.cb)
1014 {
1015 spi = this->spi_cb.cb(this->spi_cb.data);
1016 }
1017 else if (!this->rng ||
1018 !this->rng->get_bytes(this->rng, sizeof(spi), (uint8_t*)&spi))
1019 {
1020 spi = 0;
1021 }
1022 this->spi_lock->unlock(this->spi_lock);
1023
1024 if (spi)
1025 {
1026 spi = (spi & ~this->spi_mask) | this->spi_label;
1027 }
1028 return spi;
1029 }
1030
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(hasher_t *hasher, message_t *message, chunk_t *hash)
{
	host_t *src;

	if (message->get_first_payload_type(message) == PLV1_FRAGMENT)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		uint16_t port;
		uint64_t spi;

		src = message->get_source(message);
		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!hasher->allocate_hash(hasher, chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return hasher->allocate_hash(hasher, chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!hasher->allocate_hash(hasher, src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	/* in the common case hash the whole packet data */
	return hasher->allocate_hash(hasher, message->get_packet_data(message), hash);
}
1071
1072 /**
1073 * Check if we already have created an IKE_SA based on the initial IKE message
1074 * with the given hash.
1075 * If not the hash is stored, the hash data is not(!) cloned.
1076 *
1077 * Also, the local SPI is returned. In case of a retransmit this is already
1078 * stored together with the hash, otherwise it is newly allocated and should
1079 * be used to create the IKE_SA.
1080 *
1081 * @returns ALREADY_DONE if the message with the given hash has been seen before
1082 * NOT_FOUND if the message hash was not found
1083 * FAILED if the SPI allocation failed
1084 */
1085 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1086 chunk_t init_hash, uint64_t *our_spi)
1087 {
1088 table_item_t *item;
1089 u_int row, segment;
1090 mutex_t *mutex;
1091 init_hash_t *init;
1092 uint64_t spi;
1093
1094 row = chunk_hash(init_hash) & this->table_mask;
1095 segment = row & this->segment_mask;
1096 mutex = this->init_hashes_segments[segment].mutex;
1097 mutex->lock(mutex);
1098 item = this->init_hashes_table[row];
1099 while (item)
1100 {
1101 init_hash_t *current = item->value;
1102
1103 if (chunk_equals(init_hash, current->hash))
1104 {
1105 *our_spi = current->our_spi;
1106 mutex->unlock(mutex);
1107 return ALREADY_DONE;
1108 }
1109 item = item->next;
1110 }
1111
1112 spi = get_spi(this);
1113 if (!spi)
1114 {
1115 return FAILED;
1116 }
1117
1118 INIT(init,
1119 .hash = {
1120 .len = init_hash.len,
1121 .ptr = init_hash.ptr,
1122 },
1123 .our_spi = spi,
1124 );
1125 INIT(item,
1126 .value = init,
1127 .next = this->init_hashes_table[row],
1128 );
1129 this->init_hashes_table[row] = item;
1130 *our_spi = init->our_spi;
1131 mutex->unlock(mutex);
1132 return NOT_FOUND;
1133 }
1134
1135 /**
1136 * Remove the hash of an initial IKE message from the cache.
1137 */
1138 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1139 {
1140 table_item_t *item, *prev = NULL;
1141 u_int row, segment;
1142 mutex_t *mutex;
1143
1144 row = chunk_hash(init_hash) & this->table_mask;
1145 segment = row & this->segment_mask;
1146 mutex = this->init_hashes_segments[segment].mutex;
1147 mutex->lock(mutex);
1148 item = this->init_hashes_table[row];
1149 while (item)
1150 {
1151 init_hash_t *current = item->value;
1152
1153 if (chunk_equals(init_hash, current->hash))
1154 {
1155 if (prev)
1156 {
1157 prev->next = item->next;
1158 }
1159 else
1160 {
1161 this->init_hashes_table[row] = item->next;
1162 }
1163 free(current);
1164 free(item);
1165 break;
1166 }
1167 prev = item;
1168 item = item->next;
1169 }
1170 mutex->unlock(mutex);
1171 }
1172
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout %N SA with SPIs %.16"PRIx64"_i %.16"PRIx64"_r",
		 ike_version_names, ike_sa_id->get_ike_version(ike_sa_id),
		 be64toh(ike_sa_id->get_initiator_spi(ike_sa_id)),
		 be64toh(ike_sa_id->get_responder_spi(ike_sa_id)));

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		/* wait until no other thread has the SA checked out */
		if (wait_for_entry(this, entry, segment))
		{
			/* mark the SA as checked out by this thread */
			entry->checked_out = thread_current();
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	/* publish the (possibly NULL) SA on the bus for this thread */
	charon->bus->set_sa(charon->bus, ike_sa);

	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1204
1205 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1206 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1207 {
1208 ike_sa_id_t *ike_sa_id;
1209 ike_sa_t *ike_sa;
1210 uint8_t ike_version;
1211 uint64_t spi;
1212
1213 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1214
1215 spi = get_spi(this);
1216 if (!spi)
1217 {
1218 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1219 return NULL;
1220 }
1221
1222 if (initiator)
1223 {
1224 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1225 }
1226 else
1227 {
1228 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1229 }
1230 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1231 ike_sa_id->destroy(ike_sa_id);
1232
1233 if (ike_sa)
1234 {
1235 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1236 ike_sa->get_unique_id(ike_sa));
1237 }
1238 return ike_sa;
1239 }
1240
1241 /**
1242 * Get the message ID or message hash to detect early retransmissions
1243 */
1244 static uint32_t get_message_id_or_hash(message_t *message)
1245 {
1246 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION)
1247 {
1248 /* Use a hash for IKEv1 Phase 1, where we don't have a MID, and Quick
1249 * Mode, where all three messages use the same message ID */
1250 if (message->get_message_id(message) == 0 ||
1251 message->get_exchange_type(message) == QUICK_MODE)
1252 {
1253 return chunk_hash(message->get_packet_data(message));
1254 }
1255 }
1256 return message->get_message_id(message);
1257 }
1258
/**
 * Checkout the IKE_SA a received message is addressed to.  For initial
 * request messages (IKE_SA_INIT, ID_PROT, AGGRESSIVE) a new IKE_SA is
 * created, with a hash of the packet used to detect retransmits of init
 * messages we already responded to.  Returns NULL (and logs) if the message
 * is a retransmit being processed, addresses an unknown SA, or creating a
 * new SA fails.
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout %N SA by message with SPIs %.16"PRIx64"_i "
		 "%.16"PRIx64"_r", ike_version_names, id->get_ike_version(id),
		 be64toh(id->get_initiator_spi(id)),
		 be64toh(id->get_responder_spi(id)));

	/* a zero responder SPI with MID 0 may be the very first message of a
	 * new exchange initiated by the peer */
	if (id->get_responder_spi(id) == 0 &&
		message->get_message_id(message) == 0)
	{
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		hasher_t *hasher;
		uint64_t our_spi;
		chunk_t hash;

		/* hash the init message so retransmits of it can be recognized */
		hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
		if (!hasher || !get_init_hash(hasher, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			DESTROY_IF(hasher);
			id->destroy(id);
			goto out;
		}
		hasher->destroy(hasher);

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						/* the entry adopts id and hash; mark the message as
						 * being processed and hand out the SA checked out */
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;
						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						segment = put_entry(this, entry);
						entry->checked_out = thread_current();
						unlock_single_segment(this, segment);

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));
						goto out;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit: forget the init hash again */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				goto out;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				goto out;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree,
		 * fall through and look up the existing entry with our SPI */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = thread_current();
			if (message->get_first_payload_type(message) != PLV1_FRAGMENT &&
				message->get_first_payload_type(message) != PLV2_FRAGMENT)
			{	/* TODO-FRAG: this fails if there are unencrypted payloads */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{	/* adopt the responder SPI we allocated above */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);

out:
	charon->bus->set_sa(charon->bus, ike_sa);
	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1424
/**
 * Checkout an existing usable IKE_SA whose peer and IKE configs equal the
 * given peer config, or hand out a newly created one if none matches (or
 * if IKE_SA reuse is disabled).
 */
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa && peer_cfg->get_ike_version(peer_cfg) != IKEV1)
	{	/* IKE_SA reuse disabled by config (not possible for IKEv1) */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		goto out;
	}

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (!wait_for_entry(this, entry, segment))
		{
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING ||
			entry->ike_sa->get_state(entry->ike_sa) == IKE_REKEYED)
		{	/* skip IKE_SAs which are not usable, wake other waiting threads */
			entry->condvar->signal(entry->condvar);
			continue;
		}

		/* only reuse an SA if both its peer config and IKE config match */
		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				entry->checked_out = thread_current();
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
		/* other threads might be waiting for this entry */
		entry->condvar->signal(entry->condvar);
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);

out:
	if (!ike_sa)
	{
		DBG2(DBG_MGR, "IKE_SA checkout not successful");
	}
	return ike_sa;
}
1490
1491 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1492 private_ike_sa_manager_t *this, uint32_t id)
1493 {
1494 enumerator_t *enumerator;
1495 entry_t *entry;
1496 ike_sa_t *ike_sa = NULL;
1497 u_int segment;
1498
1499 DBG2(DBG_MGR, "checkout IKE_SA by unique ID %u", id);
1500
1501 enumerator = create_table_enumerator(this);
1502 while (enumerator->enumerate(enumerator, &entry, &segment))
1503 {
1504 if (wait_for_entry(this, entry, segment))
1505 {
1506 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1507 {
1508 ike_sa = entry->ike_sa;
1509 entry->checked_out = thread_current();
1510 break;
1511 }
1512 /* other threads might be waiting for this entry */
1513 entry->condvar->signal(entry->condvar);
1514 }
1515 }
1516 enumerator->destroy(enumerator);
1517
1518 if (ike_sa)
1519 {
1520 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1521 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1522 }
1523 else
1524 {
1525 DBG2(DBG_MGR, "IKE_SA checkout not successful");
1526 }
1527 charon->bus->set_sa(charon->bus, ike_sa);
1528 return ike_sa;
1529 }
1530
1531 METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1532 private_ike_sa_manager_t *this, char *name, bool child)
1533 {
1534 enumerator_t *enumerator, *children;
1535 entry_t *entry;
1536 ike_sa_t *ike_sa = NULL;
1537 child_sa_t *child_sa;
1538 u_int segment;
1539
1540 DBG2(DBG_MGR, "checkout IKE_SA by%s name '%s'", child ? " child" : "", name);
1541
1542 enumerator = create_table_enumerator(this);
1543 while (enumerator->enumerate(enumerator, &entry, &segment))
1544 {
1545 if (wait_for_entry(this, entry, segment))
1546 {
1547 /* look for a child with such a policy name ... */
1548 if (child)
1549 {
1550 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1551 while (children->enumerate(children, (void**)&child_sa))
1552 {
1553 if (streq(child_sa->get_name(child_sa), name))
1554 {
1555 ike_sa = entry->ike_sa;
1556 break;
1557 }
1558 }
1559 children->destroy(children);
1560 }
1561 else /* ... or for a IKE_SA with such a connection name */
1562 {
1563 if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1564 {
1565 ike_sa = entry->ike_sa;
1566 }
1567 }
1568 /* got one, return */
1569 if (ike_sa)
1570 {
1571 entry->checked_out = thread_current();
1572 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1573 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1574 break;
1575 }
1576 /* other threads might be waiting for this entry */
1577 entry->condvar->signal(entry->condvar);
1578 }
1579 }
1580 enumerator->destroy(enumerator);
1581
1582 charon->bus->set_sa(charon->bus, ike_sa);
1583
1584 if (!ike_sa)
1585 {
1586 DBG2(DBG_MGR, "IKE_SA checkout not successful");
1587 }
1588 return ike_sa;
1589 }
1590
/**
 * Assign a fresh initiator SPI to a connecting IKE_SA that this thread has
 * checked out.  The new SPI is partially masked so the entry stays in the
 * same hash table row/segment (see comment below).  Returns FALSE if the SA
 * is in the wrong state, a responder, not checked out by this thread, no
 * SPI could be allocated, or the manager is shutting down.
 */
METHOD(ike_sa_manager_t, new_initiator_spi, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	ike_sa_state_t state;
	ike_sa_id_t *ike_sa_id;
	entry_t *entry;
	u_int segment;
	uint64_t new_spi, spi;

	state = ike_sa->get_state(ike_sa);
	if (state != IKE_CONNECTING)
	{
		DBG1(DBG_MGR, "unable to change initiator SPI for IKE_SA in state "
			 "%N", ike_sa_state_names, state);
		return FALSE;
	}

	ike_sa_id = ike_sa->get_id(ike_sa);
	if (!ike_sa_id->is_initiator(ike_sa_id))
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA as responder");
		return FALSE;
	}

	/* only the thread that checked the SA out may change its SPI */
	if (ike_sa != charon->bus->get_sa(charon->bus))
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA not checked "
			 "out by current thread");
		return FALSE;
	}

	new_spi = get_spi(this);
	if (!new_spi)
	{
		DBG1(DBG_MGR, "unable to allocate new initiator SPI for IKE_SA");
		return FALSE;
	}

	/* on SUCCESS the segment is left locked until we are done below */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, no need for a new SPI */
			DBG2(DBG_MGR, "ignored change of initiator SPI during shutdown");
			unlock_single_segment(this, segment);
			return FALSE;
		}
	}
	else
	{
		DBG1(DBG_MGR, "unable to change initiator SPI of IKE_SA, not found");
		return FALSE;
	}

	/* the hashtable row and segment are determined by the local SPI as
	 * initiator, so if we change it the row and segment derived from it might
	 * change as well. This could be a problem for threads waiting for the
	 * entry (in particular those enumerating entries to check them out by
	 * unique ID or name). In order to avoid having to drive them out and thus
	 * preventing them from checking out the entry (even though the ID or name
	 * will not change and enumerating it is also fine), we mask the new SPI and
	 * merge it with the old SPI so the entry ends up in the same row/segment.
	 * Since SPIs are 64-bit and the number of rows/segments is usually
	 * relatively low this should not be a problem. */
	spi = ike_sa_id->get_initiator_spi(ike_sa_id);
	new_spi = (spi & (uint64_t)this->table_mask) |
			  (new_spi & ~(uint64_t)this->table_mask);

	DBG2(DBG_MGR, "change initiator SPI of IKE_SA %s[%u] from %.16"PRIx64" to "
		 "%.16"PRIx64, ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa),
		 be64toh(spi), be64toh(new_spi));

	ike_sa_id->set_initiator_spi(ike_sa_id, new_spi);
	entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa_id);

	entry->condvar->signal(entry->condvar);
	unlock_single_segment(this, segment);
	return TRUE;
}
1670
1671 CALLBACK(enumerator_filter_wait, bool,
1672 private_ike_sa_manager_t *this, enumerator_t *orig, va_list args)
1673 {
1674 entry_t *entry;
1675 u_int segment;
1676 ike_sa_t **out;
1677
1678 VA_ARGS_VGET(args, out);
1679
1680 while (orig->enumerate(orig, &entry, &segment))
1681 {
1682 if (wait_for_entry(this, entry, segment))
1683 {
1684 *out = entry->ike_sa;
1685 charon->bus->set_sa(charon->bus, *out);
1686 return TRUE;
1687 }
1688 }
1689 return FALSE;
1690 }
1691
1692 CALLBACK(enumerator_filter_skip, bool,
1693 private_ike_sa_manager_t *this, enumerator_t *orig, va_list args)
1694 {
1695 entry_t *entry;
1696 u_int segment;
1697 ike_sa_t **out;
1698
1699 VA_ARGS_VGET(args, out);
1700
1701 while (orig->enumerate(orig, &entry, &segment))
1702 {
1703 if (!entry->driveout_new_threads &&
1704 !entry->driveout_waiting_threads &&
1705 !entry->checked_out)
1706 {
1707 *out = entry->ike_sa;
1708 charon->bus->set_sa(charon->bus, *out);
1709 return TRUE;
1710 }
1711 }
1712 return FALSE;
1713 }
1714
/**
 * Enumerator destructor: clear the thread's current IKE_SA that the filter
 * callbacks above published on the bus.
 */
CALLBACK(reset_sa, void,
	void *data)
{
	charon->bus->set_sa(charon->bus, NULL);
}
1720
1721 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1722 private_ike_sa_manager_t* this, bool wait)
1723 {
1724 return enumerator_create_filter(create_table_enumerator(this),
1725 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1726 this, reset_sa);
1727 }
1728
/**
 * Check an IKE_SA back into the manager, updating its entry's SPIs and the
 * half-open and connected-peers bookkeeping, and waking waiting threads.
 * SAs not yet in the table (e.g. created via checkout_new()) get a new entry.
 */
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry; on SUCCESS the segment remains locked */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = NULL;
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* the SA is not yet in the table: create and register a new entry */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		if (ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		/* put_entry() returns with the segment locked */
		segment = put_entry(this, entry);
	}
	DBG2(DBG_MGR, "checkin of IKE_SA successful");

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				/* We can't hold the segment locked while checking the
				 * uniqueness as this could lead to deadlocks. We mark the
				 * entry as checked out while we release the lock so no other
				 * thread can acquire it. Since it is not yet in the list of
				 * connected peers that will not cause a deadlock as no other
				 * caller of check_uniqueness() will try to check out this SA */
				entry->checked_out = thread_current();
				unlock_single_segment(this, segment);

				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);

				/* The entry could have been modified in the mean time, e.g.
				 * because another SA was added/removed next to it or another
				 * thread is waiting, but it should still exist, so there is no
				 * need for a lookup via get_entry_by... */
				lock_single_segment(this, segment);
				entry->checked_out = NULL;
				/* We already signaled waiting threads above, we have to do that
				 * again after checking the SA out and back in again. */
				entry->condvar->signal(entry->condvar);
			}
		}

		/* register the peer identities for later uniqueness checks */
		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1848
/**
 * Check an IKE_SA back in and destroy it, removing its entry and all
 * auxiliary hash table state after driving out any waiting threads.
 */
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* on SUCCESS the segment remains locked */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored checkin and destroy of IKE_SA during shutdown");
			entry->checked_out = NULL;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up the auxiliary half-open/connected-peers/init-hash tables */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "checkin and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to checkin and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1917
/**
 * Cleanup function for create_id_enumerator: destroys the cloned list of
 * IKE_SA IDs along with each contained ID.
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1925
1926 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
1927 private_ike_sa_manager_t *this, identification_t *me,
1928 identification_t *other, int family)
1929 {
1930 table_item_t *item;
1931 u_int row, segment;
1932 rwlock_t *lock;
1933 linked_list_t *ids = NULL;
1934
1935 row = chunk_hash_inc(other->get_encoding(other),
1936 chunk_hash(me->get_encoding(me))) & this->table_mask;
1937 segment = row & this->segment_mask;
1938
1939 lock = this->connected_peers_segments[segment].lock;
1940 lock->read_lock(lock);
1941 item = this->connected_peers_table[row];
1942 while (item)
1943 {
1944 connected_peers_t *current = item->value;
1945
1946 if (connected_peers_match(current, me, other, family))
1947 {
1948 ids = current->sas->clone_offset(current->sas,
1949 offsetof(ike_sa_id_t, clone));
1950 break;
1951 }
1952 item = item->next;
1953 }
1954 lock->unlock(lock);
1955
1956 if (!ids)
1957 {
1958 return enumerator_create_empty();
1959 }
1960 return enumerator_create_cleaner(ids->create_enumerator(ids),
1961 (void*)id_enumerator_cleanup, ids);
1962 }
1963
1964 /**
1965 * Move all CHILD_SAs and virtual IPs from old to new
1966 */
1967 static void adopt_children_and_vips(ike_sa_t *old, ike_sa_t *new)
1968 {
1969 enumerator_t *enumerator;
1970 child_sa_t *child_sa;
1971 host_t *vip;
1972 int chcount = 0, vipcount = 0;
1973
1974 charon->bus->children_migrate(charon->bus, new->get_id(new),
1975 new->get_unique_id(new));
1976 enumerator = old->create_child_sa_enumerator(old);
1977 while (enumerator->enumerate(enumerator, &child_sa))
1978 {
1979 old->remove_child_sa(old, enumerator);
1980 new->add_child_sa(new, child_sa);
1981 chcount++;
1982 }
1983 enumerator->destroy(enumerator);
1984
1985 new->adopt_child_tasks(new, old);
1986
1987 enumerator = old->create_virtual_ip_enumerator(old, FALSE);
1988 while (enumerator->enumerate(enumerator, &vip))
1989 {
1990 new->add_virtual_ip(new, FALSE, vip);
1991 vipcount++;
1992 }
1993 enumerator->destroy(enumerator);
1994 /* this does not release the addresses, which is good, but it does trigger
1995 * an assign_vips(FALSE) event... */
1996 old->clear_virtual_ips(old, FALSE);
1997 /* ...trigger the analogous event on the new SA */
1998 charon->bus->set_sa(charon->bus, new);
1999 charon->bus->assign_vips(charon->bus, new, TRUE);
2000 charon->bus->children_migrate(charon->bus, NULL, 0);
2001 charon->bus->set_sa(charon->bus, old);
2002
2003 if (chcount || vipcount)
2004 {
2005 DBG1(DBG_IKE, "detected reauth of existing IKE_SA, adopting %d "
2006 "children and %d virtual IPs", chcount, vipcount);
2007 }
2008 }
2009
2010 /**
2011 * Delete an existing IKE_SA due to a unique replace policy
2012 */
2013 static status_t enforce_replace(private_ike_sa_manager_t *this,
2014 ike_sa_t *duplicate, ike_sa_t *new,
2015 identification_t *other, host_t *host)
2016 {
2017 charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);
2018
2019 if (host->equals(host, duplicate->get_other_host(duplicate)))
2020 {
2021 /* looks like a reauthentication attempt */
2022 if (!new->has_condition(new, COND_INIT_CONTACT_SEEN) &&
2023 new->get_version(new) == IKEV1)
2024 {
2025 /* IKEv1 implicitly takes over children, IKEv2 recreates them
2026 * explicitly. */
2027 adopt_children_and_vips(duplicate, new);
2028 }
2029 /* For IKEv1 we have to delay the delete for the old IKE_SA. Some
2030 * peers need to complete the new SA first, otherwise the quick modes
2031 * might get lost. For IKEv2 we do the same, as we want overlapping
2032 * CHILD_SAs to keep connectivity up. */
2033 lib->scheduler->schedule_job(lib->scheduler, (job_t*)
2034 delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
2035 DBG1(DBG_IKE, "schedule delete of duplicate IKE_SA for peer '%Y' due "
2036 "to uniqueness policy and suspected reauthentication", other);
2037 return SUCCESS;
2038 }
2039 DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
2040 "uniqueness policy", other);
2041 return duplicate->delete(duplicate, FALSE);
2042 }
2043
/**
 * Enforce the peer config's uniqueness policy against all existing IKE_SAs
 * with the same identity pair, deleting or replacing duplicates as
 * configured.  Returns TRUE if the new IKE_SA itself should be canceled
 * (UNIQUE_KEEP policy with an existing SA to a different endpoint).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	/* enumerate all SAs registered for the same identity pair */
	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{	/* INITIAL_CONTACT received: destroy duplicates unconditionally */
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		/* only enforce the policy on SAs using the same config */
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* potential reauthentication? */
							if (!other_host->equals(other_host,
										duplicate->get_other_host(duplicate)))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
2131
2132 METHOD(ike_sa_manager_t, has_contact, bool,
2133 private_ike_sa_manager_t *this, identification_t *me,
2134 identification_t *other, int family)
2135 {
2136 table_item_t *item;
2137 u_int row, segment;
2138 rwlock_t *lock;
2139 bool found = FALSE;
2140
2141 row = chunk_hash_inc(other->get_encoding(other),
2142 chunk_hash(me->get_encoding(me))) & this->table_mask;
2143 segment = row & this->segment_mask;
2144 lock = this->connected_peers_segments[segment].lock;
2145 lock->read_lock(lock);
2146 item = this->connected_peers_table[row];
2147 while (item)
2148 {
2149 if (connected_peers_match(item->value, me, other, family))
2150 {
2151 found = TRUE;
2152 break;
2153 }
2154 item = item->next;
2155 }
2156 lock->unlock(lock);
2157
2158 return found;
2159 }
2160
/**
 * Return the total number of IKE_SAs currently registered in the manager,
 * read from the atomically maintained counter.
 */
METHOD(ike_sa_manager_t, get_count, u_int,
	private_ike_sa_manager_t *this)
{
	return (u_int)ref_cur(&this->total_sa_count);
}
2166
2167 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
2168 private_ike_sa_manager_t *this, host_t *ip, bool responder_only)
2169 {
2170 table_item_t *item;
2171 u_int row, segment;
2172 rwlock_t *lock;
2173 chunk_t addr;
2174 u_int count = 0;
2175
2176 if (ip)
2177 {
2178 addr = ip->get_address(ip);
2179 row = chunk_hash(addr) & this->table_mask;
2180 segment = row & this->segment_mask;
2181 lock = this->half_open_segments[segment].lock;
2182 lock->read_lock(lock);
2183 item = this->half_open_table[row];
2184 while (item)
2185 {
2186 half_open_t *half_open = item->value;
2187
2188 if (chunk_equals(addr, half_open->other))
2189 {
2190 count = responder_only ? half_open->count_responder
2191 : half_open->count;
2192 break;
2193 }
2194 item = item->next;
2195 }
2196 lock->unlock(lock);
2197 }
2198 else
2199 {
2200 count = responder_only ? (u_int)ref_cur(&this->half_open_count_responder)
2201 : (u_int)ref_cur(&this->half_open_count);
2202 }
2203 return count;
2204 }
2205
/**
 * Install (or clear, with NULL) a callback used to allocate SPIs, guarded
 * by the SPI lock so it can't race with concurrent SPI generation.
 */
METHOD(ike_sa_manager_t, set_spi_cb, void,
	private_ike_sa_manager_t *this, spi_cb_t callback, void *data)
{
	this->spi_lock->write_lock(this->spi_lock);
	this->spi_cb.cb = callback;
	this->spi_cb.data = data;
	this->spi_lock->unlock(this->spi_lock);
}
2214
/**
 * Destroy all entries in the main table, including their IKE_SAs and any
 * state kept for them in the half-open, connected-peers and init-hash
 * tables.  Callers must hold all segment locks.
 */
static void destroy_all_entries(private_ike_sa_manager_t *this)
{
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* publish the SA on the bus so destroy hooks see it as current */
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
}
2246
/**
 * Shut the manager down in four phases: flag all entries so threads are
 * driven out, wait until every thread has left, initiate deletion of all
 * IKE_SAs, then destroy all entries.  Finally disable SPI generation.
 */
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads  */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		entry->ike_sa->delete(entry->ike_sa, TRUE);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	destroy_all_entries(this);
	unlock_all_segments(this);

	/* disable SPI generation so no new SAs can be created after flushing */
	this->spi_lock->write_lock(this->spi_lock);
	DESTROY_IF(this->rng);
	this->rng = NULL;
	this->spi_cb.cb = NULL;
	this->spi_cb.data = NULL;
	this->spi_lock->unlock(this->spi_lock);
}
2302
2303 METHOD(ike_sa_manager_t, destroy, void,
2304 private_ike_sa_manager_t *this)
2305 {
2306 u_int i;
2307
2308 /* in case new SAs were checked in after flush() was called */
2309 lock_all_segments(this);
2310 destroy_all_entries(this);
2311 unlock_all_segments(this);
2312
2313 free(this->ike_sa_table);
2314 free(this->half_open_table);
2315 free(this->connected_peers_table);
2316 free(this->init_hashes_table);
2317 for (i = 0; i < this->segment_count; i++)
2318 {
2319 this->segments[i].mutex->destroy(this->segments[i].mutex);
2320 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
2321 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
2322 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
2323 }
2324 free(this->segments);
2325 free(this->half_open_segments);
2326 free(this->connected_peers_segments);
2327 free(this->init_hashes_segments);
2328
2329 this->spi_lock->destroy(this->spi_lock);
2330 free(this);
2331 }
2332
2333 /**
2334 * This function returns the next-highest power of two for the given number.
2335 * The algorithm works by setting all bits on the right-hand side of the most
2336 * significant 1 to 1 and then increments the whole number so it rolls over
2337 * to the nearest power of two. Note: returns 0 for n == 0
2338 */
2339 static u_int get_nearest_powerof2(u_int n)
2340 {
2341 u_int i;
2342
2343 --n;
2344 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2345 {
2346 n |= n >> i;
2347 }
2348 return ++n;
2349 }
2350
2351 /*
2352 * Described in header.
2353 */
2354 ike_sa_manager_t *ike_sa_manager_create()
2355 {
2356 private_ike_sa_manager_t *this;
2357 char *spi_val;
2358 u_int i;
2359
2360 INIT(this,
2361 .public = {
2362 .checkout = _checkout,
2363 .checkout_new = _checkout_new,
2364 .checkout_by_message = _checkout_by_message,
2365 .checkout_by_config = _checkout_by_config,
2366 .checkout_by_id = _checkout_by_id,
2367 .checkout_by_name = _checkout_by_name,
2368 .new_initiator_spi = _new_initiator_spi,
2369 .check_uniqueness = _check_uniqueness,
2370 .has_contact = _has_contact,
2371 .create_enumerator = _create_enumerator,
2372 .create_id_enumerator = _create_id_enumerator,
2373 .checkin = _checkin,
2374 .checkin_and_destroy = _checkin_and_destroy,
2375 .get_count = _get_count,
2376 .get_half_open_count = _get_half_open_count,
2377 .flush = _flush,
2378 .set_spi_cb = _set_spi_cb,
2379 .destroy = _destroy,
2380 },
2381 );
2382
2383 this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
2384 if (this->rng == NULL)
2385 {
2386 DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
2387 free(this);
2388 return NULL;
2389 }
2390 this->spi_lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2391 spi_val = lib->settings->get_str(lib->settings, "%s.spi_mask", NULL,
2392 lib->ns);
2393 this->spi_mask = settings_value_as_uint64(spi_val, 0);
2394 spi_val = lib->settings->get_str(lib->settings, "%s.spi_label", NULL,
2395 lib->ns);
2396 this->spi_label = settings_value_as_uint64(spi_val, 0);
2397 if (this->spi_mask || this->spi_label)
2398 {
2399 DBG1(DBG_IKE, "using SPI label 0x%.16"PRIx64" and mask 0x%.16"PRIx64,
2400 this->spi_label, this->spi_mask);
2401 /* the allocated SPI is assumed to be in network order */
2402 this->spi_mask = htobe64(this->spi_mask);
2403 this->spi_label = htobe64(this->spi_label);
2404 }
2405
2406 this->ikesa_limit = lib->settings->get_int(lib->settings,
2407 "%s.ikesa_limit", 0, lib->ns);
2408
2409 this->table_size = get_nearest_powerof2(lib->settings->get_int(
2410 lib->settings, "%s.ikesa_table_size",
2411 DEFAULT_HASHTABLE_SIZE, lib->ns));
2412 this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
2413 this->table_mask = this->table_size - 1;
2414
2415 this->segment_count = get_nearest_powerof2(lib->settings->get_int(
2416 lib->settings, "%s.ikesa_table_segments",
2417 DEFAULT_SEGMENT_COUNT, lib->ns));
2418 this->segment_count = max(1, min(this->segment_count, this->table_size));
2419 this->segment_mask = this->segment_count - 1;
2420
2421 this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
2422 this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
2423 for (i = 0; i < this->segment_count; i++)
2424 {
2425 this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2426 }
2427
2428 /* we use the same table parameters for the table to track half-open SAs */
2429 this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
2430 this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2431 for (i = 0; i < this->segment_count; i++)
2432 {
2433 this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2434 }
2435
2436 /* also for the hash table used for duplicate tests */
2437 this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
2438 this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2439 for (i = 0; i < this->segment_count; i++)
2440 {
2441 this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2442 }
2443
2444 /* and again for the table of hashes of seen initial IKE messages */
2445 this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
2446 this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
2447 for (i = 0; i < this->segment_count; i++)
2448 {
2449 this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2450 }
2451
2452 this->reuse_ikesa = lib->settings->get_bool(lib->settings,
2453 "%s.reuse_ikesa", TRUE, lib->ns);
2454 return &this->public;
2455 }