ike-sa-manager: Improve scalability of half-open IKE_SA checking
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2012 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31 #include <processing/jobs/delete_ike_sa_job.h>
32
33 /* the default size of the hash table (MUST be a power of 2) */
34 #define DEFAULT_HASHTABLE_SIZE 1
35
36 /* the maximum size of the hash table (MUST be a power of 2) */
37 #define MAX_HASHTABLE_SIZE (1 << 30)
38
39 /* the default number of segments (MUST be a power of 2) */
40 #define DEFAULT_SEGMENT_COUNT 1
41
42 typedef struct entry_t entry_t;
43
/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until ike_sa_t object is free for use again.
	 */
	condvar_t *condvar;

	/**
	 * Is this ike_sa currently checked out?
	 */
	bool checked_out;

	/**
	 * Are threads that newly try to acquire this SA driven out (e.g. while
	 * the SA is being deleted)?
	 */
	bool driveout_new_threads;

	/**
	 * Are threads already waiting on the condvar driven out as well?
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * hash of the IKE_SA_INIT message, used to detect retransmissions
	 */
	chunk_t init_hash;

	/**
	 * remote host address, required for DoS detection and duplicate
	 * checking (host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs)
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * own identity, required for duplicate checking
	 */
	identification_t *my_id;

	/**
	 * remote identity, required for duplicate checking
	 */
	identification_t *other_id;

	/**
	 * message ID or hash of currently processing message, -1 if none
	 * (stored as the unsigned equivalent of -1, since the type is unsigned)
	 */
	u_int32_t processing;
};
116
117 /**
118 * Implementation of entry_t.destroy.
119 */
120 static status_t entry_destroy(entry_t *this)
121 {
122 /* also destroy IKE SA */
123 this->ike_sa->destroy(this->ike_sa);
124 this->ike_sa_id->destroy(this->ike_sa_id);
125 chunk_free(&this->init_hash);
126 DESTROY_IF(this->other);
127 DESTROY_IF(this->my_id);
128 DESTROY_IF(this->other_id);
129 this->condvar->destroy(this->condvar);
130 free(this);
131 return SUCCESS;
132 }
133
134 /**
135 * Creates a new entry for the ike_sa_t list.
136 */
137 static entry_t *entry_create()
138 {
139 entry_t *this;
140
141 INIT(this,
142 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
143 .processing = -1,
144 );
145
146 return this;
147 }
148
149 /**
150 * Function that matches entry_t objects by ike_sa_id_t.
151 */
152 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
153 {
154 if (id->equals(id, entry->ike_sa_id))
155 {
156 return TRUE;
157 }
158 if ((id->get_responder_spi(id) == 0 ||
159 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
160 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
161 {
162 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
163 return TRUE;
164 }
165 return FALSE;
166 }
167
168 /**
169 * Function that matches entry_t objects by ike_sa_t pointers.
170 */
171 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
172 {
173 return entry->ike_sa == ike_sa;
174 }
175
176 /**
177 * Hash function for ike_sa_id_t objects.
178 */
179 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
180 {
181 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
182 * locally unique, so we use our randomly allocated SPI whether we are
183 * initiator or responder to ensure a good distribution. The latter is not
184 * possible for IKEv1 as we don't know whether we are original initiator or
185 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
186 * SPIs (Cookies) to be allocated near random (we allocate them randomly
187 * anyway) it seems safe to always use the initiator SPI. */
188 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
189 ike_sa_id->is_initiator(ike_sa_id))
190 {
191 return ike_sa_id->get_initiator_spi(ike_sa_id);
192 }
193 return ike_sa_id->get_responder_spi(ike_sa_id);
194 }
195
196 typedef struct half_open_t half_open_t;
197
/**
 * Struct to manage half-open IKE_SAs per peer.
 */
struct half_open_t {
	/** chunk of remote host address (cloned, owned by this struct) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;
};
208
/**
 * Destroys a half_open_t object, releasing the cloned address chunk.
 */
static void half_open_destroy(half_open_t *this)
{
	chunk_free(&this->other);
	free(this);
}
217
218 typedef struct connected_peers_t connected_peers_t;
219
/**
 * Tracks the established IKE_SAs between one pair of identities, used for
 * duplicate checking.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
233
234 static void connected_peers_destroy(connected_peers_t *this)
235 {
236 this->my_id->destroy(this->my_id);
237 this->other_id->destroy(this->other_id);
238 this->sas->destroy(this->sas);
239 free(this);
240 }
241
242 /**
243 * Function that matches connected_peers_t objects by the given ids.
244 */
245 static inline bool connected_peers_match(connected_peers_t *connected_peers,
246 identification_t *my_id, identification_t *other_id,
247 int family)
248 {
249 return my_id->equals(my_id, connected_peers->my_id) &&
250 other_id->equals(other_id, connected_peers->other_id) &&
251 (!family || family == connected_peers->family);
252 }
253
254 typedef struct init_hash_t init_hash_t;
255
/**
 * Associates the hash of an initial IKE message with the SPI we allocated
 * for it, so retransmits can be mapped back to the same IKE_SA.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};
263
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table (exclusive locking).
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment */
	u_int count;
};
276
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash
 * tables; these use rwlocks so readers can share access.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open table"
	 * it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
290
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * singly-linked overflow list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list, NULL at the end of the bucket */
	table_item_t *next;
};
304
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 *
 * All hash tables below share the same size/mask and are partitioned into
 * independently lockable segments to reduce lock contention.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table.
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows.
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table.
	 */
	segment_t *segments;

	/**
	 * The number of segments.
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment.
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects, keyed by remote address.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Total number of half-open IKE_SAs (across all segments).
	 */
	refcount_t half_open_count;

	/**
	 * Hash table with connected_peers_t objects, keyed by identity pair.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects for retransmission detection.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side
	 */
	rng_t *rng;

	/**
	 * SHA1 hasher for IKE_SA_INIT retransmit detection
	 */
	hasher_t *hasher;

	/**
	 * reuse existing IKE_SAs in checkout_by_config
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, 0 if none
	 */
	u_int ikesa_limit;
};
401
402 /**
403 * Acquire a lock to access the segment of the table row with the given index.
404 * It also works with the segment index directly.
405 */
406 static inline void lock_single_segment(private_ike_sa_manager_t *this,
407 u_int index)
408 {
409 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
410 lock->lock(lock);
411 }
412
413 /**
414 * Release the lock required to access the segment of the table row with the given index.
415 * It also works with the segment index directly.
416 */
417 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
418 u_int index)
419 {
420 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
421 lock->unlock(lock);
422 }
423
424 /**
425 * Lock all segments
426 */
427 static void lock_all_segments(private_ike_sa_manager_t *this)
428 {
429 u_int i;
430
431 for (i = 0; i < this->segment_count; i++)
432 {
433 this->segments[i].mutex->lock(this->segments[i].mutex);
434 }
435 }
436
437 /**
438 * Unlock all segments
439 */
440 static void unlock_all_segments(private_ike_sa_manager_t *this)
441 {
442 u_int i;
443
444 for (i = 0; i < this->segment_count; i++)
445 {
446 this->segments[i].mutex->unlock(this->segments[i].mutex);
447 }
448 }
449
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 *
 * While the enumerator points at an entry, the lock of that entry's segment
 * is held; it is released when enumeration moves to the next segment or is
 * destroyed.
 */
struct private_enumerator_t {

	/**
	 * implements enumerator interface
	 */
	enumerator_t enumerator;

	/**
	 * associated ike_sa_manager_t
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * current segment index
	 */
	u_int segment;

	/**
	 * currently enumerating entry (its condvar is signaled when we move on)
	 */
	entry_t *entry;

	/**
	 * current table row index
	 */
	u_int row;

	/**
	 * current table item
	 */
	table_item_t *current;

	/**
	 * previous table item (needed by remove_entry_at to unlink current)
	 */
	table_item_t *prev;
};
492
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	/* Advance to the next entry in the hash table.  Invariant: whenever an
	 * entry is handed out to the caller, the lock on its segment is held;
	 * it is released once we move past the last item of the segment's rows. */
	if (this->entry)
	{
		/* wake up a thread that may be waiting on the entry we just left */
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{	/* entering a new row: take the segment lock first */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				/* segment stays locked while the caller uses the entry */
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			unlock_single_segment(this->manager, this->segment);
			/* rows belonging to a segment are interleaved by segment_count */
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
529
METHOD(enumerator_t, enumerator_destroy, void,
	private_enumerator_t *this)
{
	/* wake up a thread that may be waiting on the last enumerated entry */
	if (this->entry)
	{
		this->entry->condvar->signal(this->entry->condvar);
	}
	/* an enumeration aborted mid-segment still holds the segment lock */
	if (this->current)
	{
		unlock_single_segment(this->manager, this->segment);
	}
	free(this);
}
543
544 /**
545 * Creates an enumerator to enumerate the entries in the hash table.
546 */
547 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
548 {
549 private_enumerator_t *enumerator;
550
551 INIT(enumerator,
552 .enumerator = {
553 .enumerate = (void*)_enumerate,
554 .destroy = _enumerator_destroy,
555 },
556 .manager = this,
557 );
558 return &enumerator->enumerator;
559 }
560
561 /**
562 * Put an entry into the hash table.
563 * Note: The caller has to unlock the returned segment.
564 */
565 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
566 {
567 table_item_t *current, *item;
568 u_int row, segment;
569
570 INIT(item,
571 .value = entry,
572 );
573
574 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
575 segment = row & this->segment_mask;
576
577 lock_single_segment(this, segment);
578 current = this->ike_sa_table[row];
579 if (current)
580 { /* insert at the front of current bucket */
581 item->next = current;
582 }
583 this->ike_sa_table[row] = item;
584 this->segments[segment].count++;
585 return segment;
586 }
587
588 /**
589 * Remove an entry from the hash table.
590 * Note: The caller MUST have a lock on the segment of this entry.
591 */
592 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
593 {
594 table_item_t *item, *prev = NULL;
595 u_int row, segment;
596
597 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
598 segment = row & this->segment_mask;
599 item = this->ike_sa_table[row];
600 while (item)
601 {
602 if (item->value == entry)
603 {
604 if (prev)
605 {
606 prev->next = item->next;
607 }
608 else
609 {
610 this->ike_sa_table[row] = item->next;
611 }
612 this->segments[segment].count--;
613 free(item);
614 break;
615 }
616 prev = item;
617 item = item->next;
618 }
619 }
620
/**
 * Remove the entry at the current enumerator position.
 *
 * Must only be called while the enumerator points at an entry (i.e. the
 * segment lock is held).  If the removed item was the head of its row, the
 * segment lock is released here, matching the enumerator's invariant that
 * a NULL current position holds no lock.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		/* step back so the next enumerate() call continues correctly */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			this->manager->ike_sa_table[this->row] = current->next;
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
646
647 /**
648 * Find an entry using the provided match function to compare the entries for
649 * equality.
650 */
651 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
652 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
653 linked_list_match_t match, void *param)
654 {
655 table_item_t *item;
656 u_int row, seg;
657
658 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
659 seg = row & this->segment_mask;
660
661 lock_single_segment(this, seg);
662 item = this->ike_sa_table[row];
663 while (item)
664 {
665 if (match(item->value, param))
666 {
667 *entry = item->value;
668 *segment = seg;
669 /* the locked segment has to be unlocked by the caller */
670 return SUCCESS;
671 }
672 item = item->next;
673 }
674 unlock_single_segment(this, seg);
675 return NOT_FOUND;
676 }
677
/**
 * Find an entry by ike_sa_id_t (tolerates a missing responder SPI, see
 * entry_match_by_id).
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
						ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_id, ike_sa_id);
}
688
/**
 * Find an entry by IKE_SA pointer (the id is only used to locate the row).
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
			ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry,
			u_int *segment)
{
	return get_entry_by_match_function(this, ike_sa_id, entry, segment,
				(linked_list_match_t)entry_match_by_sa, ike_sa);
}
699
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 *
 * Called with the segment mutex held; the condvar wait releases and
 * re-acquires that mutex, so the checked_out flag is re-tested on wakeup.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
729
730 /**
731 * Put a half-open SA into the hash table.
732 */
733 static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
734 {
735 table_item_t *item;
736 u_int row, segment;
737 rwlock_t *lock;
738 half_open_t *half_open;
739 chunk_t addr;
740
741 addr = entry->other->get_address(entry->other);
742 row = chunk_hash(addr) & this->table_mask;
743 segment = row & this->segment_mask;
744 lock = this->half_open_segments[segment].lock;
745 lock->write_lock(lock);
746 item = this->half_open_table[row];
747 while (item)
748 {
749 half_open = item->value;
750
751 if (chunk_equals(addr, half_open->other))
752 {
753 half_open->count++;
754 break;
755 }
756 item = item->next;
757 }
758
759 if (!item)
760 {
761 INIT(half_open,
762 .other = chunk_clone(addr),
763 .count = 1,
764 );
765 INIT(item,
766 .value = half_open,
767 .next = this->half_open_table[row],
768 );
769 this->half_open_table[row] = item;
770 }
771 this->half_open_segments[segment].count++;
772 ref_get(&this->half_open_count);
773 lock->unlock(lock);
774 }
775
776 /**
777 * Remove a half-open SA from the hash table.
778 */
779 static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
780 {
781 table_item_t *item, *prev = NULL;
782 u_int row, segment;
783 rwlock_t *lock;
784 chunk_t addr;
785
786 addr = entry->other->get_address(entry->other);
787 row = chunk_hash(addr) & this->table_mask;
788 segment = row & this->segment_mask;
789 lock = this->half_open_segments[segment].lock;
790 lock->write_lock(lock);
791 item = this->half_open_table[row];
792 while (item)
793 {
794 half_open_t *half_open = item->value;
795
796 if (chunk_equals(addr, half_open->other))
797 {
798 if (--half_open->count == 0)
799 {
800 if (prev)
801 {
802 prev->next = item->next;
803 }
804 else
805 {
806 this->half_open_table[row] = item->next;
807 }
808 half_open_destroy(half_open);
809 free(item);
810 }
811 this->half_open_segments[segment].count--;
812 ignore_result(ref_put(&this->half_open_count));
813 break;
814 }
815 prev = item;
816 item = item->next;
817 }
818 lock->unlock(lock);
819 }
820
/**
 * Put an SA between two peers into the hash table.
 *
 * If the identity pair is already tracked and the SA's ID is already in its
 * list, this is a no-op; otherwise a clone of the ID is appended.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	/* combine both identity hashes to pick a row */
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
					(linked_list_match_t)entry->ike_sa_id->equals,
					NULL, entry->ike_sa_id) == SUCCESS)
			{	/* this IKE_SA is already registered for the pair */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first IKE_SA between this pair of identities */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	/* connected_peers points at either the matched or the new object here */
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
879
/**
 * Remove an SA between two peers from the hash table.
 *
 * Removes the matching ike_sa_id_t from the pair's list and destroys the
 * whole connected_peers_t object once its list becomes empty.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	/* same row derivation as in put_connected_peers() */
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* find and destroy the cloned ID stored for this IKE_SA */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no IKE_SAs left between this pair, unlink and destroy */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
943
944 /**
945 * Get a random SPI for new IKE_SAs
946 */
947 static u_int64_t get_spi(private_ike_sa_manager_t *this)
948 {
949 u_int64_t spi;
950
951 if (this->rng &&
952 this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
953 {
954 return spi;
955 }
956 return 0;
957 }
958
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * For IKEv1 fragments only address/port/SPI are hashed (the full packet
 * differs per fragment); for Main Mode the source address is mixed in to
 * disambiguate initiators reusing the same SPI and proposal.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(private_ike_sa_manager_t *this, message_t *message,
						  chunk_t *hash)
{
	host_t *src;

	if (!this->hasher)
	{	/* this might be the case when flush() has been called */
		return FALSE;
	}
	if (message->get_first_payload_type(message) == FRAGMENT_V1)
	{	/* only hash the source IP, port and SPI for fragmented init messages */
		u_int16_t port;
		u_int64_t spi;

		src = message->get_source(message);
		/* incremental hashing: NULL result continues the hash state */
		if (!this->hasher->allocate_hash(this->hasher,
										 src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!this->hasher->allocate_hash(this->hasher,
										 chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return this->hasher->allocate_hash(this->hasher,
										   chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!this->hasher->allocate_hash(this->hasher,
										 src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return this->hasher->allocate_hash(this->hasher,
									   message->get_packet_data(message), hash);
}
1009
1010 /**
1011 * Check if we already have created an IKE_SA based on the initial IKE message
1012 * with the given hash.
1013 * If not the hash is stored, the hash data is not(!) cloned.
1014 *
1015 * Also, the local SPI is returned. In case of a retransmit this is already
1016 * stored together with the hash, otherwise it is newly allocated and should
1017 * be used to create the IKE_SA.
1018 *
1019 * @returns ALREADY_DONE if the message with the given hash has been seen before
1020 * NOT_FOUND if the message hash was not found
1021 * FAILED if the SPI allocation failed
1022 */
1023 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1024 chunk_t init_hash, u_int64_t *our_spi)
1025 {
1026 table_item_t *item;
1027 u_int row, segment;
1028 mutex_t *mutex;
1029 init_hash_t *init;
1030 u_int64_t spi;
1031
1032 row = chunk_hash(init_hash) & this->table_mask;
1033 segment = row & this->segment_mask;
1034 mutex = this->init_hashes_segments[segment].mutex;
1035 mutex->lock(mutex);
1036 item = this->init_hashes_table[row];
1037 while (item)
1038 {
1039 init_hash_t *current = item->value;
1040
1041 if (chunk_equals(init_hash, current->hash))
1042 {
1043 *our_spi = current->our_spi;
1044 mutex->unlock(mutex);
1045 return ALREADY_DONE;
1046 }
1047 item = item->next;
1048 }
1049
1050 spi = get_spi(this);
1051 if (!spi)
1052 {
1053 return FAILED;
1054 }
1055
1056 INIT(init,
1057 .hash = {
1058 .len = init_hash.len,
1059 .ptr = init_hash.ptr,
1060 },
1061 .our_spi = spi,
1062 );
1063 INIT(item,
1064 .value = init,
1065 .next = this->init_hashes_table[row],
1066 );
1067 this->init_hashes_table[row] = item;
1068 *our_spi = init->our_spi;
1069 mutex->unlock(mutex);
1070 return NOT_FOUND;
1071 }
1072
1073 /**
1074 * Remove the hash of an initial IKE message from the cache.
1075 */
1076 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1077 {
1078 table_item_t *item, *prev = NULL;
1079 u_int row, segment;
1080 mutex_t *mutex;
1081
1082 row = chunk_hash(init_hash) & this->table_mask;
1083 segment = row & this->segment_mask;
1084 mutex = this->init_hashes_segments[segment].mutex;
1085 mutex->lock(mutex);
1086 item = this->init_hashes_table[row];
1087 while (item)
1088 {
1089 init_hash_t *current = item->value;
1090
1091 if (chunk_equals(init_hash, current->hash))
1092 {
1093 if (prev)
1094 {
1095 prev->next = item->next;
1096 }
1097 else
1098 {
1099 this->init_hashes_table[row] = item->next;
1100 }
1101 free(current);
1102 free(item);
1103 break;
1104 }
1105 prev = item;
1106 item = item->next;
1107 }
1108 mutex->unlock(mutex);
1109 }
1110
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
	private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
	ike_sa_t *ike_sa = NULL;
	entry_t *entry;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA");

	if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
	{
		/* block until no other thread holds the SA, unless it is being
		 * driven out (then NULL is returned) */
		if (wait_for_entry(this, entry, segment))
		{
			entry->checked_out = TRUE;
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		/* get_entry_by_id() left the segment locked on SUCCESS */
		unlock_single_segment(this, segment);
	}
	/* publish the checked out SA (or NULL) on the bus for this thread */
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1134
1135 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1136 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1137 {
1138 ike_sa_id_t *ike_sa_id;
1139 ike_sa_t *ike_sa;
1140 u_int8_t ike_version;
1141 u_int64_t spi;
1142
1143 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1144
1145 spi = get_spi(this);
1146 if (!spi)
1147 {
1148 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1149 return NULL;
1150 }
1151
1152 if (initiator)
1153 {
1154 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1155 }
1156 else
1157 {
1158 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1159 }
1160 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1161 ike_sa_id->destroy(ike_sa_id);
1162
1163 if (ike_sa)
1164 {
1165 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1166 ike_sa->get_unique_id(ike_sa));
1167 }
1168 return ike_sa;
1169 }
1170
1171 /**
1172 * Get the message ID or message hash to detect early retransmissions
1173 */
1174 static u_int32_t get_message_id_or_hash(message_t *message)
1175 {
1176 /* Use the message ID, or the message hash in IKEv1 Main/Aggressive mode */
1177 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION &&
1178 message->get_message_id(message) == 0)
1179 {
1180 return chunk_hash(message->get_packet_data(message));
1181 }
1182 return message->get_message_id(message);
1183 }
1184
/**
 * Check out the IKE_SA a received message is addressed to. For initial
 * requests (IKEv2 IKE_SA_INIT, IKEv1 Main/Aggressive Mode) a new IKE_SA and
 * manager entry are created, unless the message is a retransmit of an
 * already handled initial message or the IKE_SA limit is hit.
 * Returns the checked-out IKE_SA, or NULL if the message is to be ignored.
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout IKE_SA by message");

	if (id->get_responder_spi(id) == 0)
	{	/* no responder SPI yet: check whether this is an initial request that
		 * may legitimately create a new IKE_SA */
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{ /* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		u_int64_t our_spi;
		chunk_t hash;

		if (!get_init_hash(this, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			id->destroy(id);
			return NULL;
		}

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;

						/* checked_out is set before releasing the segment, so
						 * other threads will wait on the condvar until we
						 * check the new SA back in */
						segment = put_entry(this, entry);
						entry->checked_out = TRUE;
						unlock_single_segment(this, segment);

						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));

						charon->bus->set_sa(charon->bus, ike_sa);
						return ike_sa;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit: forget the init hash again so
				 * a later attempt is not mistaken for a retransmit */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				return NULL;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				return NULL;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = TRUE;
			if (message->get_first_payload_type(message) != FRAGMENT_V1)
			{	/* remember the message being processed, except for IKEv1
				 * fragments, which are not complete messages yet */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1338
1339 METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
1340 private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
1341 {
1342 enumerator_t *enumerator;
1343 entry_t *entry;
1344 ike_sa_t *ike_sa = NULL;
1345 peer_cfg_t *current_peer;
1346 ike_cfg_t *current_ike;
1347 u_int segment;
1348
1349 DBG2(DBG_MGR, "checkout IKE_SA by config");
1350
1351 if (!this->reuse_ikesa)
1352 { /* IKE_SA reuse disable by config */
1353 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1354 charon->bus->set_sa(charon->bus, ike_sa);
1355 return ike_sa;
1356 }
1357
1358 enumerator = create_table_enumerator(this);
1359 while (enumerator->enumerate(enumerator, &entry, &segment))
1360 {
1361 if (!wait_for_entry(this, entry, segment))
1362 {
1363 continue;
1364 }
1365 if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
1366 { /* skip IKE_SAs which are not usable */
1367 continue;
1368 }
1369
1370 current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
1371 if (current_peer && current_peer->equals(current_peer, peer_cfg))
1372 {
1373 current_ike = current_peer->get_ike_cfg(current_peer);
1374 if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
1375 {
1376 entry->checked_out = TRUE;
1377 ike_sa = entry->ike_sa;
1378 DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
1379 ike_sa->get_unique_id(ike_sa),
1380 current_peer->get_name(current_peer));
1381 break;
1382 }
1383 }
1384 }
1385 enumerator->destroy(enumerator);
1386
1387 if (!ike_sa)
1388 { /* no IKE_SA using such a config, hand out a new */
1389 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1390 }
1391 charon->bus->set_sa(charon->bus, ike_sa);
1392 return ike_sa;
1393 }
1394
1395 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1396 private_ike_sa_manager_t *this, u_int32_t id, bool child)
1397 {
1398 enumerator_t *enumerator, *children;
1399 entry_t *entry;
1400 ike_sa_t *ike_sa = NULL;
1401 child_sa_t *child_sa;
1402 u_int segment;
1403
1404 DBG2(DBG_MGR, "checkout IKE_SA by ID");
1405
1406 enumerator = create_table_enumerator(this);
1407 while (enumerator->enumerate(enumerator, &entry, &segment))
1408 {
1409 if (wait_for_entry(this, entry, segment))
1410 {
1411 /* look for a child with such a reqid ... */
1412 if (child)
1413 {
1414 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1415 while (children->enumerate(children, (void**)&child_sa))
1416 {
1417 if (child_sa->get_reqid(child_sa) == id)
1418 {
1419 ike_sa = entry->ike_sa;
1420 break;
1421 }
1422 }
1423 children->destroy(children);
1424 }
1425 else /* ... or for a IKE_SA with such a unique id */
1426 {
1427 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1428 {
1429 ike_sa = entry->ike_sa;
1430 }
1431 }
1432 /* got one, return */
1433 if (ike_sa)
1434 {
1435 entry->checked_out = TRUE;
1436 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1437 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1438 break;
1439 }
1440 }
1441 }
1442 enumerator->destroy(enumerator);
1443
1444 charon->bus->set_sa(charon->bus, ike_sa);
1445 return ike_sa;
1446 }
1447
1448 METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1449 private_ike_sa_manager_t *this, char *name, bool child)
1450 {
1451 enumerator_t *enumerator, *children;
1452 entry_t *entry;
1453 ike_sa_t *ike_sa = NULL;
1454 child_sa_t *child_sa;
1455 u_int segment;
1456
1457 enumerator = create_table_enumerator(this);
1458 while (enumerator->enumerate(enumerator, &entry, &segment))
1459 {
1460 if (wait_for_entry(this, entry, segment))
1461 {
1462 /* look for a child with such a policy name ... */
1463 if (child)
1464 {
1465 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1466 while (children->enumerate(children, (void**)&child_sa))
1467 {
1468 if (streq(child_sa->get_name(child_sa), name))
1469 {
1470 ike_sa = entry->ike_sa;
1471 break;
1472 }
1473 }
1474 children->destroy(children);
1475 }
1476 else /* ... or for a IKE_SA with such a connection name */
1477 {
1478 if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1479 {
1480 ike_sa = entry->ike_sa;
1481 }
1482 }
1483 /* got one, return */
1484 if (ike_sa)
1485 {
1486 entry->checked_out = TRUE;
1487 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1488 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1489 break;
1490 }
1491 }
1492 }
1493 enumerator->destroy(enumerator);
1494
1495 charon->bus->set_sa(charon->bus, ike_sa);
1496 return ike_sa;
1497 }
1498
1499 /**
1500 * enumerator filter function, waiting variant
1501 */
1502 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1503 entry_t **in, ike_sa_t **out, u_int *segment)
1504 {
1505 if (wait_for_entry(this, *in, *segment))
1506 {
1507 *out = (*in)->ike_sa;
1508 charon->bus->set_sa(charon->bus, *out);
1509 return TRUE;
1510 }
1511 return FALSE;
1512 }
1513
1514 /**
1515 * enumerator filter function, skipping variant
1516 */
1517 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1518 entry_t **in, ike_sa_t **out, u_int *segment)
1519 {
1520 if (!(*in)->driveout_new_threads &&
1521 !(*in)->driveout_waiting_threads &&
1522 !(*in)->checked_out)
1523 {
1524 *out = (*in)->ike_sa;
1525 charon->bus->set_sa(charon->bus, *out);
1526 return TRUE;
1527 }
1528 return FALSE;
1529 }
1530
/**
 * Reset threads SA after enumeration
 */
static void reset_sa(void *data)
{
	/* enumerator cleanup callback: clear the per-thread IKE_SA that the
	 * filter functions published on the bus; data (the manager) is unused */
	charon->bus->set_sa(charon->bus, NULL);
}
1538
/**
 * Enumerate all managed IKE_SAs. With wait = TRUE the enumeration blocks on
 * checked-out entries until they become available; with wait = FALSE such
 * entries are skipped.
 */
METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
	private_ike_sa_manager_t* this, bool wait)
{
	return enumerator_create_filter(create_table_enumerator(this),
		wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
		this, reset_sa);
}
1546
/**
 * Check an IKE_SA back into the manager, making it available to other
 * threads again. Also maintains the half-open hash table (tracking
 * responder SAs still in IKE_CONNECTING) and registers established SAs in
 * the connected-peers table used for uniqueness/duplicate checks. If the SA
 * has no entry yet (e.g. freshly created via checkout_new), one is added.
 */
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = FALSE;
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 !entry->ike_sa_id->is_initiator(entry->ike_sa_id) &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		DBG2(DBG_MGR, "check-in of IKE_SA successful.");
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* no entry found: this SA was created outside the manager's tables,
		 * add a fresh entry for it (segment stays locked, see below) */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		segment = put_entry(this, entry);
	}

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	/* both get_entry_by_sa() and put_entry() leave the segment locked */
	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1642
/**
 * Check in an IKE_SA and destroy it. The entry is removed from the hash
 * table after driving out all threads still waiting for it, then the
 * half-open/connected-peers/init-hash bookkeeping is cleaned up and the
 * entry (including the IKE_SA) destroyed.
 */
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{ /* it looks like flush() has been called and the SA is being deleted
		   * anyway, just check it in */
			DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
			entry->checked_out = FALSE;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up the auxiliary hash tables outside the segment lock */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1711
/**
 * Cleanup function for create_id_enumerator
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	/* destroy the list along with the cloned IKE_SA IDs it contains */
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1719
1720 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
1721 private_ike_sa_manager_t *this, identification_t *me,
1722 identification_t *other, int family)
1723 {
1724 table_item_t *item;
1725 u_int row, segment;
1726 rwlock_t *lock;
1727 linked_list_t *ids = NULL;
1728
1729 row = chunk_hash_inc(other->get_encoding(other),
1730 chunk_hash(me->get_encoding(me))) & this->table_mask;
1731 segment = row & this->segment_mask;
1732
1733 lock = this->connected_peers_segments[segment].lock;
1734 lock->read_lock(lock);
1735 item = this->connected_peers_table[row];
1736 while (item)
1737 {
1738 connected_peers_t *current = item->value;
1739
1740 if (connected_peers_match(current, me, other, family))
1741 {
1742 ids = current->sas->clone_offset(current->sas,
1743 offsetof(ike_sa_id_t, clone));
1744 break;
1745 }
1746 item = item->next;
1747 }
1748 lock->unlock(lock);
1749
1750 if (!ids)
1751 {
1752 return enumerator_create_empty();
1753 }
1754 return enumerator_create_cleaner(ids->create_enumerator(ids),
1755 (void*)id_enumerator_cleanup, ids);
1756 }
1757
1758 /**
1759 * Move all CHILD_SAs from old to new
1760 */
1761 static void adopt_children(ike_sa_t *old, ike_sa_t *new)
1762 {
1763 enumerator_t *enumerator;
1764 child_sa_t *child_sa;
1765
1766 enumerator = old->create_child_sa_enumerator(old);
1767 while (enumerator->enumerate(enumerator, &child_sa))
1768 {
1769 old->remove_child_sa(old, enumerator);
1770 new->add_child_sa(new, child_sa);
1771 }
1772 enumerator->destroy(enumerator);
1773 }
1774
1775 /**
1776 * Check if the replaced IKE_SA might get reauthenticated from host
1777 */
1778 static bool is_ikev1_reauth(ike_sa_t *duplicate, host_t *host)
1779 {
1780 return duplicate->get_version(duplicate) == IKEV1 &&
1781 host->equals(host, duplicate->get_other_host(duplicate));
1782 }
1783
1784 /**
1785 * Delete an existing IKE_SA due to a unique replace policy
1786 */
1787 static status_t enforce_replace(private_ike_sa_manager_t *this,
1788 ike_sa_t *duplicate, ike_sa_t *new,
1789 identification_t *other, host_t *host)
1790 {
1791 charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);
1792
1793 if (is_ikev1_reauth(duplicate, host))
1794 {
1795 /* looks like a reauthentication attempt */
1796 adopt_children(duplicate, new);
1797 /* For IKEv1 we have to delay the delete for the old IKE_SA. Some
1798 * peers need to complete the new SA first, otherwise the quick modes
1799 * might get lost. */
1800 lib->scheduler->schedule_job(lib->scheduler, (job_t*)
1801 delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
1802 return SUCCESS;
1803 }
1804 DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
1805 "uniqueness policy", other);
1806 return duplicate->delete(duplicate);
1807 }
1808
/**
 * Enforce the peer config's uniqueness policy against other IKE_SAs with
 * the same identity pair. Returns TRUE if the caller should cancel the new
 * IKE_SA (an existing one is kept), FALSE otherwise. With force_replace all
 * duplicates are destroyed regardless of policy (INITIAL_CONTACT case).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			/* only fully established/rekeying duplicates are considered */
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							if (!is_ikev1_reauth(duplicate, other_host))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
1894
1895 METHOD(ike_sa_manager_t, has_contact, bool,
1896 private_ike_sa_manager_t *this, identification_t *me,
1897 identification_t *other, int family)
1898 {
1899 table_item_t *item;
1900 u_int row, segment;
1901 rwlock_t *lock;
1902 bool found = FALSE;
1903
1904 row = chunk_hash_inc(other->get_encoding(other),
1905 chunk_hash(me->get_encoding(me))) & this->table_mask;
1906 segment = row & this->segment_mask;
1907 lock = this->connected_peers_segments[segment].lock;
1908 lock->read_lock(lock);
1909 item = this->connected_peers_table[row];
1910 while (item)
1911 {
1912 if (connected_peers_match(item->value, me, other, family))
1913 {
1914 found = TRUE;
1915 break;
1916 }
1917 item = item->next;
1918 }
1919 lock->unlock(lock);
1920
1921 return found;
1922 }
1923
1924 METHOD(ike_sa_manager_t, get_count, u_int,
1925 private_ike_sa_manager_t *this)
1926 {
1927 u_int segment, count = 0;
1928 mutex_t *mutex;
1929
1930 for (segment = 0; segment < this->segment_count; segment++)
1931 {
1932 mutex = this->segments[segment & this->segment_mask].mutex;
1933 mutex->lock(mutex);
1934 count += this->segments[segment].count;
1935 mutex->unlock(mutex);
1936 }
1937 return count;
1938 }
1939
1940 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
1941 private_ike_sa_manager_t *this, host_t *ip)
1942 {
1943 table_item_t *item;
1944 u_int row, segment;
1945 rwlock_t *lock;
1946 chunk_t addr;
1947 u_int count = 0;
1948
1949 if (ip)
1950 {
1951 addr = ip->get_address(ip);
1952 row = chunk_hash(addr) & this->table_mask;
1953 segment = row & this->segment_mask;
1954 lock = this->half_open_segments[segment].lock;
1955 lock->read_lock(lock);
1956 item = this->half_open_table[row];
1957 while (item)
1958 {
1959 half_open_t *half_open = item->value;
1960
1961 if (chunk_equals(addr, half_open->other))
1962 {
1963 count = half_open->count;
1964 break;
1965 }
1966 item = item->next;
1967 }
1968 lock->unlock(lock);
1969 }
1970 else
1971 {
1972 count = (u_int)ref_cur(&this->half_open_count);
1973 }
1974 return count;
1975 }
1976
/**
 * Flush the manager on shutdown: with all segments locked, drive out every
 * waiting thread, wait until all entries are released, initiate deletion of
 * each IKE_SA, and finally destroy all entries and their bookkeeping.
 * Also releases the RNG and hasher, so the manager must not be used for
 * checkouts afterwards.
 */
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{ /* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* release crypto helpers; destroy() only frees the tables afterwards */
	this->rng->destroy(this->rng);
	this->rng = NULL;
	this->hasher->destroy(this->hasher);
	this->hasher = NULL;
}
2064
2065 METHOD(ike_sa_manager_t, destroy, void,
2066 private_ike_sa_manager_t *this)
2067 {
2068 u_int i;
2069
2070 /* these are already cleared in flush() above */
2071 free(this->ike_sa_table);
2072 free(this->half_open_table);
2073 free(this->connected_peers_table);
2074 free(this->init_hashes_table);
2075 for (i = 0; i < this->segment_count; i++)
2076 {
2077 this->segments[i].mutex->destroy(this->segments[i].mutex);
2078 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
2079 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
2080 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
2081 }
2082 free(this->segments);
2083 free(this->half_open_segments);
2084 free(this->connected_peers_segments);
2085 free(this->init_hashes_segments);
2086
2087 free(this);
2088 }
2089
2090 /**
2091 * This function returns the next-highest power of two for the given number.
2092 * The algorithm works by setting all bits on the right-hand side of the most
2093 * significant 1 to 1 and then increments the whole number so it rolls over
2094 * to the nearest power of two. Note: returns 0 for n == 0
2095 */
2096 static u_int get_nearest_powerof2(u_int n)
2097 {
2098 u_int i;
2099
2100 --n;
2101 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2102 {
2103 n |= n >> i;
2104 }
2105 return ++n;
2106 }
2107
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.destroy = _destroy,
		},
	);

	/* hasher for init-message retransmit detection, RNG for SPI allocation */
	this->hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
	if (this->hasher == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no hasher supported");
		free(this);
		return NULL;
	}
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		this->hasher->destroy(this->hasher);
		free(this);
		return NULL;
	}

	/* 0 disables the global IKE_SA limit */
	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, lib->ns);

	/* table size and segment count are rounded to powers of two so that
	 * masking can replace modulo; segments never exceed the table size */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
												lib->settings, "%s.ikesa_table_size",
												DEFAULT_HASHTABLE_SIZE, lib->ns));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
												lib->settings, "%s.ikesa_table_segments",
												DEFAULT_SEGMENT_COUNT, lib->ns));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->segments[i].count = 0;
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->half_open_segments[i].count = 0;
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->connected_peers_segments[i].count = 0;
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->init_hashes_segments[i].count = 0;
	}

	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, lib->ns);
	return &this->public;
}