/*
 * Copyright (C) 2005-2011 Martin Willi
 * Copyright (C) 2011 revosec AG
 * Copyright (C) 2008-2012 Tobias Brunner
 * Copyright (C) 2005 Jan Hutter
 * Hochschule fuer Technik Rapperswil
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */
#include "ike_sa_manager.h"

#include <daemon.h>
#include <sa/ike_sa_id.h>
#include <bus/bus.h>
#include <threading/condvar.h>
#include <threading/mutex.h>
#include <threading/rwlock.h>
#include <utils/linked_list.h>
#include <crypto/hashers/hasher.h>
/* the default size of the hash table (MUST be a power of 2) */
#define DEFAULT_HASHTABLE_SIZE 1

/* the maximum size of the hash table (MUST be a power of 2) */
#define MAX_HASHTABLE_SIZE (1 << 30)

/* the default number of segments (MUST be a power of 2) */
#define DEFAULT_SEGMENT_COUNT 1
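
/* Illustrative note on the power-of-two requirement (worked example, assuming
 * the settings ikesa_table_size = 4096 and ikesa_table_segments = 8, which are
 * not defaults of this file): with power-of-two sizes a bucket and its lock
 * are found by simple masking, as done throughout this file:
 *
 *     table_mask   = 4096 - 1 = 0xfff;
 *     segment_mask = 8 - 1    = 0x7;
 *     row          = hash & table_mask;    // bucket index, 0..4095
 *     segment      = row & segment_mask;   // lock index, 0..7
 */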
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

    /**
     * Number of threads waiting for this ike_sa_t object.
     */
    u_int waiting_threads;

    /**
     * Condvar where threads can wait until ike_sa_t object is free for use again.
     */
    condvar_t *condvar;

    /**
     * Is this ike_sa currently checked out?
     */
    bool checked_out;

    /**
     * Does this SA drive out new threads?
     */
    bool driveout_new_threads;

    /**
     * Does this SA drive out waiting threads?
     */
    bool driveout_waiting_threads;

    /**
     * Identification of an IKE_SA (SPIs).
     */
    ike_sa_id_t *ike_sa_id;

    /**
     * The contained ike_sa_t object.
     */
    ike_sa_t *ike_sa;

    /**
     * hash of the IKE_SA_INIT message, used to detect retransmissions
     */
    chunk_t init_hash;

    /**
     * remote host address, required for DoS detection and duplicate
     * checking (a host with the same my_id and other_id is *not* considered
     * a duplicate if the address family differs)
     */
    host_t *other;

    /**
     * As responder: Is this SA half-open?
     */
    bool half_open;

    /**
     * own identity, required for duplicate checking
     */
    identification_t *my_id;

    /**
     * remote identity, required for duplicate checking
     */
    identification_t *other_id;

    /**
     * message ID currently being processed, if any
     */
    u_int32_t message_id;
};
/**
 * Implementation of entry_t.destroy.
 */
static status_t entry_destroy(entry_t *this)
{
    /* also destroy IKE SA */
    this->ike_sa->destroy(this->ike_sa);
    this->ike_sa_id->destroy(this->ike_sa_id);
    chunk_free(&this->init_hash);
    DESTROY_IF(this->other);
    DESTROY_IF(this->my_id);
    DESTROY_IF(this->other_id);
    this->condvar->destroy(this->condvar);
    free(this);
    return SUCCESS;
}
/**
 * Creates a new entry for the ike_sa_t list.
 */
static entry_t *entry_create()
{
    entry_t *this = malloc_thing(entry_t);

    this->waiting_threads = 0;
    this->condvar = condvar_create(CONDVAR_TYPE_DEFAULT);

    /* we set the checkout flag when we really give it out */
    this->checked_out = FALSE;
    this->driveout_new_threads = FALSE;
    this->driveout_waiting_threads = FALSE;
    this->message_id = -1;
    this->init_hash = chunk_empty;
    this->other = NULL;
    this->half_open = FALSE;
    this->my_id = NULL;
    this->other_id = NULL;
    this->ike_sa_id = NULL;
    this->ike_sa = NULL;

    return this;
}
/**
 * Function that matches entry_t objects by ike_sa_id_t.
 */
static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
{
    if (id->equals(id, entry->ike_sa_id))
    {
        return TRUE;
    }
    if ((id->get_responder_spi(id) == 0 ||
         entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
        id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
    {
        /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
        return TRUE;
    }
    return FALSE;
}
/**
 * Function that matches entry_t objects by ike_sa_t pointers.
 */
static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
{
    return entry->ike_sa == ike_sa;
}
/**
 * Hash function for ike_sa_id_t objects.
 */
static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
{
    /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
     * locally unique, so we use our randomly allocated SPI whether we are
     * initiator or responder to ensure a good distribution. The latter is not
     * possible for IKEv1 as we don't know whether we are original initiator or
     * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
     * SPIs (Cookies) to be allocated near random (we allocate them randomly
     * anyway) it seems safe to always use the initiator SPI. */
    if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
        ike_sa_id->is_initiator(ike_sa_id))
    {
        return ike_sa_id->get_initiator_spi(ike_sa_id);
    }
    return ike_sa_id->get_responder_spi(ike_sa_id);
}
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer.
 */
struct half_open_t {
    /** chunk of remote host address */
    chunk_t other;

    /** the number of half-open IKE_SAs with that host */
    u_int count;
};

/**
 * Destroys a half_open_t object.
 */
static void half_open_destroy(half_open_t *this)
{
    chunk_free(&this->other);
    free(this);
}
typedef struct connected_peers_t connected_peers_t;

struct connected_peers_t {
    /** own identity */
    identification_t *my_id;

    /** remote identity */
    identification_t *other_id;

    /** ip address family of peer */
    int family;

    /** list of ike_sa_id_t objects of IKE_SAs between the two identities */
    linked_list_t *sas;
};

static void connected_peers_destroy(connected_peers_t *this)
{
    this->my_id->destroy(this->my_id);
    this->other_id->destroy(this->other_id);
    this->sas->destroy(this->sas);
    free(this);
}
/**
 * Function that matches connected_peers_t objects by the given ids.
 */
static inline bool connected_peers_match(connected_peers_t *connected_peers,
                    identification_t *my_id, identification_t *other_id,
                    int family)
{
    return my_id->equals(my_id, connected_peers->my_id) &&
           other_id->equals(other_id, connected_peers->other_id) &&
           (!family || family == connected_peers->family);
}
typedef struct init_hash_t init_hash_t;

struct init_hash_t {
    /** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
    chunk_t hash;

    /** our SPI allocated for the IKE_SA based on this message */
    u_int64_t our_spi;
};
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table.
 */
struct segment_t {
    /** mutex to access a segment exclusively */
    mutex_t *mutex;

    /** the number of entries in this segment */
    u_int count;
};
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
 */
struct shareable_segment_t {
    /** rwlock to access a segment non-/exclusively */
    rwlock_t *lock;

    /** the number of entries in this segment - in case of the "half-open table"
     * it's the sum of all half_open_t.count in a segment. */
    u_int count;
};
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our own
 * list to save memory.
 */
struct table_item_t {
    /** data of this item */
    void *value;

    /** next item in the overflow list */
    table_item_t *next;
};
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {

    /**
     * Public interface of ike_sa_manager_t.
     */
    ike_sa_manager_t public;

    /**
     * Hash table with entries for the ike_sa_t objects.
     */
    table_item_t **ike_sa_table;

    /**
     * The size of the hash table.
     */
    u_int table_size;

    /**
     * Mask to map the hashes to table rows.
     */
    u_int table_mask;

    /**
     * Segments of the hash table.
     */
    segment_t *segments;

    /**
     * The number of segments.
     */
    u_int segment_count;

    /**
     * Mask to map a table row to a segment.
     */
    u_int segment_mask;

    /**
     * Hash table with half_open_t objects.
     */
    table_item_t **half_open_table;

    /**
     * Segments of the "half-open" hash table.
     */
    shareable_segment_t *half_open_segments;

    /**
     * Hash table with connected_peers_t objects.
     */
    table_item_t **connected_peers_table;

    /**
     * Segments of the "connected peers" hash table.
     */
    shareable_segment_t *connected_peers_segments;

    /**
     * Hash table with init_hash_t objects.
     */
    table_item_t **init_hashes_table;

    /**
     * Segments of the "hashes" hash table.
     */
    segment_t *init_hashes_segments;

    /**
     * RNG to get random SPIs for our side
     */
    rng_t *rng;

    /**
     * SHA1 hasher for IKE_SA_INIT retransmit detection
     */
    hasher_t *hasher;

    /**
     * reuse existing IKE_SAs in checkout_by_config
     */
    bool reuse_ikesa;
};
/**
 * Acquire a lock to access the segment of the table row with the given index.
 * It also works with the segment index directly.
 */
static inline void lock_single_segment(private_ike_sa_manager_t *this,
                                       u_int index)
{
    mutex_t *lock = this->segments[index & this->segment_mask].mutex;

    lock->lock(lock);
}

/**
 * Release the lock required to access the segment of the table row with the given index.
 * It also works with the segment index directly.
 */
static inline void unlock_single_segment(private_ike_sa_manager_t *this,
                                         u_int index)
{
    mutex_t *lock = this->segments[index & this->segment_mask].mutex;

    lock->unlock(lock);
}
/**
 * Lock all segments
 */
static void lock_all_segments(private_ike_sa_manager_t *this)
{
    u_int i;

    for (i = 0; i < this->segment_count; i++)
    {
        this->segments[i].mutex->lock(this->segments[i].mutex);
    }
}

/**
 * Unlock all segments
 */
static void unlock_all_segments(private_ike_sa_manager_t *this)
{
    u_int i;

    for (i = 0; i < this->segment_count; i++)
    {
        this->segments[i].mutex->unlock(this->segments[i].mutex);
    }
}
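
/* Illustrative summary of the locking discipline used by the code below (it
 * assumes only the helpers above, names as declared in this file): per-bucket
 * operations take exactly one segment lock, while flush() serializes against
 * everything by taking all of them.
 *
 *     segment = put_entry(this, entry);      // returns with segment locked
 *     ...                                    // modify entry while holding it
 *     unlock_single_segment(this, segment);
 *
 *     lock_all_segments(this);               // e.g. during flush()
 *     ...
 *     unlock_all_segments(this);
 */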
typedef struct private_enumerator_t private_enumerator_t;

/**
 * hash table enumerator implementation
 */
struct private_enumerator_t {

    /**
     * implements enumerator interface
     */
    enumerator_t enumerator;

    /**
     * associated ike_sa_manager_t
     */
    private_ike_sa_manager_t *manager;

    /**
     * current segment index
     */
    u_int segment;

    /**
     * currently enumerating entry
     */
    entry_t *entry;

    /**
     * current table row index
     */
    u_int row;

    /**
     * current table item
     */
    table_item_t *current;

    /**
     * previous table item
     */
    table_item_t *prev;
};
METHOD(enumerator_t, enumerate, bool,
    private_enumerator_t *this, entry_t **entry, u_int *segment)
{
    if (this->entry)
    {
        this->entry->condvar->signal(this->entry->condvar);
        this->entry = NULL;
    }
    while (this->segment < this->manager->segment_count)
    {
        while (this->row < this->manager->table_size)
        {
            this->prev = this->current;
            if (this->current)
            {
                this->current = this->current->next;
            }
            else
            {
                lock_single_segment(this->manager, this->segment);
                this->current = this->manager->ike_sa_table[this->row];
            }
            if (this->current)
            {
                *entry = this->entry = this->current->value;
                *segment = this->segment;
                return TRUE;
            }
            unlock_single_segment(this->manager, this->segment);
            this->row += this->manager->segment_count;
        }
        this->segment++;
        this->row = this->segment;
    }
    return FALSE;
}
METHOD(enumerator_t, enumerator_destroy, void,
    private_enumerator_t *this)
{
    if (this->entry)
    {
        this->entry->condvar->signal(this->entry->condvar);
    }
    if (this->current)
    {
        unlock_single_segment(this->manager, this->segment);
    }
    free(this);
}
/**
 * Creates an enumerator to enumerate the entries in the hash table.
 */
static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
{
    private_enumerator_t *enumerator;

    INIT(enumerator,
        .enumerator = {
            .enumerate = (void*)_enumerate,
            .destroy = _enumerator_destroy,
        },
        .manager = this,
    );
    return &enumerator->enumerator;
}
/**
 * Put an entry into the hash table.
 * Note: The caller has to unlock the returned segment.
 */
static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
{
    table_item_t *current, *item;
    u_int row, segment;

    INIT(item,
        .value = entry,
    );

    row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
    segment = row & this->segment_mask;

    lock_single_segment(this, segment);
    current = this->ike_sa_table[row];
    if (current)
    {    /* insert at the front of current bucket */
        item->next = current;
    }
    this->ike_sa_table[row] = item;
    this->segments[segment].count++;
    return segment;
}
/**
 * Remove an entry from the hash table.
 * Note: The caller MUST have a lock on the segment of this entry.
 */
static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
{
    table_item_t *item, *prev = NULL;
    u_int row, segment;

    row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
    segment = row & this->segment_mask;
    item = this->ike_sa_table[row];
    while (item)
    {
        if (item->value == entry)
        {
            if (prev)
            {
                prev->next = item->next;
            }
            else
            {
                this->ike_sa_table[row] = item->next;
            }
            this->segments[segment].count--;
            free(item);
            break;
        }
        prev = item;
        item = item->next;
    }
}
/**
 * Remove the entry at the current enumerator position.
 */
static void remove_entry_at(private_enumerator_t *this)
{
    this->entry = NULL;
    if (this->current)
    {
        table_item_t *current = this->current;

        this->manager->segments[this->segment].count--;
        this->current = this->prev;

        if (this->prev)
        {
            this->prev->next = current->next;
        }
        else
        {
            this->manager->ike_sa_table[this->row] = current->next;
            unlock_single_segment(this->manager, this->segment);
        }
        free(current);
    }
}
/**
 * Find an entry using the provided match function to compare the entries for
 * equality.
 */
static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
                    ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
                    linked_list_match_t match, void *param)
{
    table_item_t *item;
    u_int row, seg;

    row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
    seg = row & this->segment_mask;

    lock_single_segment(this, seg);
    item = this->ike_sa_table[row];
    while (item)
    {
        if (match(item->value, param))
        {
            *entry = item->value;
            *segment = seg;
            /* the locked segment has to be unlocked by the caller */
            return SUCCESS;
        }
        item = item->next;
    }
    unlock_single_segment(this, seg);
    return NOT_FOUND;
}
/**
 * Find an entry by ike_sa_id_t.
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_id(private_ike_sa_manager_t *this,
                    ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
{
    return get_entry_by_match_function(this, ike_sa_id, entry, segment,
                (linked_list_match_t)entry_match_by_id, ike_sa_id);
}
/**
 * Find an entry by IKE_SA pointer.
 * Note: On SUCCESS, the caller has to unlock the segment.
 */
static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
        ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
{
    return get_entry_by_match_function(this, ike_sa_id, entry, segment,
                (linked_list_match_t)entry_match_by_sa, ike_sa);
}
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
                           u_int segment)
{
    if (entry->driveout_new_threads)
    {
        /* we are not allowed to get this */
        return FALSE;
    }
    while (entry->checked_out && !entry->driveout_waiting_threads)
    {
        /* so wait until we can get it for us.
         * we register us as waiting. */
        entry->waiting_threads++;
        entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
        entry->waiting_threads--;
    }
    /* hm, a deletion request forbids us to get this SA, get next one */
    if (entry->driveout_waiting_threads)
    {
        /* we must signal here, others may be waiting on it, too */
        entry->condvar->signal(entry->condvar);
        return FALSE;
    }
    return TRUE;
}
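
/* Illustrative summary of how the flags checked above interact over an
 * entry's lifetime (condensed from the callers further below in this file):
 *
 *     entry->checked_out = TRUE;                 // checkout_*(): exclusive use
 *     entry->checked_out = FALSE;                // checkin(): release...
 *     entry->condvar->signal(entry->condvar);    // ...and wake one waiter
 *     entry->driveout_new_threads = TRUE;        // flush()/checkin_and_destroy():
 *     entry->driveout_waiting_threads = TRUE;    // reject new and waiting threads
 *     entry->condvar->broadcast(entry->condvar); // wake them all so they bail out
 */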
/**
 * Put a half-open SA into the hash table.
 */
static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
    table_item_t *item;
    u_int row, segment;
    rwlock_t *lock;
    half_open_t *half_open;
    chunk_t addr;

    addr = entry->other->get_address(entry->other);
    row = chunk_hash(addr) & this->table_mask;
    segment = row & this->segment_mask;
    lock = this->half_open_segments[segment].lock;
    lock->write_lock(lock);
    item = this->half_open_table[row];
    while (item)
    {
        half_open = item->value;

        if (chunk_equals(addr, half_open->other))
        {
            half_open->count++;
            break;
        }
        item = item->next;
    }

    if (!item)
    {
        INIT(half_open,
            .other = chunk_clone(addr),
            .count = 1,
        );
        INIT(item,
            .value = half_open,
            .next = this->half_open_table[row],
        );
        this->half_open_table[row] = item;
    }
    this->half_open_segments[segment].count++;
    lock->unlock(lock);
}
/**
 * Remove a half-open SA from the hash table.
 */
static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
{
    table_item_t *item, *prev = NULL;
    u_int row, segment;
    rwlock_t *lock;
    chunk_t addr;

    addr = entry->other->get_address(entry->other);
    row = chunk_hash(addr) & this->table_mask;
    segment = row & this->segment_mask;
    lock = this->half_open_segments[segment].lock;
    lock->write_lock(lock);
    item = this->half_open_table[row];
    while (item)
    {
        half_open_t *half_open = item->value;

        if (chunk_equals(addr, half_open->other))
        {
            if (--half_open->count == 0)
            {
                if (prev)
                {
                    prev->next = item->next;
                }
                else
                {
                    this->half_open_table[row] = item->next;
                }
                half_open_destroy(half_open);
                free(item);
            }
            this->half_open_segments[segment].count--;
            break;
        }
        prev = item;
        item = item->next;
    }
    lock->unlock(lock);
}
/**
 * Put an SA between two peers into the hash table.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
    table_item_t *item;
    u_int row, segment;
    rwlock_t *lock;
    connected_peers_t *connected_peers;
    chunk_t my_id, other_id;
    int family;

    my_id = entry->my_id->get_encoding(entry->my_id);
    other_id = entry->other_id->get_encoding(entry->other_id);
    family = entry->other->get_family(entry->other);
    row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
    segment = row & this->segment_mask;
    lock = this->connected_peers_segments[segment].lock;
    lock->write_lock(lock);
    item = this->connected_peers_table[row];
    while (item)
    {
        connected_peers = item->value;

        if (connected_peers_match(connected_peers, entry->my_id,
                                  entry->other_id, family))
        {
            if (connected_peers->sas->find_first(connected_peers->sas,
                    (linked_list_match_t)entry->ike_sa_id->equals,
                    NULL, entry->ike_sa_id) == SUCCESS)
            {
                lock->unlock(lock);
                return;
            }
            break;
        }
        item = item->next;
    }

    if (!item)
    {
        INIT(connected_peers,
            .my_id = entry->my_id->clone(entry->my_id),
            .other_id = entry->other_id->clone(entry->other_id),
            .family = family,
            .sas = linked_list_create(),
        );
        INIT(item,
            .value = connected_peers,
            .next = this->connected_peers_table[row],
        );
        this->connected_peers_table[row] = item;
    }
    connected_peers->sas->insert_last(connected_peers->sas,
                                      entry->ike_sa_id->clone(entry->ike_sa_id));
    this->connected_peers_segments[segment].count++;
    lock->unlock(lock);
}
/**
 * Remove an SA between two peers from the hash table.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
    table_item_t *item, *prev = NULL;
    u_int row, segment;
    rwlock_t *lock;
    chunk_t my_id, other_id;
    int family;

    my_id = entry->my_id->get_encoding(entry->my_id);
    other_id = entry->other_id->get_encoding(entry->other_id);
    family = entry->other->get_family(entry->other);

    row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
    segment = row & this->segment_mask;

    lock = this->connected_peers_segments[segment].lock;
    lock->write_lock(lock);
    item = this->connected_peers_table[row];
    while (item)
    {
        connected_peers_t *current = item->value;

        if (connected_peers_match(current, entry->my_id, entry->other_id,
                                  family))
        {
            enumerator_t *enumerator;
            ike_sa_id_t *ike_sa_id;

            enumerator = current->sas->create_enumerator(current->sas);
            while (enumerator->enumerate(enumerator, &ike_sa_id))
            {
                if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
                {
                    current->sas->remove_at(current->sas, enumerator);
                    ike_sa_id->destroy(ike_sa_id);
                    this->connected_peers_segments[segment].count--;
                    break;
                }
            }
            enumerator->destroy(enumerator);
            if (current->sas->get_count(current->sas) == 0)
            {
                if (prev)
                {
                    prev->next = item->next;
                }
                else
                {
                    this->connected_peers_table[row] = item->next;
                }
                connected_peers_destroy(current);
                free(item);
            }
            break;
        }
        prev = item;
        item = item->next;
    }
    lock->unlock(lock);
}
/**
 * Get a random SPI for new IKE_SAs
 */
static u_int64_t get_spi(private_ike_sa_manager_t *this)
{
    u_int64_t spi;

    if (this->rng &&
        this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
    {
        return spi;
    }
    return 0;
}
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(private_ike_sa_manager_t *this, message_t *message,
                          chunk_t *hash)
{
    if (!this->hasher)
    {    /* this might be the case when flush() has been called */
        return FALSE;
    }
    if (message->get_exchange_type(message) == ID_PROT)
    {    /* include the source for Main Mode as the hash will be the same if
          * SPIs are reused by two initiators that use the same proposal */
        host_t *src = message->get_source(message);

        if (!this->hasher->allocate_hash(this->hasher,
                                         src->get_address(src), NULL))
        {
            return FALSE;
        }
    }
    return this->hasher->allocate_hash(this->hasher,
                                       message->get_packet_data(message), hash);
}
/**
 * Check if we already have created an IKE_SA based on the initial IKE message
 * with the given hash.
 * If not, the hash is stored; the hash data is not(!) cloned.
 *
 * Also, the local SPI is returned. In case of a retransmit this is already
 * stored together with the hash, otherwise it is newly allocated and should
 * be used to create the IKE_SA.
 *
 * @returns ALREADY_DONE if the message with the given hash has been seen before
 *          NOT_FOUND if the message hash was not found
 *          FAILED if the SPI allocation failed
 */
static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
                                        chunk_t init_hash, u_int64_t *our_spi)
{
    table_item_t *item;
    u_int row, segment;
    mutex_t *mutex;
    init_hash_t *init;
    u_int64_t spi;

    row = chunk_hash(init_hash) & this->table_mask;
    segment = row & this->segment_mask;
    mutex = this->init_hashes_segments[segment].mutex;
    mutex->lock(mutex);
    item = this->init_hashes_table[row];
    while (item)
    {
        init_hash_t *current = item->value;

        if (chunk_equals(init_hash, current->hash))
        {
            *our_spi = current->our_spi;
            mutex->unlock(mutex);
            return ALREADY_DONE;
        }
        item = item->next;
    }

    spi = get_spi(this);
    if (!spi)
    {
        mutex->unlock(mutex);
        return FAILED;
    }

    INIT(init,
        .hash = {
            .len = init_hash.len,
            .ptr = init_hash.ptr,
        },
        .our_spi = spi,
    );
    INIT(item,
        .value = init,
        .next = this->init_hashes_table[row],
    );
    this->init_hashes_table[row] = item;
    *our_spi = init->our_spi;
    mutex->unlock(mutex);
    return NOT_FOUND;
}
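
/* Illustrative outcome summary (it mirrors the switch in checkout_by_message()
 * below): a fresh initial message gets a newly allocated SPI and NOT_FOUND; a
 * retransmit of the same message returns ALREADY_DONE together with the SPI
 * stored earlier; FAILED only occurs if no SPI could be allocated at all. */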
/**
 * Remove the hash of an initial IKE message from the cache.
 */
static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
{
    table_item_t *item, *prev = NULL;
    u_int row, segment;
    mutex_t *mutex;

    row = chunk_hash(init_hash) & this->table_mask;
    segment = row & this->segment_mask;
    mutex = this->init_hashes_segments[segment].mutex;
    mutex->lock(mutex);
    item = this->init_hashes_table[row];
    while (item)
    {
        init_hash_t *current = item->value;

        if (chunk_equals(init_hash, current->hash))
        {
            if (prev)
            {
                prev->next = item->next;
            }
            else
            {
                this->init_hashes_table[row] = item->next;
            }
            free(current);
            free(item);
            break;
        }
        prev = item;
        item = item->next;
    }
    mutex->unlock(mutex);
}
METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
    private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
{
    ike_sa_t *ike_sa = NULL;
    entry_t *entry;
    u_int segment;

    DBG2(DBG_MGR, "checkout IKE_SA");

    if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
    {
        if (wait_for_entry(this, entry, segment))
        {
            entry->checked_out = TRUE;
            ike_sa = entry->ike_sa;
            DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
                 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
        }
        unlock_single_segment(this, segment);
    }
    charon->bus->set_sa(charon->bus, ike_sa);
    return ike_sa;
}
METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
    private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
{
    ike_sa_id_t *ike_sa_id;
    ike_sa_t *ike_sa;
    u_int8_t ike_version;
    u_int64_t spi;

    ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;

    spi = get_spi(this);
    if (!spi)
    {
        DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
        return NULL;
    }

    if (initiator)
    {
        ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
    }
    else
    {
        ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
    }
    ike_sa = ike_sa_create(ike_sa_id, initiator, version);
    ike_sa_id->destroy(ike_sa_id);

    if (ike_sa)
    {
        DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
             ike_sa->get_unique_id(ike_sa));
    }
    return ike_sa;
}
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
    private_ike_sa_manager_t* this, message_t *message)
{
    u_int segment;
    entry_t *entry;
    ike_sa_t *ike_sa = NULL;
    ike_sa_id_t *id;
    ike_version_t ike_version;
    bool is_init = FALSE;

    id = message->get_ike_sa_id(message);
    /* clone the IKE_SA ID so we can modify the initiator flag */
    id = id->clone(id);
    id->switch_initiator(id);

    DBG2(DBG_MGR, "checkout IKE_SA by message");

    if (id->get_responder_spi(id) == 0)
    {
        if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
        {
            if (message->get_exchange_type(message) == IKE_SA_INIT &&
                message->get_request(message))
            {
                ike_version = IKEV2;
                is_init = TRUE;
            }
        }
        else
        {
            if (message->get_exchange_type(message) == ID_PROT ||
                message->get_exchange_type(message) == AGGRESSIVE)
            {
                ike_version = IKEV1;
                is_init = TRUE;
                if (id->is_initiator(id))
                {    /* not set in IKEv1, switch back before applying to new SA */
                    id->switch_initiator(id);
                }
            }
        }
    }

    if (is_init)
    {
        u_int64_t our_spi;
        chunk_t hash;

        if (!get_init_hash(this, message, &hash))
        {
            DBG1(DBG_MGR, "ignoring message, failed to hash message");
            id->destroy(id);
            return NULL;
        }

        /* ensure this is not a retransmit of an already handled init message */
        switch (check_and_put_init_hash(this, hash, &our_spi))
        {
            case NOT_FOUND:
            {    /* we've not seen this packet yet, create a new IKE_SA */
                id->set_responder_spi(id, our_spi);
                ike_sa = ike_sa_create(id, FALSE, ike_version);
                if (ike_sa)
                {
                    entry = entry_create();
                    entry->ike_sa = ike_sa;
                    entry->ike_sa_id = id->clone(id);

                    segment = put_entry(this, entry);
                    entry->checked_out = TRUE;
                    unlock_single_segment(this, segment);

                    entry->message_id = message->get_message_id(message);
                    entry->init_hash = hash;

                    DBG2(DBG_MGR, "created IKE_SA %s[%u]",
                         ike_sa->get_name(ike_sa),
                         ike_sa->get_unique_id(ike_sa));
                }
                else
                {
                    remove_init_hash(this, hash);
                    chunk_free(&hash);
                    DBG1(DBG_MGR, "ignoring message, no such IKE_SA");
                }
                id->destroy(id);
                charon->bus->set_sa(charon->bus, ike_sa);
                return ike_sa;
            }
            case FAILED:
            {    /* we failed to allocate an SPI */
                chunk_free(&hash);
                id->destroy(id);
                DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
                return NULL;
            }
            case ALREADY_DONE:
            default:
                break;
        }
        /* it looks like we already handled this init message to some degree */
        id->set_responder_spi(id, our_spi);
        chunk_free(&hash);
    }

    if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
    {
        /* only check out in IKEv2 if we are not already processing it */
        if (message->get_request(message) &&
            message->get_message_id(message) == entry->message_id)
        {
            DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
                 entry->message_id);
        }
        else if (wait_for_entry(this, entry, segment))
        {
            ike_sa_id_t *ike_id;

            ike_id = entry->ike_sa->get_id(entry->ike_sa);
            entry->checked_out = TRUE;
            entry->message_id = message->get_message_id(message);
            if (ike_id->get_responder_spi(ike_id) == 0)
            {
                ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
            }
            ike_sa = entry->ike_sa;
            DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
                 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
        }
        unlock_single_segment(this, segment);
    }
    id->destroy(id);
    charon->bus->set_sa(charon->bus, ike_sa);
    return ike_sa;
}
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
    private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
    enumerator_t *enumerator;
    entry_t *entry;
    ike_sa_t *ike_sa = NULL;
    peer_cfg_t *current_peer;
    ike_cfg_t *current_ike;
    u_int segment;

    DBG2(DBG_MGR, "checkout IKE_SA by config");

    if (!this->reuse_ikesa)
    {    /* IKE_SA reuse disabled by config */
        ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
        charon->bus->set_sa(charon->bus, ike_sa);
        return ike_sa;
    }

    enumerator = create_table_enumerator(this);
    while (enumerator->enumerate(enumerator, &entry, &segment))
    {
        if (!wait_for_entry(this, entry, segment))
        {
            continue;
        }
        if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
        {    /* skip IKE_SAs which are not usable */
            continue;
        }

        current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
        if (current_peer && current_peer->equals(current_peer, peer_cfg))
        {
            current_ike = current_peer->get_ike_cfg(current_peer);
            if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
            {
                entry->checked_out = TRUE;
                ike_sa = entry->ike_sa;
                DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
                     ike_sa->get_unique_id(ike_sa),
                     current_peer->get_name(current_peer));
                break;
            }
        }
    }
    enumerator->destroy(enumerator);

    if (!ike_sa)
    {    /* no IKE_SA using such a config, hand out a new */
        ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
    }
    charon->bus->set_sa(charon->bus, ike_sa);
    return ike_sa;
}
METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
    private_ike_sa_manager_t *this, u_int32_t id, bool child)
{
    enumerator_t *enumerator, *children;
    entry_t *entry;
    ike_sa_t *ike_sa = NULL;
    child_sa_t *child_sa;
    u_int segment;

    DBG2(DBG_MGR, "checkout IKE_SA by ID");

    enumerator = create_table_enumerator(this);
    while (enumerator->enumerate(enumerator, &entry, &segment))
    {
        if (wait_for_entry(this, entry, segment))
        {
            /* look for a child with such a reqid ... */
            if (child)
            {
                children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
                while (children->enumerate(children, (void**)&child_sa))
                {
                    if (child_sa->get_reqid(child_sa) == id)
                    {
                        ike_sa = entry->ike_sa;
                        break;
                    }
                }
                children->destroy(children);
            }
            else /* ... or for a IKE_SA with such a unique id */
            {
                if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
                {
                    ike_sa = entry->ike_sa;
                }
            }
            /* got one, return */
            if (ike_sa)
            {
                entry->checked_out = TRUE;
                DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
                     ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
                break;
            }
        }
    }
    enumerator->destroy(enumerator);

    charon->bus->set_sa(charon->bus, ike_sa);
    return ike_sa;
}
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
    private_ike_sa_manager_t *this, char *name, bool child)
{
    enumerator_t *enumerator, *children;
    entry_t *entry;
    ike_sa_t *ike_sa = NULL;
    child_sa_t *child_sa;
    u_int segment;

    enumerator = create_table_enumerator(this);
    while (enumerator->enumerate(enumerator, &entry, &segment))
    {
        if (wait_for_entry(this, entry, segment))
        {
            /* look for a child with such a policy name ... */
            if (child)
            {
                children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
                while (children->enumerate(children, (void**)&child_sa))
                {
                    if (streq(child_sa->get_name(child_sa), name))
                    {
                        ike_sa = entry->ike_sa;
                        break;
                    }
                }
                children->destroy(children);
            }
            else /* ... or for a IKE_SA with such a connection name */
            {
                if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
                {
                    ike_sa = entry->ike_sa;
                }
            }
            /* got one, return */
            if (ike_sa)
            {
                entry->checked_out = TRUE;
                DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
                     ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
                break;
            }
        }
    }
    enumerator->destroy(enumerator);

    charon->bus->set_sa(charon->bus, ike_sa);
    return ike_sa;
}
/**
 * enumerator filter function, waiting variant
 */
static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
                                   entry_t **in, ike_sa_t **out, u_int *segment)
{
    if (wait_for_entry(this, *in, *segment))
    {
        *out = (*in)->ike_sa;
        charon->bus->set_sa(charon->bus, *out);
        return TRUE;
    }
    return FALSE;
}

/**
 * enumerator filter function, skipping variant
 */
static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
                                   entry_t **in, ike_sa_t **out, u_int *segment)
{
    if (!(*in)->driveout_new_threads &&
        !(*in)->driveout_waiting_threads &&
        !(*in)->checked_out)
    {
        *out = (*in)->ike_sa;
        charon->bus->set_sa(charon->bus, *out);
        return TRUE;
    }
    return FALSE;
}

/**
 * Reset thread's SA after enumeration
 */
static void reset_sa(void *data)
{
    charon->bus->set_sa(charon->bus, NULL);
}
METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
    private_ike_sa_manager_t* this, bool wait)
{
    return enumerator_create_filter(create_table_enumerator(this),
            wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
            this, reset_sa);
}
METHOD(ike_sa_manager_t, checkin, void,
    private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
    /* to check the SA back in, we look for the pointer of the ike_sa
     * in all entries.
     * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
     * on reception of a IKE_SA_INIT response) the lookup will work but
     * updating of the SPI MAY be necessary...
     */
    entry_t *entry;
    ike_sa_id_t *ike_sa_id;
    host_t *other;
    identification_t *my_id, *other_id;
    u_int segment;

    ike_sa_id = ike_sa->get_id(ike_sa);
    my_id = ike_sa->get_my_id(ike_sa);
    other_id = ike_sa->get_other_eap_id(ike_sa);
    other = ike_sa->get_other_host(ike_sa);

    DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
         ike_sa->get_unique_id(ike_sa));

    /* look for the entry */
    if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
    {
        /* ike_sa_id must be updated */
        entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
        /* signal waiting threads */
        entry->checked_out = FALSE;
        entry->message_id = -1;
        /* check if this SA is half-open */
        if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
        {
            /* not half open anymore */
            entry->half_open = FALSE;
            remove_half_open(this, entry);
        }
        else if (entry->half_open && !other->ip_equals(other, entry->other))
        {
            /* the other host's IP has changed, we must update the hash table */
            remove_half_open(this, entry);
            DESTROY_IF(entry->other);
            entry->other = other->clone(other);
            put_half_open(this, entry);
        }
        else if (!entry->half_open &&
                 !entry->ike_sa_id->is_initiator(entry->ike_sa_id) &&
                 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
        {
            /* this is a new half-open SA */
            entry->half_open = TRUE;
            entry->other = other->clone(other);
            put_half_open(this, entry);
        }
        DBG2(DBG_MGR, "check-in of IKE_SA successful.");
        entry->condvar->signal(entry->condvar);
    }
    else
    {
        entry = entry_create();
        entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
        entry->ike_sa = ike_sa;
        segment = put_entry(this, entry);
    }

    /* apply identities for duplicate test */
    if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
         ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
        entry->my_id == NULL && entry->other_id == NULL)
    {
        if (ike_sa->get_version(ike_sa) == IKEV1)
        {
            /* If authenticated and received INITIAL_CONTACT,
             * delete any existing IKE_SAs with that peer. */
            if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
            {
                this->public.check_uniqueness(&this->public, ike_sa, TRUE);
                ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
            }
        }

        entry->my_id = my_id->clone(my_id);
        entry->other_id = other_id->clone(other_id);
        if (!entry->other)
        {
            entry->other = other->clone(other);
        }
        put_connected_peers(this, entry);
    }

    unlock_single_segment(this, segment);

    charon->bus->set_sa(charon->bus, NULL);
}
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
    private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
    /* deletion is a bit complex, we must ensure that no thread is waiting for
     * this SA.
     * We take this SA from the table, and start signaling while threads
     * are in the condvar.
     */
    entry_t *entry;
    ike_sa_id_t *ike_sa_id;
    u_int segment;

    ike_sa_id = ike_sa->get_id(ike_sa);

    DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
         ike_sa->get_unique_id(ike_sa));

    if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
    {
        if (entry->driveout_waiting_threads && entry->driveout_new_threads)
        {    /* it looks like flush() has been called and the SA is being deleted
              * anyway, just check it in */
            DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
            entry->checked_out = FALSE;
            entry->condvar->broadcast(entry->condvar);
            unlock_single_segment(this, segment);
            return;
        }

        /* drive out waiting threads, as we are in a hurry */
        entry->driveout_waiting_threads = TRUE;
        /* mark it, so no new threads can get this entry */
        entry->driveout_new_threads = TRUE;
        /* wait until all workers have done their work */
        while (entry->waiting_threads)
        {
            /* wake up all */
            entry->condvar->broadcast(entry->condvar);
            /* they will wake us again when their work is done */
            entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
        }
        remove_entry(this, entry);
        unlock_single_segment(this, segment);

        if (entry->half_open)
        {
            remove_half_open(this, entry);
        }
        if (entry->my_id && entry->other_id)
        {
            remove_connected_peers(this, entry);
        }
        if (entry->init_hash.ptr)
        {
            remove_init_hash(this, entry->init_hash);
        }

        entry_destroy(entry);

        DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
    }
    else
    {
        DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
        ike_sa->destroy(ike_sa);
    }
    charon->bus->set_sa(charon->bus, NULL);
}
/**
 * Cleanup function for create_id_enumerator
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
    ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}

METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
    private_ike_sa_manager_t *this, identification_t *me,
    identification_t *other, int family)
{
    table_item_t *item;
    u_int row, segment;
    rwlock_t *lock;
    linked_list_t *ids = NULL;

    row = chunk_hash_inc(other->get_encoding(other),
                         chunk_hash(me->get_encoding(me))) & this->table_mask;
    segment = row & this->segment_mask;

    lock = this->connected_peers_segments[segment].lock;
    lock->read_lock(lock);
    item = this->connected_peers_table[row];
    while (item)
    {
        connected_peers_t *current = item->value;

        if (connected_peers_match(current, me, other, family))
        {
            ids = current->sas->clone_offset(current->sas,
                                             offsetof(ike_sa_id_t, clone));
            break;
        }
        item = item->next;
    }
    lock->unlock(lock);

    if (!ids)
    {
        return enumerator_create_empty();
    }
    return enumerator_create_cleaner(ids->create_enumerator(ids),
                                     (void*)id_enumerator_cleanup, ids);
}
METHOD(ike_sa_manager_t, check_uniqueness, bool,
    private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
    bool cancel = FALSE;
    peer_cfg_t *peer_cfg;
    unique_policy_t policy;
    enumerator_t *enumerator;
    ike_sa_id_t *id = NULL;
    identification_t *me, *other;
    host_t *other_host;

    peer_cfg = ike_sa->get_peer_cfg(ike_sa);
    policy = peer_cfg->get_unique_policy(peer_cfg);
    if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
    {
        return FALSE;
    }
    me = ike_sa->get_my_id(ike_sa);
    other = ike_sa->get_other_eap_id(ike_sa);
    other_host = ike_sa->get_other_host(ike_sa);

    enumerator = create_id_enumerator(this, me, other,
                                      other_host->get_family(other_host));
    while (enumerator->enumerate(enumerator, &id))
    {
        status_t status = SUCCESS;
        ike_sa_t *duplicate;

        duplicate = checkout(this, id);
        if (!duplicate)
        {
            continue;
        }
        if (force_replace)
        {
            DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
                 "received INITIAL_CONTACT", other);
            checkin_and_destroy(this, duplicate);
            continue;
        }
        peer_cfg = duplicate->get_peer_cfg(duplicate);
        if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
        {
            switch (duplicate->get_state(duplicate))
            {
                case IKE_ESTABLISHED:
                case IKE_REKEYING:
                    switch (policy)
                    {
                        case UNIQUE_REPLACE:
                            DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer "
                                 "'%Y' due to uniqueness policy", other);
                            status = duplicate->delete(duplicate);
                            break;
                        case UNIQUE_KEEP:
                            cancel = TRUE;
                            /* we keep the first IKE_SA and delete all
                             * other duplicates that might exist */
                            policy = UNIQUE_REPLACE;
                            break;
                        default:
                            break;
                    }
                    break;
                default:
                    break;
            }
        }
        if (status == DESTROY_ME)
        {
            checkin_and_destroy(this, duplicate);
        }
        else
        {
            checkin(this, duplicate);
        }
    }
    enumerator->destroy(enumerator);
    /* reset thread's current IKE_SA after checkin */
    charon->bus->set_sa(charon->bus, ike_sa);
    return cancel;
}
METHOD(ike_sa_manager_t, has_contact, bool,
    private_ike_sa_manager_t *this, identification_t *me,
    identification_t *other, int family)
{
    table_item_t *item;
    u_int row, segment;
    rwlock_t *lock;
    bool found = FALSE;

    row = chunk_hash_inc(other->get_encoding(other),
                         chunk_hash(me->get_encoding(me))) & this->table_mask;
    segment = row & this->segment_mask;
    lock = this->connected_peers_segments[segment].lock;
    lock->read_lock(lock);
    item = this->connected_peers_table[row];
    while (item)
    {
        if (connected_peers_match(item->value, me, other, family))
        {
            found = TRUE;
            break;
        }
        item = item->next;
    }
    lock->unlock(lock);

    return found;
}
METHOD(ike_sa_manager_t, get_count, u_int,
    private_ike_sa_manager_t *this)
{
    u_int segment, count = 0;
    mutex_t *mutex;

    for (segment = 0; segment < this->segment_count; segment++)
    {
        mutex = this->segments[segment & this->segment_mask].mutex;
        mutex->lock(mutex);
        count += this->segments[segment].count;
        mutex->unlock(mutex);
    }
    return count;
}
METHOD(ike_sa_manager_t, get_half_open_count, u_int,
    private_ike_sa_manager_t *this, host_t *ip)
{
    table_item_t *item;
    u_int row, segment;
    rwlock_t *lock;
    chunk_t addr;
    u_int count = 0;

    if (ip)
    {
        addr = ip->get_address(ip);
        row = chunk_hash(addr) & this->table_mask;
        segment = row & this->segment_mask;
        lock = this->half_open_segments[segment].lock;
        lock->read_lock(lock);
        item = this->half_open_table[row];
        while (item)
        {
            half_open_t *half_open = item->value;

            if (chunk_equals(addr, half_open->other))
            {
                count = half_open->count;
                break;
            }
            item = item->next;
        }
        lock->unlock(lock);
    }
    else
    {
        for (segment = 0; segment < this->segment_count; segment++)
        {
            lock = this->half_open_segments[segment].lock;
            lock->read_lock(lock);
            count += this->half_open_segments[segment].count;
            lock->unlock(lock);
        }
    }
    return count;
}
METHOD(ike_sa_manager_t, flush, void,
    private_ike_sa_manager_t *this)
{
    /* destroy all list entries */
    enumerator_t *enumerator;
    entry_t *entry;
    u_int segment;

    lock_all_segments(this);
    DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
    /* Step 1: drive out all waiting threads */
    DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
    enumerator = create_table_enumerator(this);
    while (enumerator->enumerate(enumerator, &entry, &segment))
    {
        /* do not accept new threads, drive out waiting threads */
        entry->driveout_new_threads = TRUE;
        entry->driveout_waiting_threads = TRUE;
    }
    enumerator->destroy(enumerator);
    DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
    /* Step 2: wait until all are gone */
    enumerator = create_table_enumerator(this);
    while (enumerator->enumerate(enumerator, &entry, &segment))
    {
        while (entry->waiting_threads || entry->checked_out)
        {
            /* wake up all */
            entry->condvar->broadcast(entry->condvar);
            /* go sleeping until they are gone */
            entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
        }
    }
    enumerator->destroy(enumerator);
    DBG2(DBG_MGR, "delete all IKE_SA's");
    /* Step 3: initiate deletion of all IKE_SAs */
    enumerator = create_table_enumerator(this);
    while (enumerator->enumerate(enumerator, &entry, &segment))
    {
        charon->bus->set_sa(charon->bus, entry->ike_sa);
        if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
        {    /* as the delete never gets processed, fire down events */
            switch (entry->ike_sa->get_state(entry->ike_sa))
            {
                case IKE_ESTABLISHED:
                case IKE_REKEYING:
                case IKE_DELETING:
                    charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
                    break;
                default:
                    break;
            }
        }
        entry->ike_sa->delete(entry->ike_sa);
    }
    enumerator->destroy(enumerator);

    DBG2(DBG_MGR, "destroy all entries");
    /* Step 4: destroy all entries */
    enumerator = create_table_enumerator(this);
    while (enumerator->enumerate(enumerator, &entry, &segment))
    {
        charon->bus->set_sa(charon->bus, entry->ike_sa);
        if (entry->half_open)
        {
            remove_half_open(this, entry);
        }
        if (entry->my_id && entry->other_id)
        {
            remove_connected_peers(this, entry);
        }
        if (entry->init_hash.ptr)
        {
            remove_init_hash(this, entry->init_hash);
        }
        remove_entry_at((private_enumerator_t*)enumerator);
        entry_destroy(entry);
    }
    enumerator->destroy(enumerator);
    charon->bus->set_sa(charon->bus, NULL);
    unlock_all_segments(this);

    this->rng->destroy(this->rng);
    this->rng = NULL;
    this->hasher->destroy(this->hasher);
    this->hasher = NULL;
}
METHOD(ike_sa_manager_t, destroy, void,
    private_ike_sa_manager_t *this)
{
    u_int i;

    /* these are already cleared in flush() above */
    free(this->ike_sa_table);
    free(this->half_open_table);
    free(this->connected_peers_table);
    free(this->init_hashes_table);
    for (i = 0; i < this->segment_count; i++)
    {
        this->segments[i].mutex->destroy(this->segments[i].mutex);
        this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
        this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
        this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
    }
    free(this->segments);
    free(this->half_open_segments);
    free(this->connected_peers_segments);
    free(this->init_hashes_segments);

    free(this);
}
/**
 * This function returns the next-highest power of two for the given number.
 * The algorithm works by setting all bits on the right-hand side of the most
 * significant 1 to 1 and then increments the whole number so it rolls over
 * to the nearest power of two. Note: returns 0 for n == 0
 */
static u_int get_nearest_powerof2(u_int n)
{
    u_int i;

    --n;
    for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
    {
        n |= n >> i;
    }
    return ++n;
}
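
/* Worked example (illustrative): for n = 1000, --n gives 999 (0x3e7); the
 * OR-shift cascade smears the most significant 1 bit rightwards to 0x3ff
 * (1023); ++n then yields 1024, the next-highest power of two. For an n that
 * is already a power of two (e.g. 8), the function returns n unchanged. */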
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
    u_int i;
    private_ike_sa_manager_t *this;

    INIT(this,
        .public = {
            .checkout = _checkout,
            .checkout_new = _checkout_new,
            .checkout_by_message = _checkout_by_message,
            .checkout_by_config = _checkout_by_config,
            .checkout_by_id = _checkout_by_id,
            .checkout_by_name = _checkout_by_name,
            .check_uniqueness = _check_uniqueness,
            .has_contact = _has_contact,
            .create_enumerator = _create_enumerator,
            .create_id_enumerator = _create_id_enumerator,
            .checkin = _checkin,
            .checkin_and_destroy = _checkin_and_destroy,
            .get_count = _get_count,
            .get_half_open_count = _get_half_open_count,
            .flush = _flush,
            .destroy = _destroy,
        },
    );

    this->hasher = lib->crypto->create_hasher(lib->crypto, HASH_PREFERRED);
    if (this->hasher == NULL)
    {
        DBG1(DBG_MGR, "manager initialization failed, no hasher supported");
        free(this);
        return NULL;
    }
    this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
    if (this->rng == NULL)
    {
        DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
        this->hasher->destroy(this->hasher);
        free(this);
        return NULL;
    }

    this->table_size = get_nearest_powerof2(lib->settings->get_int(
                                    lib->settings, "%s.ikesa_table_size",
                                    DEFAULT_HASHTABLE_SIZE, charon->name));
    this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
    this->table_mask = this->table_size - 1;

    this->segment_count = get_nearest_powerof2(lib->settings->get_int(
                                    lib->settings, "%s.ikesa_table_segments",
                                    DEFAULT_SEGMENT_COUNT, charon->name));
    this->segment_count = max(1, min(this->segment_count, this->table_size));
    this->segment_mask = this->segment_count - 1;

    this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
    this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
    for (i = 0; i < this->segment_count; i++)
    {
        this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
        this->segments[i].count = 0;
    }

    /* we use the same table parameters for the table to track half-open SAs */
    this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
    this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
    for (i = 0; i < this->segment_count; i++)
    {
        this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
        this->half_open_segments[i].count = 0;
    }

    /* also for the hash table used for duplicate tests */
    this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
    this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
    for (i = 0; i < this->segment_count; i++)
    {
        this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
        this->connected_peers_segments[i].count = 0;
    }

    /* and again for the table of hashes of seen initial IKE messages */
    this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
    this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
    for (i = 0; i < this->segment_count; i++)
    {
        this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
        this->init_hashes_segments[i].count = 0;
    }

    this->reuse_ikesa = lib->settings->get_bool(lib->settings,
                                    "%s.reuse_ikesa", TRUE, charon->name);
    return &this->public;
}
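
/* Illustrative usage sketch (a simplified assumption, not part of this
 * manager) of the checkout/checkin contract exposed above, as a responder
 * handling an inbound message via the global charon->ike_sa_manager instance:
 *
 *     ike_sa_t *ike_sa;
 *
 *     ike_sa = charon->ike_sa_manager->checkout_by_message(
 *                                        charon->ike_sa_manager, message);
 *     if (ike_sa)
 *     {
 *         // process the message while holding the SA exclusively ...
 *         charon->ike_sa_manager->checkin(charon->ike_sa_manager, ike_sa);
 *     }
 */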