Use netlink_reserve() helper function in XFRM to simplify message construction
src/libhydra/plugins/kernel_netlink/kernel_netlink_ipsec.c
1 /*
2 * Copyright (C) 2006-2012 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <utils/debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <collections/hashtable.h>
43 #include <collections/linked_list.h>
44 #include <processing/jobs/callback_job.h>
45
46 /** Required for Linux 2.6.26 kernel and later */
47 #ifndef XFRM_STATE_AF_UNSPEC
48 #define XFRM_STATE_AF_UNSPEC 32
49 #endif
50
51 /** From linux/in.h */
52 #ifndef IP_XFRM_POLICY
53 #define IP_XFRM_POLICY 17
54 #endif
55
56 /** Missing on uclibc */
57 #ifndef IPV6_XFRM_POLICY
58 #define IPV6_XFRM_POLICY 34
59 #endif /*IPV6_XFRM_POLICY*/
60
61 /* from linux/udp.h */
62 #ifndef UDP_ENCAP
63 #define UDP_ENCAP 100
64 #endif
65
66 #ifndef UDP_ENCAP_ESPINUDP
67 #define UDP_ENCAP_ESPINUDP 2
68 #endif
69
70 /* this is not defined on some platforms */
71 #ifndef SOL_UDP
72 #define SOL_UDP IPPROTO_UDP
73 #endif
74
75 /** Default priority of installed policies */
76 #define PRIO_BASE 512
77
78 /** Default replay window size, if not set using charon.replay_window */
79 #define DEFAULT_REPLAY_WINDOW 32
80
81 /**
82  * Map a limit of 0 (i.e. unlimited) for bytes and packets to XFRM_INF
83 */
84 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
85
86 /**
87 * Create ORable bitfield of XFRM NL groups
88 */
89 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
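
/*
 * Illustrative sketch (an addition, not part of the upstream file): the event
 * socket presumably subscribes to the XFRM multicast groups whose messages are
 * handled below, along these lines:
 *
 *   struct sockaddr_nl addr = {
 *       .nl_family = AF_NETLINK,
 *       .nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
 *                    XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING),
 *   };
 *   bind(fd, (struct sockaddr*)&addr, sizeof(addr));
 *
 * XFRMNLGRP(ACQUIRE) expands to (1 << (XFRMNLGRP_ACQUIRE - 1)), one bit per
 * multicast group, so the values can be ORed into nl_groups.
 */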
90
91 /**
92 * Returns a pointer to the first rtattr following the nlmsghdr *nlh and the
93 * 'usual' netlink data x like 'struct xfrm_usersa_info'
94 */
95 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + \
96 NLMSG_ALIGN(sizeof(x))))
97 /**
98 * Returns the total size of attached rta data
99 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
100 */
101 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
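
/*
 * Usage sketch: XFRM_RTA() and XFRM_PAYLOAD() are combined with the standard
 * RTA_OK()/RTA_NEXT() macros to walk the attributes appended to an XFRM
 * message, as done in process_acquire() and process_migrate() below:
 *
 *   struct rtattr *rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
 *   size_t rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
 *
 *   while (RTA_OK(rta, rtasize))
 *   {
 *       if (rta->rta_type == XFRMA_TMPL)
 *       {   // RTA_DATA(rta) points to a struct xfrm_user_tmpl
 *       }
 *       rta = RTA_NEXT(rta, rtasize);
 *   }
 */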
102
103 typedef struct kernel_algorithm_t kernel_algorithm_t;
104
105 /**
106  * Mapping of IKEv2 algorithm identifiers to Linux crypto API names
107 */
108 struct kernel_algorithm_t {
109 /**
110 * Identifier specified in IKEv2
111 */
112 int ikev2;
113
114 /**
115 * Name of the algorithm in linux crypto API
116 */
117 char *name;
118 };
119
120 ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
121 "XFRM_MSG_NEWSA",
122 "XFRM_MSG_DELSA",
123 "XFRM_MSG_GETSA",
124 "XFRM_MSG_NEWPOLICY",
125 "XFRM_MSG_DELPOLICY",
126 "XFRM_MSG_GETPOLICY",
127 "XFRM_MSG_ALLOCSPI",
128 "XFRM_MSG_ACQUIRE",
129 "XFRM_MSG_EXPIRE",
130 "XFRM_MSG_UPDPOLICY",
131 "XFRM_MSG_UPDSA",
132 "XFRM_MSG_POLEXPIRE",
133 "XFRM_MSG_FLUSHSA",
134 "XFRM_MSG_FLUSHPOLICY",
135 "XFRM_MSG_NEWAE",
136 "XFRM_MSG_GETAE",
137 "XFRM_MSG_REPORT",
138 "XFRM_MSG_MIGRATE",
139 "XFRM_MSG_NEWSADINFO",
140 "XFRM_MSG_GETSADINFO",
141 "XFRM_MSG_NEWSPDINFO",
142 "XFRM_MSG_GETSPDINFO",
143 "XFRM_MSG_MAPPING"
144 );
145
146 ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_REPLAY_ESN_VAL,
147 "XFRMA_UNSPEC",
148 "XFRMA_ALG_AUTH",
149 "XFRMA_ALG_CRYPT",
150 "XFRMA_ALG_COMP",
151 "XFRMA_ENCAP",
152 "XFRMA_TMPL",
153 "XFRMA_SA",
154 "XFRMA_POLICY",
155 "XFRMA_SEC_CTX",
156 "XFRMA_LTIME_VAL",
157 "XFRMA_REPLAY_VAL",
158 "XFRMA_REPLAY_THRESH",
159 "XFRMA_ETIMER_THRESH",
160 "XFRMA_SRCADDR",
161 "XFRMA_COADDR",
162 "XFRMA_LASTUSED",
163 "XFRMA_POLICY_TYPE",
164 "XFRMA_MIGRATE",
165 "XFRMA_ALG_AEAD",
166 "XFRMA_KMADDRESS",
167 "XFRMA_ALG_AUTH_TRUNC",
168 "XFRMA_MARK",
169 "XFRMA_TFCPAD",
170 "XFRMA_REPLAY_ESN_VAL",
171 );
172
173 /**
174 * Algorithms for encryption
175 */
176 static kernel_algorithm_t encryption_algs[] = {
177 /* {ENCR_DES_IV64, "***" }, */
178 {ENCR_DES, "des" },
179 {ENCR_3DES, "des3_ede" },
180 /* {ENCR_RC5, "***" }, */
181 /* {ENCR_IDEA, "***" }, */
182 {ENCR_CAST, "cast128" },
183 {ENCR_BLOWFISH, "blowfish" },
184 /* {ENCR_3IDEA, "***" }, */
185 /* {ENCR_DES_IV32, "***" }, */
186 {ENCR_NULL, "cipher_null" },
187 {ENCR_AES_CBC, "aes" },
188 {ENCR_AES_CTR, "rfc3686(ctr(aes))" },
189 {ENCR_AES_CCM_ICV8, "rfc4309(ccm(aes))" },
190 {ENCR_AES_CCM_ICV12, "rfc4309(ccm(aes))" },
191 {ENCR_AES_CCM_ICV16, "rfc4309(ccm(aes))" },
192 {ENCR_AES_GCM_ICV8, "rfc4106(gcm(aes))" },
193 {ENCR_AES_GCM_ICV12, "rfc4106(gcm(aes))" },
194 {ENCR_AES_GCM_ICV16, "rfc4106(gcm(aes))" },
195 {ENCR_NULL_AUTH_AES_GMAC, "rfc4543(gcm(aes))" },
196 {ENCR_CAMELLIA_CBC, "cbc(camellia)" },
197 /* {ENCR_CAMELLIA_CTR, "***" }, */
198 /* {ENCR_CAMELLIA_CCM_ICV8, "***" }, */
199 /* {ENCR_CAMELLIA_CCM_ICV12, "***" }, */
200 /* {ENCR_CAMELLIA_CCM_ICV16, "***" }, */
201 {ENCR_SERPENT_CBC, "serpent" },
202 {ENCR_TWOFISH_CBC, "twofish" },
203 };
204
205 /**
206 * Algorithms for integrity protection
207 */
208 static kernel_algorithm_t integrity_algs[] = {
209 {AUTH_HMAC_MD5_96, "md5" },
210 {AUTH_HMAC_MD5_128, "hmac(md5)" },
211 {AUTH_HMAC_SHA1_96, "sha1" },
212 {AUTH_HMAC_SHA1_160, "hmac(sha1)" },
213 {AUTH_HMAC_SHA2_256_96, "sha256" },
214 {AUTH_HMAC_SHA2_256_128, "hmac(sha256)" },
215 {AUTH_HMAC_SHA2_384_192, "hmac(sha384)" },
216 {AUTH_HMAC_SHA2_512_256, "hmac(sha512)" },
217 /* {AUTH_DES_MAC, "***" }, */
218 /* {AUTH_KPDK_MD5, "***" }, */
219 {AUTH_AES_XCBC_96, "xcbc(aes)" },
220 };
221
222 /**
223 * Algorithms for IPComp
224 */
225 static kernel_algorithm_t compression_algs[] = {
226 /* {IPCOMP_OUI, "***" }, */
227 {IPCOMP_DEFLATE, "deflate" },
228 {IPCOMP_LZS, "lzs" },
229 {IPCOMP_LZJH, "lzjh" },
230 };
231
232 /**
233  * Look up the Linux crypto API name for an IKEv2 algorithm identifier
234 */
235 static char* lookup_algorithm(transform_type_t type, int ikev2)
236 {
237 kernel_algorithm_t *list;
238 int i, count;
239 char *name;
240
241 switch (type)
242 {
243 case ENCRYPTION_ALGORITHM:
244 list = encryption_algs;
245 count = countof(encryption_algs);
246 break;
247 case INTEGRITY_ALGORITHM:
248 list = integrity_algs;
249 count = countof(integrity_algs);
250 break;
251 case COMPRESSION_ALGORITHM:
252 list = compression_algs;
253 count = countof(compression_algs);
254 break;
255 default:
256 return NULL;
257 }
258 for (i = 0; i < count; i++)
259 {
260 if (list[i].ikev2 == ikev2)
261 {
262 return list[i].name;
263 }
264 }
265 if (hydra->kernel_interface->lookup_algorithm(hydra->kernel_interface,
266 ikev2, type, NULL, &name))
267 {
268 return name;
269 }
270 return NULL;
271 }
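
/*
 * Example: lookup_algorithm(ENCRYPTION_ALGORITHM, ENCR_AES_CBC) yields "aes"
 * and lookup_algorithm(INTEGRITY_ALGORITHM, AUTH_HMAC_SHA2_256_128) yields
 * "hmac(sha256)".  Identifiers missing from the static tables above are passed
 * to the generic kernel interface as a fallback; a NULL result means no usable
 * Linux crypto API name is known for the algorithm.
 */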
272
273 typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;
274
275 /**
276 * Private variables and functions of kernel_netlink class.
277 */
278 struct private_kernel_netlink_ipsec_t {
279 /**
280 * Public part of the kernel_netlink_t object
281 */
282 kernel_netlink_ipsec_t public;
283
284 /**
285 * Mutex to lock access to installed policies
286 */
287 mutex_t *mutex;
288
289 /**
290 * Hash table of installed policies (policy_entry_t)
291 */
292 hashtable_t *policies;
293
294 /**
295 * Hash table of IPsec SAs using policies (ipsec_sa_t)
296 */
297 hashtable_t *sas;
298
299 /**
300 * Netlink xfrm socket (IPsec)
301 */
302 netlink_socket_t *socket_xfrm;
303
304 /**
305 * Netlink xfrm socket to receive acquire and expire events
306 */
307 int socket_xfrm_events;
308
309 /**
310 * Whether to install routes along policies
311 */
312 bool install_routes;
313
314 /**
315 * Whether to track the history of a policy
316 */
317 bool policy_history;
318
319 /**
320 * Size of the replay window, in packets (= bits)
321 */
322 u_int32_t replay_window;
323
324 /**
325 * Size of the replay window bitmap, in number of __u32 blocks
326 */
327 u_int32_t replay_bmp;
328 };
329
330 typedef struct route_entry_t route_entry_t;
331
332 /**
333 * Installed routing entry
334 */
335 struct route_entry_t {
336 /** Name of the interface the route is bound to */
337 char *if_name;
338
339 /** Source ip of the route */
340 host_t *src_ip;
341
342 /** Gateway for this route */
343 host_t *gateway;
344
345 /** Destination net */
346 chunk_t dst_net;
347
348 /** Destination net prefixlen */
349 u_int8_t prefixlen;
350 };
351
352 /**
353 * Destroy a route_entry_t object
354 */
355 static void route_entry_destroy(route_entry_t *this)
356 {
357 free(this->if_name);
358 this->src_ip->destroy(this->src_ip);
359 DESTROY_IF(this->gateway);
360 chunk_free(&this->dst_net);
361 free(this);
362 }
363
364 /**
365 * Compare two route_entry_t objects
366 */
367 static bool route_entry_equals(route_entry_t *a, route_entry_t *b)
368 {
369 return a->if_name && b->if_name && streq(a->if_name, b->if_name) &&
370 a->src_ip->ip_equals(a->src_ip, b->src_ip) &&
371 a->gateway->ip_equals(a->gateway, b->gateway) &&
372 chunk_equals(a->dst_net, b->dst_net) && a->prefixlen == b->prefixlen;
373 }
374
375 typedef struct ipsec_sa_t ipsec_sa_t;
376
377 /**
378 * IPsec SA assigned to a policy.
379 */
380 struct ipsec_sa_t {
381 /** Source address of this SA */
382 host_t *src;
383
384 /** Destination address of this SA */
385 host_t *dst;
386
387 /** Optional mark */
388 mark_t mark;
389
390 /** Description of this SA */
391 ipsec_sa_cfg_t cfg;
392
393 /** Reference count for this SA */
394 refcount_t refcount;
395 };
396
397 /**
398 * Hash function for ipsec_sa_t objects
399 */
400 static u_int ipsec_sa_hash(ipsec_sa_t *sa)
401 {
402 return chunk_hash_inc(sa->src->get_address(sa->src),
403 chunk_hash_inc(sa->dst->get_address(sa->dst),
404 chunk_hash_inc(chunk_from_thing(sa->mark),
405 chunk_hash(chunk_from_thing(sa->cfg)))));
406 }
407
408 /**
409 * Equality function for ipsec_sa_t objects
410 */
411 static bool ipsec_sa_equals(ipsec_sa_t *sa, ipsec_sa_t *other_sa)
412 {
413 return sa->src->ip_equals(sa->src, other_sa->src) &&
414 sa->dst->ip_equals(sa->dst, other_sa->dst) &&
415 memeq(&sa->mark, &other_sa->mark, sizeof(mark_t)) &&
416 memeq(&sa->cfg, &other_sa->cfg, sizeof(ipsec_sa_cfg_t));
417 }
418
419 /**
420 * Allocate or reference an IPsec SA object
421 */
422 static ipsec_sa_t *ipsec_sa_create(private_kernel_netlink_ipsec_t *this,
423 host_t *src, host_t *dst, mark_t mark,
424 ipsec_sa_cfg_t *cfg)
425 {
426 ipsec_sa_t *sa, *found;
427 INIT(sa,
428 .src = src,
429 .dst = dst,
430 .mark = mark,
431 .cfg = *cfg,
432 );
433 found = this->sas->get(this->sas, sa);
434 if (!found)
435 {
436 sa->src = src->clone(src);
437 sa->dst = dst->clone(dst);
438 this->sas->put(this->sas, sa, sa);
439 }
440 else
441 {
442 free(sa);
443 sa = found;
444 }
445 ref_get(&sa->refcount);
446 return sa;
447 }
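
/*
 * Sharing semantics in a nutshell: two policies referring to the same
 * src/dst/mark/config tuple end up with a single refcounted ipsec_sa_t.
 *
 *   ipsec_sa_t *a = ipsec_sa_create(this, src, dst, mark, &cfg); // allocates
 *   ipsec_sa_t *b = ipsec_sa_create(this, src, dst, mark, &cfg); // b == a
 *   ipsec_sa_destroy(this, b);  // only drops a reference
 *   ipsec_sa_destroy(this, a);  // removes the hashtable entry and frees it
 */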
448
449 /**
450 * Release and destroy an IPsec SA object
451 */
452 static void ipsec_sa_destroy(private_kernel_netlink_ipsec_t *this,
453 ipsec_sa_t *sa)
454 {
455 if (ref_put(&sa->refcount))
456 {
457 this->sas->remove(this->sas, sa);
458 DESTROY_IF(sa->src);
459 DESTROY_IF(sa->dst);
460 free(sa);
461 }
462 }
463
464 typedef struct policy_sa_t policy_sa_t;
465 typedef struct policy_sa_fwd_t policy_sa_fwd_t;
466
467 /**
468 * Mapping between a policy and an IPsec SA.
469 */
470 struct policy_sa_t {
471 /** Priority assigned to the policy when installed with this SA */
472 u_int32_t priority;
473
474 /** Type of the policy */
475 policy_type_t type;
476
477 /** Assigned SA */
478 ipsec_sa_t *sa;
479 };
480
481 /**
482 * For forward policies we also cache the traffic selectors in order to install
483 * the route.
484 */
485 struct policy_sa_fwd_t {
486 /** Generic interface */
487 policy_sa_t generic;
488
489 /** Source traffic selector of this policy */
490 traffic_selector_t *src_ts;
491
492 /** Destination traffic selector of this policy */
493 traffic_selector_t *dst_ts;
494 };
495
496 /**
497 * Create a policy_sa(_fwd)_t object
498 */
499 static policy_sa_t *policy_sa_create(private_kernel_netlink_ipsec_t *this,
500 policy_dir_t dir, policy_type_t type, host_t *src, host_t *dst,
501 traffic_selector_t *src_ts, traffic_selector_t *dst_ts, mark_t mark,
502 ipsec_sa_cfg_t *cfg)
503 {
504 policy_sa_t *policy;
505
506 if (dir == POLICY_FWD)
507 {
508 policy_sa_fwd_t *fwd;
509 INIT(fwd,
510 .src_ts = src_ts->clone(src_ts),
511 .dst_ts = dst_ts->clone(dst_ts),
512 );
513 policy = &fwd->generic;
514 }
515 else
516 {
517 INIT(policy, .priority = 0);
518 }
519 policy->type = type;
520 policy->sa = ipsec_sa_create(this, src, dst, mark, cfg);
521 return policy;
522 }
523
524 /**
525 * Destroy a policy_sa(_fwd)_t object
526 */
527 static void policy_sa_destroy(policy_sa_t *policy, policy_dir_t *dir,
528 private_kernel_netlink_ipsec_t *this)
529 {
530 if (*dir == POLICY_FWD)
531 {
532 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)policy;
533 fwd->src_ts->destroy(fwd->src_ts);
534 fwd->dst_ts->destroy(fwd->dst_ts);
535 }
536 ipsec_sa_destroy(this, policy->sa);
537 free(policy);
538 }
539
540 typedef struct policy_entry_t policy_entry_t;
541
542 /**
543 * Installed kernel policy.
544 */
545 struct policy_entry_t {
546
547 /** Direction of this policy: in, out, forward */
548 u_int8_t direction;
549
550 /** Parameters of installed policy */
551 struct xfrm_selector sel;
552
553 /** Optional mark */
554 u_int32_t mark;
555
556 /** Associated route installed for this policy */
557 route_entry_t *route;
558
559 /** List of SAs this policy is used by, ordered by priority */
560 linked_list_t *used_by;
561 };
562
563 /**
564 * Destroy a policy_entry_t object
565 */
566 static void policy_entry_destroy(private_kernel_netlink_ipsec_t *this,
567 policy_entry_t *policy)
568 {
569 if (policy->route)
570 {
571 route_entry_destroy(policy->route);
572 }
573 if (policy->used_by)
574 {
575 policy->used_by->invoke_function(policy->used_by,
576 (linked_list_invoke_t)policy_sa_destroy,
577 &policy->direction, this);
578 policy->used_by->destroy(policy->used_by);
579 }
580 free(policy);
581 }
582
583 /**
584 * Hash function for policy_entry_t objects
585 */
586 static u_int policy_hash(policy_entry_t *key)
587 {
588 chunk_t chunk = chunk_from_thing(key->sel);
589 return chunk_hash_inc(chunk, chunk_hash(chunk_from_thing(key->mark)));
590 }
591
592 /**
593 * Equality function for policy_entry_t objects
594 */
595 static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
596 {
597 return memeq(&key->sel, &other_key->sel, sizeof(struct xfrm_selector)) &&
598 key->mark == other_key->mark &&
599 key->direction == other_key->direction;
600 }
601
602 /**
603 * Calculate the priority of a policy
604 */
605 static inline u_int32_t get_priority(policy_entry_t *policy,
606 policy_priority_t prio)
607 {
608 u_int32_t priority = PRIO_BASE;
609 switch (prio)
610 {
611 case POLICY_PRIORITY_FALLBACK:
612 priority <<= 1;
613 /* fall-through */
614 case POLICY_PRIORITY_ROUTED:
615 priority <<= 1;
616 /* fall-through */
617 case POLICY_PRIORITY_DEFAULT:
618 break;
619 }
620 /* calculate priority based on selector size, small size = high prio */
621 priority -= policy->sel.prefixlen_s;
622 priority -= policy->sel.prefixlen_d;
623 priority <<= 2; /* make some room for the two flags */
624 priority += policy->sel.sport_mask || policy->sel.dport_mask ? 0 : 2;
625 priority += policy->sel.proto ? 0 : 1;
626 return priority;
627 }
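
/*
 * Worked example (PRIO_BASE = 512): a default-priority IPv4 host-to-host
 * policy (prefixlen_s = prefixlen_d = 32) with a specific port and protocol
 * yields (512 - 32 - 32) << 2 = 1792, whereas a 0.0.0.0/0 policy matching any
 * port and protocol yields (512 << 2) + 2 + 1 = 2051.  The kernel prefers
 * lower values, so the more specific selector wins.
 */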
628
629 /**
630 * Convert the general ipsec mode to the one defined in xfrm.h
631 */
632 static u_int8_t mode2kernel(ipsec_mode_t mode)
633 {
634 switch (mode)
635 {
636 case MODE_TRANSPORT:
637 return XFRM_MODE_TRANSPORT;
638 case MODE_TUNNEL:
639 return XFRM_MODE_TUNNEL;
640 case MODE_BEET:
641 return XFRM_MODE_BEET;
642 default:
643 return mode;
644 }
645 }
646
647 /**
648 * Convert a host_t to a struct xfrm_address
649 */
650 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
651 {
652 chunk_t chunk = host->get_address(host);
653 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
654 }
655
656 /**
657 * Convert a struct xfrm_address to a host_t
658 */
659 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
660 {
661 chunk_t chunk;
662
663 switch (family)
664 {
665 case AF_INET:
666 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
667 break;
668 case AF_INET6:
669 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
670 break;
671 default:
672 return NULL;
673 }
674 return host_create_from_chunk(family, chunk, ntohs(port));
675 }
676
677 /**
678 * Convert a traffic selector address range to subnet and its mask.
679 */
680 static void ts2subnet(traffic_selector_t* ts,
681 xfrm_address_t *net, u_int8_t *mask)
682 {
683 host_t *net_host;
684 chunk_t net_chunk;
685
686 ts->to_subnet(ts, &net_host, mask);
687 net_chunk = net_host->get_address(net_host);
688 memcpy(net, net_chunk.ptr, net_chunk.len);
689 net_host->destroy(net_host);
690 }
691
692 /**
693 * Convert a traffic selector port range to port/portmask
694 */
695 static void ts2ports(traffic_selector_t* ts,
696 u_int16_t *port, u_int16_t *mask)
697 {
698 	/* Linux does not seem to accept complex portmasks; only "any" or one
699 	 * specific port is allowed. We therefore set the port to "any" if the
700 	 * selector covers a port range, and to the specific port otherwise.
701 	 */
702 u_int16_t from, to;
703
704 from = ts->get_from_port(ts);
705 to = ts->get_to_port(ts);
706
707 if (from == to)
708 {
709 *port = htons(from);
710 *mask = ~0;
711 }
712 else
713 {
714 *port = 0;
715 *mask = 0;
716 }
717 }
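
/*
 * Example: a selector covering only port 500 results in port = htons(500) and
 * mask = 0xffff (exact match), while a selector covering ports 1024..65535
 * results in port = 0 and mask = 0 (match any port).
 */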
718
719 /**
720 * Convert a pair of traffic_selectors to an xfrm_selector
721 */
722 static struct xfrm_selector ts2selector(traffic_selector_t *src,
723 traffic_selector_t *dst)
724 {
725 struct xfrm_selector sel;
726
727 memset(&sel, 0, sizeof(sel));
728 sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
729 /* src or dest proto may be "any" (0), use more restrictive one */
730 sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
731 ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
732 ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
733 ts2ports(dst, &sel.dport, &sel.dport_mask);
734 ts2ports(src, &sel.sport, &sel.sport_mask);
735 sel.ifindex = 0;
736 sel.user = 0;
737
738 return sel;
739 }
740
741 /**
742 * Convert an xfrm_selector to a src|dst traffic_selector
743 */
744 static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
745 {
746 u_char *addr;
747 u_int8_t prefixlen;
748 u_int16_t port = 0;
749 host_t *host = NULL;
750
751 if (src)
752 {
753 addr = (u_char*)&sel->saddr;
754 prefixlen = sel->prefixlen_s;
755 if (sel->sport_mask)
756 {
757 port = htons(sel->sport);
758 }
759 }
760 else
761 {
762 addr = (u_char*)&sel->daddr;
763 prefixlen = sel->prefixlen_d;
764 if (sel->dport_mask)
765 {
766 port = htons(sel->dport);
767 }
768 }
769
770 /* The Linux 2.6 kernel does not set the selector's family field,
771 * so as a kludge we additionally test the prefix length.
772 */
773 if (sel->family == AF_INET || sel->prefixlen_s == 32)
774 {
775 host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
776 }
777 else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
778 {
779 host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
780 }
781
782 if (host)
783 {
784 return traffic_selector_create_from_subnet(host, prefixlen,
785 sel->proto, port, port ?: 65535);
786 }
787 return NULL;
788 }
789
790 /**
791 * Process a XFRM_MSG_ACQUIRE from kernel
792 */
793 static void process_acquire(private_kernel_netlink_ipsec_t *this,
794 struct nlmsghdr *hdr)
795 {
796 struct xfrm_user_acquire *acquire;
797 struct rtattr *rta;
798 size_t rtasize;
799 traffic_selector_t *src_ts, *dst_ts;
800 u_int32_t reqid = 0;
801 int proto = 0;
802
803 acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
804 rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
805 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
806
807 DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");
808
809 while (RTA_OK(rta, rtasize))
810 {
811 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
812
813 if (rta->rta_type == XFRMA_TMPL)
814 {
815 struct xfrm_user_tmpl* tmpl;
816 tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
817 reqid = tmpl->reqid;
818 proto = tmpl->id.proto;
819 }
820 rta = RTA_NEXT(rta, rtasize);
821 }
822 switch (proto)
823 {
824 case 0:
825 case IPPROTO_ESP:
826 case IPPROTO_AH:
827 break;
828 default:
829 /* acquire for AH/ESP only, not for IPCOMP */
830 return;
831 }
832 src_ts = selector2ts(&acquire->sel, TRUE);
833 dst_ts = selector2ts(&acquire->sel, FALSE);
834
835 hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
836 dst_ts);
837 }
838
839 /**
840 * Process a XFRM_MSG_EXPIRE from kernel
841 */
842 static void process_expire(private_kernel_netlink_ipsec_t *this,
843 struct nlmsghdr *hdr)
844 {
845 struct xfrm_user_expire *expire;
846 u_int32_t spi, reqid;
847 u_int8_t protocol;
848
849 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
850 protocol = expire->state.id.proto;
851 spi = expire->state.id.spi;
852 reqid = expire->state.reqid;
853
854 DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");
855
856 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
857 {
858 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
859 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
860 return;
861 }
862
863 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
864 spi, expire->hard != 0);
865 }
866
867 /**
868 * Process a XFRM_MSG_MIGRATE from kernel
869 */
870 static void process_migrate(private_kernel_netlink_ipsec_t *this,
871 struct nlmsghdr *hdr)
872 {
873 struct xfrm_userpolicy_id *policy_id;
874 struct rtattr *rta;
875 size_t rtasize;
876 traffic_selector_t *src_ts, *dst_ts;
877 host_t *local = NULL, *remote = NULL;
878 host_t *old_src = NULL, *old_dst = NULL;
879 host_t *new_src = NULL, *new_dst = NULL;
880 u_int32_t reqid = 0;
881 policy_dir_t dir;
882
883 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
884 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
885 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
886
887 DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");
888
889 src_ts = selector2ts(&policy_id->sel, TRUE);
890 dst_ts = selector2ts(&policy_id->sel, FALSE);
891 dir = (policy_dir_t)policy_id->dir;
892
893 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);
894
895 while (RTA_OK(rta, rtasize))
896 {
897 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
898 if (rta->rta_type == XFRMA_KMADDRESS)
899 {
900 struct xfrm_user_kmaddress *kmaddress;
901
902 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
903 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
904 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
905 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
906 }
907 else if (rta->rta_type == XFRMA_MIGRATE)
908 {
909 struct xfrm_user_migrate *migrate;
910
911 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
912 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
913 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
914 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
915 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
916 reqid = migrate->reqid;
917 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
918 old_src, old_dst, new_src, new_dst, reqid);
919 DESTROY_IF(old_src);
920 DESTROY_IF(old_dst);
921 DESTROY_IF(new_src);
922 DESTROY_IF(new_dst);
923 }
924 rta = RTA_NEXT(rta, rtasize);
925 }
926
927 if (src_ts && dst_ts && local && remote)
928 {
929 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
930 src_ts, dst_ts, dir, local, remote);
931 }
932 else
933 {
934 DESTROY_IF(src_ts);
935 DESTROY_IF(dst_ts);
936 DESTROY_IF(local);
937 DESTROY_IF(remote);
938 }
939 }
940
941 /**
942 * Process a XFRM_MSG_MAPPING from kernel
943 */
944 static void process_mapping(private_kernel_netlink_ipsec_t *this,
945 struct nlmsghdr *hdr)
946 {
947 struct xfrm_user_mapping *mapping;
948 u_int32_t spi, reqid;
949
950 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
951 spi = mapping->id.spi;
952 reqid = mapping->reqid;
953
954 DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");
955
956 if (mapping->id.proto == IPPROTO_ESP)
957 {
958 host_t *host;
959 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
960 mapping->new_sport);
961 if (host)
962 {
963 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
964 spi, host);
965 }
966 }
967 }
968
969 /**
970 * Receives events from kernel
971 */
972 static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
973 {
974 char response[1024];
975 struct nlmsghdr *hdr = (struct nlmsghdr*)response;
976 struct sockaddr_nl addr;
977 socklen_t addr_len = sizeof(addr);
978 int len;
979 bool oldstate;
980
981 oldstate = thread_cancelability(TRUE);
982 len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
983 (struct sockaddr*)&addr, &addr_len);
984 thread_cancelability(oldstate);
985
986 if (len < 0)
987 {
988 switch (errno)
989 {
990 case EINTR:
991 /* interrupted, try again */
992 return JOB_REQUEUE_DIRECT;
993 case EAGAIN:
994 /* no data ready, select again */
995 return JOB_REQUEUE_DIRECT;
996 default:
997 DBG1(DBG_KNL, "unable to receive from xfrm event socket");
998 sleep(1);
999 return JOB_REQUEUE_FAIR;
1000 }
1001 }
1002
1003 if (addr.nl_pid != 0)
1004 { /* not from kernel. not interested, try another one */
1005 return JOB_REQUEUE_DIRECT;
1006 }
1007
1008 while (NLMSG_OK(hdr, len))
1009 {
1010 switch (hdr->nlmsg_type)
1011 {
1012 case XFRM_MSG_ACQUIRE:
1013 process_acquire(this, hdr);
1014 break;
1015 case XFRM_MSG_EXPIRE:
1016 process_expire(this, hdr);
1017 break;
1018 case XFRM_MSG_MIGRATE:
1019 process_migrate(this, hdr);
1020 break;
1021 case XFRM_MSG_MAPPING:
1022 process_mapping(this, hdr);
1023 break;
1024 default:
1025 DBG1(DBG_KNL, "received unknown event from xfrm event "
1026 "socket: %d", hdr->nlmsg_type);
1027 break;
1028 }
1029 hdr = NLMSG_NEXT(hdr, len);
1030 }
1031 return JOB_REQUEUE_DIRECT;
1032 }
1033
1034 METHOD(kernel_ipsec_t, get_features, kernel_feature_t,
1035 private_kernel_netlink_ipsec_t *this)
1036 {
1037 return KERNEL_ESP_V3_TFC;
1038 }
1039
1040 /**
1041 * Get an SPI for a specific protocol from the kernel.
1042 */
1043 static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
1044 host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
1045 u_int32_t reqid, u_int32_t *spi)
1046 {
1047 netlink_buf_t request;
1048 struct nlmsghdr *hdr, *out;
1049 struct xfrm_userspi_info *userspi;
1050 u_int32_t received_spi = 0;
1051 size_t len;
1052
1053 memset(&request, 0, sizeof(request));
1054
1055 hdr = (struct nlmsghdr*)request;
1056 hdr->nlmsg_flags = NLM_F_REQUEST;
1057 hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
1058 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));
1059
1060 userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
1061 host2xfrm(src, &userspi->info.saddr);
1062 host2xfrm(dst, &userspi->info.id.daddr);
1063 userspi->info.id.proto = proto;
1064 userspi->info.mode = XFRM_MODE_TUNNEL;
1065 userspi->info.reqid = reqid;
1066 userspi->info.family = src->get_family(src);
1067 userspi->min = min;
1068 userspi->max = max;
1069
1070 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1071 {
1072 hdr = out;
1073 while (NLMSG_OK(hdr, len))
1074 {
1075 switch (hdr->nlmsg_type)
1076 {
1077 case XFRM_MSG_NEWSA:
1078 {
1079 struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
1080 received_spi = usersa->id.spi;
1081 break;
1082 }
1083 case NLMSG_ERROR:
1084 {
1085 struct nlmsgerr *err = NLMSG_DATA(hdr);
1086 DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
1087 strerror(-err->error), -err->error);
1088 break;
1089 }
1090 default:
1091 hdr = NLMSG_NEXT(hdr, len);
1092 continue;
1093 case NLMSG_DONE:
1094 break;
1095 }
1096 break;
1097 }
1098 free(out);
1099 }
1100
1101 if (received_spi == 0)
1102 {
1103 return FAILED;
1104 }
1105
1106 *spi = received_spi;
1107 return SUCCESS;
1108 }
1109
1110 METHOD(kernel_ipsec_t, get_spi, status_t,
1111 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1112 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
1113 {
1114 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
1115
1116 if (get_spi_internal(this, src, dst, protocol,
1117 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
1118 {
1119 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
1120 return FAILED;
1121 }
1122
1123 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
1124 return SUCCESS;
1125 }
1126
1127 METHOD(kernel_ipsec_t, get_cpi, status_t,
1128 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1129 u_int32_t reqid, u_int16_t *cpi)
1130 {
1131 u_int32_t received_spi = 0;
1132
1133 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
1134
1135 if (get_spi_internal(this, src, dst, IPPROTO_COMP,
1136 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
1137 {
1138 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
1139 return FAILED;
1140 }
1141
1142 *cpi = htons((u_int16_t)ntohl(received_spi));
1143
1144 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
1145 return SUCCESS;
1146 }
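
/*
 * The netlink_reserve() helper used below is declared in
 * kernel_netlink_shared.h (not part of this excerpt).  Judging from its use,
 * e.g.
 *
 *   mrk = netlink_reserve(hdr, sizeof(request), XFRMA_MARK, sizeof(*mrk));
 *
 * it appends an rtattr of the given type to the message, extends
 * hdr->nlmsg_len accordingly and returns a pointer to the attribute payload,
 * or NULL if the attribute does not fit into the buffer.  A minimal sketch,
 * assuming exactly these semantics, could look like:
 *
 *   void *netlink_reserve(struct nlmsghdr *hdr, int buflen, int type, int len)
 *   {
 *       struct rtattr *rta;
 *
 *       if (NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(RTA_LENGTH(len)) > buflen)
 *       {
 *           return NULL;
 *       }
 *       rta = (struct rtattr*)((char*)hdr + NLMSG_ALIGN(hdr->nlmsg_len));
 *       rta->rta_type = type;
 *       rta->rta_len = RTA_LENGTH(len);
 *       hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rta->rta_len);
 *       return RTA_DATA(rta);
 *   }
 */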
1147
1148 METHOD(kernel_ipsec_t, add_sa, status_t,
1149 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1150 u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
1151 u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
1152 u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
1153 u_int16_t cpi, bool encap, bool esn, bool inbound,
1154 traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
1155 {
1156 netlink_buf_t request;
1157 char *alg_name;
1158 struct nlmsghdr *hdr;
1159 struct xfrm_usersa_info *sa;
1160 u_int16_t icv_size = 64;
1161 status_t status = FAILED;
1162
1163 	/* if IPComp is used, we install an additional IPComp SA. If the cpi is 0,
1164 	 * we are already inside that recursive call (see below) */
1165 if (ipcomp != IPCOMP_NONE && cpi != 0)
1166 {
1167 lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
1168 add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark,
1169 tfc, &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED,
1170 chunk_empty, mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
1171 ipcomp = IPCOMP_NONE;
1172 /* use transport mode ESP SA, IPComp uses tunnel mode */
1173 mode = MODE_TRANSPORT;
1174 }
1175
1176 memset(&request, 0, sizeof(request));
1177
1178 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} (mark "
1179 "%u/0x%08x)", ntohl(spi), reqid, mark.value, mark.mask);
1180
1181 hdr = (struct nlmsghdr*)request;
1182 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1183 hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
1184 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1185
1186 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1187 host2xfrm(src, &sa->saddr);
1188 host2xfrm(dst, &sa->id.daddr);
1189 sa->id.spi = spi;
1190 sa->id.proto = protocol;
1191 sa->family = src->get_family(src);
1192 sa->mode = mode2kernel(mode);
1193 switch (mode)
1194 {
1195 case MODE_TUNNEL:
1196 sa->flags |= XFRM_STATE_AF_UNSPEC;
1197 break;
1198 case MODE_BEET:
1199 case MODE_TRANSPORT:
1200 if(src_ts && dst_ts)
1201 {
1202 sa->sel = ts2selector(src_ts, dst_ts);
1203 }
1204 break;
1205 default:
1206 break;
1207 }
1208
1209 sa->reqid = reqid;
1210 sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
1211 sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
1212 sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
1213 sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
1214 /* we use lifetimes since added, not since used */
1215 sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
1216 sa->lft.hard_add_expires_seconds = lifetime->time.life;
1217 sa->lft.soft_use_expires_seconds = 0;
1218 sa->lft.hard_use_expires_seconds = 0;
1219
1220 switch (enc_alg)
1221 {
1222 case ENCR_UNDEFINED:
1223 /* no encryption */
1224 break;
1225 case ENCR_AES_CCM_ICV16:
1226 case ENCR_AES_GCM_ICV16:
1227 case ENCR_NULL_AUTH_AES_GMAC:
1228 case ENCR_CAMELLIA_CCM_ICV16:
1229 icv_size += 32;
1230 /* FALL */
1231 case ENCR_AES_CCM_ICV12:
1232 case ENCR_AES_GCM_ICV12:
1233 case ENCR_CAMELLIA_CCM_ICV12:
1234 icv_size += 32;
1235 /* FALL */
1236 case ENCR_AES_CCM_ICV8:
1237 case ENCR_AES_GCM_ICV8:
1238 case ENCR_CAMELLIA_CCM_ICV8:
1239 {
1240 struct xfrm_algo_aead *algo;
1241
1242 alg_name = lookup_algorithm(ENCRYPTION_ALGORITHM, enc_alg);
1243 if (alg_name == NULL)
1244 {
1245 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1246 encryption_algorithm_names, enc_alg);
1247 goto failed;
1248 }
1249 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1250 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1251
1252 algo = netlink_reserve(hdr, sizeof(request), XFRMA_ALG_AEAD,
1253 sizeof(*algo) + enc_key.len);
1254 if (!algo)
1255 {
1256 goto failed;
1257 }
1258 algo->alg_key_len = enc_key.len * 8;
1259 algo->alg_icv_len = icv_size;
1260 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1261 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1262 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1263 break;
1264 }
1265 default:
1266 {
1267 struct xfrm_algo *algo;
1268
1269 alg_name = lookup_algorithm(ENCRYPTION_ALGORITHM, enc_alg);
1270 if (alg_name == NULL)
1271 {
1272 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1273 encryption_algorithm_names, enc_alg);
1274 goto failed;
1275 }
1276 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1277 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1278
1279 algo = netlink_reserve(hdr, sizeof(request), XFRMA_ALG_CRYPT,
1280 sizeof(*algo) + enc_key.len);
1281 if (!algo)
1282 {
1283 goto failed;
1284 }
1285 algo->alg_key_len = enc_key.len * 8;
1286 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1287 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1288 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1289 }
1290 }
1291
1292 if (int_alg != AUTH_UNDEFINED)
1293 {
1294 u_int trunc_len = 0;
1295
1296 alg_name = lookup_algorithm(INTEGRITY_ALGORITHM, int_alg);
1297 if (alg_name == NULL)
1298 {
1299 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1300 integrity_algorithm_names, int_alg);
1301 goto failed;
1302 }
1303 DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
1304 integrity_algorithm_names, int_alg, int_key.len * 8);
1305
1306 switch (int_alg)
1307 {
1308 case AUTH_HMAC_MD5_128:
1309 case AUTH_HMAC_SHA2_256_128:
1310 trunc_len = 128;
1311 break;
1312 case AUTH_HMAC_SHA1_160:
1313 trunc_len = 160;
1314 break;
1315 default:
1316 break;
1317 }
1318
1319 if (trunc_len)
1320 {
1321 struct xfrm_algo_auth* algo;
1322
1323 			/* the kernel uses SHA256 with 96 bit truncation by default, so we
1324 			 * explicitly set the truncation length, which newer kernels support.
1325 			 * The same attribute is used for untruncated MD5 and SHA1. */
1326 algo = netlink_reserve(hdr, sizeof(request), XFRMA_ALG_AUTH_TRUNC,
1327 sizeof(*algo) + int_key.len);
1328 if (!algo)
1329 {
1330 goto failed;
1331 }
1332 algo->alg_key_len = int_key.len * 8;
1333 algo->alg_trunc_len = trunc_len;
1334 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1335 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1336 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1337 }
1338 else
1339 {
1340 struct xfrm_algo* algo;
1341
1342 algo = netlink_reserve(hdr, sizeof(request), XFRMA_ALG_AUTH,
1343 sizeof(*algo) + int_key.len);
1344 if (!algo)
1345 {
1346 goto failed;
1347 }
1348 algo->alg_key_len = int_key.len * 8;
1349 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1350 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1351 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1352 }
1353 }
1354
1355 if (ipcomp != IPCOMP_NONE)
1356 {
1357 struct xfrm_algo* algo;
1358
1359 alg_name = lookup_algorithm(COMPRESSION_ALGORITHM, ipcomp);
1360 if (alg_name == NULL)
1361 {
1362 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1363 ipcomp_transform_names, ipcomp);
1364 goto failed;
1365 }
1366 DBG2(DBG_KNL, " using compression algorithm %N",
1367 ipcomp_transform_names, ipcomp);
1368
1369 algo = netlink_reserve(hdr, sizeof(request), XFRMA_ALG_COMP,
1370 sizeof(*algo));
1371 if (!algo)
1372 {
1373 goto failed;
1374 }
1375 algo->alg_key_len = 0;
1376 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1377 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1378 }
1379
1380 if (encap)
1381 {
1382 struct xfrm_encap_tmpl *tmpl;
1383
1384 tmpl = netlink_reserve(hdr, sizeof(request), XFRMA_ENCAP, sizeof(*tmpl));
1385 if (!tmpl)
1386 {
1387 goto failed;
1388 }
1389 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1390 tmpl->encap_sport = htons(src->get_port(src));
1391 tmpl->encap_dport = htons(dst->get_port(dst));
1392 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1393 /* encap_oa could probably be derived from the
1394 * traffic selectors [rfc4306, p39]. In the netlink kernel
1395 * implementation pluto does the same as we do here but it uses
1396 * encap_oa in the pfkey implementation.
1397 * BUT as /usr/src/linux/net/key/af_key.c indicates the kernel ignores
1398 * it anyway
1399 * -> does that mean that NAT-T encap doesn't work in transport mode?
1400 * No. The reason the kernel ignores NAT-OA is that it recomputes
1401 * (or, rather, just ignores) the checksum. If packets pass the IPsec
1402 * checks it marks them "checksum ok" so OA isn't needed. */
1403 }
1404
1405 if (mark.value)
1406 {
1407 struct xfrm_mark *mrk;
1408
1409 mrk = netlink_reserve(hdr, sizeof(request), XFRMA_MARK, sizeof(*mrk));
1410 if (!mrk)
1411 {
1412 goto failed;
1413 }
1414 mrk->v = mark.value;
1415 mrk->m = mark.mask;
1416 }
1417
1418 if (tfc)
1419 {
1420 u_int32_t *tfcpad;
1421
1422 tfcpad = netlink_reserve(hdr, sizeof(request), XFRMA_TFCPAD,
1423 sizeof(*tfcpad));
1424 if (!tfcpad)
1425 {
1426 goto failed;
1427 }
1428 *tfcpad = tfc;
1429 }
1430
1431 if (protocol != IPPROTO_COMP)
1432 {
1433 if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
1434 {
1435 /* for ESN or larger replay windows we need the new
1436 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
1437 struct xfrm_replay_state_esn *replay;
1438
1439 replay = netlink_reserve(hdr, sizeof(request), XFRMA_REPLAY_ESN_VAL,
1440 sizeof(*replay) + (this->replay_window + 7) / 8);
1441 if (!replay)
1442 {
1443 goto failed;
1444 }
1445 			/* bmp_len contains the number of __u32 blocks in the bitmap */
1446 replay->bmp_len = this->replay_bmp;
1447 replay->replay_window = this->replay_window;
1448 DBG2(DBG_KNL, " using replay window of %u packets",
1449 this->replay_window);
1450
1451 if (esn)
1452 {
1453 DBG2(DBG_KNL, " using extended sequence numbers (ESN)");
1454 sa->flags |= XFRM_STATE_ESN;
1455 }
1456 }
1457 else
1458 {
1459 DBG2(DBG_KNL, " using replay window of %u packets",
1460 this->replay_window);
1461 sa->replay_window = this->replay_window;
1462 }
1463 }
1464
1465 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1466 {
1467 if (mark.value)
1468 {
1469 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
1470 "(mark %u/0x%08x)", ntohl(spi), mark.value, mark.mask);
1471 }
1472 else
1473 {
1474 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
1475 }
1476 goto failed;
1477 }
1478
1479 status = SUCCESS;
1480
1481 failed:
1482 memwipe(request, sizeof(request));
1483 return status;
1484 }
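
/*
 * Note on the replay window handling above: the XFRMA_REPLAY_ESN_VAL bitmap is
 * sized in whole __u32 blocks.  With the default charon.replay_window of 32
 * packets a single block suffices; a window of, say, 128 packets needs
 * (128 + 7) / 8 = 16 bytes, i.e. replay_bmp = 4 blocks (replay_bmp is
 * presumably derived from replay_window when the plugin is created, outside
 * of this excerpt).
 */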
1485
1486 /**
1487  * Query the replay state (i.e. sequence numbers) of an SA.
1488  *
1489  * Allocates the structure received from the kernel into *replay_esn or *replay.
1490 */
1491 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1492 u_int32_t spi, u_int8_t protocol,
1493 host_t *dst, mark_t mark,
1494 struct xfrm_replay_state_esn **replay_esn,
1495 struct xfrm_replay_state **replay)
1496 {
1497 netlink_buf_t request;
1498 struct nlmsghdr *hdr, *out = NULL;
1499 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1500 size_t len;
1501 struct rtattr *rta;
1502 size_t rtasize;
1503
1504 memset(&request, 0, sizeof(request));
1505
1506 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1507 ntohl(spi));
1508
1509 hdr = (struct nlmsghdr*)request;
1510 hdr->nlmsg_flags = NLM_F_REQUEST;
1511 hdr->nlmsg_type = XFRM_MSG_GETAE;
1512 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1513
1514 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1515 aevent_id->flags = XFRM_AE_RVAL;
1516
1517 host2xfrm(dst, &aevent_id->sa_id.daddr);
1518 aevent_id->sa_id.spi = spi;
1519 aevent_id->sa_id.proto = protocol;
1520 aevent_id->sa_id.family = dst->get_family(dst);
1521
1522 if (mark.value)
1523 {
1524 struct xfrm_mark *mrk;
1525
1526 mrk = netlink_reserve(hdr, sizeof(request), XFRMA_MARK, sizeof(*mrk));
1527 if (!mrk)
1528 {
1529 return;
1530 }
1531 mrk->v = mark.value;
1532 mrk->m = mark.mask;
1533 }
1534
1535 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1536 {
1537 hdr = out;
1538 while (NLMSG_OK(hdr, len))
1539 {
1540 switch (hdr->nlmsg_type)
1541 {
1542 case XFRM_MSG_NEWAE:
1543 {
1544 out_aevent = NLMSG_DATA(hdr);
1545 break;
1546 }
1547 case NLMSG_ERROR:
1548 {
1549 struct nlmsgerr *err = NLMSG_DATA(hdr);
1550 DBG1(DBG_KNL, "querying replay state from SAD entry "
1551 "failed: %s (%d)", strerror(-err->error),
1552 -err->error);
1553 break;
1554 }
1555 default:
1556 hdr = NLMSG_NEXT(hdr, len);
1557 continue;
1558 case NLMSG_DONE:
1559 break;
1560 }
1561 break;
1562 }
1563 }
1564
1565 if (out_aevent)
1566 {
1567 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1568 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1569 while (RTA_OK(rta, rtasize))
1570 {
1571 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1572 RTA_PAYLOAD(rta) == sizeof(**replay))
1573 {
1574 *replay = malloc(RTA_PAYLOAD(rta));
1575 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1576 break;
1577 }
1578 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1579 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1580 {
1581 *replay_esn = malloc(RTA_PAYLOAD(rta));
1582 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1583 break;
1584 }
1585 rta = RTA_NEXT(rta, rtasize);
1586 }
1587 }
1588 free(out);
1589 }
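
/*
 * Usage note: update_sa() below calls
 *
 *   get_replay_state(this, spi, protocol, dst, mark, &replay_esn, &replay);
 *
 * and at most one of the two pointers is set afterwards, depending on whether
 * the kernel returned an XFRMA_REPLAY_ESN_VAL or an XFRMA_REPLAY_VAL
 * attribute.  If neither could be retrieved, both stay NULL and the sequence
 * numbers are not carried over to the updated SA.
 */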
1590
1591 METHOD(kernel_ipsec_t, query_sa, status_t,
1592 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1593 u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
1594 {
1595 netlink_buf_t request;
1596 struct nlmsghdr *out = NULL, *hdr;
1597 struct xfrm_usersa_id *sa_id;
1598 struct xfrm_usersa_info *sa = NULL;
1599 status_t status = FAILED;
1600 size_t len;
1601
1602 memset(&request, 0, sizeof(request));
1603
1604 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%08x)",
1605 ntohl(spi), mark.value, mark.mask);
1606
1607 hdr = (struct nlmsghdr*)request;
1608 hdr->nlmsg_flags = NLM_F_REQUEST;
1609 hdr->nlmsg_type = XFRM_MSG_GETSA;
1610 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1611
1612 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1613 host2xfrm(dst, &sa_id->daddr);
1614 sa_id->spi = spi;
1615 sa_id->proto = protocol;
1616 sa_id->family = dst->get_family(dst);
1617
1618 if (mark.value)
1619 {
1620 struct xfrm_mark *mrk;
1621
1622 mrk = netlink_reserve(hdr, sizeof(request), XFRMA_MARK, sizeof(*mrk));
1623 if (!mrk)
1624 {
1625 return FAILED;
1626 }
1627 mrk->v = mark.value;
1628 mrk->m = mark.mask;
1629 }
1630
1631 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1632 {
1633 hdr = out;
1634 while (NLMSG_OK(hdr, len))
1635 {
1636 switch (hdr->nlmsg_type)
1637 {
1638 case XFRM_MSG_NEWSA:
1639 {
1640 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1641 break;
1642 }
1643 case NLMSG_ERROR:
1644 {
1645 struct nlmsgerr *err = NLMSG_DATA(hdr);
1646
1647 if (mark.value)
1648 {
1649 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1650 "(mark %u/0x%08x) failed: %s (%d)",
1651 ntohl(spi), mark.value, mark.mask,
1652 strerror(-err->error), -err->error);
1653 }
1654 else
1655 {
1656 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1657 "failed: %s (%d)", ntohl(spi),
1658 strerror(-err->error), -err->error);
1659 }
1660 break;
1661 }
1662 default:
1663 hdr = NLMSG_NEXT(hdr, len);
1664 continue;
1665 case NLMSG_DONE:
1666 break;
1667 }
1668 break;
1669 }
1670 }
1671
1672 if (sa == NULL)
1673 {
1674 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1675 }
1676 else
1677 {
1678 *bytes = sa->curlft.bytes;
1679 status = SUCCESS;
1680 }
1681 memwipe(out, len);
1682 free(out);
1683 return status;
1684 }
1685
1686 METHOD(kernel_ipsec_t, del_sa, status_t,
1687 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1688 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1689 {
1690 netlink_buf_t request;
1691 struct nlmsghdr *hdr;
1692 struct xfrm_usersa_id *sa_id;
1693
1694 /* if IPComp was used, we first delete the additional IPComp SA */
1695 if (cpi)
1696 {
1697 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1698 }
1699
1700 memset(&request, 0, sizeof(request));
1701
1702 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%08x)",
1703 ntohl(spi), mark.value, mark.mask);
1704
1705 hdr = (struct nlmsghdr*)request;
1706 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1707 hdr->nlmsg_type = XFRM_MSG_DELSA;
1708 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1709
1710 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1711 host2xfrm(dst, &sa_id->daddr);
1712 sa_id->spi = spi;
1713 sa_id->proto = protocol;
1714 sa_id->family = dst->get_family(dst);
1715
1716 if (mark.value)
1717 {
1718 struct xfrm_mark *mrk;
1719
1720 mrk = netlink_reserve(hdr, sizeof(request), XFRMA_MARK, sizeof(*mrk));
1721 if (!mrk)
1722 {
1723 return FAILED;
1724 }
1725 mrk->v = mark.value;
1726 mrk->m = mark.mask;
1727 }
1728
1729 switch (this->socket_xfrm->send_ack(this->socket_xfrm, hdr))
1730 {
1731 case SUCCESS:
1732 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%08x)",
1733 ntohl(spi), mark.value, mark.mask);
1734 return SUCCESS;
1735 case NOT_FOUND:
1736 return NOT_FOUND;
1737 default:
1738 if (mark.value)
1739 {
1740 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1741 "(mark %u/0x%08x)", ntohl(spi), mark.value, mark.mask);
1742 }
1743 else
1744 {
1745 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x",
1746 ntohl(spi));
1747 }
1748 return FAILED;
1749 }
1750 }
1751
1752 METHOD(kernel_ipsec_t, update_sa, status_t,
1753 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1754 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1755 bool old_encap, bool new_encap, mark_t mark)
1756 {
1757 netlink_buf_t request;
1758 u_char *pos;
1759 struct nlmsghdr *hdr, *out = NULL;
1760 struct xfrm_usersa_id *sa_id;
1761 struct xfrm_usersa_info *out_sa = NULL, *sa;
1762 size_t len, newlen;
1763 struct rtattr *rta;
1764 size_t rtasize;
1765 struct xfrm_encap_tmpl* tmpl = NULL;
1766 struct xfrm_replay_state *replay = NULL;
1767 struct xfrm_replay_state_esn *replay_esn = NULL;
1768 status_t status = FAILED;
1769
1770 /* if IPComp is used, we first update the IPComp SA */
1771 if (cpi)
1772 {
1773 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1774 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1775 }
1776
1777 memset(&request, 0, sizeof(request));
1778
1779 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1780
1781 /* query the existing SA first */
1782 hdr = (struct nlmsghdr*)request;
1783 hdr->nlmsg_flags = NLM_F_REQUEST;
1784 hdr->nlmsg_type = XFRM_MSG_GETSA;
1785 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1786
1787 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1788 host2xfrm(dst, &sa_id->daddr);
1789 sa_id->spi = spi;
1790 sa_id->proto = protocol;
1791 sa_id->family = dst->get_family(dst);
1792
1793 if (mark.value)
1794 {
1795 struct xfrm_mark *mrk;
1796
1797 mrk = netlink_reserve(hdr, sizeof(request), XFRMA_MARK, sizeof(*mrk));
1798 if (!mrk)
1799 {
1800 return FAILED;
1801 }
1802 mrk->v = mark.value;
1803 mrk->m = mark.mask;
1804 }
1805
1806 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1807 {
1808 hdr = out;
1809 while (NLMSG_OK(hdr, len))
1810 {
1811 switch (hdr->nlmsg_type)
1812 {
1813 case XFRM_MSG_NEWSA:
1814 {
1815 out_sa = NLMSG_DATA(hdr);
1816 break;
1817 }
1818 case NLMSG_ERROR:
1819 {
1820 struct nlmsgerr *err = NLMSG_DATA(hdr);
1821 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1822 strerror(-err->error), -err->error);
1823 break;
1824 }
1825 default:
1826 hdr = NLMSG_NEXT(hdr, len);
1827 continue;
1828 case NLMSG_DONE:
1829 break;
1830 }
1831 break;
1832 }
1833 }
1834 if (out_sa == NULL)
1835 {
1836 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1837 goto failed;
1838 }
1839
1840 get_replay_state(this, spi, protocol, dst, mark, &replay_esn, &replay);
1841
1842 /* delete the old SA (without affecting the IPComp SA) */
1843 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1844 {
1845 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x",
1846 ntohl(spi));
1847 goto failed;
1848 }
1849
1850 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1851 ntohl(spi), src, dst, new_src, new_dst);
1852 /* copy over the SA from out to request */
1853 hdr = (struct nlmsghdr*)request;
1854 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1855 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1856 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1857 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1858 sa = NLMSG_DATA(hdr);
1859 sa->family = new_dst->get_family(new_dst);
1860
1861 if (!src->ip_equals(src, new_src))
1862 {
1863 host2xfrm(new_src, &sa->saddr);
1864 }
1865 if (!dst->ip_equals(dst, new_dst))
1866 {
1867 host2xfrm(new_dst, &sa->id.daddr);
1868 }
1869
1870 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1871 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1872 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1873 while (RTA_OK(rta, rtasize))
1874 {
1875 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1876 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1877 {
1878 if (rta->rta_type == XFRMA_ENCAP)
1879 { /* update encap tmpl */
1880 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1881 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1882 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1883 }
1884 memcpy(pos, rta, rta->rta_len);
1885 newlen = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rta->rta_len);
1886 pos += newlen - hdr->nlmsg_len;
1887 hdr->nlmsg_len = newlen;
1888 }
1889 rta = RTA_NEXT(rta, rtasize);
1890 }
1891
1892 if (tmpl == NULL && new_encap)
1893 { /* add tmpl if we are enabling it */
1894 tmpl = netlink_reserve(hdr, sizeof(request), XFRMA_ENCAP, sizeof(*tmpl));
1895 if (!tmpl)
1896 {
1897 goto failed;
1898 }
1899 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1900 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1901 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1902 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1903 }
1904
1905 if (replay_esn)
1906 {
1907 struct xfrm_replay_state_esn *state;
1908
1909 state = netlink_reserve(hdr, sizeof(request), XFRMA_REPLAY_ESN_VAL,
1910 sizeof(*state) + this->replay_bmp);
1911 if (!state)
1912 {
1913 goto failed;
1914 }
1915 memcpy(state, replay_esn, sizeof(*state) + this->replay_bmp);
1916 }
1917 else if (replay)
1918 {
1919 struct xfrm_replay_state *state;
1920
1921 state = netlink_reserve(hdr, sizeof(request), XFRMA_REPLAY_VAL,
1922 sizeof(*state));
1923 if (!state)
1924 {
1925 goto failed;
1926 }
1927 memcpy(state, replay, sizeof(*state));
1928 }
1929 else
1930 {
1931 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
1932 "with SPI %.8x", ntohl(spi));
1933 }
1934
1935 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1936 {
1937 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1938 goto failed;
1939 }
1940
1941 status = SUCCESS;
1942 failed:
1943 free(replay);
1944 free(replay_esn);
1945 memwipe(out, len);
1946 memwipe(request, sizeof(request));
1947 free(out);
1948
1949 return status;
1950 }
1951
1952 METHOD(kernel_ipsec_t, flush_sas, status_t,
1953 private_kernel_netlink_ipsec_t *this)
1954 {
1955 netlink_buf_t request;
1956 struct nlmsghdr *hdr;
1957 struct xfrm_usersa_flush *flush;
1958
1959 memset(&request, 0, sizeof(request));
1960
1961 DBG2(DBG_KNL, "flushing all SAD entries");
1962
1963 hdr = (struct nlmsghdr*)request;
1964 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1965 hdr->nlmsg_type = XFRM_MSG_FLUSHSA;
1966 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
1967
1968 flush = (struct xfrm_usersa_flush*)NLMSG_DATA(hdr);
1969 flush->proto = IPSEC_PROTO_ANY;
1970
1971 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1972 {
1973 DBG1(DBG_KNL, "unable to flush SAD entries");
1974 return FAILED;
1975 }
1976 return SUCCESS;
1977 }
1978
1979 /**
1980 * Add or update a policy in the kernel.
1981 *
1982 * Note: The mutex has to be locked when entering this function
1983 * and is unlocked here in any case.
1984 */
1985 static status_t add_policy_internal(private_kernel_netlink_ipsec_t *this,
1986 policy_entry_t *policy, policy_sa_t *mapping, bool update)
1987 {
1988 netlink_buf_t request;
1989 policy_entry_t clone;
1990 ipsec_sa_t *ipsec = mapping->sa;
1991 struct xfrm_userpolicy_info *policy_info;
1992 struct nlmsghdr *hdr;
1993 int i;
1994
1995 /* clone the policy so we are able to check it out again later */
1996 memcpy(&clone, policy, sizeof(policy_entry_t));
1997
1998 memset(&request, 0, sizeof(request));
1999 hdr = (struct nlmsghdr*)request;
2000 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2001 hdr->nlmsg_type = update ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
2002 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));
2003
2004 policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2005 policy_info->sel = policy->sel;
2006 policy_info->dir = policy->direction;
2007
2008 	/* use the priority assigned to this mapping (see get_priority()) */
2009 policy_info->priority = mapping->priority;
2010 policy_info->action = mapping->type != POLICY_DROP ? XFRM_POLICY_ALLOW
2011 : XFRM_POLICY_BLOCK;
2012 policy_info->share = XFRM_SHARE_ANY;
2013
2014 /* policies don't expire */
2015 policy_info->lft.soft_byte_limit = XFRM_INF;
2016 policy_info->lft.soft_packet_limit = XFRM_INF;
2017 policy_info->lft.hard_byte_limit = XFRM_INF;
2018 policy_info->lft.hard_packet_limit = XFRM_INF;
2019 policy_info->lft.soft_add_expires_seconds = 0;
2020 policy_info->lft.hard_add_expires_seconds = 0;
2021 policy_info->lft.soft_use_expires_seconds = 0;
2022 policy_info->lft.hard_use_expires_seconds = 0;
2023
2024 if (mapping->type == POLICY_IPSEC)
2025 {
2026 struct xfrm_user_tmpl *tmpl;
2027 struct {
2028 u_int8_t proto;
2029 bool use;
2030 } protos[] = {
2031 { IPPROTO_COMP, ipsec->cfg.ipcomp.transform != IPCOMP_NONE },
2032 { IPPROTO_ESP, ipsec->cfg.esp.use },
2033 { IPPROTO_AH, ipsec->cfg.ah.use },
2034 };
2035 ipsec_mode_t proto_mode = ipsec->cfg.mode;
2036 int count = 0;
2037
2038 for (i = 0; i < countof(protos); i++)
2039 {
2040 if (protos[i].use)
2041 {
2042 count++;
2043 }
2044 }
2045 tmpl = netlink_reserve(hdr, sizeof(request), XFRMA_TMPL,
2046 count * sizeof(*tmpl));
2047 if (!tmpl)
2048 {
2049 this->mutex->unlock(this->mutex);
2050 return FAILED;
2051 }
2052
2053 for (i = 0; i < countof(protos); i++)
2054 {
2055 if (!protos[i].use)
2056 {
2057 continue;
2058 }
2059 tmpl->reqid = ipsec->cfg.reqid;
2060 tmpl->id.proto = protos[i].proto;
2061 tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
2062 tmpl->mode = mode2kernel(proto_mode);
2063 tmpl->optional = protos[i].proto == IPPROTO_COMP &&
2064 policy->direction != POLICY_OUT;
2065 tmpl->family = ipsec->src->get_family(ipsec->src);
2066
2067 if (proto_mode == MODE_TUNNEL)
2068 { /* only for tunnel mode */
2069 host2xfrm(ipsec->src, &tmpl->saddr);
2070 host2xfrm(ipsec->dst, &tmpl->id.daddr);
2071 }
2072
2073 tmpl++;
2074
2075 /* use transport mode for other SAs */
2076 proto_mode = MODE_TRANSPORT;
2077 }
2078 }
2079
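	/* if a mark is configured, the policy only applies to packets that
	 * carry a matching firewall mark (e.g. set via iptables/nftables) */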
2080 if (ipsec->mark.value)
2081 {
2082 struct xfrm_mark *mrk;
2083
2084 mrk = netlink_reserve(hdr, sizeof(request), XFRMA_MARK, sizeof(*mrk));
2085 if (!mrk)
2086 {
2087 this->mutex->unlock(this->mutex);
2088 return FAILED;
2089 }
2090 mrk->v = ipsec->mark.value;
2091 mrk->m = ipsec->mark.mask;
2092 }
2093 this->mutex->unlock(this->mutex);
2094
2095 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2096 {
2097 return FAILED;
2098 }
2099
2100 	/* find the policy again; it might have been removed by another thread while the mutex was unlocked for send_ack() */
2101 this->mutex->lock(this->mutex);
2102 policy = this->policies->get(this->policies, &clone);
2103 if (!policy ||
2104 policy->used_by->find_first(policy->used_by,
2105 NULL, (void**)&mapping) != SUCCESS)
2106 { /* policy or mapping is already gone, ignore */
2107 this->mutex->unlock(this->mutex);
2108 return SUCCESS;
2109 }
2110
2111 /* install a route, if:
2112 * - this is a forward policy (to just get one for each child)
2113 	 * - we are in tunnel/BEET mode, or this is not an IPsec policy (e.g. bypass/drop)
2114 * - routing is not disabled via strongswan.conf
2115 */
2116 if (policy->direction == POLICY_FWD && this->install_routes &&
2117 (mapping->type != POLICY_IPSEC || ipsec->cfg.mode != MODE_TRANSPORT))
2118 {
2119 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)mapping;
2120 route_entry_t *route;
2121 host_t *iface;
2122
2123 INIT(route,
2124 .prefixlen = policy->sel.prefixlen_s,
2125 );
2126
2127 if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
2128 fwd->dst_ts, &route->src_ip) == SUCCESS)
2129 {
2130 /* get the nexthop to src (src as we are in POLICY_FWD) */
2131 route->gateway = hydra->kernel_interface->get_nexthop(
2132 hydra->kernel_interface, ipsec->src,
2133 ipsec->dst);
2134 route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
2135 memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
2136
2137 /* get the interface to install the route for. If we have a local
2138 * address, use it. Otherwise (for shunt policies) use the
2139 		 * route's source address. */
2140 iface = ipsec->dst;
2141 if (iface->is_anyaddr(iface))
2142 {
2143 iface = route->src_ip;
2144 }
2145 /* install route via outgoing interface */
2146 if (!hydra->kernel_interface->get_interface(hydra->kernel_interface,
2147 iface, &route->if_name))
2148 {
2149 this->mutex->unlock(this->mutex);
2150 route_entry_destroy(route);
2151 return SUCCESS;
2152 }
2153
2154 if (policy->route)
2155 {
2156 route_entry_t *old = policy->route;
2157 if (route_entry_equals(old, route))
2158 {
2159 this->mutex->unlock(this->mutex);
2160 route_entry_destroy(route);
2161 return SUCCESS;
2162 }
2163 /* uninstall previously installed route */
2164 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2165 old->dst_net, old->prefixlen, old->gateway,
2166 old->src_ip, old->if_name) != SUCCESS)
2167 {
2168 DBG1(DBG_KNL, "error uninstalling route installed with "
2169 "policy %R === %R %N", fwd->src_ts,
2170 fwd->dst_ts, policy_dir_names,
2171 policy->direction);
2172 }
2173 route_entry_destroy(old);
2174 policy->route = NULL;
2175 }
2176
2177 DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
2178 fwd->src_ts, route->gateway, route->src_ip, route->if_name);
2179 switch (hydra->kernel_interface->add_route(
2180 hydra->kernel_interface, route->dst_net,
2181 route->prefixlen, route->gateway,
2182 route->src_ip, route->if_name))
2183 {
2184 default:
2185 DBG1(DBG_KNL, "unable to install source route for %H",
2186 route->src_ip);
2187 /* FALL */
2188 case ALREADY_DONE:
2189 /* route exists, do not uninstall */
2190 route_entry_destroy(route);
2191 break;
2192 case SUCCESS:
2193 /* cache the installed route */
2194 policy->route = route;
2195 break;
2196 }
2197 }
2198 else
2199 {
2200 free(route);
2201 }
2202 }
2203 this->mutex->unlock(this->mutex);
2204 return SUCCESS;
2205 }
2206
2207 METHOD(kernel_ipsec_t, add_policy, status_t,
2208 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
2209 traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
2210 policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
2211 mark_t mark, policy_priority_t priority)
2212 {
2213 policy_entry_t *policy, *current;
2214 policy_sa_t *assigned_sa, *current_sa;
2215 enumerator_t *enumerator;
2216 bool found = FALSE, update = TRUE;
2217
2218 /* create a policy */
2219 INIT(policy,
2220 .sel = ts2selector(src_ts, dst_ts),
2221 .mark = mark.value & mark.mask,
2222 .direction = direction,
2223 );
2224
2225 /* find the policy, which matches EXACTLY */
2226 this->mutex->lock(this->mutex);
2227 current = this->policies->get(this->policies, policy);
2228 if (current)
2229 {
2230 /* use existing policy */
2231 DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%08x) "
2232 "already exists, increasing refcount",
2233 src_ts, dst_ts, policy_dir_names, direction,
2234 mark.value, mark.mask);
2235 policy_entry_destroy(this, policy);
2236 policy = current;
2237 found = TRUE;
2238 }
2239 else
2240 { /* use the new one, if we have no such policy */
2241 policy->used_by = linked_list_create();
2242 this->policies->put(this->policies, policy, policy);
2243 }
2244
2245 /* cache the assigned IPsec SA */
2246 assigned_sa = policy_sa_create(this, direction, type, src, dst, src_ts,
2247 dst_ts, mark, sa);
2248 assigned_sa->priority = get_priority(policy, priority);
2249
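	/* with policy history enabled, the mappings are kept sorted by priority
	 * (a lower value means higher precedence in the kernel); the kernel
	 * policy only has to be rewritten if the new SA ends up at the head of
	 * the list, otherwise a higher-precedence mapping is already installed */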
2250 if (this->policy_history)
2251 { /* insert the SA according to its priority */
2252 enumerator = policy->used_by->create_enumerator(policy->used_by);
2253 while (enumerator->enumerate(enumerator, (void**)&current_sa))
2254 {
2255 if (current_sa->priority >= assigned_sa->priority)
2256 {
2257 break;
2258 }
2259 update = FALSE;
2260 }
2261 policy->used_by->insert_before(policy->used_by, enumerator,
2262 assigned_sa);
2263 enumerator->destroy(enumerator);
2264 }
2265 else
2266 { /* simply insert it last and only update if it is not installed yet */
2267 policy->used_by->insert_last(policy->used_by, assigned_sa);
2268 update = !found;
2269 }
2270
2271 if (!update)
2272 { /* we don't update the policy if the priority is lower than that of
2273 * the currently installed one */
2274 this->mutex->unlock(this->mutex);
2275 return SUCCESS;
2276 }
2277
2278 DBG2(DBG_KNL, "%s policy %R === %R %N (mark %u/0x%08x)",
2279 found ? "updating" : "adding", src_ts, dst_ts,
2280 policy_dir_names, direction, mark.value, mark.mask);
2281
2282 if (add_policy_internal(this, policy, assigned_sa, found) != SUCCESS)
2283 {
2284 DBG1(DBG_KNL, "unable to %s policy %R === %R %N",
2285 found ? "update" : "add", src_ts, dst_ts,
2286 policy_dir_names, direction);
2287 return FAILED;
2288 }
2289 return SUCCESS;
2290 }
2291
2292 METHOD(kernel_ipsec_t, query_policy, status_t,
2293 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2294 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
2295 u_int32_t *use_time)
2296 {
2297 netlink_buf_t request;
2298 struct nlmsghdr *out = NULL, *hdr;
2299 struct xfrm_userpolicy_id *policy_id;
2300 struct xfrm_userpolicy_info *policy = NULL;
2301 size_t len;
2302
2303 memset(&request, 0, sizeof(request));
2304
2305 DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%08x)",
2306 src_ts, dst_ts, policy_dir_names, direction,
2307 mark.value, mark.mask);
2308
2309 hdr = (struct nlmsghdr*)request;
2310 hdr->nlmsg_flags = NLM_F_REQUEST;
2311 hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
2312 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2313
2314 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2315 policy_id->sel = ts2selector(src_ts, dst_ts);
2316 policy_id->dir = direction;
2317
2318 if (mark.value)
2319 {
2320 struct xfrm_mark *mrk;
2321
2322 mrk = netlink_reserve(hdr, sizeof(request), XFRMA_MARK, sizeof(*mrk));
2323 if (!mrk)
2324 {
2325 return FAILED;
2326 }
2327 mrk->v = mark.value;
2328 mrk->m = mark.mask;
2329 }
2330
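	/* the kernel answers a GETPOLICY request with a single NEWPOLICY
	 * message or an NLMSG_ERROR; the loop below extracts it from the reply */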
2331 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
2332 {
2333 hdr = out;
2334 while (NLMSG_OK(hdr, len))
2335 {
2336 switch (hdr->nlmsg_type)
2337 {
2338 case XFRM_MSG_NEWPOLICY:
2339 {
2340 policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2341 break;
2342 }
2343 case NLMSG_ERROR:
2344 {
2345 struct nlmsgerr *err = NLMSG_DATA(hdr);
2346 DBG1(DBG_KNL, "querying policy failed: %s (%d)",
2347 strerror(-err->error), -err->error);
2348 break;
2349 }
2350 default:
2351 hdr = NLMSG_NEXT(hdr, len);
2352 continue;
2353 case NLMSG_DONE:
2354 break;
2355 }
2356 break;
2357 }
2358 }
2359
2360 if (policy == NULL)
2361 {
2362 DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
2363 policy_dir_names, direction);
2364 free(out);
2365 return FAILED;
2366 }
2367
2368 if (policy->curlft.use_time)
2369 {
2370 		/* the kernel returns system time, but we need monotonic time: subtract the elapsed wall-clock time since last use from the current monotonic time */
2371 *use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
2372 }
2373 else
2374 {
2375 *use_time = 0;
2376 }
2377
2378 free(out);
2379 return SUCCESS;
2380 }
2381
2382 METHOD(kernel_ipsec_t, del_policy, status_t,
2383 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2384 traffic_selector_t *dst_ts, policy_dir_t direction, u_int32_t reqid,
2385 mark_t mark, policy_priority_t prio)
2386 {
2387 policy_entry_t *current, policy;
2388 enumerator_t *enumerator;
2389 policy_sa_t *mapping;
2390 netlink_buf_t request;
2391 struct nlmsghdr *hdr;
2392 struct xfrm_userpolicy_id *policy_id;
2393 bool is_installed = TRUE;
2394 u_int32_t priority;
2395
2396 DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x)",
2397 src_ts, dst_ts, policy_dir_names, direction,
2398 mark.value, mark.mask);
2399
2400 	/* create a policy entry used as lookup key */
2401 memset(&policy, 0, sizeof(policy_entry_t));
2402 policy.sel = ts2selector(src_ts, dst_ts);
2403 policy.mark = mark.value & mark.mask;
2404 policy.direction = direction;
2405
2406 /* find the policy */
2407 this->mutex->lock(this->mutex);
2408 current = this->policies->get(this->policies, &policy);
2409 if (!current)
2410 {
2411 if (mark.value)
2412 {
2413 DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x) "
2414 "failed, not found", src_ts, dst_ts, policy_dir_names,
2415 direction, mark.value, mark.mask);
2416 }
2417 else
2418 {
2419 DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
2420 src_ts, dst_ts, policy_dir_names, direction);
2421 }
2422 this->mutex->unlock(this->mutex);
2423 return NOT_FOUND;
2424 }
2425
2426 if (this->policy_history)
2427 { /* remove mapping to SA by reqid and priority */
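		/* is_installed stays TRUE only if the removed mapping was at the head
		 * of the list, i.e. the one whose attributes are currently installed
		 * in the kernel */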
2428 priority = get_priority(current, prio);
2429 enumerator = current->used_by->create_enumerator(current->used_by);
2430 while (enumerator->enumerate(enumerator, (void**)&mapping))
2431 {
2432 if (reqid == mapping->sa->cfg.reqid &&
2433 priority == mapping->priority)
2434 {
2435 current->used_by->remove_at(current->used_by, enumerator);
2436 policy_sa_destroy(mapping, &direction, this);
2437 break;
2438 }
2439 is_installed = FALSE;
2440 }
2441 enumerator->destroy(enumerator);
2442 }
2443 else
2444 { /* remove one of the SAs but don't update the policy */
2445 current->used_by->remove_last(current->used_by, (void**)&mapping);
2446 policy_sa_destroy(mapping, &direction, this);
2447 is_installed = FALSE;
2448 }
2449
2450 if (current->used_by->get_count(current->used_by) > 0)
2451 { /* policy is used by more SAs, keep in kernel */
2452 DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
2453 if (!is_installed)
2454 { /* no need to update as the policy was not installed for this SA */
2455 this->mutex->unlock(this->mutex);
2456 return SUCCESS;
2457 }
2458
2459 DBG2(DBG_KNL, "updating policy %R === %R %N (mark %u/0x%08x)",
2460 src_ts, dst_ts, policy_dir_names, direction,
2461 mark.value, mark.mask);
2462
2463 current->used_by->get_first(current->used_by, (void**)&mapping);
2464 if (add_policy_internal(this, current, mapping, TRUE) != SUCCESS)
2465 {
2466 DBG1(DBG_KNL, "unable to update policy %R === %R %N",
2467 src_ts, dst_ts, policy_dir_names, direction);
2468 return FAILED;
2469 }
2470 return SUCCESS;
2471 }
2472
2473 memset(&request, 0, sizeof(request));
2474
2475 hdr = (struct nlmsghdr*)request;
2476 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2477 hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
2478 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2479
2480 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2481 policy_id->sel = current->sel;
2482 policy_id->dir = direction;
2483
2484 if (mark.value)
2485 {
2486 struct xfrm_mark *mrk;
2487
2488 mrk = netlink_reserve(hdr, sizeof(request), XFRMA_MARK, sizeof(*mrk));
2489 if (!mrk)
2490 {
2491 return FAILED;
2492 }
2493 mrk->v = mark.value;
2494 mrk->m = mark.mask;
2495 }
2496
2497 if (current->route)
2498 {
2499 route_entry_t *route = current->route;
2500 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2501 route->dst_net, route->prefixlen, route->gateway,
2502 route->src_ip, route->if_name) != SUCCESS)
2503 {
2504 DBG1(DBG_KNL, "error uninstalling route installed with "
2505 "policy %R === %R %N", src_ts, dst_ts,
2506 policy_dir_names, direction);
2507 }
2508 }
2509
2510 this->policies->remove(this->policies, current);
2511 policy_entry_destroy(this, current);
2512 this->mutex->unlock(this->mutex);
2513
2514 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2515 {
2516 if (mark.value)
2517 {
2518 DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
2519 "(mark %u/0x%08x)", src_ts, dst_ts, policy_dir_names,
2520 direction, mark.value, mark.mask);
2521 }
2522 else
2523 {
2524 DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
2525 src_ts, dst_ts, policy_dir_names, direction);
2526 }
2527 return FAILED;
2528 }
2529 return SUCCESS;
2530 }
2531
2532 METHOD(kernel_ipsec_t, flush_policies, status_t,
2533 private_kernel_netlink_ipsec_t *this)
2534 {
2535 netlink_buf_t request;
2536 struct nlmsghdr *hdr;
2537
2538 memset(&request, 0, sizeof(request));
2539
2540 DBG2(DBG_KNL, "flushing all policies from SPD");
2541
2542 hdr = (struct nlmsghdr*)request;
2543 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2544 hdr->nlmsg_type = XFRM_MSG_FLUSHPOLICY;
2545 hdr->nlmsg_len = NLMSG_LENGTH(0); /* no data associated */
2546
2547 /* by adding an rtattr of type XFRMA_POLICY_TYPE we could restrict this
2548 * to main or sub policies (default is main) */
2549
2550 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2551 {
2552 DBG1(DBG_KNL, "unable to flush SPD entries");
2553 return FAILED;
2554 }
2555 return SUCCESS;
2556 }
2557
2559 METHOD(kernel_ipsec_t, bypass_socket, bool,
2560 private_kernel_netlink_ipsec_t *this, int fd, int family)
2561 {
2562 struct xfrm_userpolicy_info policy;
2563 u_int sol, ipsec_policy;
2564
2565 switch (family)
2566 {
2567 case AF_INET:
2568 sol = SOL_IP;
2569 ipsec_policy = IP_XFRM_POLICY;
2570 break;
2571 case AF_INET6:
2572 sol = SOL_IPV6;
2573 ipsec_policy = IPV6_XFRM_POLICY;
2574 break;
2575 default:
2576 return FALSE;
2577 }
2578
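	/* installing an allow-all policy directly on the socket via the
	 * IP_XFRM_POLICY/IPV6_XFRM_POLICY option exempts its traffic (the IKE
	 * packets themselves) from the IPsec policies in the SPD */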
2579 memset(&policy, 0, sizeof(policy));
2580 policy.action = XFRM_POLICY_ALLOW;
2581 policy.sel.family = family;
2582
2583 policy.dir = XFRM_POLICY_OUT;
2584 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2585 {
2586 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2587 strerror(errno));
2588 return FALSE;
2589 }
2590 policy.dir = XFRM_POLICY_IN;
2591 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2592 {
2593 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2594 strerror(errno));
2595 return FALSE;
2596 }
2597 return TRUE;
2598 }
2599
2600 METHOD(kernel_ipsec_t, enable_udp_decap, bool,
2601 private_kernel_netlink_ipsec_t *this, int fd, int family, u_int16_t port)
2602 {
2603 int type = UDP_ENCAP_ESPINUDP;
2604
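	/* with UDP_ENCAP_ESPINUDP the kernel strips the RFC 3948 UDP
	 * encapsulation from ESP packets received on this socket and passes
	 * them to the IPsec stack; IKE messages are still delivered normally */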
2605 if (setsockopt(fd, SOL_UDP, UDP_ENCAP, &type, sizeof(type)) < 0)
2606 {
2607 DBG1(DBG_KNL, "unable to set UDP_ENCAP: %s", strerror(errno));
2608 return FALSE;
2609 }
2610 return TRUE;
2611 }
2612
2613 METHOD(kernel_ipsec_t, destroy, void,
2614 private_kernel_netlink_ipsec_t *this)
2615 {
2616 enumerator_t *enumerator;
2617 policy_entry_t *policy;
2618
2619 if (this->socket_xfrm_events > 0)
2620 {
2621 close(this->socket_xfrm_events);
2622 }
2623 DESTROY_IF(this->socket_xfrm);
2624 enumerator = this->policies->create_enumerator(this->policies);
2625 while (enumerator->enumerate(enumerator, &policy, &policy))
2626 {
2627 policy_entry_destroy(this, policy);
2628 }
2629 enumerator->destroy(enumerator);
2630 this->policies->destroy(this->policies);
2631 this->sas->destroy(this->sas);
2632 this->mutex->destroy(this->mutex);
2633 free(this);
2634 }
2635
2636 /*
2637 * Described in header.
2638 */
2639 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2640 {
2641 private_kernel_netlink_ipsec_t *this;
2642 bool register_for_events = TRUE;
2643 int fd;
2644
2645 INIT(this,
2646 .public = {
2647 .interface = {
2648 .get_features = _get_features,
2649 .get_spi = _get_spi,
2650 .get_cpi = _get_cpi,
2651 .add_sa = _add_sa,
2652 .update_sa = _update_sa,
2653 .query_sa = _query_sa,
2654 .del_sa = _del_sa,
2655 .flush_sas = _flush_sas,
2656 .add_policy = _add_policy,
2657 .query_policy = _query_policy,
2658 .del_policy = _del_policy,
2659 .flush_policies = _flush_policies,
2660 .bypass_socket = _bypass_socket,
2661 .enable_udp_decap = _enable_udp_decap,
2662 .destroy = _destroy,
2663 },
2664 },
2665 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2666 (hashtable_equals_t)policy_equals, 32),
2667 .sas = hashtable_create((hashtable_hash_t)ipsec_sa_hash,
2668 (hashtable_equals_t)ipsec_sa_equals, 32),
2669 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2670 .policy_history = TRUE,
2671 .install_routes = lib->settings->get_bool(lib->settings,
2672 "%s.install_routes", TRUE, hydra->daemon),
2673 .replay_window = lib->settings->get_int(lib->settings,
2674 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2675 );
2676
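	/* number of 32-bit words needed to hold the configured replay window
	 * bitmap, rounded up */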
2677 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2678 (sizeof(u_int32_t) * 8);
2679
2680 if (streq(hydra->daemon, "pluto"))
2681 { /* no routes for pluto, they are installed via updown script */
2682 this->install_routes = FALSE;
2683 /* no policy history for pluto */
2684 this->policy_history = FALSE;
2685 }
2686 else if (streq(hydra->daemon, "starter"))
2687 { /* starter has no threads, so we do not register for kernel events */
2688 register_for_events = FALSE;
2689 }
2690
2691 	/* extend the lifetime of allocated SPIs (acquire state) in the kernel to 165s, covering charon's default retransmission timeout */
2692 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2693 if (fd > 0)
2694 {
2695 ignore_result(write(fd, "165", 3));
2696 close(fd);
2697 }
2698
2699 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2700 if (!this->socket_xfrm)
2701 {
2702 destroy(this);
2703 return NULL;
2704 }
2705
2706 if (register_for_events)
2707 {
2708 struct sockaddr_nl addr;
2709
2710 memset(&addr, 0, sizeof(addr));
2711 addr.nl_family = AF_NETLINK;
2712
2713 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2714 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2715 if (this->socket_xfrm_events <= 0)
2716 {
2717 DBG1(DBG_KNL, "unable to create XFRM event socket");
2718 destroy(this);
2719 return NULL;
2720 }
2721 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2722 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2723 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2724 {
2725 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2726 destroy(this);
2727 return NULL;
2728 }
2729 lib->processor->queue_job(lib->processor,
2730 (job_t*)callback_job_create_with_prio(
2731 (callback_job_cb_t)receive_events, this, NULL,
2732 (callback_job_cancel_t)return_false, JOB_PRIO_CRITICAL));
2733 }
2734
2735 return &this->public;
2736 }