Avoid unneeded termination of netlink algorithm name arrays with END_OF_LIST
[strongswan.git] / src / libhydra / plugins / kernel_netlink / kernel_netlink_ipsec.c
1 /*
2 * Copyright (C) 2006-2012 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <utils/debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <collections/hashtable.h>
43 #include <collections/linked_list.h>
44 #include <processing/jobs/callback_job.h>
45
46 /** Required for Linux 2.6.26 kernel and later */
47 #ifndef XFRM_STATE_AF_UNSPEC
48 #define XFRM_STATE_AF_UNSPEC 32
49 #endif
50
51 /** From linux/in.h */
52 #ifndef IP_XFRM_POLICY
53 #define IP_XFRM_POLICY 17
54 #endif
55
56 /** Missing on uclibc */
57 #ifndef IPV6_XFRM_POLICY
58 #define IPV6_XFRM_POLICY 34
59 #endif /*IPV6_XFRM_POLICY*/
60
61 /* from linux/udp.h */
62 #ifndef UDP_ENCAP
63 #define UDP_ENCAP 100
64 #endif
65
66 #ifndef UDP_ENCAP_ESPINUDP
67 #define UDP_ENCAP_ESPINUDP 2
68 #endif
69
70 /* this is not defined on some platforms */
71 #ifndef SOL_UDP
72 #define SOL_UDP IPPROTO_UDP
73 #endif
74
75 /** Default priority of installed policies */
76 #define PRIO_BASE 512
77
78 /** Default replay window size, if not set using charon.replay_window */
79 #define DEFAULT_REPLAY_WINDOW 32
80
81 /**
82 * Map the limit for bytes and packets to XFRM_INF by default
83 */
84 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
85
86 /**
87 * Create ORable bitfield of XFRM NL groups
88 */
89 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
90
91 /**
92 * Returns a pointer to the first rtattr following the nlmsghdr *nlh and the
93 * 'usual' netlink data x like 'struct xfrm_usersa_info'
94 */
95 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + \
96 NLMSG_ALIGN(sizeof(x))))
97 /**
98 * Returns a pointer to the next rtattr following rta.
99 * !!! Do not use this to parse messages. Use RTA_NEXT and RTA_OK instead !!!
100 */
101 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + \
102 RTA_ALIGN((rta)->rta_len)))
103 /**
104 * Returns the total size of attached rta data
105 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
106 */
107 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
108
typedef struct kernel_algorithm_t kernel_algorithm_t;

/**
 * Mapping of IKEv2 kernel identifier to linux crypto API names
 *
 * Entries of this type populate the static algorithm tables below and are
 * resolved via lookup_algorithm().
 */
struct kernel_algorithm_t {
	/**
	 * Identifier specified in IKEv2
	 */
	int ikev2;

	/**
	 * Name of the algorithm in linux crypto API
	 */
	char *name;
};
125
/**
 * Names of XFRM netlink message types, for debug output only
 */
ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
	"XFRM_MSG_NEWSA",
	"XFRM_MSG_DELSA",
	"XFRM_MSG_GETSA",
	"XFRM_MSG_NEWPOLICY",
	"XFRM_MSG_DELPOLICY",
	"XFRM_MSG_GETPOLICY",
	"XFRM_MSG_ALLOCSPI",
	"XFRM_MSG_ACQUIRE",
	"XFRM_MSG_EXPIRE",
	"XFRM_MSG_UPDPOLICY",
	"XFRM_MSG_UPDSA",
	"XFRM_MSG_POLEXPIRE",
	"XFRM_MSG_FLUSHSA",
	"XFRM_MSG_FLUSHPOLICY",
	"XFRM_MSG_NEWAE",
	"XFRM_MSG_GETAE",
	"XFRM_MSG_REPORT",
	"XFRM_MSG_MIGRATE",
	"XFRM_MSG_NEWSADINFO",
	"XFRM_MSG_GETSADINFO",
	"XFRM_MSG_NEWSPDINFO",
	"XFRM_MSG_GETSPDINFO",
	"XFRM_MSG_MAPPING"
);
151
/**
 * Names of XFRM netlink attribute types, for debug output only
 */
ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_REPLAY_ESN_VAL,
	"XFRMA_UNSPEC",
	"XFRMA_ALG_AUTH",
	"XFRMA_ALG_CRYPT",
	"XFRMA_ALG_COMP",
	"XFRMA_ENCAP",
	"XFRMA_TMPL",
	"XFRMA_SA",
	"XFRMA_POLICY",
	"XFRMA_SEC_CTX",
	"XFRMA_LTIME_VAL",
	"XFRMA_REPLAY_VAL",
	"XFRMA_REPLAY_THRESH",
	"XFRMA_ETIMER_THRESH",
	"XFRMA_SRCADDR",
	"XFRMA_COADDR",
	"XFRMA_LASTUSED",
	"XFRMA_POLICY_TYPE",
	"XFRMA_MIGRATE",
	"XFRMA_ALG_AEAD",
	"XFRMA_KMADDRESS",
	"XFRMA_ALG_AUTH_TRUNC",
	"XFRMA_MARK",
	"XFRMA_TFCPAD",
	"XFRMA_REPLAY_ESN_VAL",
);
178
/**
 * Algorithms for encryption
 *
 * Maps IKEv2 encryption identifiers to linux crypto API algorithm names.
 * Commented-out entries have no kernel counterpart.
 */
static kernel_algorithm_t encryption_algs[] = {
/*	{ENCR_DES_IV64,				"***"				}, */
	{ENCR_DES,					"des"				},
	{ENCR_3DES,					"des3_ede"			},
/*	{ENCR_RC5,					"***"				}, */
/*	{ENCR_IDEA,					"***"				}, */
	{ENCR_CAST,					"cast128"			},
	{ENCR_BLOWFISH,				"blowfish"			},
/*	{ENCR_3IDEA,				"***"				}, */
/*	{ENCR_DES_IV32,				"***"				}, */
	{ENCR_NULL,					"cipher_null"		},
	{ENCR_AES_CBC,				"aes"				},
	{ENCR_AES_CTR,				"rfc3686(ctr(aes))"	},
	{ENCR_AES_CCM_ICV8,			"rfc4309(ccm(aes))"	},
	{ENCR_AES_CCM_ICV12,		"rfc4309(ccm(aes))"	},
	{ENCR_AES_CCM_ICV16,		"rfc4309(ccm(aes))"	},
	{ENCR_AES_GCM_ICV8,			"rfc4106(gcm(aes))"	},
	{ENCR_AES_GCM_ICV12,		"rfc4106(gcm(aes))"	},
	{ENCR_AES_GCM_ICV16,		"rfc4106(gcm(aes))"	},
	{ENCR_NULL_AUTH_AES_GMAC,	"rfc4543(gcm(aes))"	},
	{ENCR_CAMELLIA_CBC,			"cbc(camellia)"		},
/*	{ENCR_CAMELLIA_CTR,			"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV8,	"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV12,	"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV16,	"***"				}, */
	{ENCR_SERPENT_CBC,			"serpent"			},
	{ENCR_TWOFISH_CBC,			"twofish"			},
};
210
/**
 * Algorithms for integrity protection
 *
 * Maps IKEv2 integrity identifiers to linux crypto API algorithm names.
 * NOTE(review): the *_96 variants use the bare hash name, the longer
 * truncation variants the explicit hmac(...) form — presumably matching
 * the kernel's default truncation; confirm against linux crypto API docs.
 */
static kernel_algorithm_t integrity_algs[] = {
	{AUTH_HMAC_MD5_96,			"md5"			},
	{AUTH_HMAC_MD5_128,			"hmac(md5)"		},
	{AUTH_HMAC_SHA1_96,			"sha1"			},
	{AUTH_HMAC_SHA1_160,		"hmac(sha1)"	},
	{AUTH_HMAC_SHA2_256_96,		"sha256"		},
	{AUTH_HMAC_SHA2_256_128,	"hmac(sha256)"	},
	{AUTH_HMAC_SHA2_384_192,	"hmac(sha384)"	},
	{AUTH_HMAC_SHA2_512_256,	"hmac(sha512)"	},
/*	{AUTH_DES_MAC,				"***"			}, */
/*	{AUTH_KPDK_MD5,				"***"			}, */
	{AUTH_AES_XCBC_96,			"xcbc(aes)"		},
};
227
/**
 * Algorithms for IPComp
 *
 * Maps IPComp transform identifiers to linux crypto API algorithm names.
 */
static kernel_algorithm_t compression_algs[] = {
/*	{IPCOMP_OUI,		"***"		}, */
	{IPCOMP_DEFLATE,	"deflate"	},
	{IPCOMP_LZS,		"lzs"		},
	{IPCOMP_LZJH,		"lzjh"		},
};
237
238 /**
239 * Look up a kernel algorithm name and its key size
240 */
241 static char* lookup_algorithm(transform_type_t type, int ikev2)
242 {
243 kernel_algorithm_t *list;
244 int i, count;
245 char *name;
246
247 switch (type)
248 {
249 case ENCRYPTION_ALGORITHM:
250 list = encryption_algs;
251 count = countof(encryption_algs);
252 break;
253 case INTEGRITY_ALGORITHM:
254 list = integrity_algs;
255 count = countof(integrity_algs);
256 break;
257 case COMPRESSION_ALGORITHM:
258 list = compression_algs;
259 count = countof(compression_algs);
260 break;
261 default:
262 return NULL;
263 }
264 for (i = 0; i < count; i++)
265 {
266 if (list[i].ikev2 == ikev2)
267 {
268 return list[i].name;
269 }
270 }
271 if (hydra->kernel_interface->lookup_algorithm(hydra->kernel_interface,
272 ikev2, type, NULL, &name))
273 {
274 return name;
275 }
276 return NULL;
277 }
278
typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;

/**
 * Private variables and functions of kernel_netlink class.
 */
struct private_kernel_netlink_ipsec_t {
	/**
	 * Public part of the kernel_netlink_t object
	 */
	kernel_netlink_ipsec_t public;

	/**
	 * Mutex to lock access to installed policies
	 */
	mutex_t *mutex;

	/**
	 * Hash table of installed policies (policy_entry_t)
	 */
	hashtable_t *policies;

	/**
	 * Hash table of IPsec SAs using policies (ipsec_sa_t)
	 */
	hashtable_t *sas;

	/**
	 * Netlink xfrm socket (IPsec)
	 */
	netlink_socket_t *socket_xfrm;

	/**
	 * Netlink xfrm socket to receive acquire and expire events
	 */
	int socket_xfrm_events;

	/**
	 * Whether to install routes along policies
	 */
	bool install_routes;

	/**
	 * Whether to track the history of a policy
	 */
	bool policy_history;

	/**
	 * Size of the replay window, in packets (= bits)
	 */
	u_int32_t replay_window;

	/**
	 * Size of the replay window bitmap, in number of __u32 blocks
	 */
	u_int32_t replay_bmp;
};
335
typedef struct route_entry_t route_entry_t;

/**
 * Installed routing entry
 */
struct route_entry_t {
	/** Name of the interface the route is bound to */
	char *if_name;

	/** Source ip of the route */
	host_t *src_ip;

	/** Gateway for this route (may be NULL, see route_entry_destroy()) */
	host_t *gateway;

	/** Destination net */
	chunk_t dst_net;

	/** Destination net prefixlen */
	u_int8_t prefixlen;
};
357
358 /**
359 * Destroy a route_entry_t object
360 */
361 static void route_entry_destroy(route_entry_t *this)
362 {
363 free(this->if_name);
364 this->src_ip->destroy(this->src_ip);
365 DESTROY_IF(this->gateway);
366 chunk_free(&this->dst_net);
367 free(this);
368 }
369
370 /**
371 * Compare two route_entry_t objects
372 */
373 static bool route_entry_equals(route_entry_t *a, route_entry_t *b)
374 {
375 return a->if_name && b->if_name && streq(a->if_name, b->if_name) &&
376 a->src_ip->ip_equals(a->src_ip, b->src_ip) &&
377 a->gateway->ip_equals(a->gateway, b->gateway) &&
378 chunk_equals(a->dst_net, b->dst_net) && a->prefixlen == b->prefixlen;
379 }
380
typedef struct ipsec_sa_t ipsec_sa_t;

/**
 * IPsec SA assigned to a policy.
 *
 * Reference-counted and shared between policies via the sas hash table,
 * see ipsec_sa_create()/ipsec_sa_destroy().
 */
struct ipsec_sa_t {
	/** Source address of this SA */
	host_t *src;

	/** Destination address of this SA */
	host_t *dst;

	/** Optional mark */
	mark_t mark;

	/** Description of this SA */
	ipsec_sa_cfg_t cfg;

	/** Reference count for this SA */
	refcount_t refcount;
};
402
403 /**
404 * Hash function for ipsec_sa_t objects
405 */
406 static u_int ipsec_sa_hash(ipsec_sa_t *sa)
407 {
408 return chunk_hash_inc(sa->src->get_address(sa->src),
409 chunk_hash_inc(sa->dst->get_address(sa->dst),
410 chunk_hash_inc(chunk_from_thing(sa->mark),
411 chunk_hash(chunk_from_thing(sa->cfg)))));
412 }
413
414 /**
415 * Equality function for ipsec_sa_t objects
416 */
417 static bool ipsec_sa_equals(ipsec_sa_t *sa, ipsec_sa_t *other_sa)
418 {
419 return sa->src->ip_equals(sa->src, other_sa->src) &&
420 sa->dst->ip_equals(sa->dst, other_sa->dst) &&
421 memeq(&sa->mark, &other_sa->mark, sizeof(mark_t)) &&
422 memeq(&sa->cfg, &other_sa->cfg, sizeof(ipsec_sa_cfg_t));
423 }
424
425 /**
426 * Allocate or reference an IPsec SA object
427 */
428 static ipsec_sa_t *ipsec_sa_create(private_kernel_netlink_ipsec_t *this,
429 host_t *src, host_t *dst, mark_t mark,
430 ipsec_sa_cfg_t *cfg)
431 {
432 ipsec_sa_t *sa, *found;
433 INIT(sa,
434 .src = src,
435 .dst = dst,
436 .mark = mark,
437 .cfg = *cfg,
438 );
439 found = this->sas->get(this->sas, sa);
440 if (!found)
441 {
442 sa->src = src->clone(src);
443 sa->dst = dst->clone(dst);
444 this->sas->put(this->sas, sa, sa);
445 }
446 else
447 {
448 free(sa);
449 sa = found;
450 }
451 ref_get(&sa->refcount);
452 return sa;
453 }
454
455 /**
456 * Release and destroy an IPsec SA object
457 */
458 static void ipsec_sa_destroy(private_kernel_netlink_ipsec_t *this,
459 ipsec_sa_t *sa)
460 {
461 if (ref_put(&sa->refcount))
462 {
463 this->sas->remove(this->sas, sa);
464 DESTROY_IF(sa->src);
465 DESTROY_IF(sa->dst);
466 free(sa);
467 }
468 }
469
typedef struct policy_sa_t policy_sa_t;
typedef struct policy_sa_fwd_t policy_sa_fwd_t;

/**
 * Mapping between a policy and an IPsec SA.
 */
struct policy_sa_t {
	/** Priority assigned to the policy when installed with this SA */
	u_int32_t priority;

	/** Type of the policy */
	policy_type_t type;

	/** Assigned SA */
	ipsec_sa_t *sa;
};

/**
 * For forward policies we also cache the traffic selectors in order to install
 * the route.
 *
 * Extends policy_sa_t; policy_sa_create()/policy_sa_destroy() switch on the
 * policy direction to handle the extra members.
 */
struct policy_sa_fwd_t {
	/** Generic interface */
	policy_sa_t generic;

	/** Source traffic selector of this policy */
	traffic_selector_t *src_ts;

	/** Destination traffic selector of this policy */
	traffic_selector_t *dst_ts;
};
501
502 /**
503 * Create a policy_sa(_fwd)_t object
504 */
505 static policy_sa_t *policy_sa_create(private_kernel_netlink_ipsec_t *this,
506 policy_dir_t dir, policy_type_t type, host_t *src, host_t *dst,
507 traffic_selector_t *src_ts, traffic_selector_t *dst_ts, mark_t mark,
508 ipsec_sa_cfg_t *cfg)
509 {
510 policy_sa_t *policy;
511
512 if (dir == POLICY_FWD)
513 {
514 policy_sa_fwd_t *fwd;
515 INIT(fwd,
516 .src_ts = src_ts->clone(src_ts),
517 .dst_ts = dst_ts->clone(dst_ts),
518 );
519 policy = &fwd->generic;
520 }
521 else
522 {
523 INIT(policy, .priority = 0);
524 }
525 policy->type = type;
526 policy->sa = ipsec_sa_create(this, src, dst, mark, cfg);
527 return policy;
528 }
529
530 /**
531 * Destroy a policy_sa(_fwd)_t object
532 */
533 static void policy_sa_destroy(policy_sa_t *policy, policy_dir_t *dir,
534 private_kernel_netlink_ipsec_t *this)
535 {
536 if (*dir == POLICY_FWD)
537 {
538 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)policy;
539 fwd->src_ts->destroy(fwd->src_ts);
540 fwd->dst_ts->destroy(fwd->dst_ts);
541 }
542 ipsec_sa_destroy(this, policy->sa);
543 free(policy);
544 }
545
typedef struct policy_entry_t policy_entry_t;

/**
 * Installed kernel policy.
 */
struct policy_entry_t {

	/** Direction of this policy: in, out, forward */
	u_int8_t direction;

	/** Parameters of installed policy */
	struct xfrm_selector sel;

	/** Optional mark */
	u_int32_t mark;

	/** Associated route installed for this policy (may be NULL) */
	route_entry_t *route;

	/** List of SAs this policy is used by, ordered by priority */
	linked_list_t *used_by;
};
568
569 /**
570 * Destroy a policy_entry_t object
571 */
572 static void policy_entry_destroy(private_kernel_netlink_ipsec_t *this,
573 policy_entry_t *policy)
574 {
575 if (policy->route)
576 {
577 route_entry_destroy(policy->route);
578 }
579 if (policy->used_by)
580 {
581 policy->used_by->invoke_function(policy->used_by,
582 (linked_list_invoke_t)policy_sa_destroy,
583 &policy->direction, this);
584 policy->used_by->destroy(policy->used_by);
585 }
586 free(policy);
587 }
588
589 /**
590 * Hash function for policy_entry_t objects
591 */
592 static u_int policy_hash(policy_entry_t *key)
593 {
594 chunk_t chunk = chunk_from_thing(key->sel);
595 return chunk_hash_inc(chunk, chunk_hash(chunk_from_thing(key->mark)));
596 }
597
598 /**
599 * Equality function for policy_entry_t objects
600 */
601 static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
602 {
603 return memeq(&key->sel, &other_key->sel, sizeof(struct xfrm_selector)) &&
604 key->mark == other_key->mark &&
605 key->direction == other_key->direction;
606 }
607
608 /**
609 * Calculate the priority of a policy
610 */
611 static inline u_int32_t get_priority(policy_entry_t *policy,
612 policy_priority_t prio)
613 {
614 u_int32_t priority = PRIO_BASE;
615 switch (prio)
616 {
617 case POLICY_PRIORITY_FALLBACK:
618 priority <<= 1;
619 /* fall-through */
620 case POLICY_PRIORITY_ROUTED:
621 priority <<= 1;
622 /* fall-through */
623 case POLICY_PRIORITY_DEFAULT:
624 break;
625 }
626 /* calculate priority based on selector size, small size = high prio */
627 priority -= policy->sel.prefixlen_s;
628 priority -= policy->sel.prefixlen_d;
629 priority <<= 2; /* make some room for the two flags */
630 priority += policy->sel.sport_mask || policy->sel.dport_mask ? 0 : 2;
631 priority += policy->sel.proto ? 0 : 1;
632 return priority;
633 }
634
635 /**
636 * Convert the general ipsec mode to the one defined in xfrm.h
637 */
638 static u_int8_t mode2kernel(ipsec_mode_t mode)
639 {
640 switch (mode)
641 {
642 case MODE_TRANSPORT:
643 return XFRM_MODE_TRANSPORT;
644 case MODE_TUNNEL:
645 return XFRM_MODE_TUNNEL;
646 case MODE_BEET:
647 return XFRM_MODE_BEET;
648 default:
649 return mode;
650 }
651 }
652
653 /**
654 * Convert a host_t to a struct xfrm_address
655 */
656 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
657 {
658 chunk_t chunk = host->get_address(host);
659 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
660 }
661
662 /**
663 * Convert a struct xfrm_address to a host_t
664 */
665 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
666 {
667 chunk_t chunk;
668
669 switch (family)
670 {
671 case AF_INET:
672 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
673 break;
674 case AF_INET6:
675 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
676 break;
677 default:
678 return NULL;
679 }
680 return host_create_from_chunk(family, chunk, ntohs(port));
681 }
682
683 /**
684 * Convert a traffic selector address range to subnet and its mask.
685 */
686 static void ts2subnet(traffic_selector_t* ts,
687 xfrm_address_t *net, u_int8_t *mask)
688 {
689 host_t *net_host;
690 chunk_t net_chunk;
691
692 ts->to_subnet(ts, &net_host, mask);
693 net_chunk = net_host->get_address(net_host);
694 memcpy(net, net_chunk.ptr, net_chunk.len);
695 net_host->destroy(net_host);
696 }
697
698 /**
699 * Convert a traffic selector port range to port/portmask
700 */
701 static void ts2ports(traffic_selector_t* ts,
702 u_int16_t *port, u_int16_t *mask)
703 {
704 /* Linux does not seem to accept complex portmasks. Only
705 * any or a specific port is allowed. We set to any, if we have
706 * a port range, or to a specific, if we have one port only.
707 */
708 u_int16_t from, to;
709
710 from = ts->get_from_port(ts);
711 to = ts->get_to_port(ts);
712
713 if (from == to)
714 {
715 *port = htons(from);
716 *mask = ~0;
717 }
718 else
719 {
720 *port = 0;
721 *mask = 0;
722 }
723 }
724
725 /**
726 * Convert a pair of traffic_selectors to an xfrm_selector
727 */
728 static struct xfrm_selector ts2selector(traffic_selector_t *src,
729 traffic_selector_t *dst)
730 {
731 struct xfrm_selector sel;
732
733 memset(&sel, 0, sizeof(sel));
734 sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
735 /* src or dest proto may be "any" (0), use more restrictive one */
736 sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
737 ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
738 ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
739 ts2ports(dst, &sel.dport, &sel.dport_mask);
740 ts2ports(src, &sel.sport, &sel.sport_mask);
741 sel.ifindex = 0;
742 sel.user = 0;
743
744 return sel;
745 }
746
/**
 * Convert an xfrm_selector to a src|dst traffic_selector.
 *
 * @param sel	kernel selector to convert
 * @param src	TRUE to extract the source side, FALSE for destination
 * @return		allocated traffic selector, or NULL on unknown family
 */
static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
{
	u_char *addr;
	u_int8_t prefixlen;
	u_int16_t port = 0;
	host_t *host = NULL;

	if (src)
	{
		addr = (u_char*)&sel->saddr;
		prefixlen = sel->prefixlen_s;
		/* only a set mask means the port is specific, see ts2ports() */
		if (sel->sport_mask)
		{
			port = htons(sel->sport);
		}
	}
	else
	{
		addr = (u_char*)&sel->daddr;
		prefixlen = sel->prefixlen_d;
		if (sel->dport_mask)
		{
			port = htons(sel->dport);
		}
	}

	/* The Linux 2.6 kernel does not set the selector's family field,
	 * so as a kludge we additionally test the prefix length.
	 */
	if (sel->family == AF_INET || sel->prefixlen_s == 32)
	{
		host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
	}
	else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
	{
		host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
	}

	if (host)
	{
		/* a zero port means "any": expand to the full port range */
		return traffic_selector_create_from_subnet(host, prefixlen,
											sel->proto, port, port ?: 65535);
	}
	return NULL;
}
795
/**
 * Process a XFRM_MSG_ACQUIRE from kernel
 *
 * The kernel sends an acquire when a policy matched traffic for which no
 * SA exists yet. The reqid and protocol are taken from the XFRMA_TMPL
 * attribute; the traffic selectors from the acquire's selector. The
 * request is then forwarded to the kernel interface for negotiation.
 */
static void process_acquire(private_kernel_netlink_ipsec_t *this,
							struct nlmsghdr *hdr)
{
	struct xfrm_user_acquire *acquire;
	struct rtattr *rta;
	size_t rtasize;
	traffic_selector_t *src_ts, *dst_ts;
	u_int32_t reqid = 0;
	int proto = 0;

	acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
	rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
	rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);

	DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");

	/* walk the attributes to find the SA template */
	while (RTA_OK(rta, rtasize))
	{
		DBG2(DBG_KNL, "  %N", xfrm_attr_type_names, rta->rta_type);

		if (rta->rta_type == XFRMA_TMPL)
		{
			struct xfrm_user_tmpl* tmpl;
			tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
			reqid = tmpl->reqid;
			proto = tmpl->id.proto;
		}
		rta = RTA_NEXT(rta, rtasize);
	}
	switch (proto)
	{
		case 0:
		case IPPROTO_ESP:
		case IPPROTO_AH:
			break;
		default:
			/* acquire for AH/ESP only, not for IPCOMP */
			return;
	}
	src_ts = selector2ts(&acquire->sel, TRUE);
	dst_ts = selector2ts(&acquire->sel, FALSE);

	hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
									 dst_ts);
}
844
845 /**
846 * Process a XFRM_MSG_EXPIRE from kernel
847 */
848 static void process_expire(private_kernel_netlink_ipsec_t *this,
849 struct nlmsghdr *hdr)
850 {
851 struct xfrm_user_expire *expire;
852 u_int32_t spi, reqid;
853 u_int8_t protocol;
854
855 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
856 protocol = expire->state.id.proto;
857 spi = expire->state.id.spi;
858 reqid = expire->state.reqid;
859
860 DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");
861
862 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
863 {
864 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
865 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
866 return;
867 }
868
869 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
870 spi, expire->hard != 0);
871 }
872
/**
 * Process a XFRM_MSG_MIGRATE from kernel
 *
 * Extracts the policy selectors, the key manager addresses (XFRMA_KMADDRESS)
 * and the per-SA migration entries (XFRMA_MIGRATE, logged only) and forwards
 * the migrate to the kernel interface if all required parts were present.
 */
static void process_migrate(private_kernel_netlink_ipsec_t *this,
							struct nlmsghdr *hdr)
{
	struct xfrm_userpolicy_id *policy_id;
	struct rtattr *rta;
	size_t rtasize;
	traffic_selector_t *src_ts, *dst_ts;
	host_t *local = NULL, *remote = NULL;
	host_t *old_src = NULL, *old_dst = NULL;
	host_t *new_src = NULL, *new_dst = NULL;
	u_int32_t reqid = 0;
	policy_dir_t dir;

	policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
	rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
	rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);

	DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");

	src_ts = selector2ts(&policy_id->sel, TRUE);
	dst_ts = selector2ts(&policy_id->sel, FALSE);
	dir = (policy_dir_t)policy_id->dir;

	DBG2(DBG_KNL, "  policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);

	while (RTA_OK(rta, rtasize))
	{
		DBG2(DBG_KNL, "  %N", xfrm_attr_type_names, rta->rta_type);
		if (rta->rta_type == XFRMA_KMADDRESS)
		{
			struct xfrm_user_kmaddress *kmaddress;

			kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
			local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
			remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
			DBG2(DBG_KNL, "  kmaddress: %H...%H", local, remote);
		}
		else if (rta->rta_type == XFRMA_MIGRATE)
		{
			struct xfrm_user_migrate *migrate;

			/* migration entries are only logged; the actual endpoint
			 * update is derived from the kmaddress pair above */
			migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
			old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
			old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
			new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
			new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
			reqid = migrate->reqid;
			DBG2(DBG_KNL, "  migrate %H...%H to %H...%H, reqid {%u}",
				 old_src, old_dst, new_src, new_dst, reqid);
			DESTROY_IF(old_src);
			DESTROY_IF(old_dst);
			DESTROY_IF(new_src);
			DESTROY_IF(new_dst);
		}
		rta = RTA_NEXT(rta, rtasize);
	}

	if (src_ts && dst_ts && local && remote)
	{	/* ownership of selectors and hosts passes to the kernel interface */
		hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
										 src_ts, dst_ts, dir, local, remote);
	}
	else
	{
		DESTROY_IF(src_ts);
		DESTROY_IF(dst_ts);
		DESTROY_IF(local);
		DESTROY_IF(remote);
	}
}
946
947 /**
948 * Process a XFRM_MSG_MAPPING from kernel
949 */
950 static void process_mapping(private_kernel_netlink_ipsec_t *this,
951 struct nlmsghdr *hdr)
952 {
953 struct xfrm_user_mapping *mapping;
954 u_int32_t spi, reqid;
955
956 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
957 spi = mapping->id.spi;
958 reqid = mapping->reqid;
959
960 DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");
961
962 if (mapping->id.proto == IPPROTO_ESP)
963 {
964 host_t *host;
965 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
966 mapping->new_sport);
967 if (host)
968 {
969 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
970 spi, host);
971 }
972 }
973 }
974
/**
 * Receives events from kernel
 *
 * Callback job reading one datagram from the xfrm event socket and
 * dispatching it to the process_*() handlers. Returns JOB_REQUEUE_DIRECT
 * to be rescheduled immediately, or JOB_REQUEUE_FAIR after an error.
 */
static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
{
	char response[1024];
	struct nlmsghdr *hdr = (struct nlmsghdr*)response;
	struct sockaddr_nl addr;
	socklen_t addr_len = sizeof(addr);
	int len;
	bool oldstate;

	/* only the blocking recvfrom() may be a cancellation point */
	oldstate = thread_cancelability(TRUE);
	len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
				   (struct sockaddr*)&addr, &addr_len);
	thread_cancelability(oldstate);

	if (len < 0)
	{
		switch (errno)
		{
			case EINTR:
				/* interrupted, try again */
				return JOB_REQUEUE_DIRECT;
			case EAGAIN:
				/* no data ready, select again */
				return JOB_REQUEUE_DIRECT;
			default:
				DBG1(DBG_KNL, "unable to receive from xfrm event socket");
				/* back off to avoid busy-looping on a persistent error */
				sleep(1);
				return JOB_REQUEUE_FAIR;
		}
	}

	if (addr.nl_pid != 0)
	{	/* not from kernel. not interested, try another one */
		return JOB_REQUEUE_DIRECT;
	}

	/* a single datagram may carry multiple netlink messages */
	while (NLMSG_OK(hdr, len))
	{
		switch (hdr->nlmsg_type)
		{
			case XFRM_MSG_ACQUIRE:
				process_acquire(this, hdr);
				break;
			case XFRM_MSG_EXPIRE:
				process_expire(this, hdr);
				break;
			case XFRM_MSG_MIGRATE:
				process_migrate(this, hdr);
				break;
			case XFRM_MSG_MAPPING:
				process_mapping(this, hdr);
				break;
			default:
				DBG1(DBG_KNL, "received unknown event from xfrm event "
					 "socket: %d", hdr->nlmsg_type);
				break;
		}
		hdr = NLMSG_NEXT(hdr, len);
	}
	return JOB_REQUEUE_DIRECT;
}
1039
/**
 * Described in kernel_ipsec_t: the netlink backend advertises support for
 * ESPv3 TFC padding.
 */
METHOD(kernel_ipsec_t, get_features, kernel_feature_t,
	private_kernel_netlink_ipsec_t *this)
{
	return KERNEL_ESP_V3_TFC;
}
1045
/**
 * Get an SPI for a specific protocol from the kernel.
 *
 * Sends an XFRM_MSG_ALLOCSPI request and parses the reply: an XFRM_MSG_NEWSA
 * carries the allocated SPI, an NLMSG_ERROR is logged as failure.
 *
 * @param src		source address of the prospective SA
 * @param dst		destination address of the prospective SA
 * @param proto		IPsec protocol (IPPROTO_ESP/AH/COMP)
 * @param min		lower bound of the SPI range to allocate from
 * @param max		upper bound of the SPI range to allocate from
 * @param reqid		reqid to associate the SPI with
 * @param spi		receives the allocated SPI (network byte order)
 * @return			SUCCESS if an SPI was allocated, FAILED otherwise
 */
static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
	host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
	u_int32_t reqid, u_int32_t *spi)
{
	netlink_buf_t request;
	struct nlmsghdr *hdr, *out;
	struct xfrm_userspi_info *userspi;
	u_int32_t received_spi = 0;
	size_t len;

	memset(&request, 0, sizeof(request));

	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST;
	hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));

	userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
	host2xfrm(src, &userspi->info.saddr);
	host2xfrm(dst, &userspi->info.id.daddr);
	userspi->info.id.proto = proto;
	userspi->info.mode = XFRM_MODE_TUNNEL;
	userspi->info.reqid = reqid;
	userspi->info.family = src->get_family(src);
	userspi->min = min;
	userspi->max = max;

	if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
	{
		hdr = out;
		while (NLMSG_OK(hdr, len))
		{
			switch (hdr->nlmsg_type)
			{
				case XFRM_MSG_NEWSA:
				{
					struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
					received_spi = usersa->id.spi;
					break;
				}
				case NLMSG_ERROR:
				{
					struct nlmsgerr *err = NLMSG_DATA(hdr);
					DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
						 strerror(-err->error), -err->error);
					break;
				}
				default:
					/* irrelevant message, advance and keep scanning */
					hdr = NLMSG_NEXT(hdr, len);
					continue;
				case NLMSG_DONE:
					break;
			}
			/* NEWSA, ERROR and DONE all terminate the scan */
			break;
		}
		free(out);
	}

	if (received_spi == 0)
	{
		return FAILED;
	}

	*spi = received_spi;
	return SUCCESS;
}
1115
1116 METHOD(kernel_ipsec_t, get_spi, status_t,
1117 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1118 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
1119 {
1120 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
1121
1122 if (get_spi_internal(this, src, dst, protocol,
1123 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
1124 {
1125 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
1126 return FAILED;
1127 }
1128
1129 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
1130 return SUCCESS;
1131 }
1132
1133 METHOD(kernel_ipsec_t, get_cpi, status_t,
1134 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1135 u_int32_t reqid, u_int16_t *cpi)
1136 {
1137 u_int32_t received_spi = 0;
1138
1139 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
1140
1141 if (get_spi_internal(this, src, dst, IPPROTO_COMP,
1142 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
1143 {
1144 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
1145 return FAILED;
1146 }
1147
1148 *cpi = htons((u_int16_t)ntohl(received_spi));
1149
1150 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
1151 return SUCCESS;
1152 }
1153
/**
 * Install a new SA in the kernel SAD.
 *
 * Builds an XFRM_MSG_UPDSA (inbound, SPI was reserved earlier) or
 * XFRM_MSG_NEWSA (outbound) netlink message in a stack buffer, appends one
 * rtattr per configured feature (AEAD/encryption, integrity, IPComp,
 * UDP encapsulation, mark, TFC padding, replay state) and sends it on the
 * XFRM socket.  If IPComp is requested with a non-zero CPI, an additional
 * IPComp SA is installed first via a recursive call (cpi = 0 marks that
 * inner call), and the ESP SA is then set up in transport mode.
 *
 * Returns SUCCESS, or FAILED if an algorithm is unsupported, the message
 * would overflow the request buffer, or the kernel rejects it.
 */
METHOD(kernel_ipsec_t, add_sa, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
	u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
	u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
	u_int16_t cpi, bool encap, bool esn, bool inbound,
	traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
{
	netlink_buf_t request;
	char *alg_name;
	struct nlmsghdr *hdr;
	struct xfrm_usersa_info *sa;
	u_int16_t icv_size = 64;	/* ICV length in bits; 64 = 8-byte ICV base,
								 * bumped below for 12/16-byte AEAD ICVs */
	status_t status = FAILED;

	/* if IPComp is used, we install an additional IPComp SA. if the cpi is 0
	 * we are in the recursive call below */
	if (ipcomp != IPCOMP_NONE && cpi != 0)
	{
		/* the IPComp SA gets no lifetime limits of its own */
		lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
		add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark,
			   tfc, &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED,
			   chunk_empty, mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
		ipcomp = IPCOMP_NONE;
		/* use transport mode ESP SA, IPComp uses tunnel mode */
		mode = MODE_TRANSPORT;
	}

	memset(&request, 0, sizeof(request));

	DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} (mark "
		 "%u/0x%08x)", ntohl(spi), reqid, mark.value, mark.mask);

	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));

	sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
	host2xfrm(src, &sa->saddr);
	host2xfrm(dst, &sa->id.daddr);
	sa->id.spi = spi;
	sa->id.proto = protocol;
	sa->family = src->get_family(src);
	sa->mode = mode2kernel(mode);
	switch (mode)
	{
		case MODE_TUNNEL:
			/* allow IPv4-in-IPv6 and vice versa (kernel >= 2.6.26) */
			sa->flags |= XFRM_STATE_AF_UNSPEC;
			break;
		case MODE_BEET:
		case MODE_TRANSPORT:
			/* narrow the SA to the negotiated traffic selectors, if given */
			if(src_ts && dst_ts)
			{
				sa->sel = ts2selector(src_ts, dst_ts);
			}
			break;
		default:
			break;
	}

	sa->reqid = reqid;
	sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
	sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
	sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
	sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
	/* we use lifetimes since added, not since used */
	sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
	sa->lft.hard_add_expires_seconds = lifetime->time.life;
	sa->lft.soft_use_expires_seconds = 0;
	sa->lft.hard_use_expires_seconds = 0;

	/* rthdr walks the attribute area following the fixed-size payload */
	struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);

	switch (enc_alg)
	{
		case ENCR_UNDEFINED:
			/* no encryption */
			break;
		/* combined-mode (AEAD) algorithms use XFRMA_ALG_AEAD; the fall-
		 * throughs accumulate the ICV size: 64 + 32 + 32 = 128 bits for
		 * ICV16, 64 + 32 = 96 for ICV12, 64 for ICV8 */
		case ENCR_AES_CCM_ICV16:
		case ENCR_AES_GCM_ICV16:
		case ENCR_NULL_AUTH_AES_GMAC:
		case ENCR_CAMELLIA_CCM_ICV16:
			icv_size += 32;
			/* FALL */
		case ENCR_AES_CCM_ICV12:
		case ENCR_AES_GCM_ICV12:
		case ENCR_CAMELLIA_CCM_ICV12:
			icv_size += 32;
			/* FALL */
		case ENCR_AES_CCM_ICV8:
		case ENCR_AES_GCM_ICV8:
		case ENCR_CAMELLIA_CCM_ICV8:
		{
			struct xfrm_algo_aead *algo;

			alg_name = lookup_algorithm(ENCRYPTION_ALGORITHM, enc_alg);
			if (alg_name == NULL)
			{
				DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
					 encryption_algorithm_names, enc_alg);
				goto failed;
			}
			DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
				 encryption_algorithm_names, enc_alg, enc_key.len * 8);

			rthdr->rta_type = XFRMA_ALG_AEAD;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) +
										enc_key.len);
			hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) +
							 RTA_ALIGN(rthdr->rta_len);
			/* bail out if the attribute would overflow the request buffer */
			if (hdr->nlmsg_len > sizeof(request))
			{
				goto failed;
			}

			algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
			algo->alg_key_len = enc_key.len * 8;
			algo->alg_icv_len = icv_size;
			strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
			algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
			memcpy(algo->alg_key, enc_key.ptr, enc_key.len);

			rthdr = XFRM_RTA_NEXT(rthdr);
			break;
		}
		default:
		{
			/* classic (non-AEAD) cipher, announced via XFRMA_ALG_CRYPT */
			struct xfrm_algo *algo;

			alg_name = lookup_algorithm(ENCRYPTION_ALGORITHM, enc_alg);
			if (alg_name == NULL)
			{
				DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
					 encryption_algorithm_names, enc_alg);
				goto failed;
			}
			DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
				 encryption_algorithm_names, enc_alg, enc_key.len * 8);

			rthdr->rta_type = XFRMA_ALG_CRYPT;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
			hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) +
							 RTA_ALIGN(rthdr->rta_len);
			if (hdr->nlmsg_len > sizeof(request))
			{
				goto failed;
			}

			algo = (struct xfrm_algo*)RTA_DATA(rthdr);
			algo->alg_key_len = enc_key.len * 8;
			strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
			algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
			memcpy(algo->alg_key, enc_key.ptr, enc_key.len);

			rthdr = XFRM_RTA_NEXT(rthdr);
		}
	}

	if (int_alg != AUTH_UNDEFINED)
	{
		u_int trunc_len = 0;

		alg_name = lookup_algorithm(INTEGRITY_ALGORITHM, int_alg);
		if (alg_name == NULL)
		{
			DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
				 integrity_algorithm_names, int_alg);
			goto failed;
		}
		DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
			 integrity_algorithm_names, int_alg, int_key.len * 8);

		/* algorithms whose truncation differs from the kernel default */
		switch (int_alg)
		{
			case AUTH_HMAC_MD5_128:
			case AUTH_HMAC_SHA2_256_128:
				trunc_len = 128;
				break;
			case AUTH_HMAC_SHA1_160:
				trunc_len = 160;
				break;
			default:
				break;
		}

		if (trunc_len)
		{
			struct xfrm_algo_auth* algo;

			/* the kernel uses SHA256 with 96 bit truncation by default,
			 * use specified truncation size supported by newer kernels.
			 * also use this for untruncated MD5 and SHA1. */
			rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) +
										int_key.len);
			hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) +
							 RTA_ALIGN(rthdr->rta_len);
			if (hdr->nlmsg_len > sizeof(request))
			{
				goto failed;
			}

			algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
			algo->alg_key_len = int_key.len * 8;
			algo->alg_trunc_len = trunc_len;
			strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
			algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
			memcpy(algo->alg_key, int_key.ptr, int_key.len);
		}
		else
		{
			/* default truncation: plain XFRMA_ALG_AUTH attribute */
			struct xfrm_algo* algo;

			rthdr->rta_type = XFRMA_ALG_AUTH;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);
			hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) +
							 RTA_ALIGN(rthdr->rta_len);
			if (hdr->nlmsg_len > sizeof(request))
			{
				goto failed;
			}

			algo = (struct xfrm_algo*)RTA_DATA(rthdr);
			algo->alg_key_len = int_key.len * 8;
			strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
			algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
			memcpy(algo->alg_key, int_key.ptr, int_key.len);
		}
		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (ipcomp != IPCOMP_NONE)
	{
		rthdr->rta_type = XFRMA_ALG_COMP;
		alg_name = lookup_algorithm(COMPRESSION_ALGORITHM, ipcomp);
		if (alg_name == NULL)
		{
			DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
				 ipcomp_transform_names, ipcomp);
			goto failed;
		}
		DBG2(DBG_KNL, " using compression algorithm %N",
			 ipcomp_transform_names, ipcomp);

		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
		hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rthdr->rta_len);
		if (hdr->nlmsg_len > sizeof(request))
		{
			goto failed;
		}

		/* compression algorithms carry no key */
		struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
		algo->alg_key_len = 0;
		strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
		algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';

		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (encap)
	{
		/* NAT traversal: wrap ESP in UDP between the IKE ports */
		struct xfrm_encap_tmpl *tmpl;

		rthdr->rta_type = XFRMA_ENCAP;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
		hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rthdr->rta_len);
		if (hdr->nlmsg_len > sizeof(request))
		{
			goto failed;
		}

		tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
		tmpl->encap_type = UDP_ENCAP_ESPINUDP;
		tmpl->encap_sport = htons(src->get_port(src));
		tmpl->encap_dport = htons(dst->get_port(dst));
		memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
		/* encap_oa could probably be derived from the
		 * traffic selectors [rfc4306, p39]. In the netlink kernel
		 * implementation pluto does the same as we do here but it uses
		 * encap_oa in the pfkey implementation.
		 * BUT as /usr/src/linux/net/key/af_key.c indicates the kernel ignores
		 * it anyway
		 * -> does that mean that NAT-T encap doesn't work in transport mode?
		 * No. The reason the kernel ignores NAT-OA is that it recomputes
		 * (or, rather, just ignores) the checksum. If packets pass the IPsec
		 * checks it marks them "checksum ok" so OA isn't needed. */
		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (mark.value)
	{
		struct xfrm_mark *mrk;

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));

		hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rthdr->rta_len);
		if (hdr->nlmsg_len > sizeof(request))
		{
			goto failed;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (tfc)
	{
		/* traffic flow confidentiality padding size */
		u_int32_t *tfcpad;

		rthdr->rta_type = XFRMA_TFCPAD;
		rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));
		hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rthdr->rta_len);
		if (hdr->nlmsg_len > sizeof(request))
		{
			goto failed;
		}

		tfcpad = (u_int32_t*)RTA_DATA(rthdr);
		*tfcpad = tfc;
		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (protocol != IPPROTO_COMP)
	{
		if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
		{
			/* for ESN or larger replay windows we need the new
			 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
			struct xfrm_replay_state_esn *replay;

			rthdr->rta_type = XFRMA_REPLAY_ESN_VAL;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
										(this->replay_window + 7) / 8);
			hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) +
							 RTA_ALIGN(rthdr->rta_len);
			if (hdr->nlmsg_len > sizeof(request))
			{
				goto failed;
			}

			replay = (struct xfrm_replay_state_esn*)RTA_DATA(rthdr);
			/* bmp_len contains the number of __u32's in the bitmap */
			replay->bmp_len = this->replay_bmp;
			replay->replay_window = this->replay_window;
			DBG2(DBG_KNL, " using replay window of %u packets",
				 this->replay_window);

			rthdr = XFRM_RTA_NEXT(rthdr);
			if (esn)
			{
				DBG2(DBG_KNL, " using extended sequence numbers (ESN)");
				sa->flags |= XFRM_STATE_ESN;
			}
		}
		else
		{
			/* small default window, configurable via the legacy field */
			DBG2(DBG_KNL, " using replay window of %u packets",
				 this->replay_window);
			sa->replay_window = this->replay_window;
		}
	}

	if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
	{
		if (mark.value)
		{
			DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
				 "(mark %u/0x%08x)", ntohl(spi), mark.value, mark.mask);
		}
		else
		{
			DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
		}
		goto failed;
	}

	status = SUCCESS;

failed:
	/* the request buffer contains key material, wipe it in any case */
	memwipe(request, sizeof(request));
	return status;
}
1540
1541 /**
1542 * Get the ESN replay state (i.e. sequence numbers) of an SA.
1543 *
1544 * Allocates into one the replay state structure we get from the kernel.
1545 */
1546 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1547 u_int32_t spi, u_int8_t protocol,
1548 host_t *dst, mark_t mark,
1549 struct xfrm_replay_state_esn **replay_esn,
1550 struct xfrm_replay_state **replay)
1551 {
1552 netlink_buf_t request;
1553 struct nlmsghdr *hdr, *out = NULL;
1554 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1555 size_t len;
1556 struct rtattr *rta;
1557 size_t rtasize;
1558
1559 memset(&request, 0, sizeof(request));
1560
1561 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1562 ntohl(spi));
1563
1564 hdr = (struct nlmsghdr*)request;
1565 hdr->nlmsg_flags = NLM_F_REQUEST;
1566 hdr->nlmsg_type = XFRM_MSG_GETAE;
1567 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1568
1569 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1570 aevent_id->flags = XFRM_AE_RVAL;
1571
1572 host2xfrm(dst, &aevent_id->sa_id.daddr);
1573 aevent_id->sa_id.spi = spi;
1574 aevent_id->sa_id.proto = protocol;
1575 aevent_id->sa_id.family = dst->get_family(dst);
1576
1577 if (mark.value)
1578 {
1579 struct xfrm_mark *mrk;
1580 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_aevent_id);
1581
1582 rthdr->rta_type = XFRMA_MARK;
1583 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1584 hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rthdr->rta_len);
1585 if (hdr->nlmsg_len > sizeof(request))
1586 {
1587 return;
1588 }
1589
1590 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1591 mrk->v = mark.value;
1592 mrk->m = mark.mask;
1593 }
1594
1595 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1596 {
1597 hdr = out;
1598 while (NLMSG_OK(hdr, len))
1599 {
1600 switch (hdr->nlmsg_type)
1601 {
1602 case XFRM_MSG_NEWAE:
1603 {
1604 out_aevent = NLMSG_DATA(hdr);
1605 break;
1606 }
1607 case NLMSG_ERROR:
1608 {
1609 struct nlmsgerr *err = NLMSG_DATA(hdr);
1610 DBG1(DBG_KNL, "querying replay state from SAD entry "
1611 "failed: %s (%d)", strerror(-err->error),
1612 -err->error);
1613 break;
1614 }
1615 default:
1616 hdr = NLMSG_NEXT(hdr, len);
1617 continue;
1618 case NLMSG_DONE:
1619 break;
1620 }
1621 break;
1622 }
1623 }
1624
1625 if (out_aevent)
1626 {
1627 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1628 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1629 while (RTA_OK(rta, rtasize))
1630 {
1631 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1632 RTA_PAYLOAD(rta) == sizeof(**replay))
1633 {
1634 *replay = malloc(RTA_PAYLOAD(rta));
1635 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1636 break;
1637 }
1638 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1639 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1640 {
1641 *replay_esn = malloc(RTA_PAYLOAD(rta));
1642 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1643 break;
1644 }
1645 rta = RTA_NEXT(rta, rtasize);
1646 }
1647 }
1648 free(out);
1649 }
1650
1651 METHOD(kernel_ipsec_t, query_sa, status_t,
1652 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1653 u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
1654 {
1655 netlink_buf_t request;
1656 struct nlmsghdr *out = NULL, *hdr;
1657 struct xfrm_usersa_id *sa_id;
1658 struct xfrm_usersa_info *sa = NULL;
1659 status_t status = FAILED;
1660 size_t len;
1661
1662 memset(&request, 0, sizeof(request));
1663
1664 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%08x)",
1665 ntohl(spi), mark.value, mark.mask);
1666
1667 hdr = (struct nlmsghdr*)request;
1668 hdr->nlmsg_flags = NLM_F_REQUEST;
1669 hdr->nlmsg_type = XFRM_MSG_GETSA;
1670 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1671
1672 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1673 host2xfrm(dst, &sa_id->daddr);
1674 sa_id->spi = spi;
1675 sa_id->proto = protocol;
1676 sa_id->family = dst->get_family(dst);
1677
1678 if (mark.value)
1679 {
1680 struct xfrm_mark *mrk;
1681 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1682
1683 rthdr->rta_type = XFRMA_MARK;
1684 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1685 hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rthdr->rta_len);
1686 if (hdr->nlmsg_len > sizeof(request))
1687 {
1688 return FAILED;
1689 }
1690
1691 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1692 mrk->v = mark.value;
1693 mrk->m = mark.mask;
1694 }
1695
1696 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1697 {
1698 hdr = out;
1699 while (NLMSG_OK(hdr, len))
1700 {
1701 switch (hdr->nlmsg_type)
1702 {
1703 case XFRM_MSG_NEWSA:
1704 {
1705 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1706 break;
1707 }
1708 case NLMSG_ERROR:
1709 {
1710 struct nlmsgerr *err = NLMSG_DATA(hdr);
1711
1712 if (mark.value)
1713 {
1714 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1715 "(mark %u/0x%08x) failed: %s (%d)",
1716 ntohl(spi), mark.value, mark.mask,
1717 strerror(-err->error), -err->error);
1718 }
1719 else
1720 {
1721 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1722 "failed: %s (%d)", ntohl(spi),
1723 strerror(-err->error), -err->error);
1724 }
1725 break;
1726 }
1727 default:
1728 hdr = NLMSG_NEXT(hdr, len);
1729 continue;
1730 case NLMSG_DONE:
1731 break;
1732 }
1733 break;
1734 }
1735 }
1736
1737 if (sa == NULL)
1738 {
1739 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1740 }
1741 else
1742 {
1743 *bytes = sa->curlft.bytes;
1744 status = SUCCESS;
1745 }
1746 memwipe(out, len);
1747 free(out);
1748 return status;
1749 }
1750
/**
 * Delete an SA from the kernel SAD.
 *
 * Sends an XFRM_MSG_DELSA message for the SA identified by dst/spi/protocol
 * (and mark, if set).  If a non-zero CPI is given, the additional IPComp SA
 * installed by add_sa() is deleted first via a recursive call (cpi = 0 in
 * that call terminates the recursion).
 *
 * Returns SUCCESS, NOT_FOUND if the kernel does not know the SA, or FAILED.
 */
METHOD(kernel_ipsec_t, del_sa, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
{
	netlink_buf_t request;
	struct nlmsghdr *hdr;
	struct xfrm_usersa_id *sa_id;

	/* if IPComp was used, we first delete the additional IPComp SA */
	if (cpi)
	{
		del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
	}

	memset(&request, 0, sizeof(request));

	DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%08x)",
		 ntohl(spi), mark.value, mark.mask);

	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	hdr->nlmsg_type = XFRM_MSG_DELSA;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));

	/* the SA is identified by destination, SPI, protocol and family */
	sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
	host2xfrm(dst, &sa_id->daddr);
	sa_id->spi = spi;
	sa_id->proto = protocol;
	sa_id->family = dst->get_family(dst);

	if (mark.value)
	{
		/* marked SAs additionally need the XFRMA_MARK attribute to match */
		struct xfrm_mark *mrk;
		struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
		hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rthdr->rta_len);
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	switch (this->socket_xfrm->send_ack(this->socket_xfrm, hdr))
	{
		case SUCCESS:
			DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%08x)",
				 ntohl(spi), mark.value, mark.mask);
			return SUCCESS;
		case NOT_FOUND:
			/* treat a missing SA as a distinct, non-fatal result */
			return NOT_FOUND;
		default:
			if (mark.value)
			{
				DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
					 "(mark %u/0x%08x)", ntohl(spi), mark.value, mark.mask);
			}
			else
			{
				DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x",
					 ntohl(spi));
			}
			return FAILED;
	}
}
1821
1822 METHOD(kernel_ipsec_t, update_sa, status_t,
1823 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1824 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1825 bool old_encap, bool new_encap, mark_t mark)
1826 {
1827 netlink_buf_t request;
1828 u_char *pos;
1829 struct nlmsghdr *hdr, *out = NULL;
1830 struct xfrm_usersa_id *sa_id;
1831 struct xfrm_usersa_info *out_sa = NULL, *sa;
1832 size_t len, newlen;
1833 struct rtattr *rta;
1834 size_t rtasize;
1835 struct xfrm_encap_tmpl* tmpl = NULL;
1836 struct xfrm_replay_state *replay = NULL;
1837 struct xfrm_replay_state_esn *replay_esn = NULL;
1838 status_t status = FAILED;
1839
1840 /* if IPComp is used, we first update the IPComp SA */
1841 if (cpi)
1842 {
1843 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1844 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1845 }
1846
1847 memset(&request, 0, sizeof(request));
1848
1849 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1850
1851 /* query the existing SA first */
1852 hdr = (struct nlmsghdr*)request;
1853 hdr->nlmsg_flags = NLM_F_REQUEST;
1854 hdr->nlmsg_type = XFRM_MSG_GETSA;
1855 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1856
1857 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1858 host2xfrm(dst, &sa_id->daddr);
1859 sa_id->spi = spi;
1860 sa_id->proto = protocol;
1861 sa_id->family = dst->get_family(dst);
1862
1863 if (mark.value)
1864 {
1865 struct xfrm_mark *mrk;
1866 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1867
1868 rthdr->rta_type = XFRMA_MARK;
1869 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1870 hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rthdr->rta_len);
1871 if (hdr->nlmsg_len > sizeof(request))
1872 {
1873 return FAILED;
1874 }
1875
1876 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1877 mrk->v = mark.value;
1878 mrk->m = mark.mask;
1879 }
1880
1881 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1882 {
1883 hdr = out;
1884 while (NLMSG_OK(hdr, len))
1885 {
1886 switch (hdr->nlmsg_type)
1887 {
1888 case XFRM_MSG_NEWSA:
1889 {
1890 out_sa = NLMSG_DATA(hdr);
1891 break;
1892 }
1893 case NLMSG_ERROR:
1894 {
1895 struct nlmsgerr *err = NLMSG_DATA(hdr);
1896 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1897 strerror(-err->error), -err->error);
1898 break;
1899 }
1900 default:
1901 hdr = NLMSG_NEXT(hdr, len);
1902 continue;
1903 case NLMSG_DONE:
1904 break;
1905 }
1906 break;
1907 }
1908 }
1909 if (out_sa == NULL)
1910 {
1911 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1912 goto failed;
1913 }
1914
1915 get_replay_state(this, spi, protocol, dst, mark, &replay_esn, &replay);
1916
1917 /* delete the old SA (without affecting the IPComp SA) */
1918 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1919 {
1920 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x",
1921 ntohl(spi));
1922 goto failed;
1923 }
1924
1925 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1926 ntohl(spi), src, dst, new_src, new_dst);
1927 /* copy over the SA from out to request */
1928 hdr = (struct nlmsghdr*)request;
1929 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1930 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1931 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1932 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1933 sa = NLMSG_DATA(hdr);
1934 sa->family = new_dst->get_family(new_dst);
1935
1936 if (!src->ip_equals(src, new_src))
1937 {
1938 host2xfrm(new_src, &sa->saddr);
1939 }
1940 if (!dst->ip_equals(dst, new_dst))
1941 {
1942 host2xfrm(new_dst, &sa->id.daddr);
1943 }
1944
1945 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1946 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1947 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1948 while(RTA_OK(rta, rtasize))
1949 {
1950 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1951 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1952 {
1953 if (rta->rta_type == XFRMA_ENCAP)
1954 { /* update encap tmpl */
1955 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1956 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1957 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1958 }
1959 memcpy(pos, rta, rta->rta_len);
1960 newlen = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rta->rta_len);
1961 pos += newlen - hdr->nlmsg_len;
1962 hdr->nlmsg_len = newlen;
1963 }
1964 rta = RTA_NEXT(rta, rtasize);
1965 }
1966
1967 rta = (struct rtattr*)pos;
1968 if (tmpl == NULL && new_encap)
1969 { /* add tmpl if we are enabling it */
1970 rta->rta_type = XFRMA_ENCAP;
1971 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1972 hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rta->rta_len);
1973 if (hdr->nlmsg_len > sizeof(request))
1974 {
1975 goto failed;
1976 }
1977
1978 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1979 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1980 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1981 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1982 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1983
1984 rta = XFRM_RTA_NEXT(rta);
1985 }
1986
1987 if (replay_esn)
1988 {
1989 rta->rta_type = XFRMA_REPLAY_ESN_VAL;
1990 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1991 this->replay_bmp);
1992 hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rta->rta_len);
1993 if (hdr->nlmsg_len > sizeof(request))
1994 {
1995 goto failed;
1996 }
1997 memcpy(RTA_DATA(rta), replay_esn,
1998 sizeof(struct xfrm_replay_state_esn) + this->replay_bmp);
1999
2000 rta = XFRM_RTA_NEXT(rta);
2001 }
2002 else if (replay)
2003 {
2004 rta->rta_type = XFRMA_REPLAY_VAL;
2005 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
2006 hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rta->rta_len);
2007 if (hdr->nlmsg_len > sizeof(request))
2008 {
2009 goto failed;
2010 }
2011 memcpy(RTA_DATA(rta), replay, sizeof(struct xfrm_replay_state));
2012
2013 rta = XFRM_RTA_NEXT(rta);
2014 }
2015 else
2016 {
2017 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
2018 "with SPI %.8x", ntohl(spi));
2019 }
2020
2021 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2022 {
2023 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
2024 goto failed;
2025 }
2026
2027 status = SUCCESS;
2028 failed:
2029 free(replay);
2030 free(replay_esn);
2031 memwipe(out, len);
2032 memwipe(request, sizeof(request));
2033 free(out);
2034
2035 return status;
2036 }
2037
2038 METHOD(kernel_ipsec_t, flush_sas, status_t,
2039 private_kernel_netlink_ipsec_t *this)
2040 {
2041 netlink_buf_t request;
2042 struct nlmsghdr *hdr;
2043 struct xfrm_usersa_flush *flush;
2044
2045 memset(&request, 0, sizeof(request));
2046
2047 DBG2(DBG_KNL, "flushing all SAD entries");
2048
2049 hdr = (struct nlmsghdr*)request;
2050 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2051 hdr->nlmsg_type = XFRM_MSG_FLUSHSA;
2052 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
2053
2054 flush = (struct xfrm_usersa_flush*)NLMSG_DATA(hdr);
2055 flush->proto = IPSEC_PROTO_ANY;
2056
2057 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2058 {
2059 DBG1(DBG_KNL, "unable to flush SAD entries");
2060 return FAILED;
2061 }
2062 return SUCCESS;
2063 }
2064
2065 /**
2066 * Add or update a policy in the kernel.
2067 *
2068 * Note: The mutex has to be locked when entering this function
2069 * and is unlocked here in any case.
2070 */
2071 static status_t add_policy_internal(private_kernel_netlink_ipsec_t *this,
2072 policy_entry_t *policy, policy_sa_t *mapping, bool update)
2073 {
2074 netlink_buf_t request;
2075 policy_entry_t clone;
2076 ipsec_sa_t *ipsec = mapping->sa;
2077 struct xfrm_userpolicy_info *policy_info;
2078 struct nlmsghdr *hdr;
2079 int i;
2080
2081 /* clone the policy so we are able to check it out again later */
2082 memcpy(&clone, policy, sizeof(policy_entry_t));
2083
2084 memset(&request, 0, sizeof(request));
2085 hdr = (struct nlmsghdr*)request;
2086 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2087 hdr->nlmsg_type = update ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
2088 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));
2089
2090 policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2091 policy_info->sel = policy->sel;
2092 policy_info->dir = policy->direction;
2093
2094 /* calculate priority based on selector size, small size = high prio */
2095 policy_info->priority = mapping->priority;
2096 policy_info->action = mapping->type != POLICY_DROP ? XFRM_POLICY_ALLOW
2097 : XFRM_POLICY_BLOCK;
2098 policy_info->share = XFRM_SHARE_ANY;
2099
2100 /* policies don't expire */
2101 policy_info->lft.soft_byte_limit = XFRM_INF;
2102 policy_info->lft.soft_packet_limit = XFRM_INF;
2103 policy_info->lft.hard_byte_limit = XFRM_INF;
2104 policy_info->lft.hard_packet_limit = XFRM_INF;
2105 policy_info->lft.soft_add_expires_seconds = 0;
2106 policy_info->lft.hard_add_expires_seconds = 0;
2107 policy_info->lft.soft_use_expires_seconds = 0;
2108 policy_info->lft.hard_use_expires_seconds = 0;
2109
2110 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);
2111
2112 if (mapping->type == POLICY_IPSEC)
2113 {
2114 struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
2115 struct {
2116 u_int8_t proto;
2117 bool use;
2118 } protos[] = {
2119 { IPPROTO_COMP, ipsec->cfg.ipcomp.transform != IPCOMP_NONE },
2120 { IPPROTO_ESP, ipsec->cfg.esp.use },
2121 { IPPROTO_AH, ipsec->cfg.ah.use },
2122 };
2123 ipsec_mode_t proto_mode = ipsec->cfg.mode;
2124
2125 rthdr->rta_type = XFRMA_TMPL;
2126 rthdr->rta_len = 0; /* actual length is set below */
2127
2128 for (i = 0; i < countof(protos); i++)
2129 {
2130 if (!protos[i].use)
2131 {
2132 continue;
2133 }
2134
2135 rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
2136 hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) +
2137 RTA_ALIGN(RTA_LENGTH(sizeof(struct xfrm_user_tmpl)));
2138 if (hdr->nlmsg_len > sizeof(request))
2139 {
2140 this->mutex->unlock(this->mutex);
2141 return FAILED;
2142 }
2143
2144 tmpl->reqid = ipsec->cfg.reqid;
2145 tmpl->id.proto = protos[i].proto;
2146 tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
2147 tmpl->mode = mode2kernel(proto_mode);
2148 tmpl->optional = protos[i].proto == IPPROTO_COMP &&
2149 policy->direction != POLICY_OUT;
2150 tmpl->family = ipsec->src->get_family(ipsec->src);
2151
2152 if (proto_mode == MODE_TUNNEL)
2153 { /* only for tunnel mode */
2154 host2xfrm(ipsec->src, &tmpl->saddr);
2155 host2xfrm(ipsec->dst, &tmpl->id.daddr);
2156 }
2157
2158 tmpl++;
2159
2160 /* use transport mode for other SAs */
2161 proto_mode = MODE_TRANSPORT;
2162 }
2163
2164 rthdr = XFRM_RTA_NEXT(rthdr);
2165 }
2166
2167 if (ipsec->mark.value)
2168 {
2169 struct xfrm_mark *mrk;
2170
2171 rthdr->rta_type = XFRMA_MARK;
2172 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2173
2174 hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rthdr->rta_len);
2175 if (hdr->nlmsg_len > sizeof(request))
2176 {
2177 this->mutex->unlock(this->mutex);
2178 return FAILED;
2179 }
2180
2181 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2182 mrk->v = ipsec->mark.value;
2183 mrk->m = ipsec->mark.mask;
2184 }
2185 this->mutex->unlock(this->mutex);
2186
2187 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2188 {
2189 return FAILED;
2190 }
2191
2192 /* find the policy again */
2193 this->mutex->lock(this->mutex);
2194 policy = this->policies->get(this->policies, &clone);
2195 if (!policy ||
2196 policy->used_by->find_first(policy->used_by,
2197 NULL, (void**)&mapping) != SUCCESS)
2198 { /* policy or mapping is already gone, ignore */
2199 this->mutex->unlock(this->mutex);
2200 return SUCCESS;
2201 }
2202
2203 /* install a route, if:
2204 * - this is a forward policy (to just get one for each child)
2205 * - we are in tunnel/BEET mode or install a bypass policy
2206 * - routing is not disabled via strongswan.conf
2207 */
2208 if (policy->direction == POLICY_FWD && this->install_routes &&
2209 (mapping->type != POLICY_IPSEC || ipsec->cfg.mode != MODE_TRANSPORT))
2210 {
2211 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)mapping;
2212 route_entry_t *route;
2213 host_t *iface;
2214
2215 INIT(route,
2216 .prefixlen = policy->sel.prefixlen_s,
2217 );
2218
2219 if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
2220 fwd->dst_ts, &route->src_ip) == SUCCESS)
2221 {
2222 /* get the nexthop to src (src as we are in POLICY_FWD) */
2223 route->gateway = hydra->kernel_interface->get_nexthop(
2224 hydra->kernel_interface, ipsec->src,
2225 ipsec->dst);
2226 route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
2227 memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
2228
2229 /* get the interface to install the route for. If we have a local
2230 * address, use it. Otherwise (for shunt policies) use the
2231 * routes source address. */
2232 iface = ipsec->dst;
2233 if (iface->is_anyaddr(iface))
2234 {
2235 iface = route->src_ip;
2236 }
2237 /* install route via outgoing interface */
2238 if (!hydra->kernel_interface->get_interface(hydra->kernel_interface,
2239 iface, &route->if_name))
2240 {
2241 this->mutex->unlock(this->mutex);
2242 route_entry_destroy(route);
2243 return SUCCESS;
2244 }
2245
2246 if (policy->route)
2247 {
2248 route_entry_t *old = policy->route;
2249 if (route_entry_equals(old, route))
2250 {
2251 this->mutex->unlock(this->mutex);
2252 route_entry_destroy(route);
2253 return SUCCESS;
2254 }
2255 /* uninstall previously installed route */
2256 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2257 old->dst_net, old->prefixlen, old->gateway,
2258 old->src_ip, old->if_name) != SUCCESS)
2259 {
2260 DBG1(DBG_KNL, "error uninstalling route installed with "
2261 "policy %R === %R %N", fwd->src_ts,
2262 fwd->dst_ts, policy_dir_names,
2263 policy->direction);
2264 }
2265 route_entry_destroy(old);
2266 policy->route = NULL;
2267 }
2268
2269 DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
2270 fwd->src_ts, route->gateway, route->src_ip, route->if_name);
2271 switch (hydra->kernel_interface->add_route(
2272 hydra->kernel_interface, route->dst_net,
2273 route->prefixlen, route->gateway,
2274 route->src_ip, route->if_name))
2275 {
2276 default:
2277 DBG1(DBG_KNL, "unable to install source route for %H",
2278 route->src_ip);
2279 /* FALL */
2280 case ALREADY_DONE:
2281 /* route exists, do not uninstall */
2282 route_entry_destroy(route);
2283 break;
2284 case SUCCESS:
2285 /* cache the installed route */
2286 policy->route = route;
2287 break;
2288 }
2289 }
2290 else
2291 {
2292 free(route);
2293 }
2294 }
2295 this->mutex->unlock(this->mutex);
2296 return SUCCESS;
2297 }
2298
/*
 * Install or update a policy in the kernel.  Policies are refcounted: if an
 * identical policy (same selector, mark and direction) already exists, the
 * new SA is only appended to its used_by list, and the kernel policy is
 * rewritten only if the new SA takes precedence over the currently
 * installed one.
 */
METHOD(kernel_ipsec_t, add_policy, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
	policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
	mark_t mark, policy_priority_t priority)
{
	policy_entry_t *policy, *current;
	policy_sa_t *assigned_sa, *current_sa;
	enumerator_t *enumerator;
	bool found = FALSE, update = TRUE;

	/* create a policy */
	INIT(policy,
		.sel = ts2selector(src_ts, dst_ts),
		.mark = mark.value & mark.mask,
		.direction = direction,
	);

	/* find the policy, which matches EXACTLY */
	this->mutex->lock(this->mutex);
	current = this->policies->get(this->policies, policy);
	if (current)
	{
		/* use existing policy */
		DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%08x) "
					  "already exists, increasing refcount",
			 src_ts, dst_ts, policy_dir_names, direction,
			 mark.value, mark.mask);
		policy_entry_destroy(this, policy);
		policy = current;
		found = TRUE;
	}
	else
	{	/* use the new one, if we have no such policy */
		policy->used_by = linked_list_create();
		this->policies->put(this->policies, policy, policy);
	}

	/* cache the assigned IPsec SA */
	assigned_sa = policy_sa_create(this, direction, type, src, dst, src_ts,
								   dst_ts, mark, sa);
	assigned_sa->priority = get_priority(policy, priority);

	if (this->policy_history)
	{	/* insert the SA according to its priority */
		enumerator = policy->used_by->create_enumerator(policy->used_by);
		while (enumerator->enumerate(enumerator, (void**)&current_sa))
		{
			if (current_sa->priority >= assigned_sa->priority)
			{
				break;
			}
			/* we passed an SA that takes precedence over the new one, so
			 * the kernel policy does not have to be rewritten */
			update = FALSE;
		}
		policy->used_by->insert_before(policy->used_by, enumerator,
									   assigned_sa);
		enumerator->destroy(enumerator);
	}
	else
	{	/* simply insert it last and only update if it is not installed yet */
		policy->used_by->insert_last(policy->used_by, assigned_sa);
		update = !found;
	}

	if (!update)
	{	/* we don't update the policy if the priority is lower than that of
		 * the currently installed one */
		this->mutex->unlock(this->mutex);
		return SUCCESS;
	}

	DBG2(DBG_KNL, "%s policy %R === %R %N (mark %u/0x%08x)",
		 found ? "updating" : "adding", src_ts, dst_ts,
		 policy_dir_names, direction, mark.value, mark.mask);

	/* NOTE(review): no unlock on these paths - add_policy_internal() appears
	 * to release this->mutex on all of its paths; confirm before changing */
	if (add_policy_internal(this, policy, assigned_sa, found) != SUCCESS)
	{
		DBG1(DBG_KNL, "unable to %s policy %R === %R %N",
			 found ? "update" : "add", src_ts, dst_ts,
			 policy_dir_names, direction);
		return FAILED;
	}
	return SUCCESS;
}
2383
/*
 * Query the kernel for a policy (XFRM_MSG_GETPOLICY) to read its last use
 * time, converted from system time to the monotonic clock.
 */
METHOD(kernel_ipsec_t, query_policy, status_t,
	private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
	traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
	u_int32_t *use_time)
{
	netlink_buf_t request;
	struct nlmsghdr *out = NULL, *hdr;
	struct xfrm_userpolicy_id *policy_id;
	struct xfrm_userpolicy_info *policy = NULL;
	size_t len;

	memset(&request, 0, sizeof(request));

	DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%08x)",
		 src_ts, dst_ts, policy_dir_names, direction,
		 mark.value, mark.mask);

	/* build the GETPOLICY request; the policy is identified by its
	 * traffic selector and direction */
	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST;
	hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));

	policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
	policy_id->sel = ts2selector(src_ts, dst_ts);
	policy_id->dir = direction;

	/* append an XFRMA_MARK attribute to match a marked policy */
	if (mark.value)
	{
		struct xfrm_mark *mrk;
		struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
		hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rthdr->rta_len);
		if (hdr->nlmsg_len > sizeof(request))
		{	/* attribute does not fit into the request buffer */
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	/* send the request and scan the reply for the policy info */
	if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
	{
		hdr = out;
		while (NLMSG_OK(hdr, len))
		{
			switch (hdr->nlmsg_type)
			{
				case XFRM_MSG_NEWPOLICY:
				{
					/* the kernel answers with the full policy info */
					policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
					break;
				}
				case NLMSG_ERROR:
				{
					struct nlmsgerr *err = NLMSG_DATA(hdr);
					DBG1(DBG_KNL, "querying policy failed: %s (%d)",
						 strerror(-err->error), -err->error);
					break;
				}
				default:
					/* unexpected message type, skip it */
					hdr = NLMSG_NEXT(hdr, len);
					continue;
				case NLMSG_DONE:
					break;
			}
			/* NEWPOLICY, NLMSG_ERROR and NLMSG_DONE all end the scan */
			break;
		}
	}

	if (policy == NULL)
	{
		DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
			 policy_dir_names, direction);
		free(out);
		return FAILED;
	}

	if (policy->curlft.use_time)
	{
		/* we need the monotonic time, but the kernel returns system time. */
		*use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
	}
	else
	{
		/* policy was never used */
		*use_time = 0;
	}

	free(out);
	return SUCCESS;
}
2478
/*
 * Remove one of the SAs assigned to a policy.  The kernel policy (and any
 * route installed with it) is removed only when its last SA is gone; while
 * other SAs remain, the policy is at most re-installed for the SA that now
 * takes precedence.
 */
METHOD(kernel_ipsec_t, del_policy, status_t,
	private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
	traffic_selector_t *dst_ts, policy_dir_t direction, u_int32_t reqid,
	mark_t mark, policy_priority_t prio)
{
	policy_entry_t *current, policy;
	enumerator_t *enumerator;
	policy_sa_t *mapping;
	netlink_buf_t request;
	struct nlmsghdr *hdr;
	struct xfrm_userpolicy_id *policy_id;
	bool is_installed = TRUE;
	u_int32_t priority;

	DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x)",
		 src_ts, dst_ts, policy_dir_names, direction,
		 mark.value, mark.mask);

	/* create a policy (lookup key only, lives on the stack) */
	memset(&policy, 0, sizeof(policy_entry_t));
	policy.sel = ts2selector(src_ts, dst_ts);
	policy.mark = mark.value & mark.mask;
	policy.direction = direction;

	/* find the policy */
	this->mutex->lock(this->mutex);
	current = this->policies->get(this->policies, &policy);
	if (!current)
	{
		if (mark.value)
		{
			DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x) "
				 "failed, not found", src_ts, dst_ts, policy_dir_names,
				 direction, mark.value, mark.mask);
		}
		else
		{
			DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
				 src_ts, dst_ts, policy_dir_names, direction);
		}
		this->mutex->unlock(this->mutex);
		return NOT_FOUND;
	}

	if (this->policy_history)
	{	/* remove mapping to SA by reqid and priority */
		priority = get_priority(current, prio);
		enumerator = current->used_by->create_enumerator(current->used_by);
		while (enumerator->enumerate(enumerator, (void**)&mapping))
		{
			if (reqid == mapping->sa->cfg.reqid &&
				priority == mapping->priority)
			{
				current->used_by->remove_at(current->used_by, enumerator);
				policy_sa_destroy(mapping, &direction, this);
				break;
			}
			/* used_by is sorted by priority (see add_policy), so if we get
			 * here an SA ahead of the removed one takes precedence and the
			 * removed SA is not the one installed in the kernel */
			is_installed = FALSE;
		}
		enumerator->destroy(enumerator);
	}
	else
	{	/* remove one of the SAs but don't update the policy */
		current->used_by->remove_last(current->used_by, (void**)&mapping);
		policy_sa_destroy(mapping, &direction, this);
		is_installed = FALSE;
	}

	if (current->used_by->get_count(current->used_by) > 0)
	{	/* policy is used by more SAs, keep in kernel */
		DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
		if (!is_installed)
		{	/* no need to update as the policy was not installed for this SA */
			this->mutex->unlock(this->mutex);
			return SUCCESS;
		}

		DBG2(DBG_KNL, "updating policy %R === %R %N (mark %u/0x%08x)",
			 src_ts, dst_ts, policy_dir_names, direction,
			 mark.value, mark.mask);

		/* re-install the policy for the highest-priority remaining SA.
		 * NOTE(review): no unlock on these paths - add_policy_internal()
		 * appears to release this->mutex on all of its paths; confirm
		 * before changing */
		current->used_by->get_first(current->used_by, (void**)&mapping);
		if (add_policy_internal(this, current, mapping, TRUE) != SUCCESS)
		{
			DBG1(DBG_KNL, "unable to update policy %R === %R %N",
				 src_ts, dst_ts, policy_dir_names, direction);
			return FAILED;
		}
		return SUCCESS;
	}

	/* last SA removed: build an XFRM_MSG_DELPOLICY request to remove the
	 * policy from the kernel */
	memset(&request, 0, sizeof(request));

	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));

	policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
	policy_id->sel = current->sel;
	policy_id->dir = direction;

	/* append an XFRMA_MARK attribute if the policy uses a mark */
	if (mark.value)
	{
		struct xfrm_mark *mrk;
		struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
		hdr->nlmsg_len = NLMSG_ALIGN(hdr->nlmsg_len) + RTA_ALIGN(rthdr->rta_len);
		if (hdr->nlmsg_len > sizeof(request))
		{	/* attribute does not fit into the request buffer */
			this->mutex->unlock(this->mutex);
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	/* uninstall any route that was installed alongside this policy */
	if (current->route)
	{
		route_entry_t *route = current->route;
		if (hydra->kernel_interface->del_route(hydra->kernel_interface,
				route->dst_net, route->prefixlen, route->gateway,
				route->src_ip, route->if_name) != SUCCESS)
		{
			DBG1(DBG_KNL, "error uninstalling route installed with "
				 "policy %R === %R %N", src_ts, dst_ts,
				 policy_dir_names, direction);
		}
	}

	/* drop the cached entry before talking to the kernel */
	this->policies->remove(this->policies, current);
	policy_entry_destroy(this, current);
	this->mutex->unlock(this->mutex);

	if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
	{
		if (mark.value)
		{
			DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
				 "(mark %u/0x%08x)", src_ts, dst_ts, policy_dir_names,
				 direction, mark.value, mark.mask);
		}
		else
		{
			DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
				 src_ts, dst_ts, policy_dir_names, direction);
		}
		return FAILED;
	}
	return SUCCESS;
}
2634
2635 METHOD(kernel_ipsec_t, flush_policies, status_t,
2636 private_kernel_netlink_ipsec_t *this)
2637 {
2638 netlink_buf_t request;
2639 struct nlmsghdr *hdr;
2640
2641 memset(&request, 0, sizeof(request));
2642
2643 DBG2(DBG_KNL, "flushing all policies from SPD");
2644
2645 hdr = (struct nlmsghdr*)request;
2646 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2647 hdr->nlmsg_type = XFRM_MSG_FLUSHPOLICY;
2648 hdr->nlmsg_len = NLMSG_LENGTH(0); /* no data associated */
2649
2650 /* by adding an rtattr of type XFRMA_POLICY_TYPE we could restrict this
2651 * to main or sub policies (default is main) */
2652
2653 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2654 {
2655 DBG1(DBG_KNL, "unable to flush SPD entries");
2656 return FAILED;
2657 }
2658 return SUCCESS;
2659 }
2660
2661
2662 METHOD(kernel_ipsec_t, bypass_socket, bool,
2663 private_kernel_netlink_ipsec_t *this, int fd, int family)
2664 {
2665 struct xfrm_userpolicy_info policy;
2666 u_int sol, ipsec_policy;
2667
2668 switch (family)
2669 {
2670 case AF_INET:
2671 sol = SOL_IP;
2672 ipsec_policy = IP_XFRM_POLICY;
2673 break;
2674 case AF_INET6:
2675 sol = SOL_IPV6;
2676 ipsec_policy = IPV6_XFRM_POLICY;
2677 break;
2678 default:
2679 return FALSE;
2680 }
2681
2682 memset(&policy, 0, sizeof(policy));
2683 policy.action = XFRM_POLICY_ALLOW;
2684 policy.sel.family = family;
2685
2686 policy.dir = XFRM_POLICY_OUT;
2687 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2688 {
2689 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2690 strerror(errno));
2691 return FALSE;
2692 }
2693 policy.dir = XFRM_POLICY_IN;
2694 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2695 {
2696 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2697 strerror(errno));
2698 return FALSE;
2699 }
2700 return TRUE;
2701 }
2702
2703 METHOD(kernel_ipsec_t, enable_udp_decap, bool,
2704 private_kernel_netlink_ipsec_t *this, int fd, int family, u_int16_t port)
2705 {
2706 int type = UDP_ENCAP_ESPINUDP;
2707
2708 if (setsockopt(fd, SOL_UDP, UDP_ENCAP, &type, sizeof(type)) < 0)
2709 {
2710 DBG1(DBG_KNL, "unable to set UDP_ENCAP: %s", strerror(errno));
2711 return FALSE;
2712 }
2713 return TRUE;
2714 }
2715
2716 METHOD(kernel_ipsec_t, destroy, void,
2717 private_kernel_netlink_ipsec_t *this)
2718 {
2719 enumerator_t *enumerator;
2720 policy_entry_t *policy;
2721
2722 if (this->socket_xfrm_events > 0)
2723 {
2724 close(this->socket_xfrm_events);
2725 }
2726 DESTROY_IF(this->socket_xfrm);
2727 enumerator = this->policies->create_enumerator(this->policies);
2728 while (enumerator->enumerate(enumerator, &policy, &policy))
2729 {
2730 policy_entry_destroy(this, policy);
2731 }
2732 enumerator->destroy(enumerator);
2733 this->policies->destroy(this->policies);
2734 this->sas->destroy(this->sas);
2735 this->mutex->destroy(this->mutex);
2736 free(this);
2737 }
2738
2739 /*
2740 * Described in header.
2741 */
2742 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2743 {
2744 private_kernel_netlink_ipsec_t *this;
2745 bool register_for_events = TRUE;
2746 int fd;
2747
2748 INIT(this,
2749 .public = {
2750 .interface = {
2751 .get_features = _get_features,
2752 .get_spi = _get_spi,
2753 .get_cpi = _get_cpi,
2754 .add_sa = _add_sa,
2755 .update_sa = _update_sa,
2756 .query_sa = _query_sa,
2757 .del_sa = _del_sa,
2758 .flush_sas = _flush_sas,
2759 .add_policy = _add_policy,
2760 .query_policy = _query_policy,
2761 .del_policy = _del_policy,
2762 .flush_policies = _flush_policies,
2763 .bypass_socket = _bypass_socket,
2764 .enable_udp_decap = _enable_udp_decap,
2765 .destroy = _destroy,
2766 },
2767 },
2768 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2769 (hashtable_equals_t)policy_equals, 32),
2770 .sas = hashtable_create((hashtable_hash_t)ipsec_sa_hash,
2771 (hashtable_equals_t)ipsec_sa_equals, 32),
2772 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2773 .policy_history = TRUE,
2774 .install_routes = lib->settings->get_bool(lib->settings,
2775 "%s.install_routes", TRUE, hydra->daemon),
2776 .replay_window = lib->settings->get_int(lib->settings,
2777 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2778 );
2779
2780 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2781 (sizeof(u_int32_t) * 8);
2782
2783 if (streq(hydra->daemon, "pluto"))
2784 { /* no routes for pluto, they are installed via updown script */
2785 this->install_routes = FALSE;
2786 /* no policy history for pluto */
2787 this->policy_history = FALSE;
2788 }
2789 else if (streq(hydra->daemon, "starter"))
2790 { /* starter has no threads, so we do not register for kernel events */
2791 register_for_events = FALSE;
2792 }
2793
2794 /* disable lifetimes for allocated SPIs in kernel */
2795 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2796 if (fd > 0)
2797 {
2798 ignore_result(write(fd, "165", 3));
2799 close(fd);
2800 }
2801
2802 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2803 if (!this->socket_xfrm)
2804 {
2805 destroy(this);
2806 return NULL;
2807 }
2808
2809 if (register_for_events)
2810 {
2811 struct sockaddr_nl addr;
2812
2813 memset(&addr, 0, sizeof(addr));
2814 addr.nl_family = AF_NETLINK;
2815
2816 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2817 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2818 if (this->socket_xfrm_events <= 0)
2819 {
2820 DBG1(DBG_KNL, "unable to create XFRM event socket");
2821 destroy(this);
2822 return NULL;
2823 }
2824 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2825 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2826 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2827 {
2828 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2829 destroy(this);
2830 return NULL;
2831 }
2832 lib->processor->queue_job(lib->processor,