strongswan.git: src/libhydra/plugins/kernel_netlink/kernel_netlink_ipsec.c
1 /*
2 * Copyright (C) 2006-2012 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <utils/debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <collections/hashtable.h>
43 #include <collections/linked_list.h>
44 #include <processing/jobs/callback_job.h>
45
46 /** Required for Linux 2.6.26 kernel and later */
47 #ifndef XFRM_STATE_AF_UNSPEC
48 #define XFRM_STATE_AF_UNSPEC 32
49 #endif
50
51 /** From linux/in.h */
52 #ifndef IP_XFRM_POLICY
53 #define IP_XFRM_POLICY 17
54 #endif
55
56 /** Missing on uclibc */
57 #ifndef IPV6_XFRM_POLICY
58 #define IPV6_XFRM_POLICY 34
59 #endif /*IPV6_XFRM_POLICY*/
60
61 /* from linux/udp.h */
62 #ifndef UDP_ENCAP
63 #define UDP_ENCAP 100
64 #endif
65
66 #ifndef UDP_ENCAP_ESPINUDP
67 #define UDP_ENCAP_ESPINUDP 2
68 #endif
69
70 /* this is not defined on some platforms */
71 #ifndef SOL_UDP
72 #define SOL_UDP IPPROTO_UDP
73 #endif
74
75 /** Default priority of installed policies */
76 #define PRIO_BASE 512
77
78 /** Default replay window size, if not set using charon.replay_window */
79 #define DEFAULT_REPLAY_WINDOW 32
80
81 /**
82 * Map a byte/packet limit of 0 (i.e. no limit configured) to XFRM_INF
83 */
84 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
85
86 /**
87 * Create ORable bitfield of XFRM NL groups
88 */
89 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
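/* e.g. XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) yields an nl_groups bitmask
 * suitable for subscribing the xfrm event socket to those multicast groups */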
90
91 /**
92 * Returns a pointer to the first rtattr following the nlmsghdr *nlh and the
93 * 'usual' netlink data x like 'struct xfrm_usersa_info'
94 */
95 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + \
96 NLMSG_ALIGN(sizeof(x))))
97 /**
98 * Returns a pointer to the next rtattr following rta.
99 * !!! Do not use this to parse messages. Use RTA_NEXT and RTA_OK instead !!!
100 */
101 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + \
102 RTA_ALIGN((rta)->rta_len)))
103 /**
104 * Returns the total size of attached rta data
105 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
106 */
107 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
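/* XFRM_RTA and XFRM_RTA_NEXT support the attribute-appending pattern used
 * when building requests throughout this file, e.g.:
 *
 *   struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);
 *   rthdr->rta_type = XFRMA_MARK;
 *   rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
 *   hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
 *   ...check against sizeof(request), then fill RTA_DATA(rthdr)...
 *   rthdr = XFRM_RTA_NEXT(rthdr);
 *
 * XFRM_RTA and XFRM_PAYLOAD are also used together with RTA_OK/RTA_NEXT to
 * iterate over the attributes of received messages. */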
108
109 typedef struct kernel_algorithm_t kernel_algorithm_t;
110
111 /**
112 * Mapping of IKEv2 kernel identifier to linux crypto API names
113 */
114 struct kernel_algorithm_t {
115 /**
116 * Identifier specified in IKEv2
117 */
118 int ikev2;
119
120 /**
121 * Name of the algorithm in linux crypto API
122 */
123 char *name;
124 };
125
126 ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
127 "XFRM_MSG_NEWSA",
128 "XFRM_MSG_DELSA",
129 "XFRM_MSG_GETSA",
130 "XFRM_MSG_NEWPOLICY",
131 "XFRM_MSG_DELPOLICY",
132 "XFRM_MSG_GETPOLICY",
133 "XFRM_MSG_ALLOCSPI",
134 "XFRM_MSG_ACQUIRE",
135 "XFRM_MSG_EXPIRE",
136 "XFRM_MSG_UPDPOLICY",
137 "XFRM_MSG_UPDSA",
138 "XFRM_MSG_POLEXPIRE",
139 "XFRM_MSG_FLUSHSA",
140 "XFRM_MSG_FLUSHPOLICY",
141 "XFRM_MSG_NEWAE",
142 "XFRM_MSG_GETAE",
143 "XFRM_MSG_REPORT",
144 "XFRM_MSG_MIGRATE",
145 "XFRM_MSG_NEWSADINFO",
146 "XFRM_MSG_GETSADINFO",
147 "XFRM_MSG_NEWSPDINFO",
148 "XFRM_MSG_GETSPDINFO",
149 "XFRM_MSG_MAPPING"
150 );
151
152 ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_REPLAY_ESN_VAL,
153 "XFRMA_UNSPEC",
154 "XFRMA_ALG_AUTH",
155 "XFRMA_ALG_CRYPT",
156 "XFRMA_ALG_COMP",
157 "XFRMA_ENCAP",
158 "XFRMA_TMPL",
159 "XFRMA_SA",
160 "XFRMA_POLICY",
161 "XFRMA_SEC_CTX",
162 "XFRMA_LTIME_VAL",
163 "XFRMA_REPLAY_VAL",
164 "XFRMA_REPLAY_THRESH",
165 "XFRMA_ETIMER_THRESH",
166 "XFRMA_SRCADDR",
167 "XFRMA_COADDR",
168 "XFRMA_LASTUSED",
169 "XFRMA_POLICY_TYPE",
170 "XFRMA_MIGRATE",
171 "XFRMA_ALG_AEAD",
172 "XFRMA_KMADDRESS",
173 "XFRMA_ALG_AUTH_TRUNC",
174 "XFRMA_MARK",
175 "XFRMA_TFCPAD",
176 "XFRMA_REPLAY_ESN_VAL",
177 );
178
179 #define END_OF_LIST -1
180
181 /**
182 * Algorithms for encryption
183 */
184 static kernel_algorithm_t encryption_algs[] = {
185 /* {ENCR_DES_IV64, "***" }, */
186 {ENCR_DES, "des" },
187 {ENCR_3DES, "des3_ede" },
188 /* {ENCR_RC5, "***" }, */
189 /* {ENCR_IDEA, "***" }, */
190 {ENCR_CAST, "cast128" },
191 {ENCR_BLOWFISH, "blowfish" },
192 /* {ENCR_3IDEA, "***" }, */
193 /* {ENCR_DES_IV32, "***" }, */
194 {ENCR_NULL, "cipher_null" },
195 {ENCR_AES_CBC, "aes" },
196 {ENCR_AES_CTR, "rfc3686(ctr(aes))" },
197 {ENCR_AES_CCM_ICV8, "rfc4309(ccm(aes))" },
198 {ENCR_AES_CCM_ICV12, "rfc4309(ccm(aes))" },
199 {ENCR_AES_CCM_ICV16, "rfc4309(ccm(aes))" },
200 {ENCR_AES_GCM_ICV8, "rfc4106(gcm(aes))" },
201 {ENCR_AES_GCM_ICV12, "rfc4106(gcm(aes))" },
202 {ENCR_AES_GCM_ICV16, "rfc4106(gcm(aes))" },
203 {ENCR_NULL_AUTH_AES_GMAC, "rfc4543(gcm(aes))" },
204 {ENCR_CAMELLIA_CBC, "cbc(camellia)" },
205 /* {ENCR_CAMELLIA_CTR, "***" }, */
206 /* {ENCR_CAMELLIA_CCM_ICV8, "***" }, */
207 /* {ENCR_CAMELLIA_CCM_ICV12, "***" }, */
208 /* {ENCR_CAMELLIA_CCM_ICV16, "***" }, */
209 {ENCR_SERPENT_CBC, "serpent" },
210 {ENCR_TWOFISH_CBC, "twofish" },
211 {END_OF_LIST, NULL }
212 };
213
214 /**
215 * Algorithms for integrity protection
216 */
217 static kernel_algorithm_t integrity_algs[] = {
218 {AUTH_HMAC_MD5_96, "md5" },
219 {AUTH_HMAC_MD5_128, "hmac(md5)" },
220 {AUTH_HMAC_SHA1_96, "sha1" },
221 {AUTH_HMAC_SHA1_160, "hmac(sha1)" },
222 {AUTH_HMAC_SHA2_256_96, "sha256" },
223 {AUTH_HMAC_SHA2_256_128, "hmac(sha256)" },
224 {AUTH_HMAC_SHA2_384_192, "hmac(sha384)" },
225 {AUTH_HMAC_SHA2_512_256, "hmac(sha512)" },
226 /* {AUTH_DES_MAC, "***" }, */
227 /* {AUTH_KPDK_MD5, "***" }, */
228 {AUTH_AES_XCBC_96, "xcbc(aes)" },
229 {END_OF_LIST, NULL }
230 };
231
232 /**
233 * Algorithms for IPComp
234 */
235 static kernel_algorithm_t compression_algs[] = {
236 /* {IPCOMP_OUI, "***" }, */
237 {IPCOMP_DEFLATE, "deflate" },
238 {IPCOMP_LZS, "lzs" },
239 {IPCOMP_LZJH, "lzjh" },
240 {END_OF_LIST, NULL }
241 };
242
243 /**
244 * Look up the Linux crypto API name for an IKEv2 algorithm identifier
245 */
246 static char* lookup_algorithm(transform_type_t type, int ikev2)
247 {
248 kernel_algorithm_t *list;
249 char *name = NULL;
250
251 switch (type)
252 {
253 case ENCRYPTION_ALGORITHM:
254 list = encryption_algs;
255 break;
256 case INTEGRITY_ALGORITHM:
257 list = integrity_algs;
258 break;
259 case COMPRESSION_ALGORITHM:
260 list = compression_algs;
261 break;
262 default:
263 return NULL;
264 }
265 while (list->ikev2 != END_OF_LIST)
266 {
267 if (list->ikev2 == ikev2)
268 {
269 return list->name;
270 }
271 list++;
272 }
273 hydra->kernel_interface->lookup_algorithm(hydra->kernel_interface, ikev2,
274 type, NULL, &name);
275 return name;
276 }
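/* e.g. lookup_algorithm(ENCRYPTION_ALGORITHM, ENCR_AES_CBC) returns "aes";
 * identifiers not found in the static tables are resolved through algorithms
 * registered with the kernel interface, or NULL if unknown */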
277
278 typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;
279
280 /**
281 * Private variables and functions of kernel_netlink class.
282 */
283 struct private_kernel_netlink_ipsec_t {
284 /**
285 * Public part of the kernel_netlink_t object
286 */
287 kernel_netlink_ipsec_t public;
288
289 /**
290 * Mutex to lock access to installed policies
291 */
292 mutex_t *mutex;
293
294 /**
295 * Hash table of installed policies (policy_entry_t)
296 */
297 hashtable_t *policies;
298
299 /**
300 * Hash table of IPsec SAs using policies (ipsec_sa_t)
301 */
302 hashtable_t *sas;
303
304 /**
305 * Netlink xfrm socket (IPsec)
306 */
307 netlink_socket_t *socket_xfrm;
308
309 /**
310 * Netlink xfrm socket to receive acquire and expire events
311 */
312 int socket_xfrm_events;
313
314 /**
315 * Whether to install routes along policies
316 */
317 bool install_routes;
318
319 /**
320 * Whether to track the history of a policy
321 */
322 bool policy_history;
323
324 /**
325 * Size of the replay window, in packets (= bits)
326 */
327 u_int32_t replay_window;
328
329 /**
330 * Size of the replay window bitmap, in number of __u32 blocks
331 */
332 u_int32_t replay_bmp;
333 };
334
335 typedef struct route_entry_t route_entry_t;
336
337 /**
338 * Installed routing entry
339 */
340 struct route_entry_t {
341 /** Name of the interface the route is bound to */
342 char *if_name;
343
344 /** Source ip of the route */
345 host_t *src_ip;
346
347 /** Gateway for this route */
348 host_t *gateway;
349
350 /** Destination net */
351 chunk_t dst_net;
352
353 /** Destination net prefixlen */
354 u_int8_t prefixlen;
355 };
356
357 /**
358 * Destroy a route_entry_t object
359 */
360 static void route_entry_destroy(route_entry_t *this)
361 {
362 free(this->if_name);
363 this->src_ip->destroy(this->src_ip);
364 DESTROY_IF(this->gateway);
365 chunk_free(&this->dst_net);
366 free(this);
367 }
368
369 /**
370 * Compare two route_entry_t objects
371 */
372 static bool route_entry_equals(route_entry_t *a, route_entry_t *b)
373 {
374 return a->if_name && b->if_name && streq(a->if_name, b->if_name) &&
375 a->src_ip->ip_equals(a->src_ip, b->src_ip) &&
376 a->gateway->ip_equals(a->gateway, b->gateway) &&
377 chunk_equals(a->dst_net, b->dst_net) && a->prefixlen == b->prefixlen;
378 }
379
380 typedef struct ipsec_sa_t ipsec_sa_t;
381
382 /**
383 * IPsec SA assigned to a policy.
384 */
385 struct ipsec_sa_t {
386 /** Source address of this SA */
387 host_t *src;
388
389 /** Destination address of this SA */
390 host_t *dst;
391
392 /** Optional mark */
393 mark_t mark;
394
395 /** Description of this SA */
396 ipsec_sa_cfg_t cfg;
397
398 /** Reference count for this SA */
399 refcount_t refcount;
400 };
401
402 /**
403 * Hash function for ipsec_sa_t objects
404 */
405 static u_int ipsec_sa_hash(ipsec_sa_t *sa)
406 {
407 return chunk_hash_inc(sa->src->get_address(sa->src),
408 chunk_hash_inc(sa->dst->get_address(sa->dst),
409 chunk_hash_inc(chunk_from_thing(sa->mark),
410 chunk_hash(chunk_from_thing(sa->cfg)))));
411 }
412
413 /**
414 * Equality function for ipsec_sa_t objects
415 */
416 static bool ipsec_sa_equals(ipsec_sa_t *sa, ipsec_sa_t *other_sa)
417 {
418 return sa->src->ip_equals(sa->src, other_sa->src) &&
419 sa->dst->ip_equals(sa->dst, other_sa->dst) &&
420 memeq(&sa->mark, &other_sa->mark, sizeof(mark_t)) &&
421 memeq(&sa->cfg, &other_sa->cfg, sizeof(ipsec_sa_cfg_t));
422 }
423
424 /**
425 * Allocate or reference an IPsec SA object
426 */
427 static ipsec_sa_t *ipsec_sa_create(private_kernel_netlink_ipsec_t *this,
428 host_t *src, host_t *dst, mark_t mark,
429 ipsec_sa_cfg_t *cfg)
430 {
431 ipsec_sa_t *sa, *found;
432 INIT(sa,
433 .src = src,
434 .dst = dst,
435 .mark = mark,
436 .cfg = *cfg,
437 );
438 found = this->sas->get(this->sas, sa);
439 if (!found)
440 {
441 sa->src = src->clone(src);
442 sa->dst = dst->clone(dst);
443 this->sas->put(this->sas, sa, sa);
444 }
445 else
446 {
447 free(sa);
448 sa = found;
449 }
450 ref_get(&sa->refcount);
451 return sa;
452 }
453
454 /**
455 * Release and destroy an IPsec SA object
456 */
457 static void ipsec_sa_destroy(private_kernel_netlink_ipsec_t *this,
458 ipsec_sa_t *sa)
459 {
460 if (ref_put(&sa->refcount))
461 {
462 this->sas->remove(this->sas, sa);
463 DESTROY_IF(sa->src);
464 DESTROY_IF(sa->dst);
465 free(sa);
466 }
467 }
468
469 typedef struct policy_sa_t policy_sa_t;
470 typedef struct policy_sa_fwd_t policy_sa_fwd_t;
471
472 /**
473 * Mapping between a policy and an IPsec SA.
474 */
475 struct policy_sa_t {
476 /** Priority assigned to the policy when installed with this SA */
477 u_int32_t priority;
478
479 /** Type of the policy */
480 policy_type_t type;
481
482 /** Assigned SA */
483 ipsec_sa_t *sa;
484 };
485
486 /**
487 * For forward policies we also cache the traffic selectors in order to install
488 * the route.
489 */
490 struct policy_sa_fwd_t {
491 /** Generic interface */
492 policy_sa_t generic;
493
494 /** Source traffic selector of this policy */
495 traffic_selector_t *src_ts;
496
497 /** Destination traffic selector of this policy */
498 traffic_selector_t *dst_ts;
499 };
500
501 /**
502 * Create a policy_sa(_fwd)_t object
503 */
504 static policy_sa_t *policy_sa_create(private_kernel_netlink_ipsec_t *this,
505 policy_dir_t dir, policy_type_t type, host_t *src, host_t *dst,
506 traffic_selector_t *src_ts, traffic_selector_t *dst_ts, mark_t mark,
507 ipsec_sa_cfg_t *cfg)
508 {
509 policy_sa_t *policy;
510
511 if (dir == POLICY_FWD)
512 {
513 policy_sa_fwd_t *fwd;
514 INIT(fwd,
515 .src_ts = src_ts->clone(src_ts),
516 .dst_ts = dst_ts->clone(dst_ts),
517 );
518 policy = &fwd->generic;
519 }
520 else
521 {
522 INIT(policy, .priority = 0);
523 }
524 policy->type = type;
525 policy->sa = ipsec_sa_create(this, src, dst, mark, cfg);
526 return policy;
527 }
528
529 /**
530 * Destroy a policy_sa(_fwd)_t object
531 */
532 static void policy_sa_destroy(policy_sa_t *policy, policy_dir_t *dir,
533 private_kernel_netlink_ipsec_t *this)
534 {
535 if (*dir == POLICY_FWD)
536 {
537 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)policy;
538 fwd->src_ts->destroy(fwd->src_ts);
539 fwd->dst_ts->destroy(fwd->dst_ts);
540 }
541 ipsec_sa_destroy(this, policy->sa);
542 free(policy);
543 }
544
545 typedef struct policy_entry_t policy_entry_t;
546
547 /**
548 * Installed kernel policy.
549 */
550 struct policy_entry_t {
551
552 /** Direction of this policy: in, out, forward */
553 u_int8_t direction;
554
555 /** Parameters of installed policy */
556 struct xfrm_selector sel;
557
558 /** Optional mark */
559 u_int32_t mark;
560
561 /** Associated route installed for this policy */
562 route_entry_t *route;
563
564 /** List of SAs this policy is used by, ordered by priority */
565 linked_list_t *used_by;
566 };
567
568 /**
569 * Destroy a policy_entry_t object
570 */
571 static void policy_entry_destroy(private_kernel_netlink_ipsec_t *this,
572 policy_entry_t *policy)
573 {
574 if (policy->route)
575 {
576 route_entry_destroy(policy->route);
577 }
578 if (policy->used_by)
579 {
580 policy->used_by->invoke_function(policy->used_by,
581 (linked_list_invoke_t)policy_sa_destroy,
582 &policy->direction, this);
583 policy->used_by->destroy(policy->used_by);
584 }
585 free(policy);
586 }
587
588 /**
589 * Hash function for policy_entry_t objects
590 */
591 static u_int policy_hash(policy_entry_t *key)
592 {
593 chunk_t chunk = chunk_from_thing(key->sel);
594 return chunk_hash_inc(chunk, chunk_hash(chunk_from_thing(key->mark)));
595 }
596
597 /**
598 * Equality function for policy_entry_t objects
599 */
600 static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
601 {
602 return memeq(&key->sel, &other_key->sel, sizeof(struct xfrm_selector)) &&
603 key->mark == other_key->mark &&
604 key->direction == other_key->direction;
605 }
606
607 /**
608 * Calculate the priority of a policy
609 */
610 static inline u_int32_t get_priority(policy_entry_t *policy,
611 policy_priority_t prio)
612 {
613 u_int32_t priority = PRIO_BASE;
614 switch (prio)
615 {
616 case POLICY_PRIORITY_FALLBACK:
617 priority <<= 1;
618 /* fall-through */
619 case POLICY_PRIORITY_ROUTED:
620 priority <<= 1;
621 /* fall-through */
622 case POLICY_PRIORITY_DEFAULT:
623 break;
624 }
625 /* calculate priority based on selector size, small size = high prio */
626 priority -= policy->sel.prefixlen_s;
627 priority -= policy->sel.prefixlen_d;
628 priority <<= 2; /* make some room for the two flags */
629 priority += policy->sel.sport_mask || policy->sel.dport_mask ? 0 : 2;
630 priority += policy->sel.proto ? 0 : 1;
631 return priority;
632 }
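/* e.g. a POLICY_PRIORITY_DEFAULT policy with /32 selectors, specific ports
 * and a protocol gets (512 - 32 - 32) << 2 = 1792, while an any/any 0.0.0.0/0
 * policy gets (512 << 2) + 2 + 1 = 2051; since the kernel prefers numerically
 * lower priorities, more specific policies match first */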
633
634 /**
635 * Convert the general ipsec mode to the one defined in xfrm.h
636 */
637 static u_int8_t mode2kernel(ipsec_mode_t mode)
638 {
639 switch (mode)
640 {
641 case MODE_TRANSPORT:
642 return XFRM_MODE_TRANSPORT;
643 case MODE_TUNNEL:
644 return XFRM_MODE_TUNNEL;
645 case MODE_BEET:
646 return XFRM_MODE_BEET;
647 default:
648 return mode;
649 }
650 }
651
652 /**
653 * Convert a host_t to a struct xfrm_address
654 */
655 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
656 {
657 chunk_t chunk = host->get_address(host);
658 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
659 }
660
661 /**
662 * Convert a struct xfrm_address to a host_t
663 */
664 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
665 {
666 chunk_t chunk;
667
668 switch (family)
669 {
670 case AF_INET:
671 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
672 break;
673 case AF_INET6:
674 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
675 break;
676 default:
677 return NULL;
678 }
679 return host_create_from_chunk(family, chunk, ntohs(port));
680 }
681
682 /**
683 * Convert a traffic selector address range to subnet and its mask.
684 */
685 static void ts2subnet(traffic_selector_t* ts,
686 xfrm_address_t *net, u_int8_t *mask)
687 {
688 host_t *net_host;
689 chunk_t net_chunk;
690
691 ts->to_subnet(ts, &net_host, mask);
692 net_chunk = net_host->get_address(net_host);
693 memcpy(net, net_chunk.ptr, net_chunk.len);
694 net_host->destroy(net_host);
695 }
696
697 /**
698 * Convert a traffic selector port range to port/portmask
699 */
700 static void ts2ports(traffic_selector_t* ts,
701 u_int16_t *port, u_int16_t *mask)
702 {
703 /* Linux does not seem to accept complex portmasks. Only "any" or a
704 * specific port is allowed. We therefore use "any" if we have a port
705 * range, and the specific port if we have a single port only.
706 */
707 u_int16_t from, to;
708
709 from = ts->get_from_port(ts);
710 to = ts->get_to_port(ts);
711
712 if (from == to)
713 {
714 *port = htons(from);
715 *mask = ~0;
716 }
717 else
718 {
719 *port = 0;
720 *mask = 0;
721 }
722 }
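/* e.g. a single port 4500 yields *port = htons(4500), *mask = 0xffff, while
 * the range 1024..65535 yields *port = 0, *mask = 0 (any port) */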
723
724 /**
725 * Convert a pair of traffic_selectors to an xfrm_selector
726 */
727 static struct xfrm_selector ts2selector(traffic_selector_t *src,
728 traffic_selector_t *dst)
729 {
730 struct xfrm_selector sel;
731
732 memset(&sel, 0, sizeof(sel));
733 sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
734 /* src or dest proto may be "any" (0), use more restrictive one */
735 sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
736 ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
737 ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
738 ts2ports(dst, &sel.dport, &sel.dport_mask);
739 ts2ports(src, &sel.sport, &sel.sport_mask);
740 sel.ifindex = 0;
741 sel.user = 0;
742
743 return sel;
744 }
745
746 /**
747 * Convert an xfrm_selector to a src|dst traffic_selector
748 */
749 static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
750 {
751 u_char *addr;
752 u_int8_t prefixlen;
753 u_int16_t port = 0;
754 host_t *host = NULL;
755
756 if (src)
757 {
758 addr = (u_char*)&sel->saddr;
759 prefixlen = sel->prefixlen_s;
760 if (sel->sport_mask)
761 {
762 port = htons(sel->sport);
763 }
764 }
765 else
766 {
767 addr = (u_char*)&sel->daddr;
768 prefixlen = sel->prefixlen_d;
769 if (sel->dport_mask)
770 {
771 port = htons(sel->dport);
772 }
773 }
774
775 /* The Linux 2.6 kernel does not set the selector's family field,
776 * so as a kludge we additionally test the prefix length.
777 */
778 if (sel->family == AF_INET || sel->prefixlen_s == 32)
779 {
780 host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
781 }
782 else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
783 {
784 host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
785 }
786
787 if (host)
788 {
789 return traffic_selector_create_from_subnet(host, prefixlen,
790 sel->proto, port, port ?: 65535);
791 }
792 return NULL;
793 }
794
795 /**
796 * Process a XFRM_MSG_ACQUIRE from kernel
797 */
798 static void process_acquire(private_kernel_netlink_ipsec_t *this,
799 struct nlmsghdr *hdr)
800 {
801 struct xfrm_user_acquire *acquire;
802 struct rtattr *rta;
803 size_t rtasize;
804 traffic_selector_t *src_ts, *dst_ts;
805 u_int32_t reqid = 0;
806 int proto = 0;
807
808 acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
809 rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
810 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
811
812 DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");
813
814 while (RTA_OK(rta, rtasize))
815 {
816 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
817
818 if (rta->rta_type == XFRMA_TMPL)
819 {
820 struct xfrm_user_tmpl* tmpl;
821 tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
822 reqid = tmpl->reqid;
823 proto = tmpl->id.proto;
824 }
825 rta = RTA_NEXT(rta, rtasize);
826 }
827 switch (proto)
828 {
829 case 0:
830 case IPPROTO_ESP:
831 case IPPROTO_AH:
832 break;
833 default:
834 /* acquire for AH/ESP only, not for IPCOMP */
835 return;
836 }
837 src_ts = selector2ts(&acquire->sel, TRUE);
838 dst_ts = selector2ts(&acquire->sel, FALSE);
839
840 hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
841 dst_ts);
842 }
843
844 /**
845 * Process a XFRM_MSG_EXPIRE from kernel
846 */
847 static void process_expire(private_kernel_netlink_ipsec_t *this,
848 struct nlmsghdr *hdr)
849 {
850 struct xfrm_user_expire *expire;
851 u_int32_t spi, reqid;
852 u_int8_t protocol;
853
854 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
855 protocol = expire->state.id.proto;
856 spi = expire->state.id.spi;
857 reqid = expire->state.reqid;
858
859 DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");
860
861 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
862 {
863 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
864 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
865 return;
866 }
867
868 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
869 spi, expire->hard != 0);
870 }
871
872 /**
873 * Process a XFRM_MSG_MIGRATE from kernel
874 */
875 static void process_migrate(private_kernel_netlink_ipsec_t *this,
876 struct nlmsghdr *hdr)
877 {
878 struct xfrm_userpolicy_id *policy_id;
879 struct rtattr *rta;
880 size_t rtasize;
881 traffic_selector_t *src_ts, *dst_ts;
882 host_t *local = NULL, *remote = NULL;
883 host_t *old_src = NULL, *old_dst = NULL;
884 host_t *new_src = NULL, *new_dst = NULL;
885 u_int32_t reqid = 0;
886 policy_dir_t dir;
887
888 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
889 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
890 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
891
892 DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");
893
894 src_ts = selector2ts(&policy_id->sel, TRUE);
895 dst_ts = selector2ts(&policy_id->sel, FALSE);
896 dir = (policy_dir_t)policy_id->dir;
897
898 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);
899
900 while (RTA_OK(rta, rtasize))
901 {
902 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
903 if (rta->rta_type == XFRMA_KMADDRESS)
904 {
905 struct xfrm_user_kmaddress *kmaddress;
906
907 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
908 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
909 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
910 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
911 }
912 else if (rta->rta_type == XFRMA_MIGRATE)
913 {
914 struct xfrm_user_migrate *migrate;
915
916 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
917 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
918 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
919 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
920 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
921 reqid = migrate->reqid;
922 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
923 old_src, old_dst, new_src, new_dst, reqid);
924 DESTROY_IF(old_src);
925 DESTROY_IF(old_dst);
926 DESTROY_IF(new_src);
927 DESTROY_IF(new_dst);
928 }
929 rta = RTA_NEXT(rta, rtasize);
930 }
931
932 if (src_ts && dst_ts && local && remote)
933 {
934 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
935 src_ts, dst_ts, dir, local, remote);
936 }
937 else
938 {
939 DESTROY_IF(src_ts);
940 DESTROY_IF(dst_ts);
941 DESTROY_IF(local);
942 DESTROY_IF(remote);
943 }
944 }
945
946 /**
947 * Process a XFRM_MSG_MAPPING from kernel
948 */
949 static void process_mapping(private_kernel_netlink_ipsec_t *this,
950 struct nlmsghdr *hdr)
951 {
952 struct xfrm_user_mapping *mapping;
953 u_int32_t spi, reqid;
954
955 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
956 spi = mapping->id.spi;
957 reqid = mapping->reqid;
958
959 DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");
960
961 if (mapping->id.proto == IPPROTO_ESP)
962 {
963 host_t *host;
964 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
965 mapping->new_sport);
966 if (host)
967 {
968 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
969 spi, host);
970 }
971 }
972 }
973
974 /**
975 * Receives events from kernel
976 */
977 static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
978 {
979 char response[1024];
980 struct nlmsghdr *hdr = (struct nlmsghdr*)response;
981 struct sockaddr_nl addr;
982 socklen_t addr_len = sizeof(addr);
983 int len;
984 bool oldstate;
985
986 oldstate = thread_cancelability(TRUE);
987 len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
988 (struct sockaddr*)&addr, &addr_len);
989 thread_cancelability(oldstate);
990
991 if (len < 0)
992 {
993 switch (errno)
994 {
995 case EINTR:
996 /* interrupted, try again */
997 return JOB_REQUEUE_DIRECT;
998 case EAGAIN:
999 /* no data ready, select again */
1000 return JOB_REQUEUE_DIRECT;
1001 default:
1002 DBG1(DBG_KNL, "unable to receive from xfrm event socket");
1003 sleep(1);
1004 return JOB_REQUEUE_FAIR;
1005 }
1006 }
1007
1008 if (addr.nl_pid != 0)
1009 { /* not from kernel. not interested, try another one */
1010 return JOB_REQUEUE_DIRECT;
1011 }
1012
1013 while (NLMSG_OK(hdr, len))
1014 {
1015 switch (hdr->nlmsg_type)
1016 {
1017 case XFRM_MSG_ACQUIRE:
1018 process_acquire(this, hdr);
1019 break;
1020 case XFRM_MSG_EXPIRE:
1021 process_expire(this, hdr);
1022 break;
1023 case XFRM_MSG_MIGRATE:
1024 process_migrate(this, hdr);
1025 break;
1026 case XFRM_MSG_MAPPING:
1027 process_mapping(this, hdr);
1028 break;
1029 default:
1030 DBG1(DBG_KNL, "received unknown event from xfrm event "
1031 "socket: %d", hdr->nlmsg_type);
1032 break;
1033 }
1034 hdr = NLMSG_NEXT(hdr, len);
1035 }
1036 return JOB_REQUEUE_DIRECT;
1037 }
1038
1039 METHOD(kernel_ipsec_t, get_features, kernel_feature_t,
1040 private_kernel_netlink_ipsec_t *this)
1041 {
1042 return KERNEL_ESP_V3_TFC;
1043 }
1044
1045 /**
1046 * Get an SPI for a specific protocol from the kernel.
1047 */
1048 static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
1049 host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
1050 u_int32_t reqid, u_int32_t *spi)
1051 {
1052 netlink_buf_t request;
1053 struct nlmsghdr *hdr, *out;
1054 struct xfrm_userspi_info *userspi;
1055 u_int32_t received_spi = 0;
1056 size_t len;
1057
1058 memset(&request, 0, sizeof(request));
1059
1060 hdr = (struct nlmsghdr*)request;
1061 hdr->nlmsg_flags = NLM_F_REQUEST;
1062 hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
1063 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));
1064
1065 userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
1066 host2xfrm(src, &userspi->info.saddr);
1067 host2xfrm(dst, &userspi->info.id.daddr);
1068 userspi->info.id.proto = proto;
1069 userspi->info.mode = XFRM_MODE_TUNNEL;
1070 userspi->info.reqid = reqid;
1071 userspi->info.family = src->get_family(src);
1072 userspi->min = min;
1073 userspi->max = max;
1074
1075 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1076 {
1077 hdr = out;
1078 while (NLMSG_OK(hdr, len))
1079 {
1080 switch (hdr->nlmsg_type)
1081 {
1082 case XFRM_MSG_NEWSA:
1083 {
1084 struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
1085 received_spi = usersa->id.spi;
1086 break;
1087 }
1088 case NLMSG_ERROR:
1089 {
1090 struct nlmsgerr *err = NLMSG_DATA(hdr);
1091 DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
1092 strerror(-err->error), -err->error);
1093 break;
1094 }
1095 default:
1096 hdr = NLMSG_NEXT(hdr, len);
1097 continue;
1098 case NLMSG_DONE:
1099 break;
1100 }
1101 break;
1102 }
1103 free(out);
1104 }
1105
1106 if (received_spi == 0)
1107 {
1108 return FAILED;
1109 }
1110
1111 *spi = received_spi;
1112 return SUCCESS;
1113 }
1114
1115 METHOD(kernel_ipsec_t, get_spi, status_t,
1116 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1117 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
1118 {
1119 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
1120
1121 if (get_spi_internal(this, src, dst, protocol,
1122 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
1123 {
1124 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
1125 return FAILED;
1126 }
1127
1128 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
1129 return SUCCESS;
1130 }
1131
1132 METHOD(kernel_ipsec_t, get_cpi, status_t,
1133 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1134 u_int32_t reqid, u_int16_t *cpi)
1135 {
1136 u_int32_t received_spi = 0;
1137
1138 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
1139
1140 if (get_spi_internal(this, src, dst, IPPROTO_COMP,
1141 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
1142 {
1143 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
1144 return FAILED;
1145 }
1146
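/* the kernel allocated a full 32-bit SPI; derive the 16-bit CPI from its
 * low-order 16 bits */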
1147 *cpi = htons((u_int16_t)ntohl(received_spi));
1148
1149 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
1150 return SUCCESS;
1151 }
1152
1153 METHOD(kernel_ipsec_t, add_sa, status_t,
1154 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1155 u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
1156 u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
1157 u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
1158 u_int16_t cpi, bool encap, bool esn, bool inbound,
1159 traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
1160 {
1161 netlink_buf_t request;
1162 char *alg_name;
1163 struct nlmsghdr *hdr;
1164 struct xfrm_usersa_info *sa;
1165 u_int16_t icv_size = 64;
1166 status_t status = FAILED;
1167
1168 /* if IPComp is used, we install an additional IPComp SA. if the cpi is 0
1169 * we are in the recursive call below */
1170 if (ipcomp != IPCOMP_NONE && cpi != 0)
1171 {
1172 lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
1173 add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark,
1174 tfc, &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED,
1175 chunk_empty, mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
1176 ipcomp = IPCOMP_NONE;
1177 /* use transport mode ESP SA, IPComp uses tunnel mode */
1178 mode = MODE_TRANSPORT;
1179 }
1180
1181 memset(&request, 0, sizeof(request));
1182
1183 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} (mark "
1184 "%u/0x%08x)", ntohl(spi), reqid, mark.value, mark.mask);
1185
1186 hdr = (struct nlmsghdr*)request;
1187 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1188 hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
1189 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1190
1191 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1192 host2xfrm(src, &sa->saddr);
1193 host2xfrm(dst, &sa->id.daddr);
1194 sa->id.spi = spi;
1195 sa->id.proto = protocol;
1196 sa->family = src->get_family(src);
1197 sa->mode = mode2kernel(mode);
1198 switch (mode)
1199 {
1200 case MODE_TUNNEL:
1201 sa->flags |= XFRM_STATE_AF_UNSPEC;
1202 break;
1203 case MODE_BEET:
1204 case MODE_TRANSPORT:
1205 if(src_ts && dst_ts)
1206 {
1207 sa->sel = ts2selector(src_ts, dst_ts);
1208 }
1209 break;
1210 default:
1211 break;
1212 }
1213
1214 sa->reqid = reqid;
1215 sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
1216 sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
1217 sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
1218 sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
1219 /* we use lifetimes since added, not since used */
1220 sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
1221 sa->lft.hard_add_expires_seconds = lifetime->time.life;
1222 sa->lft.soft_use_expires_seconds = 0;
1223 sa->lft.hard_use_expires_seconds = 0;
1224
1225 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);
1226
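/* the fall-throughs below accumulate the ICV length: starting from 64 bits,
 * the ICV16 cases add 2 x 32 bits and the ICV12 cases 1 x 32 bits, giving
 * 128, 96 or 64 bit ICVs for the AEAD algorithms */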
1227 switch (enc_alg)
1228 {
1229 case ENCR_UNDEFINED:
1230 /* no encryption */
1231 break;
1232 case ENCR_AES_CCM_ICV16:
1233 case ENCR_AES_GCM_ICV16:
1234 case ENCR_NULL_AUTH_AES_GMAC:
1235 case ENCR_CAMELLIA_CCM_ICV16:
1236 icv_size += 32;
1237 /* FALL */
1238 case ENCR_AES_CCM_ICV12:
1239 case ENCR_AES_GCM_ICV12:
1240 case ENCR_CAMELLIA_CCM_ICV12:
1241 icv_size += 32;
1242 /* FALL */
1243 case ENCR_AES_CCM_ICV8:
1244 case ENCR_AES_GCM_ICV8:
1245 case ENCR_CAMELLIA_CCM_ICV8:
1246 {
1247 struct xfrm_algo_aead *algo;
1248
1249 alg_name = lookup_algorithm(ENCRYPTION_ALGORITHM, enc_alg);
1250 if (alg_name == NULL)
1251 {
1252 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1253 encryption_algorithm_names, enc_alg);
1254 goto failed;
1255 }
1256 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1257 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1258
1259 rthdr->rta_type = XFRMA_ALG_AEAD;
1260 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) +
1261 enc_key.len);
1262 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1263 if (hdr->nlmsg_len > sizeof(request))
1264 {
1265 goto failed;
1266 }
1267
1268 algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
1269 algo->alg_key_len = enc_key.len * 8;
1270 algo->alg_icv_len = icv_size;
1271 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1272 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1273 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1274
1275 rthdr = XFRM_RTA_NEXT(rthdr);
1276 break;
1277 }
1278 default:
1279 {
1280 struct xfrm_algo *algo;
1281
1282 alg_name = lookup_algorithm(ENCRYPTION_ALGORITHM, enc_alg);
1283 if (alg_name == NULL)
1284 {
1285 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1286 encryption_algorithm_names, enc_alg);
1287 goto failed;
1288 }
1289 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1290 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1291
1292 rthdr->rta_type = XFRMA_ALG_CRYPT;
1293 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
1294 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1295 if (hdr->nlmsg_len > sizeof(request))
1296 {
1297 goto failed;
1298 }
1299
1300 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1301 algo->alg_key_len = enc_key.len * 8;
1302 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1303 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1304 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1305
1306 rthdr = XFRM_RTA_NEXT(rthdr);
1307 }
1308 }
1309
1310 if (int_alg != AUTH_UNDEFINED)
1311 {
1312 u_int trunc_len = 0;
1313
1314 alg_name = lookup_algorithm(INTEGRITY_ALGORITHM, int_alg);
1315 if (alg_name == NULL)
1316 {
1317 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1318 integrity_algorithm_names, int_alg);
1319 goto failed;
1320 }
1321 DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
1322 integrity_algorithm_names, int_alg, int_key.len * 8);
1323
1324 switch (int_alg)
1325 {
1326 case AUTH_HMAC_MD5_128:
1327 case AUTH_HMAC_SHA2_256_128:
1328 trunc_len = 128;
1329 break;
1330 case AUTH_HMAC_SHA1_160:
1331 trunc_len = 160;
1332 break;
1333 default:
1334 break;
1335 }
1336
1337 if (trunc_len)
1338 {
1339 struct xfrm_algo_auth* algo;
1340
1341 /* the kernel uses SHA256 with 96 bit truncation by default,
1342 * use specified truncation size supported by newer kernels.
1343 * also use this for untruncated MD5 and SHA1. */
1344 rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
1345 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) +
1346 int_key.len);
1347
1348 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1349 if (hdr->nlmsg_len > sizeof(request))
1350 {
1351 goto failed;
1352 }
1353
1354 algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
1355 algo->alg_key_len = int_key.len * 8;
1356 algo->alg_trunc_len = trunc_len;
1357 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1358 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1359 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1360 }
1361 else
1362 {
1363 struct xfrm_algo* algo;
1364
1365 rthdr->rta_type = XFRMA_ALG_AUTH;
1366 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);
1367
1368 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1369 if (hdr->nlmsg_len > sizeof(request))
1370 {
1371 goto failed;
1372 }
1373
1374 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1375 algo->alg_key_len = int_key.len * 8;
1376 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1377 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1378 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1379 }
1380 rthdr = XFRM_RTA_NEXT(rthdr);
1381 }
1382
1383 if (ipcomp != IPCOMP_NONE)
1384 {
1385 rthdr->rta_type = XFRMA_ALG_COMP;
1386 alg_name = lookup_algorithm(COMPRESSION_ALGORITHM, ipcomp);
1387 if (alg_name == NULL)
1388 {
1389 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1390 ipcomp_transform_names, ipcomp);
1391 goto failed;
1392 }
1393 DBG2(DBG_KNL, " using compression algorithm %N",
1394 ipcomp_transform_names, ipcomp);
1395
1396 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
1397 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1398 if (hdr->nlmsg_len > sizeof(request))
1399 {
1400 goto failed;
1401 }
1402
1403 struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1404 algo->alg_key_len = 0;
1405 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1406 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1407
1408 rthdr = XFRM_RTA_NEXT(rthdr);
1409 }
1410
1411 if (encap)
1412 {
1413 struct xfrm_encap_tmpl *tmpl;
1414
1415 rthdr->rta_type = XFRMA_ENCAP;
1416 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1417
1418 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1419 if (hdr->nlmsg_len > sizeof(request))
1420 {
1421 goto failed;
1422 }
1423
1424 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
1425 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1426 tmpl->encap_sport = htons(src->get_port(src));
1427 tmpl->encap_dport = htons(dst->get_port(dst));
1428 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1429 /* encap_oa could probably be derived from the
1430 * traffic selectors [rfc4306, p39]. In the netlink kernel
1431 * implementation pluto does the same as we do here but it uses
1432 * encap_oa in the pfkey implementation.
1433 * BUT as /usr/src/linux/net/key/af_key.c indicates the kernel ignores
1434 * it anyway
1435 * -> does that mean that NAT-T encap doesn't work in transport mode?
1436 * No. The reason the kernel ignores NAT-OA is that it recomputes
1437 * (or, rather, just ignores) the checksum. If packets pass the IPsec
1438 * checks it marks them "checksum ok" so OA isn't needed. */
1439 rthdr = XFRM_RTA_NEXT(rthdr);
1440 }
1441
1442 if (mark.value)
1443 {
1444 struct xfrm_mark *mrk;
1445
1446 rthdr->rta_type = XFRMA_MARK;
1447 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1448
1449 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1450 if (hdr->nlmsg_len > sizeof(request))
1451 {
1452 goto failed;
1453 }
1454
1455 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1456 mrk->v = mark.value;
1457 mrk->m = mark.mask;
1458 rthdr = XFRM_RTA_NEXT(rthdr);
1459 }
1460
1461 if (tfc)
1462 {
1463 u_int32_t *tfcpad;
1464
1465 rthdr->rta_type = XFRMA_TFCPAD;
1466 rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));
1467
1468 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1469 if (hdr->nlmsg_len > sizeof(request))
1470 {
1471 goto failed;
1472 }
1473
1474 tfcpad = (u_int32_t*)RTA_DATA(rthdr);
1475 *tfcpad = tfc;
1476 rthdr = XFRM_RTA_NEXT(rthdr);
1477 }
1478
1479 if (protocol != IPPROTO_COMP)
1480 {
1481 if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
1482 {
1483 /* for ESN or larger replay windows we need the new
1484 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
1485 struct xfrm_replay_state_esn *replay;
1486
1487 rthdr->rta_type = XFRMA_REPLAY_ESN_VAL;
1488 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1489 (this->replay_window + 7) / 8);
1490
1491 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1492 if (hdr->nlmsg_len > sizeof(request))
1493 {
1494 goto failed;
1495 }
1496
1497 replay = (struct xfrm_replay_state_esn*)RTA_DATA(rthdr);
1498 /* bmp_len contains the number of __u32's */
1499 replay->bmp_len = this->replay_bmp;
1500 replay->replay_window = this->replay_window;
1501 DBG2(DBG_KNL, " using replay window of %u packets",
1502 this->replay_window);
1503
1504 rthdr = XFRM_RTA_NEXT(rthdr);
1505 if (esn)
1506 {
1507 DBG2(DBG_KNL, " using extended sequence numbers (ESN)");
1508 sa->flags |= XFRM_STATE_ESN;
1509 }
1510 }
1511 else
1512 {
1513 DBG2(DBG_KNL, " using replay window of %u packets",
1514 this->replay_window);
1515 sa->replay_window = this->replay_window;
1516 }
1517 }
1518
1519 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1520 {
1521 if (mark.value)
1522 {
1523 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
1524 "(mark %u/0x%08x)", ntohl(spi), mark.value, mark.mask);
1525 }
1526 else
1527 {
1528 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
1529 }
1530 goto failed;
1531 }
1532
1533 status = SUCCESS;
1534
1535 failed:
1536 memwipe(request, sizeof(request));
1537 return status;
1538 }
1539
1540 /**
1541 * Get the ESN replay state (i.e. sequence numbers) of an SA.
1542 *
1543 * Allocates the replay state structure received from the kernel into one of the given output pointers; the caller is responsible for freeing it.
1544 */
1545 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1546 u_int32_t spi, u_int8_t protocol,
1547 host_t *dst, mark_t mark,
1548 struct xfrm_replay_state_esn **replay_esn,
1549 struct xfrm_replay_state **replay)
1550 {
1551 netlink_buf_t request;
1552 struct nlmsghdr *hdr, *out = NULL;
1553 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1554 size_t len;
1555 struct rtattr *rta;
1556 size_t rtasize;
1557
1558 memset(&request, 0, sizeof(request));
1559
1560 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1561 ntohl(spi));
1562
1563 hdr = (struct nlmsghdr*)request;
1564 hdr->nlmsg_flags = NLM_F_REQUEST;
1565 hdr->nlmsg_type = XFRM_MSG_GETAE;
1566 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1567
1568 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1569 aevent_id->flags = XFRM_AE_RVAL;
1570
1571 host2xfrm(dst, &aevent_id->sa_id.daddr);
1572 aevent_id->sa_id.spi = spi;
1573 aevent_id->sa_id.proto = protocol;
1574 aevent_id->sa_id.family = dst->get_family(dst);
1575
1576 if (mark.value)
1577 {
1578 struct xfrm_mark *mrk;
1579 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_aevent_id);
1580
1581 rthdr->rta_type = XFRMA_MARK;
1582 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1583 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1584 if (hdr->nlmsg_len > sizeof(request))
1585 {
1586 return;
1587 }
1588
1589 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1590 mrk->v = mark.value;
1591 mrk->m = mark.mask;
1592 }
1593
1594 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1595 {
1596 hdr = out;
1597 while (NLMSG_OK(hdr, len))
1598 {
1599 switch (hdr->nlmsg_type)
1600 {
1601 case XFRM_MSG_NEWAE:
1602 {
1603 out_aevent = NLMSG_DATA(hdr);
1604 break;
1605 }
1606 case NLMSG_ERROR:
1607 {
1608 struct nlmsgerr *err = NLMSG_DATA(hdr);
1609 DBG1(DBG_KNL, "querying replay state from SAD entry "
1610 "failed: %s (%d)", strerror(-err->error),
1611 -err->error);
1612 break;
1613 }
1614 default:
1615 hdr = NLMSG_NEXT(hdr, len);
1616 continue;
1617 case NLMSG_DONE:
1618 break;
1619 }
1620 break;
1621 }
1622 }
1623
1624 if (out_aevent)
1625 {
1626 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1627 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1628 while (RTA_OK(rta, rtasize))
1629 {
1630 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1631 RTA_PAYLOAD(rta) == sizeof(**replay))
1632 {
1633 *replay = malloc(RTA_PAYLOAD(rta));
1634 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1635 break;
1636 }
1637 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1638 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1639 {
1640 *replay_esn = malloc(RTA_PAYLOAD(rta));
1641 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1642 break;
1643 }
1644 rta = RTA_NEXT(rta, rtasize);
1645 }
1646 }
1647 free(out);
1648 }
1649
1650 METHOD(kernel_ipsec_t, query_sa, status_t,
1651 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1652 u_int32_t spi, u_int8_t protocol, mark_t mark,
1653 u_int64_t *bytes, u_int64_t *packets)
1654 {
1655 netlink_buf_t request;
1656 struct nlmsghdr *out = NULL, *hdr;
1657 struct xfrm_usersa_id *sa_id;
1658 struct xfrm_usersa_info *sa = NULL;
1659 status_t status = FAILED;
1660 size_t len;
1661
1662 memset(&request, 0, sizeof(request));
1663
1664 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%08x)",
1665 ntohl(spi), mark.value, mark.mask);
1666
1667 hdr = (struct nlmsghdr*)request;
1668 hdr->nlmsg_flags = NLM_F_REQUEST;
1669 hdr->nlmsg_type = XFRM_MSG_GETSA;
1670 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1671
1672 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1673 host2xfrm(dst, &sa_id->daddr);
1674 sa_id->spi = spi;
1675 sa_id->proto = protocol;
1676 sa_id->family = dst->get_family(dst);
1677
1678 if (mark.value)
1679 {
1680 struct xfrm_mark *mrk;
1681 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1682
1683 rthdr->rta_type = XFRMA_MARK;
1684 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1685 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1686 if (hdr->nlmsg_len > sizeof(request))
1687 {
1688 return FAILED;
1689 }
1690
1691 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1692 mrk->v = mark.value;
1693 mrk->m = mark.mask;
1694 }
1695
1696 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1697 {
1698 hdr = out;
1699 while (NLMSG_OK(hdr, len))
1700 {
1701 switch (hdr->nlmsg_type)
1702 {
1703 case XFRM_MSG_NEWSA:
1704 {
1705 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1706 break;
1707 }
1708 case NLMSG_ERROR:
1709 {
1710 struct nlmsgerr *err = NLMSG_DATA(hdr);
1711
1712 if (mark.value)
1713 {
1714 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1715 "(mark %u/0x%08x) failed: %s (%d)",
1716 ntohl(spi), mark.value, mark.mask,
1717 strerror(-err->error), -err->error);
1718 }
1719 else
1720 {
1721 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1722 "failed: %s (%d)", ntohl(spi),
1723 strerror(-err->error), -err->error);
1724 }
1725 break;
1726 }
1727 default:
1728 hdr = NLMSG_NEXT(hdr, len);
1729 continue;
1730 case NLMSG_DONE:
1731 break;
1732 }
1733 break;
1734 }
1735 }
1736
1737 if (sa == NULL)
1738 {
1739 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1740 }
1741 else
1742 {
1743 if (bytes)
1744 {
1745 *bytes = sa->curlft.bytes;
1746 }
1747 if (packets)
1748 {
1749 *packets = sa->curlft.packets;
1750 }
1751 status = SUCCESS;
1752 }
1753 memwipe(out, len);
1754 free(out);
1755 return status;
1756 }
1757
1758 METHOD(kernel_ipsec_t, del_sa, status_t,
1759 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1760 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1761 {
1762 netlink_buf_t request;
1763 struct nlmsghdr *hdr;
1764 struct xfrm_usersa_id *sa_id;
1765
1766 /* if IPComp was used, we first delete the additional IPComp SA */
1767 if (cpi)
1768 {
1769 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1770 }
1771
1772 memset(&request, 0, sizeof(request));
1773
1774 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%08x)",
1775 ntohl(spi), mark.value, mark.mask);
1776
1777 hdr = (struct nlmsghdr*)request;
1778 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1779 hdr->nlmsg_type = XFRM_MSG_DELSA;
1780 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1781
1782 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1783 host2xfrm(dst, &sa_id->daddr);
1784 sa_id->spi = spi;
1785 sa_id->proto = protocol;
1786 sa_id->family = dst->get_family(dst);
1787
1788 if (mark.value)
1789 {
1790 struct xfrm_mark *mrk;
1791 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1792
1793 rthdr->rta_type = XFRMA_MARK;
1794 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1795 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1796 if (hdr->nlmsg_len > sizeof(request))
1797 {
1798 return FAILED;
1799 }
1800
1801 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1802 mrk->v = mark.value;
1803 mrk->m = mark.mask;
1804 }
1805
1806 switch (this->socket_xfrm->send_ack(this->socket_xfrm, hdr))
1807 {
1808 case SUCCESS:
1809 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%08x)",
1810 ntohl(spi), mark.value, mark.mask);
1811 return SUCCESS;
1812 case NOT_FOUND:
1813 return NOT_FOUND;
1814 default:
1815 if (mark.value)
1816 {
1817 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1818 "(mark %u/0x%08x)", ntohl(spi), mark.value, mark.mask);
1819 }
1820 else
1821 {
1822 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x",
1823 ntohl(spi));
1824 }
1825 return FAILED;
1826 }
1827 }
1828
1829 METHOD(kernel_ipsec_t, update_sa, status_t,
1830 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1831 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1832 bool old_encap, bool new_encap, mark_t mark)
1833 {
1834 netlink_buf_t request;
1835 u_char *pos;
1836 struct nlmsghdr *hdr, *out = NULL;
1837 struct xfrm_usersa_id *sa_id;
1838 struct xfrm_usersa_info *out_sa = NULL, *sa;
1839 size_t len;
1840 struct rtattr *rta;
1841 size_t rtasize;
1842 struct xfrm_encap_tmpl* tmpl = NULL;
1843 struct xfrm_replay_state *replay = NULL;
1844 struct xfrm_replay_state_esn *replay_esn = NULL;
1845 status_t status = FAILED;
1846
1847 /* if IPComp is used, we first update the IPComp SA */
1848 if (cpi)
1849 {
1850 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1851 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1852 }
1853
1854 memset(&request, 0, sizeof(request));
1855
1856 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1857
1858 /* query the existing SA first */
1859 hdr = (struct nlmsghdr*)request;
1860 hdr->nlmsg_flags = NLM_F_REQUEST;
1861 hdr->nlmsg_type = XFRM_MSG_GETSA;
1862 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1863
1864 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1865 host2xfrm(dst, &sa_id->daddr);
1866 sa_id->spi = spi;
1867 sa_id->proto = protocol;
1868 sa_id->family = dst->get_family(dst);
1869
1870 if (mark.value)
1871 {
1872 struct xfrm_mark *mrk;
1873 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1874
1875 rthdr->rta_type = XFRMA_MARK;
1876 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1877 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1878 if (hdr->nlmsg_len > sizeof(request))
1879 {
1880 return FAILED;
1881 }
1882
1883 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1884 mrk->v = mark.value;
1885 mrk->m = mark.mask;
1886 }
1887
1888 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1889 {
1890 hdr = out;
1891 while (NLMSG_OK(hdr, len))
1892 {
1893 switch (hdr->nlmsg_type)
1894 {
1895 case XFRM_MSG_NEWSA:
1896 {
1897 out_sa = NLMSG_DATA(hdr);
1898 break;
1899 }
1900 case NLMSG_ERROR:
1901 {
1902 struct nlmsgerr *err = NLMSG_DATA(hdr);
1903 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1904 strerror(-err->error), -err->error);
1905 break;
1906 }
1907 default:
1908 hdr = NLMSG_NEXT(hdr, len);
1909 continue;
1910 case NLMSG_DONE:
1911 break;
1912 }
1913 break;
1914 }
1915 }
1916 if (out_sa == NULL)
1917 {
1918 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1919 goto failed;
1920 }
1921
1922 get_replay_state(this, spi, protocol, dst, mark, &replay_esn, &replay);
1923
1924 /* delete the old SA (without affecting the IPComp SA) */
1925 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1926 {
1927 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x",
1928 ntohl(spi));
1929 goto failed;
1930 }
1931
1932 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1933 ntohl(spi), src, dst, new_src, new_dst);
1934 /* copy over the SA from out to request */
1935 hdr = (struct nlmsghdr*)request;
1936 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1937 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1938 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1939 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1940 sa = NLMSG_DATA(hdr);
1941 sa->family = new_dst->get_family(new_dst);
1942
1943 if (!src->ip_equals(src, new_src))
1944 {
1945 host2xfrm(new_src, &sa->saddr);
1946 }
1947 if (!dst->ip_equals(dst, new_dst))
1948 {
1949 host2xfrm(new_dst, &sa->id.daddr);
1950 }
1951
1952 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1953 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1954 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1955 while(RTA_OK(rta, rtasize))
1956 {
1957 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1958 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1959 {
1960 if (rta->rta_type == XFRMA_ENCAP)
1961 { /* update encap tmpl */
1962 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1963 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1964 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1965 }
1966 memcpy(pos, rta, rta->rta_len);
1967 pos += RTA_ALIGN(rta->rta_len);
1968 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1969 }
1970 rta = RTA_NEXT(rta, rtasize);
1971 }
1972
1973 rta = (struct rtattr*)pos;
1974 if (tmpl == NULL && new_encap)
1975 { /* add tmpl if we are enabling it */
1976 rta->rta_type = XFRMA_ENCAP;
1977 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1978
1979 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1980 if (hdr->nlmsg_len > sizeof(request))
1981 {
1982 goto failed;
1983 }
1984
1985 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1986 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1987 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1988 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1989 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1990
1991 rta = XFRM_RTA_NEXT(rta);
1992 }
1993
1994 if (replay_esn)
1995 {
1996 rta->rta_type = XFRMA_REPLAY_ESN_VAL;
1997 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1998 this->replay_bmp);
1999
2000 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
2001 if (hdr->nlmsg_len > sizeof(request))
2002 {
2003 goto failed;
2004 }
2005 memcpy(RTA_DATA(rta), replay_esn,
2006 sizeof(struct xfrm_replay_state_esn) + this->replay_bmp);
2007
2008 rta = XFRM_RTA_NEXT(rta);
2009 }
2010 else if (replay)
2011 {
2012 rta->rta_type = XFRMA_REPLAY_VAL;
2013 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
2014
2015 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
2016 if (hdr->nlmsg_len > sizeof(request))
2017 {
2018 goto failed;
2019 }
2020 memcpy(RTA_DATA(rta), replay, sizeof(struct xfrm_replay_state));
2021
2022 rta = XFRM_RTA_NEXT(rta);
2023 }
2024 else
2025 {
2026 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
2027 "with SPI %.8x", ntohl(spi));
2028 }
2029
2030 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2031 {
2032 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
2033 goto failed;
2034 }
2035
2036 status = SUCCESS;
2037 failed:
2038 free(replay);
2039 free(replay_esn);
2040 memwipe(out, len);
2041 memwipe(request, sizeof(request));
2042 free(out);
2043
2044 return status;
2045 }
2046
2047 METHOD(kernel_ipsec_t, flush_sas, status_t,
2048 private_kernel_netlink_ipsec_t *this)
2049 {
2050 netlink_buf_t request;
2051 struct nlmsghdr *hdr;
2052 struct xfrm_usersa_flush *flush;
2053
2054 memset(&request, 0, sizeof(request));
2055
2056 DBG2(DBG_KNL, "flushing all SAD entries");
2057
2058 hdr = (struct nlmsghdr*)request;
2059 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2060 hdr->nlmsg_type = XFRM_MSG_FLUSHSA;
2061 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
2062
2063 flush = (struct xfrm_usersa_flush*)NLMSG_DATA(hdr);
2064 flush->proto = IPSEC_PROTO_ANY;
2065
2066 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2067 {
2068 DBG1(DBG_KNL, "unable to flush SAD entries");
2069 return FAILED;
2070 }
2071 return SUCCESS;
2072 }
2073
2074 /**
2075 * Add or update a policy in the kernel.
2076 *
2077  * Note: The mutex has to be locked when entering this function;
2078  * it is unlocked in all cases before the function returns.
2079 */
2080 static status_t add_policy_internal(private_kernel_netlink_ipsec_t *this,
2081 policy_entry_t *policy, policy_sa_t *mapping, bool update)
2082 {
2083 netlink_buf_t request;
2084 policy_entry_t clone;
2085 ipsec_sa_t *ipsec = mapping->sa;
2086 struct xfrm_userpolicy_info *policy_info;
2087 struct nlmsghdr *hdr;
2088 int i;
2089
2090 	/* clone the policy, so we can look it up again after the mutex was released */
2091 memcpy(&clone, policy, sizeof(policy_entry_t));
2092
2093 memset(&request, 0, sizeof(request));
2094 hdr = (struct nlmsghdr*)request;
2095 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2096 hdr->nlmsg_type = update ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
2097 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));
2098
2099 policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2100 policy_info->sel = policy->sel;
2101 policy_info->dir = policy->direction;
2102
2103 	/* priority was calculated from the selector in get_priority(), smaller selector = higher priority */
2104 policy_info->priority = mapping->priority;
2105 policy_info->action = mapping->type != POLICY_DROP ? XFRM_POLICY_ALLOW
2106 : XFRM_POLICY_BLOCK;
2107 policy_info->share = XFRM_SHARE_ANY;
2108
2109 /* policies don't expire */
2110 policy_info->lft.soft_byte_limit = XFRM_INF;
2111 policy_info->lft.soft_packet_limit = XFRM_INF;
2112 policy_info->lft.hard_byte_limit = XFRM_INF;
2113 policy_info->lft.hard_packet_limit = XFRM_INF;
2114 policy_info->lft.soft_add_expires_seconds = 0;
2115 policy_info->lft.hard_add_expires_seconds = 0;
2116 policy_info->lft.soft_use_expires_seconds = 0;
2117 policy_info->lft.hard_use_expires_seconds = 0;
2118
2119 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);
2120
2121 if (mapping->type == POLICY_IPSEC)
2122 {
2123 struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
2124 struct {
2125 u_int8_t proto;
2126 bool use;
2127 } protos[] = {
2128 { IPPROTO_COMP, ipsec->cfg.ipcomp.transform != IPCOMP_NONE },
2129 { IPPROTO_ESP, ipsec->cfg.esp.use },
2130 { IPPROTO_AH, ipsec->cfg.ah.use },
2131 };
2132 ipsec_mode_t proto_mode = ipsec->cfg.mode;
2133
2134 rthdr->rta_type = XFRMA_TMPL;
2135 rthdr->rta_len = 0; /* actual length is set below */
2136
2137 for (i = 0; i < countof(protos); i++)
2138 {
2139 if (!protos[i].use)
2140 {
2141 continue;
2142 }
2143
2144 rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
2145 hdr->nlmsg_len += RTA_ALIGN(RTA_LENGTH(sizeof(struct xfrm_user_tmpl)));
2146 if (hdr->nlmsg_len > sizeof(request))
2147 {
2148 this->mutex->unlock(this->mutex);
2149 return FAILED;
2150 }
2151
2152 tmpl->reqid = ipsec->cfg.reqid;
2153 tmpl->id.proto = protos[i].proto;
2154 tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
2155 tmpl->mode = mode2kernel(proto_mode);
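			/* the IPComp template is optional for inbound/forward policies,
			 * as the peer may send small packets uncompressed */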
2156 tmpl->optional = protos[i].proto == IPPROTO_COMP &&
2157 policy->direction != POLICY_OUT;
2158 tmpl->family = ipsec->src->get_family(ipsec->src);
2159
2160 if (proto_mode == MODE_TUNNEL)
2161 { /* only for tunnel mode */
2162 host2xfrm(ipsec->src, &tmpl->saddr);
2163 host2xfrm(ipsec->dst, &tmpl->id.daddr);
2164 }
2165
2166 tmpl++;
2167
2168 /* use transport mode for other SAs */
2169 proto_mode = MODE_TRANSPORT;
2170 }
2171
2172 rthdr = XFRM_RTA_NEXT(rthdr);
2173 }
2174
2175 if (ipsec->mark.value)
2176 {
2177 struct xfrm_mark *mrk;
2178
2179 rthdr->rta_type = XFRMA_MARK;
2180 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2181
2182 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2183 if (hdr->nlmsg_len > sizeof(request))
2184 {
2185 this->mutex->unlock(this->mutex);
2186 return FAILED;
2187 }
2188
2189 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2190 mrk->v = ipsec->mark.value;
2191 mrk->m = ipsec->mark.mask;
2192 }
2193 this->mutex->unlock(this->mutex);
2194
2195 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2196 {
2197 return FAILED;
2198 }
2199
2200 	/* find the policy again, it might have been removed while the mutex was unlocked */
2201 this->mutex->lock(this->mutex);
2202 policy = this->policies->get(this->policies, &clone);
2203 if (!policy ||
2204 policy->used_by->find_first(policy->used_by,
2205 NULL, (void**)&mapping) != SUCCESS)
2206 { /* policy or mapping is already gone, ignore */
2207 this->mutex->unlock(this->mutex);
2208 return SUCCESS;
2209 }
2210
2211 /* install a route, if:
2212 * - this is a forward policy (to just get one for each child)
2213 * - we are in tunnel/BEET mode or install a bypass policy
2214 * - routing is not disabled via strongswan.conf
2215 */
2216 if (policy->direction == POLICY_FWD && this->install_routes &&
2217 (mapping->type != POLICY_IPSEC || ipsec->cfg.mode != MODE_TRANSPORT))
2218 {
2219 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)mapping;
2220 route_entry_t *route;
2221 host_t *iface;
2222
2223 INIT(route,
2224 .prefixlen = policy->sel.prefixlen_s,
2225 );
2226
2227 if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
2228 fwd->dst_ts, &route->src_ip) == SUCCESS)
2229 {
2230 /* get the nexthop to src (src as we are in POLICY_FWD) */
2231 route->gateway = hydra->kernel_interface->get_nexthop(
2232 hydra->kernel_interface, ipsec->src,
2233 ipsec->dst);
2234 route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
2235 memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
2236
2237 /* get the interface to install the route for. If we have a local
2238 * address, use it. Otherwise (for shunt policies) use the
2239 			 * route's source address. */
2240 iface = ipsec->dst;
2241 if (iface->is_anyaddr(iface))
2242 {
2243 iface = route->src_ip;
2244 }
2245 /* install route via outgoing interface */
2246 if (!hydra->kernel_interface->get_interface(hydra->kernel_interface,
2247 iface, &route->if_name))
2248 {
2249 this->mutex->unlock(this->mutex);
2250 route_entry_destroy(route);
2251 return SUCCESS;
2252 }
2253
2254 if (policy->route)
2255 {
2256 route_entry_t *old = policy->route;
2257 if (route_entry_equals(old, route))
2258 {
2259 this->mutex->unlock(this->mutex);
2260 route_entry_destroy(route);
2261 return SUCCESS;
2262 }
2263 /* uninstall previously installed route */
2264 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2265 old->dst_net, old->prefixlen, old->gateway,
2266 old->src_ip, old->if_name) != SUCCESS)
2267 {
2268 DBG1(DBG_KNL, "error uninstalling route installed with "
2269 "policy %R === %R %N", fwd->src_ts,
2270 fwd->dst_ts, policy_dir_names,
2271 policy->direction);
2272 }
2273 route_entry_destroy(old);
2274 policy->route = NULL;
2275 }
2276
2277 DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
2278 fwd->src_ts, route->gateway, route->src_ip, route->if_name);
2279 switch (hydra->kernel_interface->add_route(
2280 hydra->kernel_interface, route->dst_net,
2281 route->prefixlen, route->gateway,
2282 route->src_ip, route->if_name))
2283 {
2284 default:
2285 DBG1(DBG_KNL, "unable to install source route for %H",
2286 route->src_ip);
2287 					/* fall through */
2288 case ALREADY_DONE:
2289 /* route exists, do not uninstall */
2290 route_entry_destroy(route);
2291 break;
2292 case SUCCESS:
2293 /* cache the installed route */
2294 policy->route = route;
2295 break;
2296 }
2297 }
2298 else
2299 {
2300 free(route);
2301 }
2302 }
2303 this->mutex->unlock(this->mutex);
2304 return SUCCESS;
2305 }
2306
2307 METHOD(kernel_ipsec_t, add_policy, status_t,
2308 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
2309 traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
2310 policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
2311 mark_t mark, policy_priority_t priority)
2312 {
2313 policy_entry_t *policy, *current;
2314 policy_sa_t *assigned_sa, *current_sa;
2315 enumerator_t *enumerator;
2316 bool found = FALSE, update = TRUE;
2317
2318 /* create a policy */
2319 INIT(policy,
2320 .sel = ts2selector(src_ts, dst_ts),
2321 .mark = mark.value & mark.mask,
2322 .direction = direction,
2323 );
2324
2325 	/* find the policy that matches EXACTLY */
2326 this->mutex->lock(this->mutex);
2327 current = this->policies->get(this->policies, policy);
2328 if (current)
2329 {
2330 /* use existing policy */
2331 DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%08x) "
2332 "already exists, increasing refcount",
2333 src_ts, dst_ts, policy_dir_names, direction,
2334 mark.value, mark.mask);
2335 policy_entry_destroy(this, policy);
2336 policy = current;
2337 found = TRUE;
2338 }
2339 else
2340 { /* use the new one, if we have no such policy */
2341 policy->used_by = linked_list_create();
2342 this->policies->put(this->policies, policy, policy);
2343 }
2344
2345 /* cache the assigned IPsec SA */
2346 assigned_sa = policy_sa_create(this, direction, type, src, dst, src_ts,
2347 dst_ts, mark, sa);
2348 assigned_sa->priority = get_priority(policy, priority);
2349
2350 if (this->policy_history)
2351 { /* insert the SA according to its priority */
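		/* the list is kept sorted by priority; the kernel policy only needs an
		 * update if the new mapping ends up at the head of the list */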
2352 enumerator = policy->used_by->create_enumerator(policy->used_by);
2353 while (enumerator->enumerate(enumerator, (void**)&current_sa))
2354 {
2355 if (current_sa->priority >= assigned_sa->priority)
2356 {
2357 break;
2358 }
2359 update = FALSE;
2360 }
2361 policy->used_by->insert_before(policy->used_by, enumerator,
2362 assigned_sa);
2363 enumerator->destroy(enumerator);
2364 }
2365 else
2366 { /* simply insert it last and only update if it is not installed yet */
2367 policy->used_by->insert_last(policy->used_by, assigned_sa);
2368 update = !found;
2369 }
2370
2371 if (!update)
2372 	{ /* don't update the policy if the new mapping has a lower precedence
2373 		 * (a higher priority value) than the currently installed one */
2374 this->mutex->unlock(this->mutex);
2375 return SUCCESS;
2376 }
2377
2378 DBG2(DBG_KNL, "%s policy %R === %R %N (mark %u/0x%08x)",
2379 found ? "updating" : "adding", src_ts, dst_ts,
2380 policy_dir_names, direction, mark.value, mark.mask);
2381
2382 if (add_policy_internal(this, policy, assigned_sa, found) != SUCCESS)
2383 {
2384 DBG1(DBG_KNL, "unable to %s policy %R === %R %N",
2385 found ? "update" : "add", src_ts, dst_ts,
2386 policy_dir_names, direction);
2387 return FAILED;
2388 }
2389 return SUCCESS;
2390 }
2391
2392 METHOD(kernel_ipsec_t, query_policy, status_t,
2393 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2394 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
2395 u_int32_t *use_time)
2396 {
2397 netlink_buf_t request;
2398 struct nlmsghdr *out = NULL, *hdr;
2399 struct xfrm_userpolicy_id *policy_id;
2400 struct xfrm_userpolicy_info *policy = NULL;
2401 size_t len;
2402
2403 memset(&request, 0, sizeof(request));
2404
2405 DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%08x)",
2406 src_ts, dst_ts, policy_dir_names, direction,
2407 mark.value, mark.mask);
2408
2409 hdr = (struct nlmsghdr*)request;
2410 hdr->nlmsg_flags = NLM_F_REQUEST;
2411 hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
2412 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2413
2414 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2415 policy_id->sel = ts2selector(src_ts, dst_ts);
2416 policy_id->dir = direction;
2417
2418 if (mark.value)
2419 {
2420 struct xfrm_mark *mrk;
2421 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2422
2423 rthdr->rta_type = XFRMA_MARK;
2424 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2425
2426 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2427 if (hdr->nlmsg_len > sizeof(request))
2428 {
2429 return FAILED;
2430 }
2431
2432 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2433 mrk->v = mark.value;
2434 mrk->m = mark.mask;
2435 }
2436
2437 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
2438 {
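		/* look for the XFRM_MSG_NEWPOLICY payload in the reply, it carries the
		 * policy together with its current lifetime/usage counters */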
2439 hdr = out;
2440 while (NLMSG_OK(hdr, len))
2441 {
2442 switch (hdr->nlmsg_type)
2443 {
2444 case XFRM_MSG_NEWPOLICY:
2445 {
2446 policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2447 break;
2448 }
2449 case NLMSG_ERROR:
2450 {
2451 struct nlmsgerr *err = NLMSG_DATA(hdr);
2452 DBG1(DBG_KNL, "querying policy failed: %s (%d)",
2453 strerror(-err->error), -err->error);
2454 break;
2455 }
2456 default:
2457 hdr = NLMSG_NEXT(hdr, len);
2458 continue;
2459 case NLMSG_DONE:
2460 break;
2461 }
2462 break;
2463 }
2464 }
2465
2466 if (policy == NULL)
2467 {
2468 DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
2469 policy_dir_names, direction);
2470 free(out);
2471 return FAILED;
2472 }
2473
2474 if (policy->curlft.use_time)
2475 {
2476 /* we need the monotonic time, but the kernel returns system time. */
2477 *use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
2478 }
2479 else
2480 {
2481 *use_time = 0;
2482 }
2483
2484 free(out);
2485 return SUCCESS;
2486 }
2487
2488 METHOD(kernel_ipsec_t, del_policy, status_t,
2489 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2490 traffic_selector_t *dst_ts, policy_dir_t direction, u_int32_t reqid,
2491 mark_t mark, policy_priority_t prio)
2492 {
2493 policy_entry_t *current, policy;
2494 enumerator_t *enumerator;
2495 policy_sa_t *mapping;
2496 netlink_buf_t request;
2497 struct nlmsghdr *hdr;
2498 struct xfrm_userpolicy_id *policy_id;
2499 bool is_installed = TRUE;
2500 u_int32_t priority;
2501
2502 DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x)",
2503 src_ts, dst_ts, policy_dir_names, direction,
2504 mark.value, mark.mask);
2505
2506 /* create a policy */
2507 memset(&policy, 0, sizeof(policy_entry_t));
2508 policy.sel = ts2selector(src_ts, dst_ts);
2509 policy.mark = mark.value & mark.mask;
2510 policy.direction = direction;
2511
2512 /* find the policy */
2513 this->mutex->lock(this->mutex);
2514 current = this->policies->get(this->policies, &policy);
2515 if (!current)
2516 {
2517 if (mark.value)
2518 {
2519 DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x) "
2520 "failed, not found", src_ts, dst_ts, policy_dir_names,
2521 direction, mark.value, mark.mask);
2522 }
2523 else
2524 {
2525 DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
2526 src_ts, dst_ts, policy_dir_names, direction);
2527 }
2528 this->mutex->unlock(this->mutex);
2529 return NOT_FOUND;
2530 }
2531
2532 if (this->policy_history)
2533 { /* remove mapping to SA by reqid and priority */
2534 priority = get_priority(current, prio);
2535 enumerator = current->used_by->create_enumerator(current->used_by);
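		/* is_installed stays TRUE only if the removed mapping was the first in
		 * the list, i.e. the one currently installed in the kernel */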
2536 while (enumerator->enumerate(enumerator, (void**)&mapping))
2537 {
2538 if (reqid == mapping->sa->cfg.reqid &&
2539 priority == mapping->priority)
2540 {
2541 current->used_by->remove_at(current->used_by, enumerator);
2542 policy_sa_destroy(mapping, &direction, this);
2543 break;
2544 }
2545 is_installed = FALSE;
2546 }
2547 enumerator->destroy(enumerator);
2548 }
2549 else
2550 { /* remove one of the SAs but don't update the policy */
2551 current->used_by->remove_last(current->used_by, (void**)&mapping);
2552 policy_sa_destroy(mapping, &direction, this);
2553 is_installed = FALSE;
2554 }
2555
2556 if (current->used_by->get_count(current->used_by) > 0)
2557 { /* policy is used by more SAs, keep in kernel */
2558 DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
2559 if (!is_installed)
2560 { /* no need to update as the policy was not installed for this SA */
2561 this->mutex->unlock(this->mutex);
2562 return SUCCESS;
2563 }
2564
2565 DBG2(DBG_KNL, "updating policy %R === %R %N (mark %u/0x%08x)",
2566 src_ts, dst_ts, policy_dir_names, direction,
2567 mark.value, mark.mask);
2568
2569 current->used_by->get_first(current->used_by, (void**)&mapping);
2570 if (add_policy_internal(this, current, mapping, TRUE) != SUCCESS)
2571 {
2572 DBG1(DBG_KNL, "unable to update policy %R === %R %N",
2573 src_ts, dst_ts, policy_dir_names, direction);
2574 return FAILED;
2575 }
2576 return SUCCESS;
2577 }
2578
2579 memset(&request, 0, sizeof(request));
2580
2581 hdr = (struct nlmsghdr*)request;
2582 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2583 hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
2584 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2585
2586 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2587 policy_id->sel = current->sel;
2588 policy_id->dir = direction;
2589
2590 if (mark.value)
2591 {
2592 struct xfrm_mark *mrk;
2593 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2594
2595 rthdr->rta_type = XFRMA_MARK;
2596 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2597 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2598 if (hdr->nlmsg_len > sizeof(request))
2599 {
2600 this->mutex->unlock(this->mutex);
2601 return FAILED;
2602 }
2603
2604 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2605 mrk->v = mark.value;
2606 mrk->m = mark.mask;
2607 }
2608
2609 if (current->route)
2610 {
2611 route_entry_t *route = current->route;
2612 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2613 route->dst_net, route->prefixlen, route->gateway,
2614 route->src_ip, route->if_name) != SUCCESS)
2615 {
2616 DBG1(DBG_KNL, "error uninstalling route installed with "
2617 "policy %R === %R %N", src_ts, dst_ts,
2618 policy_dir_names, direction);
2619 }
2620 }
2621
2622 this->policies->remove(this->policies, current);
2623 policy_entry_destroy(this, current);
2624 this->mutex->unlock(this->mutex);
2625
2626 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2627 {
2628 if (mark.value)
2629 {
2630 DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
2631 "(mark %u/0x%08x)", src_ts, dst_ts, policy_dir_names,
2632 direction, mark.value, mark.mask);
2633 }
2634 else
2635 {
2636 DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
2637 src_ts, dst_ts, policy_dir_names, direction);
2638 }
2639 return FAILED;
2640 }
2641 return SUCCESS;
2642 }
2643
2644 METHOD(kernel_ipsec_t, flush_policies, status_t,
2645 private_kernel_netlink_ipsec_t *this)
2646 {
2647 netlink_buf_t request;
2648 struct nlmsghdr *hdr;
2649
2650 memset(&request, 0, sizeof(request));
2651
2652 DBG2(DBG_KNL, "flushing all policies from SPD");
2653
2654 hdr = (struct nlmsghdr*)request;
2655 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2656 hdr->nlmsg_type = XFRM_MSG_FLUSHPOLICY;
2657 hdr->nlmsg_len = NLMSG_LENGTH(0); /* no data associated */
2658
2659 /* by adding an rtattr of type XFRMA_POLICY_TYPE we could restrict this
2660 * to main or sub policies (default is main) */
2661
2662 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2663 {
2664 DBG1(DBG_KNL, "unable to flush SPD entries");
2665 return FAILED;
2666 }
2667 return SUCCESS;
2668 }
2669
2671 METHOD(kernel_ipsec_t, bypass_socket, bool,
2672 private_kernel_netlink_ipsec_t *this, int fd, int family)
2673 {
2674 struct xfrm_userpolicy_info policy;
2675 u_int sol, ipsec_policy;
2676
2677 switch (family)
2678 {
2679 case AF_INET:
2680 sol = SOL_IP;
2681 ipsec_policy = IP_XFRM_POLICY;
2682 break;
2683 case AF_INET6:
2684 sol = SOL_IPV6;
2685 ipsec_policy = IPV6_XFRM_POLICY;
2686 break;
2687 default:
2688 return FALSE;
2689 }
2690
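	/* install an ALLOW policy without templates in both directions, so traffic
	 * on this socket (e.g. IKE) bypasses the IPsec policies in the SPD */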
2691 memset(&policy, 0, sizeof(policy));
2692 policy.action = XFRM_POLICY_ALLOW;
2693 policy.sel.family = family;
2694
2695 policy.dir = XFRM_POLICY_OUT;
2696 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2697 {
2698 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2699 strerror(errno));
2700 return FALSE;
2701 }
2702 policy.dir = XFRM_POLICY_IN;
2703 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2704 {
2705 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2706 strerror(errno));
2707 return FALSE;
2708 }
2709 return TRUE;
2710 }
2711
2712 METHOD(kernel_ipsec_t, enable_udp_decap, bool,
2713 private_kernel_netlink_ipsec_t *this, int fd, int family, u_int16_t port)
2714 {
2715 int type = UDP_ENCAP_ESPINUDP;
2716
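	/* enable ESP-in-UDP decapsulation (RFC 3948) on this socket, the kernel
	 * then handles NAT-T encapsulated ESP packets itself */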
2717 if (setsockopt(fd, SOL_UDP, UDP_ENCAP, &type, sizeof(type)) < 0)
2718 {
2719 DBG1(DBG_KNL, "unable to set UDP_ENCAP: %s", strerror(errno));
2720 return FALSE;
2721 }
2722 return TRUE;
2723 }
2724
2725 METHOD(kernel_ipsec_t, destroy, void,
2726 private_kernel_netlink_ipsec_t *this)
2727 {
2728 enumerator_t *enumerator;
2729 policy_entry_t *policy;
2730
2731 if (this->socket_xfrm_events > 0)
2732 {
2733 close(this->socket_xfrm_events);
2734 }
2735 DESTROY_IF(this->socket_xfrm);
2736 enumerator = this->policies->create_enumerator(this->policies);
2737 while (enumerator->enumerate(enumerator, &policy, &policy))
2738 {
2739 policy_entry_destroy(this, policy);
2740 }
2741 enumerator->destroy(enumerator);
2742 this->policies->destroy(this->policies);
2743 this->sas->destroy(this->sas);
2744 this->mutex->destroy(this->mutex);
2745 free(this);
2746 }
2747
2748 /*
2749 * Described in header.
2750 */
2751 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2752 {
2753 private_kernel_netlink_ipsec_t *this;
2754 bool register_for_events = TRUE;
2755 int fd;
2756
2757 INIT(this,
2758 .public = {
2759 .interface = {
2760 .get_features = _get_features,
2761 .get_spi = _get_spi,
2762 .get_cpi = _get_cpi,
2763 .add_sa = _add_sa,
2764 .update_sa = _update_sa,
2765 .query_sa = _query_sa,
2766 .del_sa = _del_sa,
2767 .flush_sas = _flush_sas,
2768 .add_policy = _add_policy,
2769 .query_policy = _query_policy,
2770 .del_policy = _del_policy,
2771 .flush_policies = _flush_policies,
2772 .bypass_socket = _bypass_socket,
2773 .enable_udp_decap = _enable_udp_decap,
2774 .destroy = _destroy,
2775 },
2776 },
2777 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2778 (hashtable_equals_t)policy_equals, 32),
2779 .sas = hashtable_create((hashtable_hash_t)ipsec_sa_hash,
2780 (hashtable_equals_t)ipsec_sa_equals, 32),
2781 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2782 .policy_history = TRUE,
2783 .install_routes = lib->settings->get_bool(lib->settings,
2784 "%s.install_routes", TRUE, hydra->daemon),
2785 .replay_window = lib->settings->get_int(lib->settings,
2786 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2787 );
2788
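	/* number of 32-bit words needed to cover the configured replay window */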
2789 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2790 (sizeof(u_int32_t) * 8);
2791
2792 if (streq(hydra->daemon, "pluto"))
2793 { /* no routes for pluto, they are installed via updown script */
2794 this->install_routes = FALSE;
2795 /* no policy history for pluto */
2796 this->policy_history = FALSE;
2797 }
2798 else if (streq(hydra->daemon, "starter"))
2799 { /* starter has no threads, so we do not register for kernel events */
2800 register_for_events = FALSE;
2801 }
2802
2803 	/* extend the lifetime of allocated SPIs/acquire states to 165s, roughly the total IKE retransmission timeout */
2804 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2805 if (fd > 0)
2806 {
2807 ignore_result(write(fd, "165", 3));
2808 close(fd);
2809 }
2810
2811 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2812 if (!this->socket_xfrm)
2813 {
2814 destroy(this);
2815 return NULL;
2816 }
2817
2818 if (register_for_events)
2819 {
2820 struct sockaddr_nl addr;
2821
2822 memset(&addr, 0, sizeof(addr));
2823 addr.nl_family = AF_NETLINK;
2824
2825 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2826 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2827 if (this->socket_xfrm_events <= 0)
2828 {
2829 DBG1(DBG_KNL, "unable to create XFRM event socket");
2830 destroy(this);
2831 return NULL;
2832 }
2833 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2834 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2835 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2836 {
2837 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2838 destroy(this);
2839 return NULL;
2840 }
2841 lib->processor->queue_job(lib->processor,
2842 (job_t*)callback_job_create_with_prio(
2843 (callback_job_cb_t)receive_events, this, NULL,
2844 (callback_job_cancel_t)return_false, JOB_PRIO_CRITICAL));
2845 }
2846
2847