Algorithm names are not always static anymore, avoid string overflows
1 /*
2 * Copyright (C) 2006-2012 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <utils/hashtable.h>
43 #include <utils/linked_list.h>
44 #include <processing/jobs/callback_job.h>
45
46 /** Required for Linux 2.6.26 kernel and later */
47 #ifndef XFRM_STATE_AF_UNSPEC
48 #define XFRM_STATE_AF_UNSPEC 32
49 #endif
50
51 /** From linux/in.h */
52 #ifndef IP_XFRM_POLICY
53 #define IP_XFRM_POLICY 17
54 #endif
55
56 /** Missing on uclibc */
57 #ifndef IPV6_XFRM_POLICY
58 #define IPV6_XFRM_POLICY 34
59 #endif /*IPV6_XFRM_POLICY*/
60
61 /* from linux/udp.h */
62 #ifndef UDP_ENCAP
63 #define UDP_ENCAP 100
64 #endif
65
66 #ifndef UDP_ENCAP_ESPINUDP
67 #define UDP_ENCAP_ESPINUDP 2
68 #endif
69
70 /* this is not defined on some platforms */
71 #ifndef SOL_UDP
72 #define SOL_UDP IPPROTO_UDP
73 #endif
74
75 /** Default priority of installed policies */
76 #define PRIO_BASE 512
77
78 /** Default replay window size, if not set using charon.replay_window */
79 #define DEFAULT_REPLAY_WINDOW 32
80
81 /**
82  * Map a byte/packet limit of zero (no limit configured) to XFRM_INF
83 */
84 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
85
86 /**
87 * Create ORable bitfield of XFRM NL groups
88 */
89 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
90
91 /**
92 * Returns a pointer to the first rtattr following the nlmsghdr *nlh and the
93 * 'usual' netlink data x like 'struct xfrm_usersa_info'
94 */
95 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + \
96 NLMSG_ALIGN(sizeof(x))))
97 /**
98 * Returns a pointer to the next rtattr following rta.
99 * !!! Do not use this to parse messages. Use RTA_NEXT and RTA_OK instead !!!
100 */
101 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + \
102 RTA_ALIGN((rta)->rta_len)))
103 /**
104 * Returns the total size of attached rta data
105 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
106 */
107 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
108
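/*
 * Typical request construction with the three macros above (an illustrative
 * sketch only, mirroring what add_sa() and friends do further down; the
 * variable names are placeholders):
 *
 *   struct rtattr *rta = XFRM_RTA(hdr, struct xfrm_usersa_info);
 *   rta->rta_type = XFRMA_MARK;
 *   rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
 *   hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
 *   ... fill RTA_DATA(rta) ...
 *   rta = XFRM_RTA_NEXT(rta);
 *
 * XFRM_RTA_NEXT() is only meant for building requests; replies are parsed
 * with RTA_OK()/RTA_NEXT(), as the warning above says.
 */
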
109 typedef struct kernel_algorithm_t kernel_algorithm_t;
110
111 /**
112 * Mapping of IKEv2 kernel identifier to linux crypto API names
113 */
114 struct kernel_algorithm_t {
115 /**
116 * Identifier specified in IKEv2
117 */
118 int ikev2;
119
120 /**
121 * Name of the algorithm in linux crypto API
122 */
123 char *name;
124 };
125
126 ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
127 "XFRM_MSG_NEWSA",
128 "XFRM_MSG_DELSA",
129 "XFRM_MSG_GETSA",
130 "XFRM_MSG_NEWPOLICY",
131 "XFRM_MSG_DELPOLICY",
132 "XFRM_MSG_GETPOLICY",
133 "XFRM_MSG_ALLOCSPI",
134 "XFRM_MSG_ACQUIRE",
135 "XFRM_MSG_EXPIRE",
136 "XFRM_MSG_UPDPOLICY",
137 "XFRM_MSG_UPDSA",
138 "XFRM_MSG_POLEXPIRE",
139 "XFRM_MSG_FLUSHSA",
140 "XFRM_MSG_FLUSHPOLICY",
141 "XFRM_MSG_NEWAE",
142 "XFRM_MSG_GETAE",
143 "XFRM_MSG_REPORT",
144 "XFRM_MSG_MIGRATE",
145 "XFRM_MSG_NEWSADINFO",
146 "XFRM_MSG_GETSADINFO",
147 "XFRM_MSG_NEWSPDINFO",
148 "XFRM_MSG_GETSPDINFO",
149 "XFRM_MSG_MAPPING"
150 );
151
152 ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_REPLAY_ESN_VAL,
153 "XFRMA_UNSPEC",
154 "XFRMA_ALG_AUTH",
155 "XFRMA_ALG_CRYPT",
156 "XFRMA_ALG_COMP",
157 "XFRMA_ENCAP",
158 "XFRMA_TMPL",
159 "XFRMA_SA",
160 "XFRMA_POLICY",
161 "XFRMA_SEC_CTX",
162 "XFRMA_LTIME_VAL",
163 "XFRMA_REPLAY_VAL",
164 "XFRMA_REPLAY_THRESH",
165 "XFRMA_ETIMER_THRESH",
166 "XFRMA_SRCADDR",
167 "XFRMA_COADDR",
168 "XFRMA_LASTUSED",
169 "XFRMA_POLICY_TYPE",
170 "XFRMA_MIGRATE",
171 "XFRMA_ALG_AEAD",
172 "XFRMA_KMADDRESS",
173 "XFRMA_ALG_AUTH_TRUNC",
174 "XFRMA_MARK",
175 "XFRMA_TFCPAD",
176 "XFRMA_REPLAY_ESN_VAL",
177 );
178
179 #define END_OF_LIST -1
180
181 /**
182 * Algorithms for encryption
183 */
184 static kernel_algorithm_t encryption_algs[] = {
185 /* {ENCR_DES_IV64, "***" }, */
186 {ENCR_DES, "des" },
187 {ENCR_3DES, "des3_ede" },
188 /* {ENCR_RC5, "***" }, */
189 /* {ENCR_IDEA, "***" }, */
190 {ENCR_CAST, "cast128" },
191 {ENCR_BLOWFISH, "blowfish" },
192 /* {ENCR_3IDEA, "***" }, */
193 /* {ENCR_DES_IV32, "***" }, */
194 {ENCR_NULL, "cipher_null" },
195 {ENCR_AES_CBC, "aes" },
196 {ENCR_AES_CTR, "rfc3686(ctr(aes))" },
197 {ENCR_AES_CCM_ICV8, "rfc4309(ccm(aes))" },
198 {ENCR_AES_CCM_ICV12, "rfc4309(ccm(aes))" },
199 {ENCR_AES_CCM_ICV16, "rfc4309(ccm(aes))" },
200 {ENCR_AES_GCM_ICV8, "rfc4106(gcm(aes))" },
201 {ENCR_AES_GCM_ICV12, "rfc4106(gcm(aes))" },
202 {ENCR_AES_GCM_ICV16, "rfc4106(gcm(aes))" },
203 {ENCR_NULL_AUTH_AES_GMAC, "rfc4543(gcm(aes))" },
204 {ENCR_CAMELLIA_CBC, "cbc(camellia)" },
205 /* {ENCR_CAMELLIA_CTR, "***" }, */
206 /* {ENCR_CAMELLIA_CCM_ICV8, "***" }, */
207 /* {ENCR_CAMELLIA_CCM_ICV12, "***" }, */
208 /* {ENCR_CAMELLIA_CCM_ICV16, "***" }, */
209 {ENCR_SERPENT_CBC, "serpent" },
210 {ENCR_TWOFISH_CBC, "twofish" },
211 {END_OF_LIST, NULL }
212 };
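
/*
 * Note that all AES-CCM/GCM ICV length variants map to the same kernel
 * transform name; the ICV length is not encoded in the name but passed
 * separately via xfrm_algo_aead.alg_icv_len when the SA is installed
 * (see add_sa() below).
 */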
213
214 /**
215 * Algorithms for integrity protection
216 */
217 static kernel_algorithm_t integrity_algs[] = {
218 {AUTH_HMAC_MD5_96, "md5" },
219 {AUTH_HMAC_MD5_128, "hmac(md5)" },
220 {AUTH_HMAC_SHA1_96, "sha1" },
221 {AUTH_HMAC_SHA1_160, "hmac(sha1)" },
222 {AUTH_HMAC_SHA2_256_96, "sha256" },
223 {AUTH_HMAC_SHA2_256_128, "hmac(sha256)" },
224 {AUTH_HMAC_SHA2_384_192, "hmac(sha384)" },
225 {AUTH_HMAC_SHA2_512_256, "hmac(sha512)" },
226 /* {AUTH_DES_MAC, "***" }, */
227 /* {AUTH_KPDK_MD5, "***" }, */
228 {AUTH_AES_XCBC_96, "xcbc(aes)" },
229 {END_OF_LIST, NULL }
230 };
231
232 /**
233 * Algorithms for IPComp
234 */
235 static kernel_algorithm_t compression_algs[] = {
236 /* {IPCOMP_OUI, "***" }, */
237 {IPCOMP_DEFLATE, "deflate" },
238 {IPCOMP_LZS, "lzs" },
239 {IPCOMP_LZJH, "lzjh" },
240 {END_OF_LIST, NULL }
241 };
242
243 /**
244  * Look up the Linux crypto API name for a given IKEv2 algorithm identifier
245 */
246 static char* lookup_algorithm(transform_type_t type, int ikev2)
247 {
248 kernel_algorithm_t *list;
249 char *name = NULL;
250
251 switch (type)
252 {
253 case ENCRYPTION_ALGORITHM:
254 list = encryption_algs;
255 break;
256 case INTEGRITY_ALGORITHM:
257 list = integrity_algs;
258 break;
259 case COMPRESSION_ALGORITHM:
260 list = compression_algs;
261 break;
262 default:
263 return NULL;
264 }
265 while (list->ikev2 != END_OF_LIST)
266 {
267 if (list->ikev2 == ikev2)
268 {
269 return list->name;
270 }
271 list++;
272 }
273 hydra->kernel_interface->lookup_algorithm(hydra->kernel_interface, ikev2,
274 type, NULL, &name);
275 return name;
276 }
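
/*
 * Illustrative use of lookup_algorithm() (a sketch, mirroring add_sa()
 * below): names from the static tables are string literals, but names
 * resolved through the kernel interface registry are not guaranteed to be
 * static, which is why callers copy them into the fixed-size alg_name
 * buffer with a bounded strncpy() and explicit NUL termination:
 *
 *   char *name = lookup_algorithm(ENCRYPTION_ALGORITHM, ENCR_AES_CBC);
 *   if (name)
 *   {
 *       strncpy(algo->alg_name, name, sizeof(algo->alg_name));
 *       algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
 *   }
 */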
277
278 typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;
279
280 /**
281 * Private variables and functions of kernel_netlink class.
282 */
283 struct private_kernel_netlink_ipsec_t {
284 /**
285 * Public part of the kernel_netlink_t object
286 */
287 kernel_netlink_ipsec_t public;
288
289 /**
290 * Mutex to lock access to installed policies
291 */
292 mutex_t *mutex;
293
294 /**
295 * Hash table of installed policies (policy_entry_t)
296 */
297 hashtable_t *policies;
298
299 /**
300 * Hash table of IPsec SAs using policies (ipsec_sa_t)
301 */
302 hashtable_t *sas;
303
304 /**
305 * Netlink xfrm socket (IPsec)
306 */
307 netlink_socket_t *socket_xfrm;
308
309 /**
310 * Netlink xfrm socket to receive acquire and expire events
311 */
312 int socket_xfrm_events;
313
314 /**
315 * Whether to install routes along policies
316 */
317 bool install_routes;
318
319 /**
320 * Whether to track the history of a policy
321 */
322 bool policy_history;
323
324 /**
325 * Size of the replay window, in packets (= bits)
326 */
327 u_int32_t replay_window;
328
329 /**
330 * Size of the replay window bitmap, in number of __u32 blocks
331 */
332 u_int32_t replay_bmp;
333 };
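
/*
 * replay_bmp is derived from replay_window when the plugin is created (not
 * shown in this excerpt); assuming the usual derivation it is the number of
 * 32-bit words needed to hold replay_window bits, i.e. roughly
 * (replay_window + 31) / 32, so a 128 packet window needs a 4 word bitmap.
 */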
334
335 typedef struct route_entry_t route_entry_t;
336
337 /**
338 * Installed routing entry
339 */
340 struct route_entry_t {
341 /** Name of the interface the route is bound to */
342 char *if_name;
343
344 /** Source ip of the route */
345 host_t *src_ip;
346
347 /** Gateway for this route */
348 host_t *gateway;
349
350 /** Destination net */
351 chunk_t dst_net;
352
353 /** Destination net prefixlen */
354 u_int8_t prefixlen;
355 };
356
357 /**
358 * Destroy a route_entry_t object
359 */
360 static void route_entry_destroy(route_entry_t *this)
361 {
362 free(this->if_name);
363 this->src_ip->destroy(this->src_ip);
364 DESTROY_IF(this->gateway);
365 chunk_free(&this->dst_net);
366 free(this);
367 }
368
369 /**
370 * Compare two route_entry_t objects
371 */
372 static bool route_entry_equals(route_entry_t *a, route_entry_t *b)
373 {
374 return a->if_name && b->if_name && streq(a->if_name, b->if_name) &&
375 a->src_ip->ip_equals(a->src_ip, b->src_ip) &&
376 a->gateway->ip_equals(a->gateway, b->gateway) &&
377 chunk_equals(a->dst_net, b->dst_net) && a->prefixlen == b->prefixlen;
378 }
379
380 typedef struct ipsec_sa_t ipsec_sa_t;
381
382 /**
383 * IPsec SA assigned to a policy.
384 */
385 struct ipsec_sa_t {
386 /** Source address of this SA */
387 host_t *src;
388
389 /** Destination address of this SA */
390 host_t *dst;
391
392 /** Optional mark */
393 mark_t mark;
394
395 /** Description of this SA */
396 ipsec_sa_cfg_t cfg;
397
398 /** Reference count for this SA */
399 refcount_t refcount;
400 };
401
402 /**
403 * Hash function for ipsec_sa_t objects
404 */
405 static u_int ipsec_sa_hash(ipsec_sa_t *sa)
406 {
407 return chunk_hash_inc(sa->src->get_address(sa->src),
408 chunk_hash_inc(sa->dst->get_address(sa->dst),
409 chunk_hash_inc(chunk_from_thing(sa->mark),
410 chunk_hash(chunk_from_thing(sa->cfg)))));
411 }
412
413 /**
414 * Equality function for ipsec_sa_t objects
415 */
416 static bool ipsec_sa_equals(ipsec_sa_t *sa, ipsec_sa_t *other_sa)
417 {
418 return sa->src->ip_equals(sa->src, other_sa->src) &&
419 sa->dst->ip_equals(sa->dst, other_sa->dst) &&
420 memeq(&sa->mark, &other_sa->mark, sizeof(mark_t)) &&
421 memeq(&sa->cfg, &other_sa->cfg, sizeof(ipsec_sa_cfg_t));
422 }
423
424 /**
425 * Allocate or reference an IPsec SA object
426 */
427 static ipsec_sa_t *ipsec_sa_create(private_kernel_netlink_ipsec_t *this,
428 host_t *src, host_t *dst, mark_t mark,
429 ipsec_sa_cfg_t *cfg)
430 {
431 ipsec_sa_t *sa, *found;
432 INIT(sa,
433 .src = src,
434 .dst = dst,
435 .mark = mark,
436 .cfg = *cfg,
437 );
438 found = this->sas->get(this->sas, sa);
439 if (!found)
440 {
441 sa->src = src->clone(src);
442 sa->dst = dst->clone(dst);
443 this->sas->put(this->sas, sa, sa);
444 }
445 else
446 {
447 free(sa);
448 sa = found;
449 }
450 ref_get(&sa->refcount);
451 return sa;
452 }
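
/*
 * SAs are shared: if an identical ipsec_sa_t (same addresses, mark and
 * config) is already installed, only its reference count is increased, so
 * several policies can point at a single entry in this->sas.
 */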
453
454 /**
455 * Release and destroy an IPsec SA object
456 */
457 static void ipsec_sa_destroy(private_kernel_netlink_ipsec_t *this,
458 ipsec_sa_t *sa)
459 {
460 if (ref_put(&sa->refcount))
461 {
462 this->sas->remove(this->sas, sa);
463 DESTROY_IF(sa->src);
464 DESTROY_IF(sa->dst);
465 free(sa);
466 }
467 }
468
469 typedef struct policy_sa_t policy_sa_t;
470 typedef struct policy_sa_fwd_t policy_sa_fwd_t;
471
472 /**
473 * Mapping between a policy and an IPsec SA.
474 */
475 struct policy_sa_t {
476 /** Priority assigned to the policy when installed with this SA */
477 u_int32_t priority;
478
479 /** Type of the policy */
480 policy_type_t type;
481
482 /** Assigned SA */
483 ipsec_sa_t *sa;
484 };
485
486 /**
487 * For forward policies we also cache the traffic selectors in order to install
488 * the route.
489 */
490 struct policy_sa_fwd_t {
491 /** Generic interface */
492 policy_sa_t generic;
493
494 /** Source traffic selector of this policy */
495 traffic_selector_t *src_ts;
496
497 /** Destination traffic selector of this policy */
498 traffic_selector_t *dst_ts;
499 };
500
501 /**
502 * Create a policy_sa(_fwd)_t object
503 */
504 static policy_sa_t *policy_sa_create(private_kernel_netlink_ipsec_t *this,
505 policy_dir_t dir, policy_type_t type, host_t *src, host_t *dst,
506 traffic_selector_t *src_ts, traffic_selector_t *dst_ts, mark_t mark,
507 ipsec_sa_cfg_t *cfg)
508 {
509 policy_sa_t *policy;
510
511 if (dir == POLICY_FWD)
512 {
513 policy_sa_fwd_t *fwd;
514 INIT(fwd,
515 .src_ts = src_ts->clone(src_ts),
516 .dst_ts = dst_ts->clone(dst_ts),
517 );
518 policy = &fwd->generic;
519 }
520 else
521 {
522 INIT(policy, .priority = 0);
523 }
524 policy->type = type;
525 policy->sa = ipsec_sa_create(this, src, dst, mark, cfg);
526 return policy;
527 }
528
529 /**
530 * Destroy a policy_sa(_fwd)_t object
531 */
532 static void policy_sa_destroy(policy_sa_t *policy, policy_dir_t *dir,
533 private_kernel_netlink_ipsec_t *this)
534 {
535 if (*dir == POLICY_FWD)
536 {
537 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)policy;
538 fwd->src_ts->destroy(fwd->src_ts);
539 fwd->dst_ts->destroy(fwd->dst_ts);
540 }
541 ipsec_sa_destroy(this, policy->sa);
542 free(policy);
543 }
544
545 typedef struct policy_entry_t policy_entry_t;
546
547 /**
548 * Installed kernel policy.
549 */
550 struct policy_entry_t {
551
552 /** Direction of this policy: in, out, forward */
553 u_int8_t direction;
554
555 /** Parameters of installed policy */
556 struct xfrm_selector sel;
557
558 /** Optional mark */
559 u_int32_t mark;
560
561 /** Associated route installed for this policy */
562 route_entry_t *route;
563
564 /** List of SAs this policy is used by, ordered by priority */
565 linked_list_t *used_by;
566 };
567
568 /**
569 * Destroy a policy_entry_t object
570 */
571 static void policy_entry_destroy(private_kernel_netlink_ipsec_t *this,
572 policy_entry_t *policy)
573 {
574 if (policy->route)
575 {
576 route_entry_destroy(policy->route);
577 }
578 if (policy->used_by)
579 {
580 policy->used_by->invoke_function(policy->used_by,
581 (linked_list_invoke_t)policy_sa_destroy,
582 &policy->direction, this);
583 policy->used_by->destroy(policy->used_by);
584 }
585 free(policy);
586 }
587
588 /**
589 * Hash function for policy_entry_t objects
590 */
591 static u_int policy_hash(policy_entry_t *key)
592 {
593 chunk_t chunk = chunk_create((void*)&key->sel,
594 sizeof(struct xfrm_selector) + sizeof(u_int32_t));
595 return chunk_hash(chunk);
596 }
597
598 /**
599 * Equality function for policy_entry_t objects
600 */
601 static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
602 {
603 return memeq(&key->sel, &other_key->sel,
604 sizeof(struct xfrm_selector) + sizeof(u_int32_t)) &&
605 key->direction == other_key->direction;
606 }
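
/*
 * Both the hash and the equality check cover sizeof(sel) plus one u_int32_t
 * on purpose: the mark member directly follows sel in policy_entry_t, so
 * policies differing only in their mark are kept as separate entries.
 */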
607
608 /**
609 * Calculate the priority of a policy
610 */
611 static inline u_int32_t get_priority(policy_entry_t *policy,
612 policy_priority_t prio)
613 {
614 u_int32_t priority = PRIO_BASE;
615 switch (prio)
616 {
617 case POLICY_PRIORITY_FALLBACK:
618 priority <<= 1;
619 /* fall-through */
620 case POLICY_PRIORITY_ROUTED:
621 priority <<= 1;
622 /* fall-through */
623 case POLICY_PRIORITY_DEFAULT:
624 break;
625 }
626 /* calculate priority based on selector size, small size = high prio */
627 priority -= policy->sel.prefixlen_s;
628 priority -= policy->sel.prefixlen_d;
629 priority <<= 2; /* make some room for the two flags */
630 priority += policy->sel.sport_mask || policy->sel.dport_mask ? 0 : 2;
631 priority += policy->sel.proto ? 0 : 1;
632 return priority;
633 }
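
/*
 * Worked example: a POLICY_PRIORITY_DEFAULT policy with two /32 selectors,
 * a specific protocol and at least one specific port yields
 * ((512 - 32 - 32) << 2) + 0 + 0 = 1792, whereas a 0.0.0.0/0 <-> 0.0.0.0/0
 * policy matching any protocol and port yields (512 << 2) + 2 + 1 = 2051.
 * Lower values take precedence in the kernel, so the more specific policy
 * wins.
 */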
634
635 /**
636 * Convert the general ipsec mode to the one defined in xfrm.h
637 */
638 static u_int8_t mode2kernel(ipsec_mode_t mode)
639 {
640 switch (mode)
641 {
642 case MODE_TRANSPORT:
643 return XFRM_MODE_TRANSPORT;
644 case MODE_TUNNEL:
645 return XFRM_MODE_TUNNEL;
646 case MODE_BEET:
647 return XFRM_MODE_BEET;
648 default:
649 return mode;
650 }
651 }
652
653 /**
654 * Convert a host_t to a struct xfrm_address
655 */
656 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
657 {
658 chunk_t chunk = host->get_address(host);
659 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
660 }
661
662 /**
663 * Convert a struct xfrm_address to a host_t
664 */
665 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
666 {
667 chunk_t chunk;
668
669 switch (family)
670 {
671 case AF_INET:
672 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
673 break;
674 case AF_INET6:
675 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
676 break;
677 default:
678 return NULL;
679 }
680 return host_create_from_chunk(family, chunk, ntohs(port));
681 }
682
683 /**
684 * Convert a traffic selector address range to subnet and its mask.
685 */
686 static void ts2subnet(traffic_selector_t* ts,
687 xfrm_address_t *net, u_int8_t *mask)
688 {
689 host_t *net_host;
690 chunk_t net_chunk;
691
692 ts->to_subnet(ts, &net_host, mask);
693 net_chunk = net_host->get_address(net_host);
694 memcpy(net, net_chunk.ptr, net_chunk.len);
695 net_host->destroy(net_host);
696 }
697
698 /**
699 * Convert a traffic selector port range to port/portmask
700 */
701 static void ts2ports(traffic_selector_t* ts,
702 u_int16_t *port, u_int16_t *mask)
703 {
704	/* Linux does not seem to accept complex port masks; only "any" or one
705	 * specific port is allowed. We therefore match any port if the selector
706	 * covers a range, and the exact port if it covers a single port only.
707	 */
708 u_int16_t from, to;
709
710 from = ts->get_from_port(ts);
711 to = ts->get_to_port(ts);
712
713 if (from == to)
714 {
715 *port = htons(from);
716 *mask = ~0;
717 }
718 else
719 {
720 *port = 0;
721 *mask = 0;
722 }
723 }
724
725 /**
726 * Convert a pair of traffic_selectors to an xfrm_selector
727 */
728 static struct xfrm_selector ts2selector(traffic_selector_t *src,
729 traffic_selector_t *dst)
730 {
731 struct xfrm_selector sel;
732
733 memset(&sel, 0, sizeof(sel));
734 sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
735 /* src or dest proto may be "any" (0), use more restrictive one */
736 sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
737 ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
738 ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
739 ts2ports(dst, &sel.dport, &sel.dport_mask);
740 ts2ports(src, &sel.sport, &sel.sport_mask);
741 sel.ifindex = 0;
742 sel.user = 0;
743
744 return sel;
745 }
746
747 /**
748 * Convert an xfrm_selector to a src|dst traffic_selector
749 */
750 static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
751 {
752 u_char *addr;
753 u_int8_t prefixlen;
754 u_int16_t port = 0;
755 host_t *host = NULL;
756
757 if (src)
758 {
759 addr = (u_char*)&sel->saddr;
760 prefixlen = sel->prefixlen_s;
761 if (sel->sport_mask)
762 {
763 port = htons(sel->sport);
764 }
765 }
766 else
767 {
768 addr = (u_char*)&sel->daddr;
769 prefixlen = sel->prefixlen_d;
770 if (sel->dport_mask)
771 {
772 port = htons(sel->dport);
773 }
774 }
775
776 /* The Linux 2.6 kernel does not set the selector's family field,
777 * so as a kludge we additionally test the prefix length.
778 */
779 if (sel->family == AF_INET || sel->prefixlen_s == 32)
780 {
781 host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
782 }
783 else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
784 {
785 host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
786 }
787
788 if (host)
789 {
790 return traffic_selector_create_from_subnet(host, prefixlen,
791 sel->proto, port);
792 }
793 return NULL;
794 }
795
796 /**
797 * Process a XFRM_MSG_ACQUIRE from kernel
798 */
799 static void process_acquire(private_kernel_netlink_ipsec_t *this,
800 struct nlmsghdr *hdr)
801 {
802 struct xfrm_user_acquire *acquire;
803 struct rtattr *rta;
804 size_t rtasize;
805 traffic_selector_t *src_ts, *dst_ts;
806 u_int32_t reqid = 0;
807 int proto = 0;
808
809 acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
810 rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
811 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
812
813 DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");
814
815 while (RTA_OK(rta, rtasize))
816 {
817 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
818
819 if (rta->rta_type == XFRMA_TMPL)
820 {
821 struct xfrm_user_tmpl* tmpl;
822 tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
823 reqid = tmpl->reqid;
824 proto = tmpl->id.proto;
825 }
826 rta = RTA_NEXT(rta, rtasize);
827 }
828 switch (proto)
829 {
830 case 0:
831 case IPPROTO_ESP:
832 case IPPROTO_AH:
833 break;
834 default:
835 /* acquire for AH/ESP only, not for IPCOMP */
836 return;
837 }
838 src_ts = selector2ts(&acquire->sel, TRUE);
839 dst_ts = selector2ts(&acquire->sel, FALSE);
840
841 hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
842 dst_ts);
843 }
844
845 /**
846 * Process a XFRM_MSG_EXPIRE from kernel
847 */
848 static void process_expire(private_kernel_netlink_ipsec_t *this,
849 struct nlmsghdr *hdr)
850 {
851 struct xfrm_user_expire *expire;
852 u_int32_t spi, reqid;
853 u_int8_t protocol;
854
855 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
856 protocol = expire->state.id.proto;
857 spi = expire->state.id.spi;
858 reqid = expire->state.reqid;
859
860 DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");
861
862 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
863 {
864 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
865 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
866 return;
867 }
868
869 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
870 spi, expire->hard != 0);
871 }
872
873 /**
874 * Process a XFRM_MSG_MIGRATE from kernel
875 */
876 static void process_migrate(private_kernel_netlink_ipsec_t *this,
877 struct nlmsghdr *hdr)
878 {
879 struct xfrm_userpolicy_id *policy_id;
880 struct rtattr *rta;
881 size_t rtasize;
882 traffic_selector_t *src_ts, *dst_ts;
883 host_t *local = NULL, *remote = NULL;
884 host_t *old_src = NULL, *old_dst = NULL;
885 host_t *new_src = NULL, *new_dst = NULL;
886 u_int32_t reqid = 0;
887 policy_dir_t dir;
888
889 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
890 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
891 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
892
893 DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");
894
895 src_ts = selector2ts(&policy_id->sel, TRUE);
896 dst_ts = selector2ts(&policy_id->sel, FALSE);
897 dir = (policy_dir_t)policy_id->dir;
898
899 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);
900
901 while (RTA_OK(rta, rtasize))
902 {
903 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
904 if (rta->rta_type == XFRMA_KMADDRESS)
905 {
906 struct xfrm_user_kmaddress *kmaddress;
907
908 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
909 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
910 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
911 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
912 }
913 else if (rta->rta_type == XFRMA_MIGRATE)
914 {
915 struct xfrm_user_migrate *migrate;
916
917 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
918 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
919 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
920 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
921 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
922 reqid = migrate->reqid;
923 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
924 old_src, old_dst, new_src, new_dst, reqid);
925 DESTROY_IF(old_src);
926 DESTROY_IF(old_dst);
927 DESTROY_IF(new_src);
928 DESTROY_IF(new_dst);
929 }
930 rta = RTA_NEXT(rta, rtasize);
931 }
932
933 if (src_ts && dst_ts && local && remote)
934 {
935 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
936 src_ts, dst_ts, dir, local, remote);
937 }
938 else
939 {
940 DESTROY_IF(src_ts);
941 DESTROY_IF(dst_ts);
942 DESTROY_IF(local);
943 DESTROY_IF(remote);
944 }
945 }
946
947 /**
948 * Process a XFRM_MSG_MAPPING from kernel
949 */
950 static void process_mapping(private_kernel_netlink_ipsec_t *this,
951 struct nlmsghdr *hdr)
952 {
953 struct xfrm_user_mapping *mapping;
954 u_int32_t spi, reqid;
955
956 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
957 spi = mapping->id.spi;
958 reqid = mapping->reqid;
959
960 DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");
961
962 if (mapping->id.proto == IPPROTO_ESP)
963 {
964 host_t *host;
965 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
966 mapping->new_sport);
967 if (host)
968 {
969 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
970 spi, host);
971 }
972 }
973 }
974
975 /**
976 * Receives events from kernel
977 */
978 static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
979 {
980 char response[1024];
981 struct nlmsghdr *hdr = (struct nlmsghdr*)response;
982 struct sockaddr_nl addr;
983 socklen_t addr_len = sizeof(addr);
984 int len;
985 bool oldstate;
986
987 oldstate = thread_cancelability(TRUE);
988 len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
989 (struct sockaddr*)&addr, &addr_len);
990 thread_cancelability(oldstate);
991
992 if (len < 0)
993 {
994 switch (errno)
995 {
996 case EINTR:
997 /* interrupted, try again */
998 return JOB_REQUEUE_DIRECT;
999 case EAGAIN:
1000 /* no data ready, select again */
1001 return JOB_REQUEUE_DIRECT;
1002 default:
1003 DBG1(DBG_KNL, "unable to receive from xfrm event socket");
1004 sleep(1);
1005 return JOB_REQUEUE_FAIR;
1006 }
1007 }
1008
1009 if (addr.nl_pid != 0)
1010 { /* not from kernel. not interested, try another one */
1011 return JOB_REQUEUE_DIRECT;
1012 }
1013
1014 while (NLMSG_OK(hdr, len))
1015 {
1016 switch (hdr->nlmsg_type)
1017 {
1018 case XFRM_MSG_ACQUIRE:
1019 process_acquire(this, hdr);
1020 break;
1021 case XFRM_MSG_EXPIRE:
1022 process_expire(this, hdr);
1023 break;
1024 case XFRM_MSG_MIGRATE:
1025 process_migrate(this, hdr);
1026 break;
1027 case XFRM_MSG_MAPPING:
1028 process_mapping(this, hdr);
1029 break;
1030 default:
1031 DBG1(DBG_KNL, "received unknown event from xfrm event "
1032 "socket: %d", hdr->nlmsg_type);
1033 break;
1034 }
1035 hdr = NLMSG_NEXT(hdr, len);
1036 }
1037 return JOB_REQUEUE_DIRECT;
1038 }
1039
1040 /**
1041 * Get an SPI for a specific protocol from the kernel.
1042 */
1043 static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
1044 host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
1045 u_int32_t reqid, u_int32_t *spi)
1046 {
1047 netlink_buf_t request;
1048 struct nlmsghdr *hdr, *out;
1049 struct xfrm_userspi_info *userspi;
1050 u_int32_t received_spi = 0;
1051 size_t len;
1052
1053 memset(&request, 0, sizeof(request));
1054
1055 hdr = (struct nlmsghdr*)request;
1056 hdr->nlmsg_flags = NLM_F_REQUEST;
1057 hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
1058 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));
1059
1060 userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
1061 host2xfrm(src, &userspi->info.saddr);
1062 host2xfrm(dst, &userspi->info.id.daddr);
1063 userspi->info.id.proto = proto;
1064 userspi->info.mode = XFRM_MODE_TUNNEL;
1065 userspi->info.reqid = reqid;
1066 userspi->info.family = src->get_family(src);
1067 userspi->min = min;
1068 userspi->max = max;
1069
1070 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1071 {
1072 hdr = out;
1073 while (NLMSG_OK(hdr, len))
1074 {
1075 switch (hdr->nlmsg_type)
1076 {
1077 case XFRM_MSG_NEWSA:
1078 {
1079 struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
1080 received_spi = usersa->id.spi;
1081 break;
1082 }
1083 case NLMSG_ERROR:
1084 {
1085 struct nlmsgerr *err = NLMSG_DATA(hdr);
1086 DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
1087 strerror(-err->error), -err->error);
1088 break;
1089 }
1090 default:
1091 hdr = NLMSG_NEXT(hdr, len);
1092 continue;
1093 case NLMSG_DONE:
1094 break;
1095 }
1096 break;
1097 }
1098 free(out);
1099 }
1100
1101 if (received_spi == 0)
1102 {
1103 return FAILED;
1104 }
1105
1106 *spi = received_spi;
1107 return SUCCESS;
1108 }
1109
1110 METHOD(kernel_ipsec_t, get_spi, status_t,
1111 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1112 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
1113 {
1114 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
1115
1116 if (get_spi_internal(this, src, dst, protocol,
1117 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
1118 {
1119 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
1120 return FAILED;
1121 }
1122
1123 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
1124 return SUCCESS;
1125 }
1126
1127 METHOD(kernel_ipsec_t, get_cpi, status_t,
1128 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1129 u_int32_t reqid, u_int16_t *cpi)
1130 {
1131 u_int32_t received_spi = 0;
1132
1133 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
1134
1135 if (get_spi_internal(this, src, dst, IPPROTO_COMP,
1136 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
1137 {
1138 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
1139 return FAILED;
1140 }
1141
1142 *cpi = htons((u_int16_t)ntohl(received_spi));
1143
1144 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
1145 return SUCCESS;
1146 }
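
/*
 * CPIs are allocated through the same XFRM_MSG_ALLOCSPI mechanism as ESP/AH
 * SPIs; since a CPI is only 16 bits wide, the requested range is restricted
 * to 0x100-0xEFFF and the low 16 bits of the returned SPI are used as the
 * CPI (converted to network order above).
 */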
1147
1148 METHOD(kernel_ipsec_t, add_sa, status_t,
1149 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1150 u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
1151 u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
1152 u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
1153 u_int16_t cpi, bool encap, bool esn, bool inbound,
1154 traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
1155 {
1156 netlink_buf_t request;
1157 char *alg_name;
1158 struct nlmsghdr *hdr;
1159 struct xfrm_usersa_info *sa;
1160 u_int16_t icv_size = 64;
1161 status_t status = FAILED;
1162
1163 /* if IPComp is used, we install an additional IPComp SA. if the cpi is 0
1164 * we are in the recursive call below */
1165 if (ipcomp != IPCOMP_NONE && cpi != 0)
1166 {
1167 lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
1168 add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark,
1169 tfc, &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED,
1170 chunk_empty, mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
1171 ipcomp = IPCOMP_NONE;
1172 /* use transport mode ESP SA, IPComp uses tunnel mode */
1173 mode = MODE_TRANSPORT;
1174 }
1175
1176 memset(&request, 0, sizeof(request));
1177
1178 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} (mark "
1179 "%u/0x%08x)", ntohl(spi), reqid, mark.value, mark.mask);
1180
1181 hdr = (struct nlmsghdr*)request;
1182 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1183 hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
1184 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1185
1186 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1187 host2xfrm(src, &sa->saddr);
1188 host2xfrm(dst, &sa->id.daddr);
1189 sa->id.spi = spi;
1190 sa->id.proto = protocol;
1191 sa->family = src->get_family(src);
1192 sa->mode = mode2kernel(mode);
1193 switch (mode)
1194 {
1195 case MODE_TUNNEL:
1196 sa->flags |= XFRM_STATE_AF_UNSPEC;
1197 break;
1198 case MODE_BEET:
1199 case MODE_TRANSPORT:
1200 if(src_ts && dst_ts)
1201 {
1202 sa->sel = ts2selector(src_ts, dst_ts);
1203 }
1204 break;
1205 default:
1206 break;
1207 }
1208
1209 sa->reqid = reqid;
1210 sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
1211 sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
1212 sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
1213 sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
1214 /* we use lifetimes since added, not since used */
1215 sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
1216 sa->lft.hard_add_expires_seconds = lifetime->time.life;
1217 sa->lft.soft_use_expires_seconds = 0;
1218 sa->lft.hard_use_expires_seconds = 0;
1219
1220 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);
1221
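	/* The fall-through cases below accumulate the AEAD ICV length: the base
	 * is 64 bits, the ICV12 group adds 32 bits (96) and the ICV16/GMAC group
	 * adds another 32 bits on its way through (128). */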
1222 switch (enc_alg)
1223 {
1224 case ENCR_UNDEFINED:
1225 /* no encryption */
1226 break;
1227 case ENCR_AES_CCM_ICV16:
1228 case ENCR_AES_GCM_ICV16:
1229 case ENCR_NULL_AUTH_AES_GMAC:
1230 case ENCR_CAMELLIA_CCM_ICV16:
1231 icv_size += 32;
1232 /* FALL */
1233 case ENCR_AES_CCM_ICV12:
1234 case ENCR_AES_GCM_ICV12:
1235 case ENCR_CAMELLIA_CCM_ICV12:
1236 icv_size += 32;
1237 /* FALL */
1238 case ENCR_AES_CCM_ICV8:
1239 case ENCR_AES_GCM_ICV8:
1240 case ENCR_CAMELLIA_CCM_ICV8:
1241 {
1242 struct xfrm_algo_aead *algo;
1243
1244 alg_name = lookup_algorithm(ENCRYPTION_ALGORITHM, enc_alg);
1245 if (alg_name == NULL)
1246 {
1247 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1248 encryption_algorithm_names, enc_alg);
1249 goto failed;
1250 }
1251 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1252 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1253
1254 rthdr->rta_type = XFRMA_ALG_AEAD;
1255 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) +
1256 enc_key.len);
1257 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1258 if (hdr->nlmsg_len > sizeof(request))
1259 {
1260 goto failed;
1261 }
1262
1263 algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
1264 algo->alg_key_len = enc_key.len * 8;
1265 algo->alg_icv_len = icv_size;
1266 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1267 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1268 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1269
1270 rthdr = XFRM_RTA_NEXT(rthdr);
1271 break;
1272 }
1273 default:
1274 {
1275 struct xfrm_algo *algo;
1276
1277 alg_name = lookup_algorithm(ENCRYPTION_ALGORITHM, enc_alg);
1278 if (alg_name == NULL)
1279 {
1280 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1281 encryption_algorithm_names, enc_alg);
1282 goto failed;
1283 }
1284 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1285 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1286
1287 rthdr->rta_type = XFRMA_ALG_CRYPT;
1288 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
1289 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1290 if (hdr->nlmsg_len > sizeof(request))
1291 {
1292 goto failed;
1293 }
1294
1295 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1296 algo->alg_key_len = enc_key.len * 8;
1297 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1298 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1299 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1300
1301 rthdr = XFRM_RTA_NEXT(rthdr);
1302 }
1303 }
1304
1305 if (int_alg != AUTH_UNDEFINED)
1306 {
1307 u_int trunc_len = 0;
1308
1309 alg_name = lookup_algorithm(INTEGRITY_ALGORITHM, int_alg);
1310 if (alg_name == NULL)
1311 {
1312 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1313 integrity_algorithm_names, int_alg);
1314 goto failed;
1315 }
1316 DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
1317 integrity_algorithm_names, int_alg, int_key.len * 8);
1318
1319 switch (int_alg)
1320 {
1321 case AUTH_HMAC_MD5_128:
1322 case AUTH_HMAC_SHA2_256_128:
1323 trunc_len = 128;
1324 break;
1325 case AUTH_HMAC_SHA1_160:
1326 trunc_len = 160;
1327 break;
1328 default:
1329 break;
1330 }
1331
1332 if (trunc_len)
1333 {
1334 struct xfrm_algo_auth* algo;
1335
1336 /* the kernel uses SHA256 with 96 bit truncation by default,
1337 * use specified truncation size supported by newer kernels.
1338 * also use this for untruncated MD5 and SHA1. */
1339 rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
1340 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) +
1341 int_key.len);
1342
1343 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1344 if (hdr->nlmsg_len > sizeof(request))
1345 {
1346 goto failed;
1347 }
1348
1349 algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
1350 algo->alg_key_len = int_key.len * 8;
1351 algo->alg_trunc_len = trunc_len;
1352 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1353 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1354 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1355 }
1356 else
1357 {
1358 struct xfrm_algo* algo;
1359
1360 rthdr->rta_type = XFRMA_ALG_AUTH;
1361 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);
1362
1363 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1364 if (hdr->nlmsg_len > sizeof(request))
1365 {
1366 goto failed;
1367 }
1368
1369 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1370 algo->alg_key_len = int_key.len * 8;
1371 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1372 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1373 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1374 }
1375 rthdr = XFRM_RTA_NEXT(rthdr);
1376 }
1377
1378 if (ipcomp != IPCOMP_NONE)
1379 {
1380 rthdr->rta_type = XFRMA_ALG_COMP;
1381 alg_name = lookup_algorithm(COMPRESSION_ALGORITHM, ipcomp);
1382 if (alg_name == NULL)
1383 {
1384 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1385 ipcomp_transform_names, ipcomp);
1386 goto failed;
1387 }
1388 DBG2(DBG_KNL, " using compression algorithm %N",
1389 ipcomp_transform_names, ipcomp);
1390
1391 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
1392 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1393 if (hdr->nlmsg_len > sizeof(request))
1394 {
1395 goto failed;
1396 }
1397
1398 struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1399 algo->alg_key_len = 0;
1400 strncpy(algo->alg_name, alg_name, sizeof(algo->alg_name));
1401 algo->alg_name[sizeof(algo->alg_name) - 1] = '\0';
1402
1403 rthdr = XFRM_RTA_NEXT(rthdr);
1404 }
1405
1406 if (encap)
1407 {
1408 struct xfrm_encap_tmpl *tmpl;
1409
1410 rthdr->rta_type = XFRMA_ENCAP;
1411 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1412
1413 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1414 if (hdr->nlmsg_len > sizeof(request))
1415 {
1416 goto failed;
1417 }
1418
1419 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
1420 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1421 tmpl->encap_sport = htons(src->get_port(src));
1422 tmpl->encap_dport = htons(dst->get_port(dst));
1423 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1424 /* encap_oa could probably be derived from the
1425 * traffic selectors [rfc4306, p39]. In the netlink kernel
1426 * implementation pluto does the same as we do here but it uses
1427 * encap_oa in the pfkey implementation.
1428 * BUT as /usr/src/linux/net/key/af_key.c indicates the kernel ignores
1429 * it anyway
1430 * -> does that mean that NAT-T encap doesn't work in transport mode?
1431 * No. The reason the kernel ignores NAT-OA is that it recomputes
1432 * (or, rather, just ignores) the checksum. If packets pass the IPsec
1433 * checks it marks them "checksum ok" so OA isn't needed. */
1434 rthdr = XFRM_RTA_NEXT(rthdr);
1435 }
1436
1437 if (mark.value)
1438 {
1439 struct xfrm_mark *mrk;
1440
1441 rthdr->rta_type = XFRMA_MARK;
1442 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1443
1444 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1445 if (hdr->nlmsg_len > sizeof(request))
1446 {
1447 goto failed;
1448 }
1449
1450 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1451 mrk->v = mark.value;
1452 mrk->m = mark.mask;
1453 rthdr = XFRM_RTA_NEXT(rthdr);
1454 }
1455
1456 if (tfc)
1457 {
1458 u_int32_t *tfcpad;
1459
1460 rthdr->rta_type = XFRMA_TFCPAD;
1461 rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));
1462
1463 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1464 if (hdr->nlmsg_len > sizeof(request))
1465 {
1466 goto failed;
1467 }
1468
1469 tfcpad = (u_int32_t*)RTA_DATA(rthdr);
1470 *tfcpad = tfc;
1471 rthdr = XFRM_RTA_NEXT(rthdr);
1472 }
1473
1474 if (protocol != IPPROTO_COMP)
1475 {
1476 if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
1477 {
1478 /* for ESN or larger replay windows we need the new
1479 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
1480 struct xfrm_replay_state_esn *replay;
1481
1482 rthdr->rta_type = XFRMA_REPLAY_ESN_VAL;
1483 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1484 (this->replay_window + 7) / 8);
1485
1486 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1487 if (hdr->nlmsg_len > sizeof(request))
1488 {
1489 goto failed;
1490 }
1491
1492 replay = (struct xfrm_replay_state_esn*)RTA_DATA(rthdr);
1493			/* bmp_len contains the number of __u32's */
1494 replay->bmp_len = this->replay_bmp;
1495 replay->replay_window = this->replay_window;
1496 DBG2(DBG_KNL, " using replay window of %u packets",
1497 this->replay_window);
1498
1499 rthdr = XFRM_RTA_NEXT(rthdr);
1500 if (esn)
1501 {
1502 DBG2(DBG_KNL, " using extended sequence numbers (ESN)");
1503 sa->flags |= XFRM_STATE_ESN;
1504 }
1505 }
1506 else
1507 {
1508 DBG2(DBG_KNL, " using replay window of %u packets",
1509 this->replay_window);
1510 sa->replay_window = this->replay_window;
1511 }
1512 }
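	/* The legacy replay state only provides a single 32 bit bitmap, which is
	 * why windows larger than DEFAULT_REPLAY_WINDOW (and ESN) are configured
	 * through the extended XFRMA_REPLAY_ESN_VAL attribute above. */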
1513
1514 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1515 {
1516 if (mark.value)
1517 {
1518 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
1519 "(mark %u/0x%08x)", ntohl(spi), mark.value, mark.mask);
1520 }
1521 else
1522 {
1523 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
1524 }
1525 goto failed;
1526 }
1527
1528 status = SUCCESS;
1529
1530 failed:
1531 memwipe(request, sizeof(request));
1532 return status;
1533 }
1534
1535 /**
1536  * Get the replay state (i.e. sequence numbers and window) of an SA.
1537  *
1538  * Allocates whichever replay state structure (legacy or ESN) the kernel returns.
1539 */
1540 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1541 u_int32_t spi, u_int8_t protocol, host_t *dst,
1542 struct xfrm_replay_state_esn **replay_esn,
1543 struct xfrm_replay_state **replay)
1544 {
1545 netlink_buf_t request;
1546 struct nlmsghdr *hdr, *out = NULL;
1547 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1548 size_t len;
1549 struct rtattr *rta;
1550 size_t rtasize;
1551
1552 memset(&request, 0, sizeof(request));
1553
1554 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1555 ntohl(spi));
1556
1557 hdr = (struct nlmsghdr*)request;
1558 hdr->nlmsg_flags = NLM_F_REQUEST;
1559 hdr->nlmsg_type = XFRM_MSG_GETAE;
1560 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1561
1562 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1563 aevent_id->flags = XFRM_AE_RVAL;
1564
1565 host2xfrm(dst, &aevent_id->sa_id.daddr);
1566 aevent_id->sa_id.spi = spi;
1567 aevent_id->sa_id.proto = protocol;
1568 aevent_id->sa_id.family = dst->get_family(dst);
1569
1570 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1571 {
1572 hdr = out;
1573 while (NLMSG_OK(hdr, len))
1574 {
1575 switch (hdr->nlmsg_type)
1576 {
1577 case XFRM_MSG_NEWAE:
1578 {
1579 out_aevent = NLMSG_DATA(hdr);
1580 break;
1581 }
1582 case NLMSG_ERROR:
1583 {
1584 struct nlmsgerr *err = NLMSG_DATA(hdr);
1585 DBG1(DBG_KNL, "querying replay state from SAD entry "
1586 "failed: %s (%d)", strerror(-err->error),
1587 -err->error);
1588 break;
1589 }
1590 default:
1591 hdr = NLMSG_NEXT(hdr, len);
1592 continue;
1593 case NLMSG_DONE:
1594 break;
1595 }
1596 break;
1597 }
1598 }
1599
1600 if (out_aevent)
1601 {
1602 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1603 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1604 while (RTA_OK(rta, rtasize))
1605 {
1606 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1607 RTA_PAYLOAD(rta) == sizeof(**replay))
1608 {
1609 *replay = malloc(RTA_PAYLOAD(rta));
1610 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1611 break;
1612 }
1613 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1614 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1615 {
1616 *replay_esn = malloc(RTA_PAYLOAD(rta));
1617 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1618 break;
1619 }
1620 rta = RTA_NEXT(rta, rtasize);
1621 }
1622 }
1623 free(out);
1624 }
1625
1626 METHOD(kernel_ipsec_t, query_sa, status_t,
1627 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1628 u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
1629 {
1630 netlink_buf_t request;
1631 struct nlmsghdr *out = NULL, *hdr;
1632 struct xfrm_usersa_id *sa_id;
1633 struct xfrm_usersa_info *sa = NULL;
1634 status_t status = FAILED;
1635	size_t len = 0;
1636
1637 memset(&request, 0, sizeof(request));
1638
1639 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%08x)",
1640 ntohl(spi), mark.value, mark.mask);
1641
1642 hdr = (struct nlmsghdr*)request;
1643 hdr->nlmsg_flags = NLM_F_REQUEST;
1644 hdr->nlmsg_type = XFRM_MSG_GETSA;
1645 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1646
1647 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1648 host2xfrm(dst, &sa_id->daddr);
1649 sa_id->spi = spi;
1650 sa_id->proto = protocol;
1651 sa_id->family = dst->get_family(dst);
1652
1653 if (mark.value)
1654 {
1655 struct xfrm_mark *mrk;
1656 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1657
1658 rthdr->rta_type = XFRMA_MARK;
1659 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1660 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1661 if (hdr->nlmsg_len > sizeof(request))
1662 {
1663 return FAILED;
1664 }
1665
1666 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1667 mrk->v = mark.value;
1668 mrk->m = mark.mask;
1669 }
1670
1671 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1672 {
1673 hdr = out;
1674 while (NLMSG_OK(hdr, len))
1675 {
1676 switch (hdr->nlmsg_type)
1677 {
1678 case XFRM_MSG_NEWSA:
1679 {
1680 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1681 break;
1682 }
1683 case NLMSG_ERROR:
1684 {
1685 struct nlmsgerr *err = NLMSG_DATA(hdr);
1686
1687 if (mark.value)
1688 {
1689 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1690 "(mark %u/0x%08x) failed: %s (%d)",
1691 ntohl(spi), mark.value, mark.mask,
1692 strerror(-err->error), -err->error);
1693 }
1694 else
1695 {
1696 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1697 "failed: %s (%d)", ntohl(spi),
1698 strerror(-err->error), -err->error);
1699 }
1700 break;
1701 }
1702 default:
1703 hdr = NLMSG_NEXT(hdr, len);
1704 continue;
1705 case NLMSG_DONE:
1706 break;
1707 }
1708 break;
1709 }
1710 }
1711
1712 if (sa == NULL)
1713 {
1714 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1715 }
1716 else
1717 {
1718 *bytes = sa->curlft.bytes;
1719 status = SUCCESS;
1720 }
1721 memwipe(out, len);
1722 free(out);
1723 return status;
1724 }
1725
1726 METHOD(kernel_ipsec_t, del_sa, status_t,
1727 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1728 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1729 {
1730 netlink_buf_t request;
1731 struct nlmsghdr *hdr;
1732 struct xfrm_usersa_id *sa_id;
1733
1734 /* if IPComp was used, we first delete the additional IPComp SA */
1735 if (cpi)
1736 {
1737 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1738 }
1739
1740 memset(&request, 0, sizeof(request));
1741
1742 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%08x)",
1743 ntohl(spi), mark.value, mark.mask);
1744
1745 hdr = (struct nlmsghdr*)request;
1746 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1747 hdr->nlmsg_type = XFRM_MSG_DELSA;
1748 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1749
1750 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1751 host2xfrm(dst, &sa_id->daddr);
1752 sa_id->spi = spi;
1753 sa_id->proto = protocol;
1754 sa_id->family = dst->get_family(dst);
1755
1756 if (mark.value)
1757 {
1758 struct xfrm_mark *mrk;
1759 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1760
1761 rthdr->rta_type = XFRMA_MARK;
1762 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1763 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1764 if (hdr->nlmsg_len > sizeof(request))
1765 {
1766 return FAILED;
1767 }
1768
1769 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1770 mrk->v = mark.value;
1771 mrk->m = mark.mask;
1772 }
1773
1774 switch (this->socket_xfrm->send_ack(this->socket_xfrm, hdr))
1775 {
1776 case SUCCESS:
1777 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%08x)",
1778 ntohl(spi), mark.value, mark.mask);
1779 return SUCCESS;
1780 case NOT_FOUND:
1781 return NOT_FOUND;
1782 default:
1783 if (mark.value)
1784 {
1785 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1786 "(mark %u/0x%08x)", ntohl(spi), mark.value, mark.mask);
1787 }
1788 else
1789 {
1790 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x",
1791 ntohl(spi));
1792 }
1793 return FAILED;
1794 }
1795 }
1796
1797 METHOD(kernel_ipsec_t, update_sa, status_t,
1798 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1799 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1800 bool old_encap, bool new_encap, mark_t mark)
1801 {
1802 netlink_buf_t request;
1803 u_char *pos;
1804 struct nlmsghdr *hdr, *out = NULL;
1805 struct xfrm_usersa_id *sa_id;
1806 struct xfrm_usersa_info *out_sa = NULL, *sa;
1807	size_t len = 0;
1808 struct rtattr *rta;
1809 size_t rtasize;
1810 struct xfrm_encap_tmpl* tmpl = NULL;
1811 struct xfrm_replay_state *replay = NULL;
1812 struct xfrm_replay_state_esn *replay_esn = NULL;
1813 status_t status = FAILED;
1814
1815 /* if IPComp is used, we first update the IPComp SA */
1816 if (cpi)
1817 {
1818 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1819 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1820 }
1821
1822 memset(&request, 0, sizeof(request));
1823
1824 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1825
1826 /* query the existing SA first */
1827 hdr = (struct nlmsghdr*)request;
1828 hdr->nlmsg_flags = NLM_F_REQUEST;
1829 hdr->nlmsg_type = XFRM_MSG_GETSA;
1830 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1831
1832 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1833 host2xfrm(dst, &sa_id->daddr);
1834 sa_id->spi = spi;
1835 sa_id->proto = protocol;
1836 sa_id->family = dst->get_family(dst);
1837
1838 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1839 {
1840 hdr = out;
1841 while (NLMSG_OK(hdr, len))
1842 {
1843 switch (hdr->nlmsg_type)
1844 {
1845 case XFRM_MSG_NEWSA:
1846 {
1847 out_sa = NLMSG_DATA(hdr);
1848 break;
1849 }
1850 case NLMSG_ERROR:
1851 {
1852 struct nlmsgerr *err = NLMSG_DATA(hdr);
1853 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1854 strerror(-err->error), -err->error);
1855 break;
1856 }
1857 default:
1858 hdr = NLMSG_NEXT(hdr, len);
1859 continue;
1860 case NLMSG_DONE:
1861 break;
1862 }
1863 break;
1864 }
1865 }
1866 if (out_sa == NULL)
1867 {
1868 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1869 goto failed;
1870 }
1871
1872 get_replay_state(this, spi, protocol, dst, &replay_esn, &replay);
1873
1874 /* delete the old SA (without affecting the IPComp SA) */
1875 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1876 {
1877 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x",
1878 ntohl(spi));
1879 goto failed;
1880 }
1881
1882 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1883 ntohl(spi), src, dst, new_src, new_dst);
1884 /* copy over the SA from out to request */
1885 hdr = (struct nlmsghdr*)request;
1886 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1887 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1888 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1889 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1890 sa = NLMSG_DATA(hdr);
1891 sa->family = new_dst->get_family(new_dst);
1892
1893 if (!src->ip_equals(src, new_src))
1894 {
1895 host2xfrm(new_src, &sa->saddr);
1896 }
1897 if (!dst->ip_equals(dst, new_dst))
1898 {
1899 host2xfrm(new_dst, &sa->id.daddr);
1900 }
1901
1902 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1903 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1904 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1905 while(RTA_OK(rta, rtasize))
1906 {
1907 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1908 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1909 {
1910 if (rta->rta_type == XFRMA_ENCAP)
1911 { /* update encap tmpl */
1912 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1913 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1914 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1915 }
1916 memcpy(pos, rta, rta->rta_len);
1917 pos += RTA_ALIGN(rta->rta_len);
1918 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1919 }
1920 rta = RTA_NEXT(rta, rtasize);
1921 }
1922
1923 rta = (struct rtattr*)pos;
1924 if (tmpl == NULL && new_encap)
1925 { /* add tmpl if we are enabling it */
1926 rta->rta_type = XFRMA_ENCAP;
1927 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1928
1929 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1930 if (hdr->nlmsg_len > sizeof(request))
1931 {
1932 goto failed;
1933 }
1934
1935 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1936 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1937 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1938 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1939 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1940
1941 rta = XFRM_RTA_NEXT(rta);
1942 }
1943
1944 if (replay_esn)
1945 {
1946 rta->rta_type = XFRMA_REPLAY_ESN_VAL;
1947 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1948 this->replay_bmp);
1949
1950 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1951 if (hdr->nlmsg_len > sizeof(request))
1952 {
1953 goto failed;
1954 }
1955 memcpy(RTA_DATA(rta), replay_esn,
1956 sizeof(struct xfrm_replay_state_esn) + this->replay_bmp);
1957
1958 rta = XFRM_RTA_NEXT(rta);
1959 }
1960 else if (replay)
1961 {
1962 rta->rta_type = XFRMA_REPLAY_VAL;
1963 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
1964
1965 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1966 if (hdr->nlmsg_len > sizeof(request))
1967 {
1968 goto failed;
1969 }
1970		memcpy(RTA_DATA(rta), replay, sizeof(struct xfrm_replay_state));
1971
1972 rta = XFRM_RTA_NEXT(rta);
1973 }
1974 else
1975 {
1976 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
1977 "with SPI %.8x", ntohl(spi));
1978 }
1979
1980 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1981 {
1982 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1983 goto failed;
1984 }
1985
1986 status = SUCCESS;
1987 failed:
1988 free(replay);
1989 free(replay_esn);
1990 memwipe(out, len);
1991 memwipe(request, sizeof(request));
1992 free(out);
1993
1994 return status;
1995 }
1996
1997 METHOD(kernel_ipsec_t, flush_sas, status_t,
1998 private_kernel_netlink_ipsec_t *this)
1999 {
2000 netlink_buf_t request;
2001 struct nlmsghdr *hdr;
2002 struct xfrm_usersa_flush *flush;
2003
2004 memset(&request, 0, sizeof(request));
2005
2006 DBG2(DBG_KNL, "flushing all SAD entries");
2007
2008 hdr = (struct nlmsghdr*)request;
2009 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2010 hdr->nlmsg_type = XFRM_MSG_FLUSHSA;
2011 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
2012
2013 flush = (struct xfrm_usersa_flush*)NLMSG_DATA(hdr);
2014 flush->proto = IPSEC_PROTO_ANY;
2015
2016 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2017 {
2018 DBG1(DBG_KNL, "unable to flush SAD entries");
2019 return FAILED;
2020 }
2021 return SUCCESS;
2022 }
2023
2024 /**
2025 * Add or update a policy in the kernel.
2026 *
2027 * Note: The mutex has to be locked when entering this function
2028 * and is unlocked here in any case.
2029 */
2030 static status_t add_policy_internal(private_kernel_netlink_ipsec_t *this,
2031 policy_entry_t *policy, policy_sa_t *mapping, bool update)
2032 {
2033 netlink_buf_t request;
2034 policy_entry_t clone;
2035 ipsec_sa_t *ipsec = mapping->sa;
2036 struct xfrm_userpolicy_info *policy_info;
2037 struct nlmsghdr *hdr;
2038 int i;
2039
2040 /* clone the policy so we are able to check it out again later */
2041 memcpy(&clone, policy, sizeof(policy_entry_t));
2042
2043 memset(&request, 0, sizeof(request));
2044 hdr = (struct nlmsghdr*)request;
2045 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2046 hdr->nlmsg_type = update ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
2047 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));
2048
2049 policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2050 policy_info->sel = policy->sel;
2051 policy_info->dir = policy->direction;
2052
2053 /* the priority was calculated in get_priority() based on the selector size
2053 * (smaller selector = higher priority) */
2054 policy_info->priority = mapping->priority;
2055 policy_info->action = mapping->type != POLICY_DROP ? XFRM_POLICY_ALLOW
2056 : XFRM_POLICY_BLOCK;
2057 policy_info->share = XFRM_SHARE_ANY;
2058
2059 /* policies don't expire */
2060 policy_info->lft.soft_byte_limit = XFRM_INF;
2061 policy_info->lft.soft_packet_limit = XFRM_INF;
2062 policy_info->lft.hard_byte_limit = XFRM_INF;
2063 policy_info->lft.hard_packet_limit = XFRM_INF;
2064 policy_info->lft.soft_add_expires_seconds = 0;
2065 policy_info->lft.hard_add_expires_seconds = 0;
2066 policy_info->lft.soft_use_expires_seconds = 0;
2067 policy_info->lft.hard_use_expires_seconds = 0;
2068
2069 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);
2070
2071 if (mapping->type == POLICY_IPSEC)
2072 {
2073 struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
2074 struct {
2075 u_int8_t proto;
2076 bool use;
2077 } protos[] = {
2078 { IPPROTO_COMP, ipsec->cfg.ipcomp.transform != IPCOMP_NONE },
2079 { IPPROTO_ESP, ipsec->cfg.esp.use },
2080 { IPPROTO_AH, ipsec->cfg.ah.use },
2081 };
2082 ipsec_mode_t proto_mode = ipsec->cfg.mode;
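		/* one xfrm_user_tmpl is appended below for each protocol in use
		 * (IPComp, ESP, AH, innermost first); only the first template keeps
		 * the negotiated tunnel/BEET mode, the remaining ones are switched
		 * to transport mode */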
2083
2084 rthdr->rta_type = XFRMA_TMPL;
2085 rthdr->rta_len = 0; /* actual length is set below */
2086
2087 for (i = 0; i < countof(protos); i++)
2088 {
2089 if (!protos[i].use)
2090 {
2091 continue;
2092 }
2093
2094 rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
2095 hdr->nlmsg_len += RTA_ALIGN(RTA_LENGTH(sizeof(struct xfrm_user_tmpl)));
2096 if (hdr->nlmsg_len > sizeof(request))
2097 {
2098 this->mutex->unlock(this->mutex);
2099 return FAILED;
2100 }
2101
2102 tmpl->reqid = ipsec->cfg.reqid;
2103 tmpl->id.proto = protos[i].proto;
2104 tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
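			/* the algorithm fields are bitmasks of acceptable algorithms;
			 * ~0 places no restriction, the SA itself defines what is used */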
2105 tmpl->mode = mode2kernel(proto_mode);
2106 tmpl->optional = protos[i].proto == IPPROTO_COMP &&
2107 policy->direction != POLICY_OUT;
2108 tmpl->family = ipsec->src->get_family(ipsec->src);
2109
2110 if (proto_mode == MODE_TUNNEL)
2111 { /* only for tunnel mode */
2112 host2xfrm(ipsec->src, &tmpl->saddr);
2113 host2xfrm(ipsec->dst, &tmpl->id.daddr);
2114 }
2115
2116 tmpl++;
2117
2118 /* use transport mode for other SAs */
2119 proto_mode = MODE_TRANSPORT;
2120 }
2121
2122 rthdr = XFRM_RTA_NEXT(rthdr);
2123 }
2124
2125 if (ipsec->mark.value)
2126 {
2127 struct xfrm_mark *mrk;
2128
2129 rthdr->rta_type = XFRMA_MARK;
2130 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2131
2132 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2133 if (hdr->nlmsg_len > sizeof(request))
2134 {
2135 this->mutex->unlock(this->mutex);
2136 return FAILED;
2137 }
2138
2139 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2140 mrk->v = ipsec->mark.value;
2141 mrk->m = ipsec->mark.mask;
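		/* the XFRMA_MARK attribute restricts this policy to packets that
		 * carry a matching netfilter firewall mark (value under mask) */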
2142 }
2143 this->mutex->unlock(this->mutex);
2144
2145 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2146 {
2147 return FAILED;
2148 }
2149
2150 /* find the policy again */
2151 this->mutex->lock(this->mutex);
2152 policy = this->policies->get(this->policies, &clone);
2153 if (!policy ||
2154 policy->used_by->find_first(policy->used_by,
2155 NULL, (void**)&mapping) != SUCCESS)
2156 { /* policy or mapping is already gone, ignore */
2157 this->mutex->unlock(this->mutex);
2158 return SUCCESS;
2159 }
2160
2161 /* install a route, if:
2162 * - this is a forward policy (to just get one for each child)
2163 * - we are in tunnel/BEET mode
2164 * - routing is not disabled via strongswan.conf
2165 */
2166 if (policy->direction == POLICY_FWD &&
2167 ipsec->cfg.mode != MODE_TRANSPORT && this->install_routes)
2168 {
2169 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)mapping;
2170 route_entry_t *route;
2171
2172 INIT(route,
2173 .prefixlen = policy->sel.prefixlen_s,
2174 );
2175
2176 if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
2177 fwd->dst_ts, &route->src_ip) == SUCCESS)
2178 {
2179 /* get the nexthop to src (src as we are in POLICY_FWD) */
2180 route->gateway = hydra->kernel_interface->get_nexthop(
2181 hydra->kernel_interface, ipsec->src,
2182 ipsec->dst);
2183 route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
2184 memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
2185
2186 /* install route via outgoing interface */
2187 if (!hydra->kernel_interface->get_interface(hydra->kernel_interface,
2188 ipsec->dst, &route->if_name))
2189 {
2190 this->mutex->unlock(this->mutex);
2191 route_entry_destroy(route);
2192 return SUCCESS;
2193 }
2194
2195 if (policy->route)
2196 {
2197 route_entry_t *old = policy->route;
2198 if (route_entry_equals(old, route))
2199 {
2200 this->mutex->unlock(this->mutex);
2201 route_entry_destroy(route);
2202 return SUCCESS;
2203 }
2204 /* uninstall previously installed route */
2205 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2206 old->dst_net, old->prefixlen, old->gateway,
2207 old->src_ip, old->if_name) != SUCCESS)
2208 {
2209 DBG1(DBG_KNL, "error uninstalling route installed with "
2210 "policy %R === %R %N", fwd->src_ts,
2211 fwd->dst_ts, policy_dir_names,
2212 policy->direction);
2213 }
2214 route_entry_destroy(old);
2215 policy->route = NULL;
2216 }
2217
2218 DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
2219 fwd->src_ts, route->gateway, route->src_ip, route->if_name);
2220 switch (hydra->kernel_interface->add_route(
2221 hydra->kernel_interface, route->dst_net,
2222 route->prefixlen, route->gateway,
2223 route->src_ip, route->if_name))
2224 {
2225 default:
2226 DBG1(DBG_KNL, "unable to install source route for %H",
2227 route->src_ip);
2228 /* FALL */
2229 case ALREADY_DONE:
2230 /* route exists, do not uninstall */
2231 route_entry_destroy(route);
2232 break;
2233 case SUCCESS:
2234 /* cache the installed route */
2235 policy->route = route;
2236 break;
2237 }
2238 }
2239 else
2240 {
2241 free(route);
2242 }
2243 }
2244 this->mutex->unlock(this->mutex);
2245 return SUCCESS;
2246 }
2247
2248 METHOD(kernel_ipsec_t, add_policy, status_t,
2249 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
2250 traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
2251 policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
2252 mark_t mark, policy_priority_t priority)
2253 {
2254 policy_entry_t *policy, *current;
2255 policy_sa_t *assigned_sa, *current_sa;
2256 enumerator_t *enumerator;
2257 bool found = FALSE, update = TRUE;
2258
2259 /* create a policy */
2260 INIT(policy,
2261 .sel = ts2selector(src_ts, dst_ts),
2262 .mark = mark.value & mark.mask,
2263 .direction = direction,
2264 );
2265
2266 /* find the policy that matches EXACTLY */
2267 this->mutex->lock(this->mutex);
2268 current = this->policies->get(this->policies, policy);
2269 if (current)
2270 {
2271 /* use existing policy */
2272 DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%08x) "
2273 "already exists, increasing refcount",
2274 src_ts, dst_ts, policy_dir_names, direction,
2275 mark.value, mark.mask);
2276 policy_entry_destroy(this, policy);
2277 policy = current;
2278 found = TRUE;
2279 }
2280 else
2281 { /* use the new one, if we have no such policy */
2282 policy->used_by = linked_list_create();
2283 this->policies->put(this->policies, policy, policy);
2284 }
2285
2286 /* cache the assigned IPsec SA */
2287 assigned_sa = policy_sa_create(this, direction, type, src, dst, src_ts,
2288 dst_ts, mark, sa);
2289 assigned_sa->priority = get_priority(policy, priority);
2290
2291 if (this->policy_history)
2292 { /* insert the SA according to its priority */
2293 enumerator = policy->used_by->create_enumerator(policy->used_by);
2294 while (enumerator->enumerate(enumerator, (void**)&current_sa))
2295 {
2296 if (current_sa->priority >= assigned_sa->priority)
2297 {
2298 break;
2299 }
2300 update = FALSE;
2301 }
2302 policy->used_by->insert_before(policy->used_by, enumerator,
2303 assigned_sa);
2304 enumerator->destroy(enumerator);
2305 }
2306 else
2307 { /* simply insert it last and only update if it is not installed yet */
2308 policy->used_by->insert_last(policy->used_by, assigned_sa);
2309 update = !found;
2310 }
2311
2312 if (!update)
2313 { /* we don't update the policy if the priority is lower than that of
2314 * the currently installed one */
2315 this->mutex->unlock(this->mutex);
2316 return SUCCESS;
2317 }
2318
2319 DBG2(DBG_KNL, "%s policy %R === %R %N (mark %u/0x%08x)",
2320 found ? "updating" : "adding", src_ts, dst_ts,
2321 policy_dir_names, direction, mark.value, mark.mask);
2322
2323 if (add_policy_internal(this, policy, assigned_sa, found) != SUCCESS)
2324 {
2325 DBG1(DBG_KNL, "unable to %s policy %R === %R %N",
2326 found ? "update" : "add", src_ts, dst_ts,
2327 policy_dir_names, direction);
2328 return FAILED;
2329 }
2330 return SUCCESS;
2331 }
2332
2333 METHOD(kernel_ipsec_t, query_policy, status_t,
2334 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2335 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
2336 u_int32_t *use_time)
2337 {
2338 netlink_buf_t request;
2339 struct nlmsghdr *out = NULL, *hdr;
2340 struct xfrm_userpolicy_id *policy_id;
2341 struct xfrm_userpolicy_info *policy = NULL;
2342 size_t len;
2343
2344 memset(&request, 0, sizeof(request));
2345
2346 DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%08x)",
2347 src_ts, dst_ts, policy_dir_names, direction,
2348 mark.value, mark.mask);
2349
2350 hdr = (struct nlmsghdr*)request;
2351 hdr->nlmsg_flags = NLM_F_REQUEST;
2352 hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
2353 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2354
2355 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2356 policy_id->sel = ts2selector(src_ts, dst_ts);
2357 policy_id->dir = direction;
2358
2359 if (mark.value)
2360 {
2361 struct xfrm_mark *mrk;
2362 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2363
2364 rthdr->rta_type = XFRMA_MARK;
2365 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2366
2367 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2368 if (hdr->nlmsg_len > sizeof(request))
2369 {
2370 return FAILED;
2371 }
2372
2373 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2374 mrk->v = mark.value;
2375 mrk->m = mark.mask;
2376 }
2377
2378 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
2379 {
2380 hdr = out;
2381 while (NLMSG_OK(hdr, len))
2382 {
2383 switch (hdr->nlmsg_type)
2384 {
2385 case XFRM_MSG_NEWPOLICY:
2386 {
2387 policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2388 break;
2389 }
2390 case NLMSG_ERROR:
2391 {
2392 struct nlmsgerr *err = NLMSG_DATA(hdr);
2393 DBG1(DBG_KNL, "querying policy failed: %s (%d)",
2394 strerror(-err->error), -err->error);
2395 break;
2396 }
2397 default:
2398 hdr = NLMSG_NEXT(hdr, len);
2399 continue;
2400 case NLMSG_DONE:
2401 break;
2402 }
2403 break;
2404 }
2405 }
2406
2407 if (policy == NULL)
2408 {
2409 DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
2410 policy_dir_names, direction);
2411 free(out);
2412 return FAILED;
2413 }
2414
2415 if (policy->curlft.use_time)
2416 {
2417 /* we need the monotonic time, but the kernel returns system time. */
2418 *use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
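		/* (time(NULL) - curlft.use_time) is the number of seconds since the
		 * policy was last used; subtracting that from the current monotonic
		 * time yields the last-use time on the monotonic clock */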
2419 }
2420 else
2421 {
2422 *use_time = 0;
2423 }
2424
2425 free(out);
2426 return SUCCESS;
2427 }
2428
2429 METHOD(kernel_ipsec_t, del_policy, status_t,
2430 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2431 traffic_selector_t *dst_ts, policy_dir_t direction, u_int32_t reqid,
2432 mark_t mark, policy_priority_t prio)
2433 {
2434 policy_entry_t *current, policy;
2435 enumerator_t *enumerator;
2436 policy_sa_t *mapping;
2437 netlink_buf_t request;
2438 struct nlmsghdr *hdr;
2439 struct xfrm_userpolicy_id *policy_id;
2440 bool is_installed = TRUE;
2441 u_int32_t priority;
2442
2443 DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x)",
2444 src_ts, dst_ts, policy_dir_names, direction,
2445 mark.value, mark.mask);
2446
2447 /* create a policy */
2448 memset(&policy, 0, sizeof(policy_entry_t));
2449 policy.sel = ts2selector(src_ts, dst_ts);
2450 policy.mark = mark.value & mark.mask;
2451 policy.direction = direction;
2452
2453 /* find the policy */
2454 this->mutex->lock(this->mutex);
2455 current = this->policies->get(this->policies, &policy);
2456 if (!current)
2457 {
2458 if (mark.value)
2459 {
2460 DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x) "
2461 "failed, not found", src_ts, dst_ts, policy_dir_names,
2462 direction, mark.value, mark.mask);
2463 }
2464 else
2465 {
2466 DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
2467 src_ts, dst_ts, policy_dir_names, direction);
2468 }
2469 this->mutex->unlock(this->mutex);
2470 return NOT_FOUND;
2471 }
2472
2473 if (this->policy_history)
2474 { /* remove mapping to SA by reqid and priority */
2475 priority = get_priority(current, prio);
2476 enumerator = current->used_by->create_enumerator(current->used_by);
2477 while (enumerator->enumerate(enumerator, (void**)&mapping))
2478 {
2479 if (reqid == mapping->sa->cfg.reqid &&
2480 priority == mapping->priority)
2481 {
2482 current->used_by->remove_at(current->used_by, enumerator);
2483 policy_sa_destroy(mapping, &direction, this);
2484 break;
2485 }
2486 is_installed = FALSE;
2487 }
2488 enumerator->destroy(enumerator);
2489 }
2490 else
2491 { /* remove one of the SAs but don't update the policy */
2492 current->used_by->remove_last(current->used_by, (void**)&mapping);
2493 policy_sa_destroy(mapping, &direction, this);
2494 is_installed = FALSE;
2495 }
2496
2497 if (current->used_by->get_count(current->used_by) > 0)
2498 { /* policy is still used by other SAs, keep it in the kernel */
2499 DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
2500 if (!is_installed)
2501 { /* no need to update as the policy was not installed for this SA */
2502 this->mutex->unlock(this->mutex);
2503 return SUCCESS;
2504 }
2505
2506 DBG2(DBG_KNL, "updating policy %R === %R %N (mark %u/0x%08x)",
2507 src_ts, dst_ts, policy_dir_names, direction,
2508 mark.value, mark.mask);
2509
2510 current->used_by->get_first(current->used_by, (void**)&mapping);
2511 if (add_policy_internal(this, current, mapping, TRUE) != SUCCESS)
2512 {
2513 DBG1(DBG_KNL, "unable to update policy %R === %R %N",
2514 src_ts, dst_ts, policy_dir_names, direction);
2515 return FAILED;
2516 }
2517 return SUCCESS;
2518 }
2519
2520 memset(&request, 0, sizeof(request));
2521
2522 hdr = (struct nlmsghdr*)request;
2523 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2524 hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
2525 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2526
2527 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2528 policy_id->sel = current->sel;
2529 policy_id->dir = direction;
2530
2531 if (mark.value)
2532 {
2533 struct xfrm_mark *mrk;
2534 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2535
2536 rthdr->rta_type = XFRMA_MARK;
2537 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2538 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2539 if (hdr->nlmsg_len > sizeof(request))
2540 {
2541 this->mutex->unlock(this->mutex);
2542 return FAILED;
2543 }
2544
2545 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2546 mrk->v = mark.value;
2547 mrk->m = mark.mask;
2548 }
2549
2550 if (current->route)
2551 {
2552 route_entry_t *route = current->route;
2553 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2554 route->dst_net, route->prefixlen, route->gateway,
2555 route->src_ip, route->if_name) != SUCCESS)
2556 {
2557 DBG1(DBG_KNL, "error uninstalling route installed with "
2558 "policy %R === %R %N", src_ts, dst_ts,
2559 policy_dir_names, direction);
2560 }
2561 }
2562
2563 this->policies->remove(this->policies, current);
2564 policy_entry_destroy(this, current);
2565 this->mutex->unlock(this->mutex);
2566
2567 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2568 {
2569 if (mark.value)
2570 {
2571 DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
2572 "(mark %u/0x%08x)", src_ts, dst_ts, policy_dir_names,
2573 direction, mark.value, mark.mask);
2574 }
2575 else
2576 {
2577 DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
2578 src_ts, dst_ts, policy_dir_names, direction);
2579 }
2580 return FAILED;
2581 }
2582 return SUCCESS;
2583 }
2584
2585 METHOD(kernel_ipsec_t, flush_policies, status_t,
2586 private_kernel_netlink_ipsec_t *this)
2587 {
2588 netlink_buf_t request;
2589 struct nlmsghdr *hdr;
2590
2591 memset(&request, 0, sizeof(request));
2592
2593 DBG2(DBG_KNL, "flushing all policies from SPD");
2594
2595 hdr = (struct nlmsghdr*)request;
2596 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2597 hdr->nlmsg_type = XFRM_MSG_FLUSHPOLICY;
2598 hdr->nlmsg_len = NLMSG_LENGTH(0); /* no data associated */
2599
2600 /* by adding an rtattr of type XFRMA_POLICY_TYPE we could restrict this
2601 * to main or sub policies (default is main) */
2602
2603 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2604 {
2605 DBG1(DBG_KNL, "unable to flush SPD entries");
2606 return FAILED;
2607 }
2608 return SUCCESS;
2609 }
2610
2611
2612 METHOD(kernel_ipsec_t, bypass_socket, bool,
2613 private_kernel_netlink_ipsec_t *this, int fd, int family)
2614 {
2615 struct xfrm_userpolicy_info policy;
2616 u_int sol, ipsec_policy;
2617
2618 switch (family)
2619 {
2620 case AF_INET:
2621 sol = SOL_IP;
2622 ipsec_policy = IP_XFRM_POLICY;
2623 break;
2624 case AF_INET6:
2625 sol = SOL_IPV6;
2626 ipsec_policy = IPV6_XFRM_POLICY;
2627 break;
2628 default:
2629 return FALSE;
2630 }
2631
2632 memset(&policy, 0, sizeof(policy));
2633 policy.action = XFRM_POLICY_ALLOW;
2634 policy.sel.family = family;
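	/* with an otherwise empty selector, these per-socket XFRM_POLICY_ALLOW
	 * policies match all traffic on the socket; since per-socket policies
	 * take precedence over the SPD, traffic on this socket (typically the
	 * IKE socket) bypasses the IPsec policies installed above */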
2635
2636 policy.dir = XFRM_POLICY_OUT;
2637 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2638 {
2639 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2640 strerror(errno));
2641 return FALSE;
2642 }
2643 policy.dir = XFRM_POLICY_IN;
2644 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2645 {
2646 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2647 strerror(errno));
2648 return FALSE;
2649 }
2650 return TRUE;
2651 }
2652
2653 METHOD(kernel_ipsec_t, enable_udp_decap, bool,
2654 private_kernel_netlink_ipsec_t *this, int fd, int family, u_int16_t port)
2655 {
2656 int type = UDP_ENCAP_ESPINUDP;
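	/* with UDP_ENCAP_ESPINUDP set, the kernel detects ESP-in-UDP (RFC 3948)
	 * packets on this socket, strips the UDP header and hands them to the
	 * XFRM stack; packets carrying the non-ESP marker (IKE) are still
	 * delivered to userland */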
2657
2658 if (setsockopt(fd, SOL_UDP, UDP_ENCAP, &type, sizeof(type)) < 0)
2659 {
2660 DBG1(DBG_KNL, "unable to set UDP_ENCAP: %s", strerror(errno));
2661 return FALSE;
2662 }
2663 return TRUE;
2664 }
2665
2666 METHOD(kernel_ipsec_t, destroy, void,
2667 private_kernel_netlink_ipsec_t *this)
2668 {
2669 enumerator_t *enumerator;
2670 policy_entry_t *policy;
2671
2672 if (this->socket_xfrm_events > 0)
2673 {
2674 close(this->socket_xfrm_events);
2675 }
2676 DESTROY_IF(this->socket_xfrm);
2677 enumerator = this->policies->create_enumerator(this->policies);
2678 while (enumerator->enumerate(enumerator, &policy, &policy))
2679 {
2680 policy_entry_destroy(this, policy);
2681 }
2682 enumerator->destroy(enumerator);
2683 this->policies->destroy(this->policies);
2684 this->sas->destroy(this->sas);
2685 this->mutex->destroy(this->mutex);
2686 free(this);
2687 }
2688
2689 /*
2690 * Described in header.
2691 */
2692 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2693 {
2694 private_kernel_netlink_ipsec_t *this;
2695 bool register_for_events = TRUE;
2696 int fd;
2697
2698 INIT(this,
2699 .public = {
2700 .interface = {
2701 .get_spi = _get_spi,
2702 .get_cpi = _get_cpi,
2703 .add_sa = _add_sa,
2704 .update_sa = _update_sa,
2705 .query_sa = _query_sa,
2706 .del_sa = _del_sa,
2707 .flush_sas = _flush_sas,
2708 .add_policy = _add_policy,
2709 .query_policy = _query_policy,
2710 .del_policy = _del_policy,
2711 .flush_policies = _flush_policies,
2712 .bypass_socket = _bypass_socket,
2713 .enable_udp_decap = _enable_udp_decap,
2714 .destroy = _destroy,
2715 },
2716 },
2717 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2718 (hashtable_equals_t)policy_equals, 32),
2719 .sas = hashtable_create((hashtable_hash_t)ipsec_sa_hash,
2720 (hashtable_equals_t)ipsec_sa_equals, 32),
2721 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2722 .policy_history = TRUE,
2723 .install_routes = lib->settings->get_bool(lib->settings,
2724 "%s.install_routes", TRUE, hydra->daemon),
2725 .replay_window = lib->settings->get_int(lib->settings,
2726 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2727 );
2728
2729 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2730 (sizeof(u_int32_t) * 8);
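	/* number of 32-bit words needed to hold replay_window bits, rounded up
	 * (e.g. a window of 32 needs 1 word, a window of 128 needs 4) */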
2731
2732 if (streq(hydra->daemon, "pluto"))
2733 { /* no routes for pluto, they are installed via updown script */
2734 this->install_routes = FALSE;
2735 /* no policy history for pluto */
2736 this->policy_history = FALSE;
2737 }
2738 else if (streq(hydra->daemon, "starter"))
2739 { /* starter has no threads, so we do not register for kernel events */
2740 register_for_events = FALSE;
2741 }
2742
2743 /* extend the expiry of allocated SPIs (larval acquire states) in the
2743 * kernel so they do not time out while the negotiation is still running */
2744 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2745 if (fd >= 0)
2746 {
2747 ignore_result(write(fd, "165", 3));
2748 close(fd);
2749 }
2750
2751 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2752 if (!this->socket_xfrm)
2753 {
2754 destroy(this);
2755 return NULL;
2756 }
2757
2758 if (register_for_events)
2759 {
2760 struct sockaddr_nl addr;
2761
2762 memset(&addr, 0, sizeof(addr));
2763 addr.nl_family = AF_NETLINK;
2764
2765 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2766 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2767 if (this->socket_xfrm_events <= 0)
2768 {
2769 DBG1(DBG_KNL, "unable to create XFRM event socket");
2770 destroy(this);
2771 return NULL;
2772 }
2773 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2774 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2775 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2776 {
2777 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2778 destroy(this);
2779 return NULL;
2780 }
2781 lib->processor->queue_job(lib->processor,
2782 (job_t*)callback_job_create_with_prio(
2783 (callback_job_cb_t)receive_events, this, NULL,
2784 (callback_job_cancel_t)return_false, JOB_PRIO_CRITICAL));
2785 }
2786
2787 return &this->public;
2788 }
2789