1 /*
2 * Copyright (C) 2006-2011 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <utils/hashtable.h>
43 #include <utils/linked_list.h>
44 #include <processing/jobs/callback_job.h>
45
46 /** required for Linux 2.6.26 kernel and later */
47 #ifndef XFRM_STATE_AF_UNSPEC
48 #define XFRM_STATE_AF_UNSPEC 32
49 #endif
50
51 /** from linux/in.h */
52 #ifndef IP_XFRM_POLICY
53 #define IP_XFRM_POLICY 17
54 #endif
55
56 /* missing on uclibc */
57 #ifndef IPV6_XFRM_POLICY
58 #define IPV6_XFRM_POLICY 34
59 #endif /*IPV6_XFRM_POLICY*/
60
61 /** default priority of installed policies */
62 #define PRIO_LOW 1024
63 #define PRIO_HIGH 512
64
65 /** default replay window size, if not set using charon.replay_window */
66 #define DEFAULT_REPLAY_WINDOW 32
67
68 /**
69 * map the limit for bytes and packets to XFRM_INF per default
70 */
71 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
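
/* Illustrative example (not part of the original file): a configured
 * lifetime limit of zero means "no limit", so it is mapped to XFRM_INF:
 *   XFRM_LIMIT(0)       -> XFRM_INF  (unlimited)
 *   XFRM_LIMIT(1000000) -> 1000000   (limit of one million bytes/packets)
 */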
72
73 /**
74 * Create ORable bitfield of XFRM NL groups
75 */
76 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
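
/* Illustrative sketch (not part of the original file; the constructor that
 * sets up the event socket is outside this excerpt): such a bitfield is
 * typically ORed together and passed as nl_groups when binding the netlink
 * event socket, e.g. with a placeholder descriptor "fd":
 *
 *   struct sockaddr_nl addr = {
 *       .nl_family = AF_NETLINK,
 *       .nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
 *                    XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING),
 *   };
 *   bind(fd, (struct sockaddr*)&addr, sizeof(addr));
 */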
77
78 /**
79 * returns a pointer to the first rtattr following the nlmsghdr *nlh and the
80 * 'usual' netlink data x like 'struct xfrm_usersa_info'
81 */
82 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + NLMSG_ALIGN(sizeof(x))))
83 /**
84 * returns a pointer to the next rtattr following rta.
85 * !!! do not use this to parse messages. use RTA_NEXT and RTA_OK instead !!!
86 */
87 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + RTA_ALIGN((rta)->rta_len)))
88 /**
89 * returns the total size of attached rta data
90 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
91 */
92 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
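
/* Illustrative sketch (not part of the original file): XFRM_RTA() and
 * XFRM_RTA_NEXT() are used below when building requests, while received
 * messages are parsed with RTA_OK()/RTA_NEXT(), e.g.:
 *
 *   struct rtattr *rta = XFRM_RTA(hdr, struct xfrm_usersa_info);
 *   size_t rtasize = XFRM_PAYLOAD(hdr, struct xfrm_usersa_info);
 *
 *   while (RTA_OK(rta, rtasize))
 *   {
 *       // inspect rta->rta_type and RTA_DATA(rta) here
 *       rta = RTA_NEXT(rta, rtasize);
 *   }
 */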
93
94 typedef struct kernel_algorithm_t kernel_algorithm_t;
95
96 /**
97 * Mapping of an IKEv2 algorithm identifier to its Linux Crypto API name
98 */
99 struct kernel_algorithm_t {
100 /**
101 * Identifier specified in IKEv2
102 */
103 int ikev2;
104
105 /**
106 * Name of the algorithm in linux crypto API
107 */
108 char *name;
109 };
110
111 ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
112 "XFRM_MSG_NEWSA",
113 "XFRM_MSG_DELSA",
114 "XFRM_MSG_GETSA",
115 "XFRM_MSG_NEWPOLICY",
116 "XFRM_MSG_DELPOLICY",
117 "XFRM_MSG_GETPOLICY",
118 "XFRM_MSG_ALLOCSPI",
119 "XFRM_MSG_ACQUIRE",
120 "XFRM_MSG_EXPIRE",
121 "XFRM_MSG_UPDPOLICY",
122 "XFRM_MSG_UPDSA",
123 "XFRM_MSG_POLEXPIRE",
124 "XFRM_MSG_FLUSHSA",
125 "XFRM_MSG_FLUSHPOLICY",
126 "XFRM_MSG_NEWAE",
127 "XFRM_MSG_GETAE",
128 "XFRM_MSG_REPORT",
129 "XFRM_MSG_MIGRATE",
130 "XFRM_MSG_NEWSADINFO",
131 "XFRM_MSG_GETSADINFO",
132 "XFRM_MSG_NEWSPDINFO",
133 "XFRM_MSG_GETSPDINFO",
134 "XFRM_MSG_MAPPING"
135 );
136
137 ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_KMADDRESS,
138 "XFRMA_UNSPEC",
139 "XFRMA_ALG_AUTH",
140 "XFRMA_ALG_CRYPT",
141 "XFRMA_ALG_COMP",
142 "XFRMA_ENCAP",
143 "XFRMA_TMPL",
144 "XFRMA_SA",
145 "XFRMA_POLICY",
146 "XFRMA_SEC_CTX",
147 "XFRMA_LTIME_VAL",
148 "XFRMA_REPLAY_VAL",
149 "XFRMA_REPLAY_THRESH",
150 "XFRMA_ETIMER_THRESH",
151 "XFRMA_SRCADDR",
152 "XFRMA_COADDR",
153 "XFRMA_LASTUSED",
154 "XFRMA_POLICY_TYPE",
155 "XFRMA_MIGRATE",
156 "XFRMA_ALG_AEAD",
157 "XFRMA_KMADDRESS"
158 );
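
/* Illustrative example (not part of the original file): these enum name
 * tables are consumed by the library's %N printf hook, e.g.
 *
 *   DBG2(DBG_KNL, "received a %N", xfrm_msg_names, XFRM_MSG_ACQUIRE);
 *
 * prints "received a XFRM_MSG_ACQUIRE", as done in the event handlers below.
 */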
159
160 #define END_OF_LIST -1
161
162 /**
163 * Algorithms for encryption
164 */
165 static kernel_algorithm_t encryption_algs[] = {
166 /* {ENCR_DES_IV64, "***" }, */
167 {ENCR_DES, "des" },
168 {ENCR_3DES, "des3_ede" },
169 /* {ENCR_RC5, "***" }, */
170 /* {ENCR_IDEA, "***" }, */
171 {ENCR_CAST, "cast128" },
172 {ENCR_BLOWFISH, "blowfish" },
173 /* {ENCR_3IDEA, "***" }, */
174 /* {ENCR_DES_IV32, "***" }, */
175 {ENCR_NULL, "cipher_null" },
176 {ENCR_AES_CBC, "aes" },
177 {ENCR_AES_CTR, "rfc3686(ctr(aes))" },
178 {ENCR_AES_CCM_ICV8, "rfc4309(ccm(aes))" },
179 {ENCR_AES_CCM_ICV12, "rfc4309(ccm(aes))" },
180 {ENCR_AES_CCM_ICV16, "rfc4309(ccm(aes))" },
181 {ENCR_AES_GCM_ICV8, "rfc4106(gcm(aes))" },
182 {ENCR_AES_GCM_ICV12, "rfc4106(gcm(aes))" },
183 {ENCR_AES_GCM_ICV16, "rfc4106(gcm(aes))" },
184 {ENCR_NULL_AUTH_AES_GMAC, "rfc4543(gcm(aes))" },
185 {ENCR_CAMELLIA_CBC, "cbc(camellia)" },
186 /* {ENCR_CAMELLIA_CTR, "***" }, */
187 /* {ENCR_CAMELLIA_CCM_ICV8, "***" }, */
188 /* {ENCR_CAMELLIA_CCM_ICV12, "***" }, */
189 /* {ENCR_CAMELLIA_CCM_ICV16, "***" }, */
190 {ENCR_SERPENT_CBC, "serpent" },
191 {ENCR_TWOFISH_CBC, "twofish" },
192 {END_OF_LIST, NULL }
193 };
194
195 /**
196 * Algorithms for integrity protection
197 */
198 static kernel_algorithm_t integrity_algs[] = {
199 {AUTH_HMAC_MD5_96, "md5" },
200 {AUTH_HMAC_SHA1_96, "sha1" },
201 {AUTH_HMAC_SHA2_256_96, "sha256" },
202 {AUTH_HMAC_SHA2_256_128, "hmac(sha256)" },
203 {AUTH_HMAC_SHA2_384_192, "hmac(sha384)" },
204 {AUTH_HMAC_SHA2_512_256, "hmac(sha512)" },
205 /* {AUTH_DES_MAC, "***" }, */
206 /* {AUTH_KPDK_MD5, "***" }, */
207 {AUTH_AES_XCBC_96, "xcbc(aes)" },
208 {END_OF_LIST, NULL }
209 };
210
211 /**
212 * Algorithms for IPComp
213 */
214 static kernel_algorithm_t compression_algs[] = {
215 /* {IPCOMP_OUI, "***" }, */
216 {IPCOMP_DEFLATE, "deflate" },
217 {IPCOMP_LZS, "lzs" },
218 {IPCOMP_LZJH, "lzjh" },
219 {END_OF_LIST, NULL }
220 };
221
222 /**
223 * Look up the Linux Crypto API name for an IKEv2 algorithm identifier
224 */
225 static char* lookup_algorithm(kernel_algorithm_t *list, int ikev2)
226 {
227 while (list->ikev2 != END_OF_LIST)
228 {
229 if (list->ikev2 == ikev2)
230 {
231 return list->name;
232 }
233 list++;
234 }
235 return NULL;
236 }
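
/* Illustrative example (not part of the original file): resolving the Linux
 * Crypto API name for an IKEv2 identifier using the tables above:
 *
 *   char *name;
 *
 *   name = lookup_algorithm(encryption_algs, ENCR_AES_CBC);
 *   // name == "aes"
 *   name = lookup_algorithm(integrity_algs, AUTH_HMAC_SHA2_256_128);
 *   // name == "hmac(sha256)"
 *   name = lookup_algorithm(encryption_algs, ENCR_IDEA);
 *   // name == NULL, not mapped in the table above
 */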
237
238 typedef struct route_entry_t route_entry_t;
239
240 /**
241 * installed routing entry
242 */
243 struct route_entry_t {
244 /** name of the interface the route is bound to */
245 char *if_name;
246
247 /** source ip of the route */
248 host_t *src_ip;
249
250 /** gateway for this route */
251 host_t *gateway;
252
253 /** destination net */
254 chunk_t dst_net;
255
256 /** destination net prefixlen */
257 u_int8_t prefixlen;
258 };
259
260 /**
261 * destroy a route_entry_t object
262 */
263 static void route_entry_destroy(route_entry_t *this)
264 {
265 free(this->if_name);
266 this->src_ip->destroy(this->src_ip);
267 DESTROY_IF(this->gateway);
268 chunk_free(&this->dst_net);
269 free(this);
270 }
271
272 /**
273 * compare two route_entry_t objects
274 */
275 static bool route_entry_equals(route_entry_t *a, route_entry_t *b)
276 {
277 return a->if_name && b->if_name && streq(a->if_name, b->if_name) &&
278 a->src_ip->equals(a->src_ip, b->src_ip) &&
279 a->gateway->equals(a->gateway, b->gateway) &&
280 chunk_equals(a->dst_net, b->dst_net) && a->prefixlen == b->prefixlen;
281 }
282
283 typedef struct policy_sa_t policy_sa_t;
284
285 /**
286 * IPsec SA assigned to a policy.
287 */
288 struct policy_sa_t {
289 /** priority assigned to the policy when installed with this SA */
290 u_int32_t priority;
291
292 /** type of the policy */
293 policy_type_t type;
294
295 /** source address of this SA */
296 host_t *src;
297
298 /** destination address of this SA */
299 host_t *dst;
300
301 /** source traffic selector of this SA */
302 traffic_selector_t *src_ts;
303
304 /** destination traffic selector of this SA */
305 traffic_selector_t *dst_ts;
306
307 /** optional mark */
308 mark_t mark;
309
310 /** description of this SA */
311 ipsec_sa_cfg_t cfg;
312 };
313
314 static void policy_sa_destroy(policy_sa_t *this)
315 {
316 DESTROY_IF(this->src);
317 DESTROY_IF(this->dst);
318 DESTROY_IF(this->src_ts);
319 DESTROY_IF(this->dst_ts);
320 free(this);
321 }
322
323 typedef struct policy_entry_t policy_entry_t;
324
325 /**
326 * installed kernel policy.
327 */
328 struct policy_entry_t {
329
330 /** direction of this policy: in, out, forward */
331 u_int8_t direction;
332
333 /** parameters of installed policy */
334 struct xfrm_selector sel;
335
336 /** optional mark */
337 u_int32_t mark;
338
339 /** associated route installed for this policy */
340 route_entry_t *route;
341
342 /** the SAs this policy is used by, ordered by priority */
343 linked_list_t *sas;
344 };
345
346 static void policy_entry_destroy(policy_entry_t *this)
347 {
348 if (this->route)
349 {
350 route_entry_destroy(this->route);
351 }
352 this->sas->destroy_function(this->sas, (void*)policy_sa_destroy);
353 free(this);
354 }
355
356 /**
357 * Hash function for policy_entry_t objects
358 */
359 static u_int policy_hash(policy_entry_t *key)
360 {
361 chunk_t chunk = chunk_create((void*)&key->sel,
362 sizeof(struct xfrm_selector) + sizeof(u_int32_t));
363 return chunk_hash(chunk);
364 }
365
366 /**
367 * Equality function for policy_entry_t objects
368 */
369 static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
370 {
371 return memeq(&key->sel, &other_key->sel,
372 sizeof(struct xfrm_selector) + sizeof(u_int32_t)) &&
373 key->direction == other_key->direction;
374 }
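
/* Illustrative sketch (not part of the original file; the constructor is
 * outside this excerpt): these two functions are intended to be plugged into
 * the policy hashtable, roughly as
 *
 *   this->policies = hashtable_create((hashtable_hash_t)policy_hash,
 *                                     (hashtable_equals_t)policy_equals, 32);
 *
 * Both operate on the selector plus the mark (the u_int32_t that directly
 * follows it in policy_entry_t); equality additionally compares the
 * direction.
 */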
375
376 typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;
377
378 /**
379 * Private variables and functions of kernel_netlink class.
380 */
381 struct private_kernel_netlink_ipsec_t {
382 /**
383 * Public part of the kernel_netlink_t object.
384 */
385 kernel_netlink_ipsec_t public;
386
387 /**
388 * mutex to lock access to installed policies
389 */
390 mutex_t *mutex;
391
392 /**
393 * Hash table of installed policies (policy_entry_t)
394 */
395 hashtable_t *policies;
396
397 /**
398 * job receiving netlink events
399 */
400 callback_job_t *job;
401
402 /**
403 * Netlink xfrm socket (IPsec)
404 */
405 netlink_socket_t *socket_xfrm;
406
407 /**
408 * netlink xfrm socket to receive acquire and expire events
409 */
410 int socket_xfrm_events;
411
412 /**
413 * whether to install routes along policies
414 */
415 bool install_routes;
416
417 /**
418 * Size of the replay window, in packets
419 */
420 u_int32_t replay_window;
421
422 /**
423 * Size of the replay window bitmap, in bytes
424 */
425 u_int32_t replay_bmp;
426 };
427
428 /**
429 * convert the general ipsec mode to the one defined in xfrm.h
430 */
431 static u_int8_t mode2kernel(ipsec_mode_t mode)
432 {
433 switch (mode)
434 {
435 case MODE_TRANSPORT:
436 return XFRM_MODE_TRANSPORT;
437 case MODE_TUNNEL:
438 return XFRM_MODE_TUNNEL;
439 case MODE_BEET:
440 return XFRM_MODE_BEET;
441 default:
442 return mode;
443 }
444 }
445
446 /**
447 * convert a host_t to a struct xfrm_address
448 */
449 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
450 {
451 chunk_t chunk = host->get_address(host);
452 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
453 }
454
455 /**
456 * convert a struct xfrm_address to a host_t
457 */
458 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
459 {
460 chunk_t chunk;
461
462 switch (family)
463 {
464 case AF_INET:
465 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
466 break;
467 case AF_INET6:
468 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
469 break;
470 default:
471 return NULL;
472 }
473 return host_create_from_chunk(family, chunk, ntohs(port));
474 }
475
476 /**
477 * convert a traffic selector address range to subnet and its mask.
478 */
479 static void ts2subnet(traffic_selector_t* ts,
480 xfrm_address_t *net, u_int8_t *mask)
481 {
482 host_t *net_host;
483 chunk_t net_chunk;
484
485 ts->to_subnet(ts, &net_host, mask);
486 net_chunk = net_host->get_address(net_host);
487 memcpy(net, net_chunk.ptr, net_chunk.len);
488 net_host->destroy(net_host);
489 }
490
491 /**
492 * convert a traffic selector port range to port/portmask
493 */
494 static void ts2ports(traffic_selector_t* ts,
495 u_int16_t *port, u_int16_t *mask)
496 {
497 /* Linux does not seem to accept complex port masks: only "any" or a
498 * single specific port is allowed. We therefore match any port if the
499 * selector covers a range, and the exact port if it covers one port only.
500 */
501 u_int16_t from, to;
502
503 from = ts->get_from_port(ts);
504 to = ts->get_to_port(ts);
505
506 if (from == to)
507 {
508 *port = htons(from);
509 *mask = ~0;
510 }
511 else
512 {
513 *port = 0;
514 *mask = 0;
515 }
516 }
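
/* Illustrative examples (not part of the original file) of the mapping
 * performed above:
 *   port range 80..80      -> *port = htons(80), *mask = 0xffff (match 80)
 *   port range 1024..65535 -> *port = 0,         *mask = 0      (match any)
 *   port range 0..65535    -> *port = 0,         *mask = 0      (match any)
 */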
517
518 /**
519 * convert a pair of traffic_selectors to a xfrm_selector
520 */
521 static struct xfrm_selector ts2selector(traffic_selector_t *src,
522 traffic_selector_t *dst)
523 {
524 struct xfrm_selector sel;
525
526 memset(&sel, 0, sizeof(sel));
527 sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
528 /* src or dest proto may be "any" (0), use more restrictive one */
529 sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
530 ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
531 ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
532 ts2ports(dst, &sel.dport, &sel.dport_mask);
533 ts2ports(src, &sel.sport, &sel.sport_mask);
534 sel.ifindex = 0;
535 sel.user = 0;
536
537 return sel;
538 }
539
540 /**
541 * convert a xfrm_selector to a src|dst traffic_selector
542 */
543 static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
544 {
545 u_char *addr;
546 u_int8_t prefixlen;
547 u_int16_t port = 0;
548 host_t *host = NULL;
549
550 if (src)
551 {
552 addr = (u_char*)&sel->saddr;
553 prefixlen = sel->prefixlen_s;
554 if (sel->sport_mask)
555 {
556 port = htons(sel->sport);
557 }
558 }
559 else
560 {
561 addr = (u_char*)&sel->daddr;
562 prefixlen = sel->prefixlen_d;
563 if (sel->dport_mask)
564 {
565 port = htons(sel->dport);
566 }
567 }
568
569 /* The Linux 2.6 kernel does not set the selector's family field,
570 * so as a kludge we additionally test the prefix length.
571 */
572 if (sel->family == AF_INET || sel->prefixlen_s == 32)
573 {
574 host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
575 }
576 else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
577 {
578 host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
579 }
580
581 if (host)
582 {
583 return traffic_selector_create_from_subnet(host, prefixlen,
584 sel->proto, port);
585 }
586 return NULL;
587 }
588
589 /**
590 * process a XFRM_MSG_ACQUIRE from kernel
591 */
592 static void process_acquire(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
593 {
594 u_int32_t reqid = 0;
595 int proto = 0;
596 traffic_selector_t *src_ts, *dst_ts;
597 struct xfrm_user_acquire *acquire;
598 struct rtattr *rta;
599 size_t rtasize;
600
601 acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
602 rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
603 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
604
605 DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");
606
607 while (RTA_OK(rta, rtasize))
608 {
609 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
610
611 if (rta->rta_type == XFRMA_TMPL)
612 {
613 struct xfrm_user_tmpl* tmpl;
614
615 tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
616 reqid = tmpl->reqid;
617 proto = tmpl->id.proto;
618 }
619 rta = RTA_NEXT(rta, rtasize);
620 }
621 switch (proto)
622 {
623 case 0:
624 case IPPROTO_ESP:
625 case IPPROTO_AH:
626 break;
627 default:
628 /* acquire for AH/ESP only, not for IPCOMP */
629 return;
630 }
631 src_ts = selector2ts(&acquire->sel, TRUE);
632 dst_ts = selector2ts(&acquire->sel, FALSE);
633
634 hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
635 dst_ts);
636 }
637
638 /**
639 * process a XFRM_MSG_EXPIRE from kernel
640 */
641 static void process_expire(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
642 {
643 u_int8_t protocol;
644 u_int32_t spi, reqid;
645 struct xfrm_user_expire *expire;
646
647 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
648 protocol = expire->state.id.proto;
649 spi = expire->state.id.spi;
650 reqid = expire->state.reqid;
651
652 DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");
653
654 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
655 {
656 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
657 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
658 return;
659 }
660
661 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
662 spi, expire->hard != 0);
663 }
664
665 /**
666 * process a XFRM_MSG_MIGRATE from kernel
667 */
668 static void process_migrate(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
669 {
670 traffic_selector_t *src_ts, *dst_ts;
671 host_t *local = NULL, *remote = NULL;
672 host_t *old_src = NULL, *old_dst = NULL;
673 host_t *new_src = NULL, *new_dst = NULL;
674 struct xfrm_userpolicy_id *policy_id;
675 struct rtattr *rta;
676 size_t rtasize;
677 u_int32_t reqid = 0;
678 policy_dir_t dir;
679
680 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
681 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
682 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
683
684 DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");
685
686 src_ts = selector2ts(&policy_id->sel, TRUE);
687 dst_ts = selector2ts(&policy_id->sel, FALSE);
688 dir = (policy_dir_t)policy_id->dir;
689
690 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);
691
692 while (RTA_OK(rta, rtasize))
693 {
694 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
695 if (rta->rta_type == XFRMA_KMADDRESS)
696 {
697 struct xfrm_user_kmaddress *kmaddress;
698
699 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
700 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
701 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
702 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
703 }
704 else if (rta->rta_type == XFRMA_MIGRATE)
705 {
706 struct xfrm_user_migrate *migrate;
707
708 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
709 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
710 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
711 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
712 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
713 reqid = migrate->reqid;
714 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
715 old_src, old_dst, new_src, new_dst, reqid);
716 DESTROY_IF(old_src);
717 DESTROY_IF(old_dst);
718 DESTROY_IF(new_src);
719 DESTROY_IF(new_dst);
720 }
721 rta = RTA_NEXT(rta, rtasize);
722 }
723
724 if (src_ts && dst_ts && local && remote)
725 {
726 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
727 src_ts, dst_ts, dir, local, remote);
728 }
729 else
730 {
731 DESTROY_IF(src_ts);
732 DESTROY_IF(dst_ts);
733 DESTROY_IF(local);
734 DESTROY_IF(remote);
735 }
736 }
737
738 /**
739 * process a XFRM_MSG_MAPPING from kernel
740 */
741 static void process_mapping(private_kernel_netlink_ipsec_t *this,
742 struct nlmsghdr *hdr)
743 {
744 u_int32_t spi, reqid;
745 struct xfrm_user_mapping *mapping;
746 host_t *host;
747
748 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
749 spi = mapping->id.spi;
750 reqid = mapping->reqid;
751
752 DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");
753
754 if (mapping->id.proto == IPPROTO_ESP)
755 {
756 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
757 mapping->new_sport);
758 if (host)
759 {
760 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
761 spi, host);
762 }
763 }
764 }
765
766 /**
767 * Receives events from kernel
768 */
769 static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
770 {
771 char response[1024];
772 struct nlmsghdr *hdr = (struct nlmsghdr*)response;
773 struct sockaddr_nl addr;
774 socklen_t addr_len = sizeof(addr);
775 int len;
776 bool oldstate;
777
778 oldstate = thread_cancelability(TRUE);
779 len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
780 (struct sockaddr*)&addr, &addr_len);
781 thread_cancelability(oldstate);
782
783 if (len < 0)
784 {
785 switch (errno)
786 {
787 case EINTR:
788 /* interrupted, try again */
789 return JOB_REQUEUE_DIRECT;
790 case EAGAIN:
791 /* no data ready, select again */
792 return JOB_REQUEUE_DIRECT;
793 default:
794 DBG1(DBG_KNL, "unable to receive from xfrm event socket");
795 sleep(1);
796 return JOB_REQUEUE_FAIR;
797 }
798 }
799
800 if (addr.nl_pid != 0)
801 { /* not from kernel. not interested, try another one */
802 return JOB_REQUEUE_DIRECT;
803 }
804
805 while (NLMSG_OK(hdr, len))
806 {
807 switch (hdr->nlmsg_type)
808 {
809 case XFRM_MSG_ACQUIRE:
810 process_acquire(this, hdr);
811 break;
812 case XFRM_MSG_EXPIRE:
813 process_expire(this, hdr);
814 break;
815 case XFRM_MSG_MIGRATE:
816 process_migrate(this, hdr);
817 break;
818 case XFRM_MSG_MAPPING:
819 process_mapping(this, hdr);
820 break;
821 default:
822 DBG1(DBG_KNL, "received unknown event from xfrm event socket: %d", hdr->nlmsg_type);
823 break;
824 }
825 hdr = NLMSG_NEXT(hdr, len);
826 }
827 return JOB_REQUEUE_DIRECT;
828 }
829
830 /**
831 * Get an SPI for a specific protocol from the kernel.
832 */
833 static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
834 host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
835 u_int32_t reqid, u_int32_t *spi)
836 {
837 netlink_buf_t request;
838 struct nlmsghdr *hdr, *out;
839 struct xfrm_userspi_info *userspi;
840 u_int32_t received_spi = 0;
841 size_t len;
842
843 memset(&request, 0, sizeof(request));
844
845 hdr = (struct nlmsghdr*)request;
846 hdr->nlmsg_flags = NLM_F_REQUEST;
847 hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
848 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));
849
850 userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
851 host2xfrm(src, &userspi->info.saddr);
852 host2xfrm(dst, &userspi->info.id.daddr);
853 userspi->info.id.proto = proto;
854 userspi->info.mode = XFRM_MODE_TUNNEL;
855 userspi->info.reqid = reqid;
856 userspi->info.family = src->get_family(src);
857 userspi->min = min;
858 userspi->max = max;
859
860 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
861 {
862 hdr = out;
863 while (NLMSG_OK(hdr, len))
864 {
865 switch (hdr->nlmsg_type)
866 {
867 case XFRM_MSG_NEWSA:
868 {
869 struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
870 received_spi = usersa->id.spi;
871 break;
872 }
873 case NLMSG_ERROR:
874 {
875 struct nlmsgerr *err = NLMSG_DATA(hdr);
876
877 DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
878 strerror(-err->error), -err->error);
879 break;
880 }
881 default:
882 hdr = NLMSG_NEXT(hdr, len);
883 continue;
884 case NLMSG_DONE:
885 break;
886 }
887 break;
888 }
889 free(out);
890 }
891
892 if (received_spi == 0)
893 {
894 return FAILED;
895 }
896
897 *spi = received_spi;
898 return SUCCESS;
899 }
900
901 METHOD(kernel_ipsec_t, get_spi, status_t,
902 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
903 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
904 {
905 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
906
907 if (get_spi_internal(this, src, dst, protocol,
908 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
909 {
910 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
911 return FAILED;
912 }
913
914 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
915
916 return SUCCESS;
917 }
918
919 METHOD(kernel_ipsec_t, get_cpi, status_t,
920 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
921 u_int32_t reqid, u_int16_t *cpi)
922 {
923 u_int32_t received_spi = 0;
924
925 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
926
927 if (get_spi_internal(this, src, dst,
928 IPPROTO_COMP, 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
929 {
930 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
931 return FAILED;
932 }
933
934 *cpi = htons((u_int16_t)ntohl(received_spi));
935
936 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
937
938 return SUCCESS;
939 }
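
/* Illustrative example (not part of the original file): the kernel allocates
 * the IPComp SPI from the 16-bit CPI range requested above (0x100..0xEFFF),
 * so the CPI is the SPI truncated to 16 bits, back in network order:
 *
 *   received_spi = htonl(0x00001234);             // as returned by the kernel
 *   cpi = htons((u_int16_t)ntohl(received_spi));  // == htons(0x1234)
 */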
940
941 METHOD(kernel_ipsec_t, add_sa, status_t,
942 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
943 u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
944 u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
945 u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
946 u_int16_t cpi, bool encap, bool esn, bool inbound,
947 traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
948 {
949 netlink_buf_t request;
950 char *alg_name;
951 struct nlmsghdr *hdr;
952 struct xfrm_usersa_info *sa;
953 u_int16_t icv_size = 64;
954 status_t status = FAILED;
955
956 /* if IPComp is used, we install an additional IPComp SA. if the cpi is 0
957 * we are in the recursive call below */
958 if (ipcomp != IPCOMP_NONE && cpi != 0)
959 {
960 lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
961 add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark, tfc,
962 &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED, chunk_empty,
963 mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
964 ipcomp = IPCOMP_NONE;
965 /* use transport mode ESP SA, IPComp uses tunnel mode */
966 mode = MODE_TRANSPORT;
967 }
968
969 memset(&request, 0, sizeof(request));
970
971 if (mark.value)
972 {
973 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} "
974 "(mark %u/0x%8x)", ntohl(spi), reqid, mark.value, mark.mask);
975 }
976 else
977 {
978 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u}",
979 ntohl(spi), reqid);
980 }
981 hdr = (struct nlmsghdr*)request;
982 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
983 hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
984 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
985
986 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
987 host2xfrm(src, &sa->saddr);
988 host2xfrm(dst, &sa->id.daddr);
989 sa->id.spi = spi;
990 sa->id.proto = protocol;
991 sa->family = src->get_family(src);
992 sa->mode = mode2kernel(mode);
993 switch (mode)
994 {
995 case MODE_TUNNEL:
996 sa->flags |= XFRM_STATE_AF_UNSPEC;
997 break;
998 case MODE_BEET:
999 case MODE_TRANSPORT:
1000 if(src_ts && dst_ts)
1001 {
1002 sa->sel = ts2selector(src_ts, dst_ts);
1003 }
1004 break;
1005 default:
1006 break;
1007 }
1008
1009 sa->reqid = reqid;
1010 sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
1011 sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
1012 sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
1013 sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
1014 /* we use lifetimes since added, not since used */
1015 sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
1016 sa->lft.hard_add_expires_seconds = lifetime->time.life;
1017 sa->lft.soft_use_expires_seconds = 0;
1018 sa->lft.hard_use_expires_seconds = 0;
1019
1020 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);
1021
1022 switch (enc_alg)
1023 {
1024 case ENCR_UNDEFINED:
1025 /* no encryption */
1026 break;
1027 case ENCR_AES_CCM_ICV16:
1028 case ENCR_AES_GCM_ICV16:
1029 case ENCR_NULL_AUTH_AES_GMAC:
1030 case ENCR_CAMELLIA_CCM_ICV16:
1031 icv_size += 32;
1032 /* FALL */
1033 case ENCR_AES_CCM_ICV12:
1034 case ENCR_AES_GCM_ICV12:
1035 case ENCR_CAMELLIA_CCM_ICV12:
1036 icv_size += 32;
1037 /* FALL */
1038 case ENCR_AES_CCM_ICV8:
1039 case ENCR_AES_GCM_ICV8:
1040 case ENCR_CAMELLIA_CCM_ICV8:
1041 {
1042 struct xfrm_algo_aead *algo;
1043
1044 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1045 if (alg_name == NULL)
1046 {
1047 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1048 encryption_algorithm_names, enc_alg);
1049 goto failed;
1050 }
1051 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1052 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1053
1054 rthdr->rta_type = XFRMA_ALG_AEAD;
1055 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) + enc_key.len);
1056 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1057 if (hdr->nlmsg_len > sizeof(request))
1058 {
1059 goto failed;
1060 }
1061
1062 algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
1063 algo->alg_key_len = enc_key.len * 8;
1064 algo->alg_icv_len = icv_size;
1065 strcpy(algo->alg_name, alg_name);
1066 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1067
1068 rthdr = XFRM_RTA_NEXT(rthdr);
1069 break;
1070 }
1071 default:
1072 {
1073 struct xfrm_algo *algo;
1074
1075 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1076 if (alg_name == NULL)
1077 {
1078 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1079 encryption_algorithm_names, enc_alg);
1080 goto failed;
1081 }
1082 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1083 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1084
1085 rthdr->rta_type = XFRMA_ALG_CRYPT;
1086 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
1087 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1088 if (hdr->nlmsg_len > sizeof(request))
1089 {
1090 goto failed;
1091 }
1092
1093 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1094 algo->alg_key_len = enc_key.len * 8;
1095 strcpy(algo->alg_name, alg_name);
1096 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1097
1098 rthdr = XFRM_RTA_NEXT(rthdr);
1099 }
1100 }
1101
1102 if (int_alg != AUTH_UNDEFINED)
1103 {
1104 alg_name = lookup_algorithm(integrity_algs, int_alg);
1105 if (alg_name == NULL)
1106 {
1107 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1108 integrity_algorithm_names, int_alg);
1109 goto failed;
1110 }
1111 DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
1112 integrity_algorithm_names, int_alg, int_key.len * 8);
1113
1114 if (int_alg == AUTH_HMAC_SHA2_256_128)
1115 {
1116 struct xfrm_algo_auth* algo;
1117
1118 /* the kernel uses SHA256 with 96 bit truncation by default,
1119 * use specified truncation size supported by newer kernels */
1120 rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
1121 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) + int_key.len);
1122
1123 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1124 if (hdr->nlmsg_len > sizeof(request))
1125 {
1126 goto failed;
1127 }
1128
1129 algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
1130 algo->alg_key_len = int_key.len * 8;
1131 algo->alg_trunc_len = 128;
1132 strcpy(algo->alg_name, alg_name);
1133 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1134 }
1135 else
1136 {
1137 struct xfrm_algo* algo;
1138
1139 rthdr->rta_type = XFRMA_ALG_AUTH;
1140 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);
1141
1142 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1143 if (hdr->nlmsg_len > sizeof(request))
1144 {
1145 goto failed;
1146 }
1147
1148 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1149 algo->alg_key_len = int_key.len * 8;
1150 strcpy(algo->alg_name, alg_name);
1151 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1152 }
1153 rthdr = XFRM_RTA_NEXT(rthdr);
1154 }
1155
1156 if (ipcomp != IPCOMP_NONE)
1157 {
1158 rthdr->rta_type = XFRMA_ALG_COMP;
1159 alg_name = lookup_algorithm(compression_algs, ipcomp);
1160 if (alg_name == NULL)
1161 {
1162 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1163 ipcomp_transform_names, ipcomp);
1164 goto failed;
1165 }
1166 DBG2(DBG_KNL, " using compression algorithm %N",
1167 ipcomp_transform_names, ipcomp);
1168
1169 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
1170 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1171 if (hdr->nlmsg_len > sizeof(request))
1172 {
1173 goto failed;
1174 }
1175
1176 struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1177 algo->alg_key_len = 0;
1178 strcpy(algo->alg_name, alg_name);
1179
1180 rthdr = XFRM_RTA_NEXT(rthdr);
1181 }
1182
1183 if (encap)
1184 {
1185 struct xfrm_encap_tmpl *tmpl;
1186
1187 rthdr->rta_type = XFRMA_ENCAP;
1188 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1189
1190 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1191 if (hdr->nlmsg_len > sizeof(request))
1192 {
1193 goto failed;
1194 }
1195
1196 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
1197 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1198 tmpl->encap_sport = htons(src->get_port(src));
1199 tmpl->encap_dport = htons(dst->get_port(dst));
1200 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1201 /* encap_oa could probably be derived from the
1202 * traffic selectors [rfc4306, p39]. In the netlink kernel implementation
1203 * pluto does the same as we do here but it uses encap_oa in the
1204 * pfkey implementation. BUT as /usr/src/linux/net/key/af_key.c indicates
1205 * the kernel ignores it anyway
1206 * -> does that mean that NAT-T encap doesn't work in transport mode?
1207 * No. The reason the kernel ignores NAT-OA is that it recomputes
1208 * (or, rather, just ignores) the checksum. If packets pass
1209 * the IPsec checks it marks them "checksum ok" so OA isn't needed. */
1210 rthdr = XFRM_RTA_NEXT(rthdr);
1211 }
1212
1213 if (mark.value)
1214 {
1215 struct xfrm_mark *mrk;
1216
1217 rthdr->rta_type = XFRMA_MARK;
1218 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1219
1220 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1221 if (hdr->nlmsg_len > sizeof(request))
1222 {
1223 goto failed;
1224 }
1225
1226 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1227 mrk->v = mark.value;
1228 mrk->m = mark.mask;
1229 rthdr = XFRM_RTA_NEXT(rthdr);
1230 }
1231
1232 if (tfc)
1233 {
1234 u_int32_t *tfcpad;
1235
1236 rthdr->rta_type = XFRMA_TFCPAD;
1237 rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));
1238
1239 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1240 if (hdr->nlmsg_len > sizeof(request))
1241 {
1242 goto failed;
1243 }
1244
1245 tfcpad = (u_int32_t*)RTA_DATA(rthdr);
1246 *tfcpad = tfc;
1247 rthdr = XFRM_RTA_NEXT(rthdr);
1248 }
1249
1250 if (protocol != IPPROTO_COMP)
1251 {
1252 if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
1253 {
1254 /* for ESN or larger replay windows we need the new
1255 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
1256 struct xfrm_replay_state_esn *replay;
1257
1258 rthdr->rta_type = XFRMA_REPLAY_ESN_VAL;
1259 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1260 (this->replay_window + 7) / 8);
1261
1262 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1263 if (hdr->nlmsg_len > sizeof(request))
1264 {
1265 goto failed;
1266 }
1267
1268 replay = (struct xfrm_replay_state_esn*)RTA_DATA(rthdr);
1269 /* bmp_len contains the number of __u32's */
1270 replay->bmp_len = this->replay_bmp;
1271 replay->replay_window = this->replay_window;
1272
1273 rthdr = XFRM_RTA_NEXT(rthdr);
1274 if (esn)
1275 {
1276 sa->flags |= XFRM_STATE_ESN;
1277 }
1278 }
1279 else
1280 {
1281 sa->replay_window = DEFAULT_REPLAY_WINDOW;
1282 }
1283 }
1284
1285 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1286 {
1287 if (mark.value)
1288 {
1289 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
1290 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1291 }
1292 else
1293 {
1294 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
1295 }
1296 goto failed;
1297 }
1298
1299 status = SUCCESS;
1300
1301 failed:
1302 memwipe(request, sizeof(request));
1303 return status;
1304 }
1305
1306 /**
1307 * Get the ESN replay state (i.e. sequence numbers) of an SA.
1308 *
1309 * Allocates the replay state structure received from the kernel into *replay or *replay_esn.
1310 */
1311 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1312 u_int32_t spi, u_int8_t protocol, host_t *dst,
1313 struct xfrm_replay_state_esn **replay_esn,
1314 struct xfrm_replay_state **replay)
1315 {
1316 netlink_buf_t request;
1317 struct nlmsghdr *hdr, *out = NULL;
1318 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1319 size_t len;
1320 struct rtattr *rta;
1321 size_t rtasize;
1322
1323 memset(&request, 0, sizeof(request));
1324
1325 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1326 ntohl(spi));
1327
1328 hdr = (struct nlmsghdr*)request;
1329 hdr->nlmsg_flags = NLM_F_REQUEST;
1330 hdr->nlmsg_type = XFRM_MSG_GETAE;
1331 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1332
1333 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1334 aevent_id->flags = XFRM_AE_RVAL;
1335
1336 host2xfrm(dst, &aevent_id->sa_id.daddr);
1337 aevent_id->sa_id.spi = spi;
1338 aevent_id->sa_id.proto = protocol;
1339 aevent_id->sa_id.family = dst->get_family(dst);
1340
1341 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1342 {
1343 hdr = out;
1344 while (NLMSG_OK(hdr, len))
1345 {
1346 switch (hdr->nlmsg_type)
1347 {
1348 case XFRM_MSG_NEWAE:
1349 {
1350 out_aevent = NLMSG_DATA(hdr);
1351 break;
1352 }
1353 case NLMSG_ERROR:
1354 {
1355 struct nlmsgerr *err = NLMSG_DATA(hdr);
1356 DBG1(DBG_KNL, "querying replay state from SAD entry failed: %s (%d)",
1357 strerror(-err->error), -err->error);
1358 break;
1359 }
1360 default:
1361 hdr = NLMSG_NEXT(hdr, len);
1362 continue;
1363 case NLMSG_DONE:
1364 break;
1365 }
1366 break;
1367 }
1368 }
1369
1370 if (out_aevent)
1371 {
1372 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1373 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1374 while (RTA_OK(rta, rtasize))
1375 {
1376 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1377 RTA_PAYLOAD(rta) == sizeof(**replay))
1378 {
1379 *replay = malloc(RTA_PAYLOAD(rta));
1380 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1381 break;
1382 }
1383 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1384 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1385 {
1386 *replay_esn = malloc(RTA_PAYLOAD(rta));
1387 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1388 break;
1389 }
1390 rta = RTA_NEXT(rta, rtasize);
1391 }
1392 }
1393 free(out);
1394 }
1395
1396 METHOD(kernel_ipsec_t, query_sa, status_t,
1397 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1398 u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
1399 {
1400 netlink_buf_t request;
1401 struct nlmsghdr *out = NULL, *hdr;
1402 struct xfrm_usersa_id *sa_id;
1403 struct xfrm_usersa_info *sa = NULL;
1404 status_t status = FAILED;
1405 size_t len;
1406
1407 memset(&request, 0, sizeof(request));
1408
1409 if (mark.value)
1410 {
1411 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%8x)",
1412 ntohl(spi), mark.value, mark.mask);
1413 }
1414 else
1415 {
1416 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x", ntohl(spi));
1417 }
1418 hdr = (struct nlmsghdr*)request;
1419 hdr->nlmsg_flags = NLM_F_REQUEST;
1420 hdr->nlmsg_type = XFRM_MSG_GETSA;
1421 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1422
1423 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1424 host2xfrm(dst, &sa_id->daddr);
1425 sa_id->spi = spi;
1426 sa_id->proto = protocol;
1427 sa_id->family = dst->get_family(dst);
1428
1429 if (mark.value)
1430 {
1431 struct xfrm_mark *mrk;
1432 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1433
1434 rthdr->rta_type = XFRMA_MARK;
1435 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1436 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1437 if (hdr->nlmsg_len > sizeof(request))
1438 {
1439 return FAILED;
1440 }
1441
1442 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1443 mrk->v = mark.value;
1444 mrk->m = mark.mask;
1445 }
1446
1447 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1448 {
1449 hdr = out;
1450 while (NLMSG_OK(hdr, len))
1451 {
1452 switch (hdr->nlmsg_type)
1453 {
1454 case XFRM_MSG_NEWSA:
1455 {
1456 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1457 break;
1458 }
1459 case NLMSG_ERROR:
1460 {
1461 struct nlmsgerr *err = NLMSG_DATA(hdr);
1462
1463 if (mark.value)
1464 {
1465 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1466 "(mark %u/0x%8x) failed: %s (%d)",
1467 ntohl(spi), mark.value, mark.mask,
1468 strerror(-err->error), -err->error);
1469 }
1470 else
1471 {
1472 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1473 "failed: %s (%d)", ntohl(spi),
1474 strerror(-err->error), -err->error);
1475 }
1476 break;
1477 }
1478 default:
1479 hdr = NLMSG_NEXT(hdr, len);
1480 continue;
1481 case NLMSG_DONE:
1482 break;
1483 }
1484 break;
1485 }
1486 }
1487
1488 if (sa == NULL)
1489 {
1490 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1491 }
1492 else
1493 {
1494 *bytes = sa->curlft.bytes;
1495 status = SUCCESS;
1496 }
1497 memwipe(out, len);
1498 free(out);
1499 return status;
1500 }
1501
1502 METHOD(kernel_ipsec_t, del_sa, status_t,
1503 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1504 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1505 {
1506 netlink_buf_t request;
1507 struct nlmsghdr *hdr;
1508 struct xfrm_usersa_id *sa_id;
1509
1510 /* if IPComp was used, we first delete the additional IPComp SA */
1511 if (cpi)
1512 {
1513 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1514 }
1515
1516 memset(&request, 0, sizeof(request));
1517
1518 if (mark.value)
1519 {
1520 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%8x)",
1521 ntohl(spi), mark.value, mark.mask);
1522 }
1523 else
1524 {
1525 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x", ntohl(spi));
1526 }
1527 hdr = (struct nlmsghdr*)request;
1528 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1529 hdr->nlmsg_type = XFRM_MSG_DELSA;
1530 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1531
1532 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1533 host2xfrm(dst, &sa_id->daddr);
1534 sa_id->spi = spi;
1535 sa_id->proto = protocol;
1536 sa_id->family = dst->get_family(dst);
1537
1538 if (mark.value)
1539 {
1540 struct xfrm_mark *mrk;
1541 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1542
1543 rthdr->rta_type = XFRMA_MARK;
1544 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1545 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1546 if (hdr->nlmsg_len > sizeof(request))
1547 {
1548 return FAILED;
1549 }
1550
1551 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1552 mrk->v = mark.value;
1553 mrk->m = mark.mask;
1554 }
1555
1556 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1557 {
1558 if (mark.value)
1559 {
1560 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1561 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1562 }
1563 else
1564 {
1565 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x", ntohl(spi));
1566 }
1567 return FAILED;
1568 }
1569 if (mark.value)
1570 {
1571 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%8x)",
1572 ntohl(spi), mark.value, mark.mask);
1573 }
1574 else
1575 {
1576 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x", ntohl(spi));
1577 }
1578 return SUCCESS;
1579 }
1580
1581 METHOD(kernel_ipsec_t, update_sa, status_t,
1582 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1583 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1584 bool old_encap, bool new_encap, mark_t mark)
1585 {
1586 netlink_buf_t request;
1587 u_char *pos;
1588 struct nlmsghdr *hdr, *out = NULL;
1589 struct xfrm_usersa_id *sa_id;
1590 struct xfrm_usersa_info *out_sa = NULL, *sa;
1591 size_t len;
1592 struct rtattr *rta;
1593 size_t rtasize;
1594 struct xfrm_encap_tmpl* tmpl = NULL;
1595 struct xfrm_replay_state *replay = NULL;
1596 struct xfrm_replay_state_esn *replay_esn = NULL;
1597 status_t status = FAILED;
1598
1599 /* if IPComp is used, we first update the IPComp SA */
1600 if (cpi)
1601 {
1602 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1603 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1604 }
1605
1606 memset(&request, 0, sizeof(request));
1607
1608 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1609
1610 /* query the existing SA first */
1611 hdr = (struct nlmsghdr*)request;
1612 hdr->nlmsg_flags = NLM_F_REQUEST;
1613 hdr->nlmsg_type = XFRM_MSG_GETSA;
1614 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1615
1616 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1617 host2xfrm(dst, &sa_id->daddr);
1618 sa_id->spi = spi;
1619 sa_id->proto = protocol;
1620 sa_id->family = dst->get_family(dst);
1621
1622 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1623 {
1624 hdr = out;
1625 while (NLMSG_OK(hdr, len))
1626 {
1627 switch (hdr->nlmsg_type)
1628 {
1629 case XFRM_MSG_NEWSA:
1630 {
1631 out_sa = NLMSG_DATA(hdr);
1632 break;
1633 }
1634 case NLMSG_ERROR:
1635 {
1636 struct nlmsgerr *err = NLMSG_DATA(hdr);
1637 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1638 strerror(-err->error), -err->error);
1639 break;
1640 }
1641 default:
1642 hdr = NLMSG_NEXT(hdr, len);
1643 continue;
1644 case NLMSG_DONE:
1645 break;
1646 }
1647 break;
1648 }
1649 }
1650 if (out_sa == NULL)
1651 {
1652 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1653 goto failed;
1654 }
1655
1656 get_replay_state(this, spi, protocol, dst, &replay_esn, &replay);
1657
1658 /* delete the old SA (without affecting the IPComp SA) */
1659 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1660 {
1661 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x", ntohl(spi));
1662 goto failed;
1663 }
1664
1665 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1666 ntohl(spi), src, dst, new_src, new_dst);
1667 /* copy over the SA from out to request */
1668 hdr = (struct nlmsghdr*)request;
1669 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1670 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1671 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1672 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1673 sa = NLMSG_DATA(hdr);
1674 sa->family = new_dst->get_family(new_dst);
1675
1676 if (!src->ip_equals(src, new_src))
1677 {
1678 host2xfrm(new_src, &sa->saddr);
1679 }
1680 if (!dst->ip_equals(dst, new_dst))
1681 {
1682 host2xfrm(new_dst, &sa->id.daddr);
1683 }
1684
1685 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1686 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1687 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1688 while(RTA_OK(rta, rtasize))
1689 {
1690 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1691 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1692 {
1693 if (rta->rta_type == XFRMA_ENCAP)
1694 { /* update encap tmpl */
1695 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1696 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1697 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1698 }
1699 memcpy(pos, rta, rta->rta_len);
1700 pos += RTA_ALIGN(rta->rta_len);
1701 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1702 }
1703 rta = RTA_NEXT(rta, rtasize);
1704 }
1705
1706 rta = (struct rtattr*)pos;
1707 if (tmpl == NULL && new_encap)
1708 { /* add tmpl if we are enabling it */
1709 rta->rta_type = XFRMA_ENCAP;
1710 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1711
1712 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1713 if (hdr->nlmsg_len > sizeof(request))
1714 {
1715 goto failed;
1716 }
1717
1718 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1719 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1720 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1721 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1722 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1723
1724 rta = XFRM_RTA_NEXT(rta);
1725 }
1726
1727 if (replay_esn)
1728 {
1729 rta->rta_type = XFRMA_REPLAY_ESN_VAL;
1730 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1731 this->replay_bmp);
1732
1733 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1734 if (hdr->nlmsg_len > sizeof(request))
1735 {
1736 goto failed;
1737 }
1738 memcpy(RTA_DATA(rta), replay_esn,
1739 sizeof(struct xfrm_replay_state_esn) + this->replay_bmp);
1740
1741 rta = XFRM_RTA_NEXT(rta);
1742 }
1743 else if (replay)
1744 {
1745 rta->rta_type = XFRMA_REPLAY_VAL;
1746 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
1747
1748 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1749 if (hdr->nlmsg_len > sizeof(request))
1750 {
1751 goto failed;
1752 }
1753 memcpy(RTA_DATA(rta), replay, sizeof(struct xfrm_replay_state));
1754
1755 rta = XFRM_RTA_NEXT(rta);
1756 }
1757 else
1758 {
1759 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
1760 "with SPI %.8x", ntohl(spi));
1761 }
1762
1763 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1764 {
1765 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1766 goto failed;
1767 }
1768
1769 status = SUCCESS;
1770 failed:
1771 free(replay);
1772 free(replay_esn);
1773 memwipe(out, len);
1774 free(out);
1775
1776 return status;
1777 }
1778
1779 /**
1780 * Add or update a policy in the kernel.
1781 *
1782 * Note: The mutex has to be locked when entering this function.
1783 */
1784 static status_t add_policy_internal(private_kernel_netlink_ipsec_t *this,
1785 policy_entry_t *policy, policy_sa_t *sa, bool update)
1786 {
1787 netlink_buf_t request;
1788 policy_entry_t clone;
1789 struct xfrm_userpolicy_info *policy_info;
1790 struct nlmsghdr *hdr;
1791 int i;
1792
1793 /* clone the policy so we can look it up again after the mutex was released */
1794 memcpy(&clone, policy, sizeof(policy_entry_t));
1795
1796 memset(&request, 0, sizeof(request));
1797 hdr = (struct nlmsghdr*)request;
1798 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1799 hdr->nlmsg_type = update ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
1800 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));
1801
1802 policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
1803 policy_info->sel = policy->sel;
1804 policy_info->dir = policy->direction;
1805
1806 /* use the priority calculated in add_policy() (smaller selectors get higher priority) */
1807 policy_info->priority = sa->priority;
1808 policy_info->action = sa->type != POLICY_DROP ? XFRM_POLICY_ALLOW
1809 : XFRM_POLICY_BLOCK;
1810 policy_info->share = XFRM_SHARE_ANY;
1811
1812 /* policies don't expire */
1813 policy_info->lft.soft_byte_limit = XFRM_INF;
1814 policy_info->lft.soft_packet_limit = XFRM_INF;
1815 policy_info->lft.hard_byte_limit = XFRM_INF;
1816 policy_info->lft.hard_packet_limit = XFRM_INF;
1817 policy_info->lft.soft_add_expires_seconds = 0;
1818 policy_info->lft.hard_add_expires_seconds = 0;
1819 policy_info->lft.soft_use_expires_seconds = 0;
1820 policy_info->lft.hard_use_expires_seconds = 0;
1821
1822 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);
1823
1824 if (sa->type == POLICY_IPSEC)
1825 {
1826 struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
1827 struct {
1828 u_int8_t proto;
1829 bool use;
1830 } protos[] = {
1831 { IPPROTO_COMP, sa->cfg.ipcomp.transform != IPCOMP_NONE },
1832 { IPPROTO_ESP, sa->cfg.esp.use },
1833 { IPPROTO_AH, sa->cfg.ah.use },
1834 };
1835 ipsec_mode_t proto_mode = sa->cfg.mode;
1836
1837 rthdr->rta_type = XFRMA_TMPL;
1838 rthdr->rta_len = 0; /* actual length is set below */
1839
1840 for (i = 0; i < countof(protos); i++)
1841 {
1842 if (!protos[i].use)
1843 {
1844 continue;
1845 }
1846
1847 rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
1848 hdr->nlmsg_len += RTA_ALIGN(RTA_LENGTH(sizeof(struct xfrm_user_tmpl)));
1849 if (hdr->nlmsg_len > sizeof(request))
1850 {
1851 return FAILED;
1852 }
1853
1854 tmpl->reqid = sa->cfg.reqid;
1855 tmpl->id.proto = protos[i].proto;
1856 tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
1857 tmpl->mode = mode2kernel(proto_mode);
1858 tmpl->optional = protos[i].proto == IPPROTO_COMP &&
1859 policy->direction != POLICY_OUT;
1860 tmpl->family = sa->src->get_family(sa->src);
1861
1862 if (proto_mode == MODE_TUNNEL)
1863 { /* only for tunnel mode */
1864 host2xfrm(sa->src, &tmpl->saddr);
1865 host2xfrm(sa->dst, &tmpl->id.daddr);
1866 }
1867
1868 tmpl++;
1869
1870 /* use transport mode for other SAs */
1871 proto_mode = MODE_TRANSPORT;
1872 }
1873
1874 rthdr = XFRM_RTA_NEXT(rthdr);
1875 }
1876
1877 if (sa->mark.value)
1878 {
1879 struct xfrm_mark *mrk;
1880
1881 rthdr->rta_type = XFRMA_MARK;
1882 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1883
1884 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1885 if (hdr->nlmsg_len > sizeof(request))
1886 {
1887 return FAILED;
1888 }
1889
1890 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1891 mrk->v = sa->mark.value;
1892 mrk->m = sa->mark.mask;
1893 }
1894 this->mutex->unlock(this->mutex);
1895
1896 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1897 {
1898 return FAILED;
1899 }
1900
1901 /* find the policy again */
1902 this->mutex->lock(this->mutex);
1903 policy = this->policies->get(this->policies, &clone);
1904 if (!policy ||
1905 policy->sas->find_first(policy->sas, NULL, (void**)&sa) != SUCCESS)
1906 { /* policy or sa is already gone, ignore */
1907 this->mutex->unlock(this->mutex);
1908 return SUCCESS;
1909 }
1910
1911 /* install a route, if:
1912 * - this is a forward policy (to just get one for each child)
1913 * - we are in tunnel/BEET mode
1914 * - routing is not disabled via strongswan.conf
1915 */
1916 if (policy->direction == POLICY_FWD &&
1917 sa->cfg.mode != MODE_TRANSPORT && this->install_routes)
1918 {
1919 route_entry_t *route = malloc_thing(route_entry_t);
1920
1921 if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
1922 sa->dst_ts, &route->src_ip) == SUCCESS)
1923 {
1924 /* get the nexthop to src (src as we are in POLICY_FWD).*/
1925 route->gateway = hydra->kernel_interface->get_nexthop(
1926 hydra->kernel_interface, sa->src);
1927 /* install route via outgoing interface */
1928 route->if_name = hydra->kernel_interface->get_interface(
1929 hydra->kernel_interface, sa->dst);
1930 route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
1931 memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
1932 route->prefixlen = policy->sel.prefixlen_s;
1933
1934 if (!route->if_name)
1935 {
1936 this->mutex->unlock(this->mutex);
1937 route_entry_destroy(route);
1938 return SUCCESS;
1939 }
1940
1941 if (policy->route)
1942 {
1943 route_entry_t *old = policy->route;
1944 if (route_entry_equals(old, route))
1945 { /* keep previously installed route */
1946 this->mutex->unlock(this->mutex);
1947 route_entry_destroy(route);
1948 return SUCCESS;
1949 }
1950 /* uninstall previously installed route */
1951 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
1952 old->dst_net, old->prefixlen, old->gateway,
1953 old->src_ip, old->if_name) != SUCCESS)
1954 {
1955 DBG1(DBG_KNL, "error uninstalling route installed with "
1956 "policy %R === %R %N", sa->src_ts,
1957 sa->dst_ts, policy_dir_names,
1958 policy->direction);
1959 }
1960 route_entry_destroy(old);
1961 policy->route = NULL;
1962 }
1963
1964 DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
1965 sa->src_ts, route->gateway, route->src_ip, route->if_name);
1966 switch (hydra->kernel_interface->add_route(
1967 hydra->kernel_interface, route->dst_net,
1968 route->prefixlen, route->gateway,
1969 route->src_ip, route->if_name))
1970 {
1971 default:
1972 DBG1(DBG_KNL, "unable to install source route for %H",
1973 route->src_ip);
1974 /* FALL */
1975 case ALREADY_DONE:
1976 /* route exists, do not uninstall */
1977 route_entry_destroy(route);
1978 break;
1979 case SUCCESS:
1980 /* cache the installed route */
1981 policy->route = route;
1982 break;
1983 }
1984 }
1985 else
1986 {
1987 free(route);
1988 }
1989 }
1990 this->mutex->unlock(this->mutex);
1991 return SUCCESS;
1992 }
1993
1994 METHOD(kernel_ipsec_t, add_policy, status_t,
1995 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1996 traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
1997 policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
1998 mark_t mark, bool routed)
1999 {
2000 policy_entry_t *policy, *current;
2001 policy_sa_t *assigned_sa, *current_sa;
2002 enumerator_t *enumerator;
2003 bool found = FALSE, update = TRUE;
2004
2005 /* create a policy */
2006 INIT(policy,
2007 .sel = ts2selector(src_ts, dst_ts),
2008 .mark = mark.value & mark.mask,
2009 .direction = direction,
2010 .sas = linked_list_create(),
2011 );
2012
2013 /* find the policy, which matches EXACTLY */
2014 this->mutex->lock(this->mutex);
2015 current = this->policies->get(this->policies, policy);
2016 if (current)
2017 {
2018 /* use existing policy */
2019 if (mark.value)
2020 {
2021 DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%8x) "
2022 "already exists, increasing refcount",
2023 src_ts, dst_ts, policy_dir_names, direction,
2024 mark.value, mark.mask);
2025 }
2026 else
2027 {
2028 DBG2(DBG_KNL, "policy %R === %R %N "
2029 "already exists, increasing refcount",
2030 src_ts, dst_ts, policy_dir_names, direction);
2031 }
2032 policy_entry_destroy(policy);
2033 policy = current;
2034 found = TRUE;
2035 }
2036 else
2037 { /* no matching policy exists yet, store the new entry */
2038 this->policies->put(this->policies, policy, policy);
2039 }
2040
2041 /* cache the assigned IPsec SA */
2042 INIT(assigned_sa,
2043 .type = type,
2044 .src = src->clone(src),
2045 .dst = dst->clone(dst),
2046 .src_ts = src_ts->clone(src_ts),
2047 .dst_ts = dst_ts->clone(dst_ts),
2048 .mark = mark,
2049 .cfg = *sa,
2050 );
2051
2052 /* calculate priority based on selector size, small size = high prio */
2053 assigned_sa->priority = routed ? PRIO_LOW : PRIO_HIGH;
2054 assigned_sa->priority -= policy->sel.prefixlen_s;
2055 assigned_sa->priority -= policy->sel.prefixlen_d;
2056 assigned_sa->priority <<= 2; /* make some room for the two flags */
2057 assigned_sa->priority += policy->sel.sport_mask ||
2058 policy->sel.dport_mask ? 0 : 2;
2059 assigned_sa->priority += policy->sel.proto ? 0 : 1;
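/* Worked example: a non-routed policy with two /24 selectors and wildcarded
 * ports/protocol gets (PRIO_HIGH - 24 - 24) << 2, plus 2 for the missing port
 * masks and 1 for the missing protocol.  More specific selectors thus end up
 * with a numerically smaller value, which XFRM treats as higher precedence. */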
2060
2061 /* insert the SA according to its priority */
2062 enumerator = policy->sas->create_enumerator(policy->sas);
2063 while (enumerator->enumerate(enumerator, (void**)&current_sa))
2064 {
2065 if (current_sa->priority >= assigned_sa->priority)
2066 {
2067 break;
2068 }
2069 update = FALSE;
2070 }
2071 policy->sas->insert_before(policy->sas, enumerator, assigned_sa);
2072 enumerator->destroy(enumerator);
2073
2074 if (!update)
2075 { /* we don't update the policy if the priority is lower than that of the
2076 * currently installed one */
2077 this->mutex->unlock(this->mutex);
2078 return SUCCESS;
2079 }
2080
2081 if (mark.value)
2082 {
2083 DBG2(DBG_KNL, "%s policy %R === %R %N (mark %u/0x%08x)",
2084 found ? "updating" : "adding", src_ts, dst_ts,
2085 policy_dir_names, direction, mark.value, mark.mask);
2086 }
2087 else
2088 {
2089 DBG2(DBG_KNL, "%s policy %R === %R %N",
2090 found ? "updating" : "adding", src_ts, dst_ts,
2091 policy_dir_names, direction);
2092 }
2093
2094 if (add_policy_internal(this, policy, assigned_sa, found) != SUCCESS)
2095 {
2096 DBG1(DBG_KNL, "unable to %s policy %R === %R %N",
2097 found ? "update" : "add", src_ts, dst_ts,
2098 policy_dir_names, direction);
2099 return FAILED;
2100 }
2101 return SUCCESS;
2102 }
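/* Note (an assumption about the caller, not visible in this file): per
 * CHILD_SA the daemon typically calls add_policy() for several directions
 * (POLICY_OUT, POLICY_IN and usually POLICY_FWD), so the same selector pair
 * is installed multiple times with different directions. */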
2103
2104 METHOD(kernel_ipsec_t, query_policy, status_t,
2105 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2106 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
2107 u_int32_t *use_time)
2108 {
2109 netlink_buf_t request;
2110 struct nlmsghdr *out = NULL, *hdr;
2111 struct xfrm_userpolicy_id *policy_id;
2112 struct xfrm_userpolicy_info *policy = NULL;
2113 size_t len;
2114
2115 memset(&request, 0, sizeof(request));
2116
2117 if (mark.value)
2118 {
2119 DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%08x)",
2120 src_ts, dst_ts, policy_dir_names, direction,
2121 mark.value, mark.mask);
2122 }
2123 else
2124 {
2125 DBG2(DBG_KNL, "querying policy %R === %R %N", src_ts, dst_ts,
2126 policy_dir_names, direction);
2127 }
2128 hdr = (struct nlmsghdr*)request;
2129 hdr->nlmsg_flags = NLM_F_REQUEST;
2130 hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
2131 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2132
2133 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2134 policy_id->sel = ts2selector(src_ts, dst_ts);
2135 policy_id->dir = direction;
2136
2137 if (mark.value)
2138 {
2139 struct xfrm_mark *mrk;
2140 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2141
2142 rthdr->rta_type = XFRMA_MARK;
2143 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2144
2145 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2146 if (hdr->nlmsg_len > sizeof(request))
2147 {
2148 return FAILED;
2149 }
2150
2151 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2152 mrk->v = mark.value;
2153 mrk->m = mark.mask;
2154 }
2155
2156 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
2157 {
2158 hdr = out;
2159 while (NLMSG_OK(hdr, len))
2160 {
2161 switch (hdr->nlmsg_type)
2162 {
2163 case XFRM_MSG_NEWPOLICY:
2164 {
2165 policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2166 break;
2167 }
2168 case NLMSG_ERROR:
2169 {
2170 struct nlmsgerr *err = NLMSG_DATA(hdr);
2171 DBG1(DBG_KNL, "querying policy failed: %s (%d)",
2172 strerror(-err->error), -err->error);
2173 break;
2174 }
2175 default:
2176 hdr = NLMSG_NEXT(hdr, len);
2177 continue;
2178 case NLMSG_DONE:
2179 break;
2180 }
2181 break;
2182 }
2183 }
2184
2185 if (policy == NULL)
2186 {
2187 DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
2188 policy_dir_names, direction);
2189 free(out);
2190 return FAILED;
2191 }
2192
2193 if (policy->curlft.use_time)
2194 {
2195 /* we need the monotonic time, but the kernel returns system time. */
2196 *use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
2197 }
2198 else
2199 {
2200 *use_time = 0;
2201 }
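/* e.g. if the kernel reports a use_time 10 seconds in the (wall clock) past,
 * the caller gets the monotonic timestamp of 10 seconds ago */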
2202
2203 free(out);
2204 return SUCCESS;
2205 }
2206
2207 METHOD(kernel_ipsec_t, del_policy, status_t,
2208 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2209 traffic_selector_t *dst_ts, policy_dir_t direction, u_int32_t reqid,
2210 mark_t mark, bool unrouted)
2211 {
2212 policy_entry_t *current, policy;
2213 enumerator_t *enumerator;
2214 policy_sa_t *sa;
2215 netlink_buf_t request;
2216 struct nlmsghdr *hdr;
2217 struct xfrm_userpolicy_id *policy_id;
2218 bool is_installed = TRUE;
2219
2220 if (mark.value)
2221 {
2222 DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x)",
2223 src_ts, dst_ts, policy_dir_names, direction,
2224 mark.value, mark.mask);
2225 }
2226 else
2227 {
2228 DBG2(DBG_KNL, "deleting policy %R === %R %N",
2229 src_ts, dst_ts, policy_dir_names, direction);
2230 }
2231
2232 /* create a policy */
2233 memset(&policy, 0, sizeof(policy_entry_t));
2234 policy.sel = ts2selector(src_ts, dst_ts);
2235 policy.mark = mark.value & mark.mask;
2236 policy.direction = direction;
2237
2238 /* find the policy */
2239 this->mutex->lock(this->mutex);
2240 current = this->policies->get(this->policies, &policy);
2241 if (!current)
2242 {
2243 if (mark.value)
2244 {
2245 DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x) "
2246 "failed, not found", src_ts, dst_ts, policy_dir_names,
2247 direction, mark.value, mark.mask);
2248 }
2249 else
2250 {
2251 DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
2252 src_ts, dst_ts, policy_dir_names, direction);
2253 }
2254 this->mutex->unlock(this->mutex);
2255 return NOT_FOUND;
2256 }
2257
2258 /* remove cached SA by reqid */
2259 enumerator = current->sas->create_enumerator(current->sas);
2260 while (enumerator->enumerate(enumerator, (void**)&sa))
2261 {
2262 if (reqid == sa->cfg.reqid)
2263 {
2264 current->sas->remove_at(current->sas, enumerator);
2265 break;
2266 }
2267 is_installed = FALSE;
2268 }
2269 enumerator->destroy(enumerator);
2270
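/* is_installed remains TRUE only if the removed SA was the first in the list,
 * i.e. the one whose values are currently installed in the kernel; only then
 * does the kernel policy have to be rewritten below */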
2271 if (current->sas->get_count(current->sas) > 0)
2272 { /* policy is still used by other SAs, keep it in the kernel */
2273 DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
2274 if (!is_installed)
2275 { /* no need to update as the policy was not installed for this SA */
2276 this->mutex->unlock(this->mutex);
2277 policy_sa_destroy(sa);
2278 return SUCCESS;
2279 }
2280 policy_sa_destroy(sa);
2281
2282 if (mark.value)
2283 {
2284 DBG2(DBG_KNL, "updating policy %R === %R %N (mark %u/0x%08x)",
2285 src_ts, dst_ts, policy_dir_names, direction,
2286 mark.value, mark.mask);
2287 }
2288 else
2289 {
2290 DBG2(DBG_KNL, "updating policy %R === %R %N",
2291 src_ts, dst_ts, policy_dir_names, direction);
2292 }
2293
2294 current->sas->get_first(current->sas, (void**)&sa);
2295 if (add_policy_internal(this, current, sa, TRUE) != SUCCESS)
2296 {
2297 DBG1(DBG_KNL, "unable to update policy %R === %R %N",
2298 src_ts, dst_ts, policy_dir_names, direction);
2299 return FAILED;
2300 }
2301 return SUCCESS;
2302 }
2303 policy_sa_destroy(sa);
2304
2305 memset(&request, 0, sizeof(request));
2306
2307 hdr = (struct nlmsghdr*)request;
2308 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2309 hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
2310 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2311
2312 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2313 policy_id->sel = current->sel;
2314 policy_id->dir = direction;
2315
2316 if (mark.value)
2317 {
2318 struct xfrm_mark *mrk;
2319 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2320
2321 rthdr->rta_type = XFRMA_MARK;
2322 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2323 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2324 if (hdr->nlmsg_len > sizeof(request))
2325 {
/* unlock before bailing out, otherwise the mutex stays locked */
this->mutex->unlock(this->mutex);
2326 return FAILED;
2327 }
2328
2329 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2330 mrk->v = mark.value;
2331 mrk->m = mark.mask;
2332 }
2333
2334 if (current->route)
2335 {
2336 route_entry_t *route = current->route;
2337 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2338 route->dst_net, route->prefixlen, route->gateway,
2339 route->src_ip, route->if_name) != SUCCESS)
2340 {
2341 DBG1(DBG_KNL, "error uninstalling route installed with "
2342 "policy %R === %R %N", src_ts, dst_ts,
2343 policy_dir_names, direction);
2344 }
2345 }
2346
2347 this->policies->remove(this->policies, current);
2348 policy_entry_destroy(current);
2349 this->mutex->unlock(this->mutex);
2350
2351 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2352 {
2353 if (mark.value)
2354 {
2355 DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
2356 "(mark %u/0x%8x)", src_ts, dst_ts, policy_dir_names,
2357 direction, mark.value, mark.mask);
2358 }
2359 else
2360 {
2361 DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
2362 src_ts, dst_ts, policy_dir_names, direction);
2363 }
2364 return FAILED;
2365 }
2366 return SUCCESS;
2367 }
2368
2369 METHOD(kernel_ipsec_t, bypass_socket, bool,
2370 private_kernel_netlink_ipsec_t *this, int fd, int family)
2371 {
2372 struct xfrm_userpolicy_info policy;
2373 u_int sol, ipsec_policy;
2374
2375 switch (family)
2376 {
2377 case AF_INET:
2378 sol = SOL_IP;
2379 ipsec_policy = IP_XFRM_POLICY;
2380 break;
2381 case AF_INET6:
2382 sol = SOL_IPV6;
2383 ipsec_policy = IPV6_XFRM_POLICY;
2384 break;
2385 default:
2386 return FALSE;
2387 }
2388
2389 memset(&policy, 0, sizeof(policy));
2390 policy.action = XFRM_POLICY_ALLOW;
2391 policy.sel.family = family;
2392
2393 policy.dir = XFRM_POLICY_OUT;
2394 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2395 {
2396 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2397 strerror(errno));
2398 return FALSE;
2399 }
2400 policy.dir = XFRM_POLICY_IN;
2401 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2402 {
2403 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2404 strerror(errno));
2405 return FALSE;
2406 }
2407 return TRUE;
2408 }
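/*
 * The two setsockopt() calls above attach per-socket ALLOW policies in both
 * directions, so traffic sent and received on this socket bypasses the
 * globally installed IPsec policies.  The IKE daemon typically uses this for
 * its own IKE/NAT-T sockets (UDP ports 500/4500) so that IKE packets are not
 * caught by trap or tunnel policies.
 */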
2409
2410 METHOD(kernel_ipsec_t, destroy, void,
2411 private_kernel_netlink_ipsec_t *this)
2412 {
2413 enumerator_t *enumerator;
2414 policy_entry_t *policy;
2415
2416 if (this->job)
2417 {
2418 this->job->cancel(this->job);
2419 }
2420 if (this->socket_xfrm_events > 0)
2421 {
2422 close(this->socket_xfrm_events);
2423 }
2424 DESTROY_IF(this->socket_xfrm);
2425 enumerator = this->policies->create_enumerator(this->policies);
2426 while (enumerator->enumerate(enumerator, &policy, &policy))
2427 {
2428 policy_entry_destroy(policy);
2429 }
2430 enumerator->destroy(enumerator);
2431 this->policies->destroy(this->policies);
2432 this->mutex->destroy(this->mutex);
2433 free(this);
2434 }
2435
2436 /*
2437 * Described in header.
2438 */
2439 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2440 {
2441 private_kernel_netlink_ipsec_t *this;
2442 struct sockaddr_nl addr;
2443 int fd;
2444
2445 INIT(this,
2446 .public = {
2447 .interface = {
2448 .get_spi = _get_spi,
2449 .get_cpi = _get_cpi,
2450 .add_sa = _add_sa,
2451 .update_sa = _update_sa,
2452 .query_sa = _query_sa,
2453 .del_sa = _del_sa,
2454 .add_policy = _add_policy,
2455 .query_policy = _query_policy,
2456 .del_policy = _del_policy,
2457 .bypass_socket = _bypass_socket,
2458 .destroy = _destroy,
2459 },
2460 },
2461 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2462 (hashtable_equals_t)policy_equals, 32),
2463 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2464 .install_routes = lib->settings->get_bool(lib->settings,
2465 "%s.install_routes", TRUE, hydra->daemon),
2466 .replay_window = lib->settings->get_int(lib->settings,
2467 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2468 );
2469
2470 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2471 (sizeof(u_int32_t) * 8);
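/* replay_bmp is the replay window size expressed in 32-bit bitmap words,
 * e.g. a 32 packet window fits into a single word, a 128 packet window
 * needs four */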
2472
2473 if (streq(hydra->daemon, "pluto"))
2474 { /* no routes for pluto, they are installed via updown script */
2475 this->install_routes = FALSE;
2476 }
2477
2478 /* extend the lifetime of allocated SPIs (larval acquire state) in the kernel */
2479 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2480 if (fd >= 0)
2481 {
2482 ignore_result(write(fd, "165", 3));
2483 close(fd);
2484 }
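/* the value 165 roughly matches the total of charon's default IKE
 * retransmission timeouts, so the acquire state outlives a complete
 * negotiation attempt before it expires (assumption based on the default
 * retransmission settings) */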
2485
2486 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2487 if (!this->socket_xfrm)
2488 {
2489 destroy(this);
2490 return NULL;
2491 }
2492
2493 memset(&addr, 0, sizeof(addr));
2494 addr.nl_family = AF_NETLINK;
2495
2496 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2497 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2498 if (this->socket_xfrm_events <= 0)
2499 {
2500 DBG1(DBG_KNL, "unable to create XFRM event socket");
2501 destroy(this);
2502 return NULL;
2503 }
2504 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2505 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2506 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2507 {
2508 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2509 destroy(this);
2510 return NULL;
2511 }
2512 this->job = callback_job_create_with_prio((callback_job_cb_t)receive_events,
2513 this, NULL, NULL, JOB_PRIO_CRITICAL);
2514 lib->processor->queue_job(lib->processor, (job_t*)this->job);
2515
2516 return &this->public;
2517 }
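/*
 * Minimal usage sketch (an assumption about the surrounding plugin code, not
 * part of this file): the kernel-netlink plugin registers this constructor
 * with hydra's kernel interface, roughly like
 *
 *   hydra->kernel_interface->add_ipsec_interface(hydra->kernel_interface,
 *                  (kernel_ipsec_constructor_t)kernel_netlink_ipsec_create);
 *
 * after which the methods set up in INIT() above (add_sa, add_policy,
 * query_policy, ...) are invoked through hydra->kernel_interface.
 */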
2518