Use CRITICAL job priority class for long running dispatcher jobs
[strongswan.git] / src / libhydra / plugins / kernel_netlink / kernel_netlink_ipsec.c
1 /*
2 * Copyright (C) 2006-2010 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <utils/hashtable.h>
43 #include <processing/jobs/callback_job.h>
44
/** SA flag required for Linux 2.6.26 kernels and later; define it here for
 * builds against older linux/xfrm.h copies that lack it */
#ifndef XFRM_STATE_AF_UNSPEC
#define XFRM_STATE_AF_UNSPEC 32
#endif

/** from linux/in.h; socket option to set per-socket IPv4 XFRM policies */
#ifndef IP_XFRM_POLICY
#define IP_XFRM_POLICY 17
#endif

/* missing on uclibc; IPv6 counterpart of IP_XFRM_POLICY */
#ifndef IPV6_XFRM_POLICY
#define IPV6_XFRM_POLICY 34
#endif /*IPV6_XFRM_POLICY*/

/** default priority of installed policies (lower value = higher priority) */
#define PRIO_LOW 1024
#define PRIO_HIGH 512

/** default replay window size, if not set using charon.replay_window */
#define DEFAULT_REPLAY_WINDOW 32

/**
 * map the limit for bytes and packets to XFRM_INF per default
 * (a configured value of 0 means "unlimited" to the kernel)
 */
#define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))

/**
 * Create ORable bitfield of XFRM NL groups
 * (netlink multicast group N is addressed by bit N-1)
 */
#define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))

/**
 * returns a pointer to the first rtattr following the nlmsghdr *nlh and the
 * 'usual' netlink data x like 'struct xfrm_usersa_info'
 */
#define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + NLMSG_ALIGN(sizeof(x))))
/**
 * returns a pointer to the next rtattr following rta.
 * !!! do not use this to parse messages. use RTA_NEXT and RTA_OK instead !!!
 * (this variant does no bounds checking, it is only for *building* messages)
 */
#define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + RTA_ALIGN((rta)->rta_len)))
/**
 * returns the total size of attached rta data
 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
 */
#define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
92
typedef struct kernel_algorithm_t kernel_algorithm_t;

/**
 * Mapping of an IKEv2 algorithm identifier to its Linux crypto API name.
 * Tables of these entries are terminated with an END_OF_LIST sentinel.
 */
struct kernel_algorithm_t {
	/**
	 * Identifier specified in IKEv2
	 */
	int ikev2;

	/**
	 * Name of the algorithm in the Linux crypto API
	 */
	char *name;
};
109
/* debug-output names for the XFRM netlink message types used below */
ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
	"XFRM_MSG_NEWSA",
	"XFRM_MSG_DELSA",
	"XFRM_MSG_GETSA",
	"XFRM_MSG_NEWPOLICY",
	"XFRM_MSG_DELPOLICY",
	"XFRM_MSG_GETPOLICY",
	"XFRM_MSG_ALLOCSPI",
	"XFRM_MSG_ACQUIRE",
	"XFRM_MSG_EXPIRE",
	"XFRM_MSG_UPDPOLICY",
	"XFRM_MSG_UPDSA",
	"XFRM_MSG_POLEXPIRE",
	"XFRM_MSG_FLUSHSA",
	"XFRM_MSG_FLUSHPOLICY",
	"XFRM_MSG_NEWAE",
	"XFRM_MSG_GETAE",
	"XFRM_MSG_REPORT",
	"XFRM_MSG_MIGRATE",
	"XFRM_MSG_NEWSADINFO",
	"XFRM_MSG_GETSADINFO",
	"XFRM_MSG_NEWSPDINFO",
	"XFRM_MSG_GETSPDINFO",
	"XFRM_MSG_MAPPING"
);
135
/* debug-output names for XFRM netlink attribute types */
ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_KMADDRESS,
	"XFRMA_UNSPEC",
	"XFRMA_ALG_AUTH",
	"XFRMA_ALG_CRYPT",
	"XFRMA_ALG_COMP",
	"XFRMA_ENCAP",
	"XFRMA_TMPL",
	"XFRMA_SA",
	"XFRMA_POLICY",
	"XFRMA_SEC_CTX",
	"XFRMA_LTIME_VAL",
	"XFRMA_REPLAY_VAL",
	"XFRMA_REPLAY_THRESH",
	"XFRMA_ETIMER_THRESH",
	"XFRMA_SRCADDR",
	"XFRMA_COADDR",
	"XFRMA_LASTUSED",
	"XFRMA_POLICY_TYPE",
	"XFRMA_MIGRATE",
	"XFRMA_ALG_AEAD",
	"XFRMA_KMADDRESS"
);
158
/** sentinel terminating the kernel_algorithm_t tables below */
#define END_OF_LIST -1

/**
 * Algorithms for encryption
 * (commented-out entries have no Linux crypto API counterpart)
 */
static kernel_algorithm_t encryption_algs[] = {
/*	{ENCR_DES_IV64,				"***"				}, */
	{ENCR_DES,					"des"				},
	{ENCR_3DES,					"des3_ede"			},
/*	{ENCR_RC5,					"***"				}, */
/*	{ENCR_IDEA,					"***"				}, */
	{ENCR_CAST,					"cast128"			},
	{ENCR_BLOWFISH,				"blowfish"			},
/*	{ENCR_3IDEA,				"***"				}, */
/*	{ENCR_DES_IV32,				"***"				}, */
	{ENCR_NULL,					"cipher_null"		},
	{ENCR_AES_CBC,				"aes"				},
	{ENCR_AES_CTR,				"rfc3686(ctr(aes))"	},
	/* AES-CCM/GCM share one kernel name per cipher; the ICV length is
	 * passed separately via xfrm_algo_aead.alg_icv_len */
	{ENCR_AES_CCM_ICV8,			"rfc4309(ccm(aes))"	},
	{ENCR_AES_CCM_ICV12,		"rfc4309(ccm(aes))"	},
	{ENCR_AES_CCM_ICV16,		"rfc4309(ccm(aes))"	},
	{ENCR_AES_GCM_ICV8,			"rfc4106(gcm(aes))"	},
	{ENCR_AES_GCM_ICV12,		"rfc4106(gcm(aes))"	},
	{ENCR_AES_GCM_ICV16,		"rfc4106(gcm(aes))"	},
	{ENCR_NULL_AUTH_AES_GMAC,	"rfc4543(gcm(aes))"	},
	{ENCR_CAMELLIA_CBC,			"cbc(camellia)"		},
/*	{ENCR_CAMELLIA_CTR,			"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV8,	"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV12,	"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV16,	"***"				}, */
	{ENCR_SERPENT_CBC,			"serpent"			},
	{ENCR_TWOFISH_CBC,			"twofish"			},
	{END_OF_LIST,				NULL				}
};
193
/**
 * Algorithms for integrity protection
 * (plain names get 96-bit truncation by the kernel; explicit hmac() names
 * are used where a non-default truncation is configured via
 * XFRMA_ALG_AUTH_TRUNC)
 */
static kernel_algorithm_t integrity_algs[] = {
	{AUTH_HMAC_MD5_96,			"md5"				},
	{AUTH_HMAC_SHA1_96,			"sha1"				},
	{AUTH_HMAC_SHA2_256_96,		"sha256"			},
	{AUTH_HMAC_SHA2_256_128,	"hmac(sha256)"		},
	{AUTH_HMAC_SHA2_384_192,	"hmac(sha384)"		},
	{AUTH_HMAC_SHA2_512_256,	"hmac(sha512)"		},
/*	{AUTH_DES_MAC,				"***"				}, */
/*	{AUTH_KPDK_MD5,				"***"				}, */
	{AUTH_AES_XCBC_96,			"xcbc(aes)"			},
	{END_OF_LIST,				NULL				}
};
209
/**
 * Algorithms for IPComp
 */
static kernel_algorithm_t compression_algs[] = {
/*	{IPCOMP_OUI,				"***"				}, */
	{IPCOMP_DEFLATE,			"deflate"			},
	{IPCOMP_LZS,				"lzs"				},
	{IPCOMP_LZJH,				"lzjh"				},
	{END_OF_LIST,				NULL				}
};
220
221 /**
222 * Look up a kernel algorithm name and its key size
223 */
224 static char* lookup_algorithm(kernel_algorithm_t *list, int ikev2)
225 {
226 while (list->ikev2 != END_OF_LIST)
227 {
228 if (list->ikev2 == ikev2)
229 {
230 return list->name;
231 }
232 list++;
233 }
234 return NULL;
235 }
236
typedef struct route_entry_t route_entry_t;

/**
 * Installed routing entry, remembered per policy so the route can be
 * removed again when the policy goes away.
 */
struct route_entry_t {
	/** Name of the interface the route is bound to */
	char *if_name;

	/** Source ip of the route */
	host_t *src_ip;

	/** gateway for this route (may be NULL, see DESTROY_IF on cleanup) */
	host_t *gateway;

	/** Destination net */
	chunk_t dst_net;

	/** Destination net prefixlen */
	u_int8_t prefixlen;
};
258
259 /**
260 * destroy an route_entry_t object
261 */
262 static void route_entry_destroy(route_entry_t *this)
263 {
264 free(this->if_name);
265 this->src_ip->destroy(this->src_ip);
266 DESTROY_IF(this->gateway);
267 chunk_free(&this->dst_net);
268 free(this);
269 }
270
typedef struct policy_entry_t policy_entry_t;

/**
 * Installed kernel policy.
 *
 * NOTE: policy_hash()/policy_equals() hash and compare the memory range
 * starting at 'sel' with length sizeof(sel) + sizeof(u_int32_t), i.e. they
 * rely on 'mark' directly following 'sel' in this struct — keep the field
 * order and avoid inserting members between them.
 */
struct policy_entry_t {

	/** direction of this policy: in, out, forward */
	u_int8_t direction;

	/** parameters of installed policy */
	struct xfrm_selector sel;

	/** optional mark */
	u_int32_t mark;

	/** associated route installed for this policy */
	route_entry_t *route;

	/** by how many CHILD_SA's this policy is used */
	u_int refcount;
};
293
/**
 * Hash function for policy_entry_t objects.
 *
 * Hashes the selector plus the mark in one go; this assumes 'mark'
 * immediately follows 'sel' in policy_entry_t (see struct definition).
 * The direction is intentionally not part of the hash (it is checked in
 * policy_equals() instead).
 */
static u_int policy_hash(policy_entry_t *key)
{
	chunk_t chunk = chunk_create((void*)&key->sel,
						 sizeof(struct xfrm_selector) + sizeof(u_int32_t));
	return chunk_hash(chunk);
}
303
/**
 * Equality function for policy_entry_t objects.
 *
 * Compares selector + mark as one memory range (same layout assumption as
 * policy_hash()) and additionally the policy direction.
 */
static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
{
	return memeq(&key->sel, &other_key->sel,
				 sizeof(struct xfrm_selector) + sizeof(u_int32_t)) &&
		   key->direction == other_key->direction;
}
313
typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;

/**
 * Private variables and functions of kernel_netlink class.
 */
struct private_kernel_netlink_ipsec_t {
	/**
	 * Public part of the kernel_netlink_t object.
	 */
	kernel_netlink_ipsec_t public;

	/**
	 * Mutex to lock access to various lists (e.g. the policies table)
	 */
	mutex_t *mutex;

	/**
	 * Hash table of installed policies (policy_entry_t)
	 */
	hashtable_t *policies;

	/**
	 * Job receiving netlink events (see receive_events())
	 */
	callback_job_t *job;

	/**
	 * Netlink xfrm socket (IPsec) for request/response exchanges
	 */
	netlink_socket_t *socket_xfrm;

	/**
	 * Netlink xfrm socket to receive acquire and expire events
	 */
	int socket_xfrm_events;

	/**
	 * Whether to install routes along policies
	 */
	bool install_routes;

	/**
	 * Size of the replay window, in packets
	 */
	u_int32_t replay_window;

	/**
	 * Size of the replay window bitmap; stored as the number of u_int32_t
	 * words (used for xfrm_replay_state_esn.bmp_len)
	 */
	u_int32_t replay_bmp;
};
365
366 /**
367 * convert the general ipsec mode to the one defined in xfrm.h
368 */
369 static u_int8_t mode2kernel(ipsec_mode_t mode)
370 {
371 switch (mode)
372 {
373 case MODE_TRANSPORT:
374 return XFRM_MODE_TRANSPORT;
375 case MODE_TUNNEL:
376 return XFRM_MODE_TUNNEL;
377 case MODE_BEET:
378 return XFRM_MODE_BEET;
379 default:
380 return mode;
381 }
382 }
383
/**
 * Convert a host_t to a struct xfrm_address.
 *
 * Copies at most sizeof(xfrm_address_t) bytes; a shorter (IPv4) address
 * leaves the remaining bytes of *xfrm untouched, so callers should start
 * from zeroed memory.
 */
static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
{
	chunk_t chunk = host->get_address(host);
	memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
}
392
393 /**
394 * convert a struct xfrm_address to a host_t
395 */
396 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
397 {
398 chunk_t chunk;
399
400 switch (family)
401 {
402 case AF_INET:
403 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
404 break;
405 case AF_INET6:
406 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
407 break;
408 default:
409 return NULL;
410 }
411 return host_create_from_chunk(family, chunk, ntohs(port));
412 }
413
/**
 * Convert a traffic selector address range to subnet and its mask.
 *
 * NOTE(review): the full address chunk is copied into *net without a bound;
 * this assumes the host returned by to_subnet() never exceeds
 * sizeof(xfrm_address_t) — holds for IPv4/IPv6 addresses.
 */
static void ts2subnet(traffic_selector_t* ts,
					  xfrm_address_t *net, u_int8_t *mask)
{
	host_t *net_host;
	chunk_t net_chunk;

	ts->to_subnet(ts, &net_host, mask);
	net_chunk = net_host->get_address(net_host);
	memcpy(net, net_chunk.ptr, net_chunk.len);
	net_host->destroy(net_host);
}
428
429 /**
430 * convert a traffic selector port range to port/portmask
431 */
432 static void ts2ports(traffic_selector_t* ts,
433 u_int16_t *port, u_int16_t *mask)
434 {
435 /* linux does not seem to accept complex portmasks. Only
436 * any or a specific port is allowed. We set to any, if we have
437 * a port range, or to a specific, if we have one port only.
438 */
439 u_int16_t from, to;
440
441 from = ts->get_from_port(ts);
442 to = ts->get_to_port(ts);
443
444 if (from == to)
445 {
446 *port = htons(from);
447 *mask = ~0;
448 }
449 else
450 {
451 *port = 0;
452 *mask = 0;
453 }
454 }
455
/**
 * Convert a pair of traffic_selectors to a xfrm_selector.
 *
 * The address family is derived from the source selector; src and dst are
 * expected to be of the same family.
 */
static struct xfrm_selector ts2selector(traffic_selector_t *src,
										traffic_selector_t *dst)
{
	struct xfrm_selector sel;

	memset(&sel, 0, sizeof(sel));
	sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
	/* src or dest proto may be "any" (0), use more restrictive one */
	sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
	ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
	ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
	ts2ports(dst, &sel.dport, &sel.dport_mask);
	ts2ports(src, &sel.sport, &sel.sport_mask);
	sel.ifindex = 0;
	sel.user = 0;

	return sel;
}
477
/**
 * Convert a xfrm_selector to a src|dst traffic_selector.
 *
 * @param sel	kernel selector to convert
 * @param src	TRUE to extract the source side, FALSE for the destination
 * @return		newly created traffic selector, NULL on failure
 */
static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
{
	u_char *addr;
	u_int8_t prefixlen;
	u_int16_t port = 0;
	host_t *host = NULL;

	if (src)
	{
		addr = (u_char*)&sel->saddr;
		prefixlen = sel->prefixlen_s;
		/* a non-zero mask means a single specific port (see ts2ports()) */
		if (sel->sport_mask)
		{
			port = htons(sel->sport);
		}
	}
	else
	{
		addr = (u_char*)&sel->daddr;
		prefixlen = sel->prefixlen_d;
		if (sel->dport_mask)
		{
			port = htons(sel->dport);
		}
	}

	/* The Linux 2.6 kernel does not set the selector's family field,
	 * so as a kludge we additionally test the prefix length.
	 * NOTE(review): both branches test prefixlen_s even when extracting the
	 * destination side — presumably src and dst share one family; confirm
	 * before changing. */
	if (sel->family == AF_INET || sel->prefixlen_s == 32)
	{
		host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
	}
	else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
	{
		host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
	}

	if (host)
	{
		return traffic_selector_create_from_subnet(host, prefixlen,
												   sel->proto, port);
	}
	return NULL;
}
526
/**
 * Process a XFRM_MSG_ACQUIRE from kernel: extract reqid and selectors and
 * hand them to the kernel interface to trigger SA negotiation.
 */
static void process_acquire(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
{
	u_int32_t reqid = 0;
	int proto = 0;
	traffic_selector_t *src_ts, *dst_ts;
	struct xfrm_user_acquire *acquire;
	struct rtattr *rta;
	size_t rtasize;

	acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
	rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
	rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);

	DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");

	/* scan attributes for the SA template carrying reqid and protocol */
	while (RTA_OK(rta, rtasize))
	{
		DBG2(DBG_KNL, "  %N", xfrm_attr_type_names, rta->rta_type);

		if (rta->rta_type == XFRMA_TMPL)
		{
			struct xfrm_user_tmpl* tmpl;

			tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
			reqid = tmpl->reqid;
			proto = tmpl->id.proto;
		}
		rta = RTA_NEXT(rta, rtasize);
	}
	switch (proto)
	{
		case 0:
		case IPPROTO_ESP:
		case IPPROTO_AH:
			break;
		default:
			/* acquire for AH/ESP only, not for IPCOMP */
			return;
	}
	src_ts = selector2ts(&acquire->sel, TRUE);
	dst_ts = selector2ts(&acquire->sel, FALSE);

	/* presumably acquire() takes ownership of src_ts/dst_ts — no local
	 * cleanup here; verify against kernel_interface contract */
	hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
									 dst_ts);
}
575
576 /**
577 * process a XFRM_MSG_EXPIRE from kernel
578 */
579 static void process_expire(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
580 {
581 u_int8_t protocol;
582 u_int32_t spi, reqid;
583 struct xfrm_user_expire *expire;
584
585 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
586 protocol = expire->state.id.proto;
587 spi = expire->state.id.spi;
588 reqid = expire->state.reqid;
589
590 DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");
591
592 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
593 {
594 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
595 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
596 return;
597 }
598
599 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
600 spi, expire->hard != 0);
601 }
602
/**
 * Process a XFRM_MSG_MIGRATE from kernel (MOBIKE/MIP6 address migration):
 * extract the policy selectors and the key-manager addresses and forward
 * them to the kernel interface.
 */
static void process_migrate(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
{
	traffic_selector_t *src_ts, *dst_ts;
	host_t *local = NULL, *remote = NULL;
	host_t *old_src = NULL, *old_dst = NULL;
	host_t *new_src = NULL, *new_dst = NULL;
	struct xfrm_userpolicy_id *policy_id;
	struct rtattr *rta;
	size_t rtasize;
	u_int32_t reqid = 0;
	policy_dir_t dir;

	policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
	rta     = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
	rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);

	DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");

	src_ts = selector2ts(&policy_id->sel, TRUE);
	dst_ts = selector2ts(&policy_id->sel, FALSE);
	dir = (policy_dir_t)policy_id->dir;

	DBG2(DBG_KNL, "  policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);

	while (RTA_OK(rta, rtasize))
	{
		DBG2(DBG_KNL, "  %N", xfrm_attr_type_names, rta->rta_type);
		if (rta->rta_type == XFRMA_KMADDRESS)
		{
			struct xfrm_user_kmaddress *kmaddress;

			kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
			local  = xfrm2host(kmaddress->family, &kmaddress->local, 0);
			remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
			DBG2(DBG_KNL, "  kmaddress: %H...%H", local, remote);
		}
		else if (rta->rta_type == XFRMA_MIGRATE)
		{
			struct xfrm_user_migrate *migrate;

			/* migrate attributes are only logged; the addresses are
			 * destroyed again right away, only reqid is kept */
			migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
			old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
			old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
			new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
			new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
			reqid = migrate->reqid;
			DBG2(DBG_KNL, "  migrate %H...%H to %H...%H, reqid {%u}",
						   old_src, old_dst, new_src, new_dst, reqid);
			DESTROY_IF(old_src);
			DESTROY_IF(old_dst);
			DESTROY_IF(new_src);
			DESTROY_IF(new_dst);
		}
		rta = RTA_NEXT(rta, rtasize);
	}

	if (src_ts && dst_ts && local && remote)
	{
		/* on success, ownership of the selectors and hosts passes to
		 * migrate() — presumably; verify against kernel_interface */
		hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
										 src_ts, dst_ts, dir, local, remote);
	}
	else
	{
		/* incomplete message: clean up whatever was allocated */
		DESTROY_IF(src_ts);
		DESTROY_IF(dst_ts);
		DESTROY_IF(local);
		DESTROY_IF(remote);
	}
}
675
676 /**
677 * process a XFRM_MSG_MAPPING from kernel
678 */
679 static void process_mapping(private_kernel_netlink_ipsec_t *this,
680 struct nlmsghdr *hdr)
681 {
682 u_int32_t spi, reqid;
683 struct xfrm_user_mapping *mapping;
684 host_t *host;
685
686 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
687 spi = mapping->id.spi;
688 reqid = mapping->reqid;
689
690 DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");
691
692 if (mapping->id.proto == IPPROTO_ESP)
693 {
694 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
695 mapping->new_sport);
696 if (host)
697 {
698 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
699 spi, host);
700 }
701 }
702 }
703
/**
 * Receives events from kernel.
 *
 * Runs as a callback job: blocks on the event socket, dispatches every
 * netlink message in the datagram to its process_*() handler, then requeues
 * itself. Only the blocking recvfrom() is a thread cancellation point.
 */
static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
{
	char response[1024];
	struct nlmsghdr *hdr = (struct nlmsghdr*)response;
	struct sockaddr_nl addr;
	socklen_t addr_len = sizeof(addr);
	int len;
	bool oldstate;

	/* enable cancellation only around the blocking read so message
	 * processing below cannot be interrupted half-way */
	oldstate = thread_cancelability(TRUE);
	len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
				   (struct sockaddr*)&addr, &addr_len);
	thread_cancelability(oldstate);

	if (len < 0)
	{
		switch (errno)
		{
			case EINTR:
				/* interrupted, try again */
				return JOB_REQUEUE_DIRECT;
			case EAGAIN:
				/* no data ready, select again */
				return JOB_REQUEUE_DIRECT;
			default:
				DBG1(DBG_KNL, "unable to receive from xfrm event socket");
				/* back off to avoid busy-looping on a persistent error */
				sleep(1);
				return JOB_REQUEUE_FAIR;
		}
	}

	if (addr.nl_pid != 0)
	{	/* not from kernel. not interested, try another one */
		return JOB_REQUEUE_DIRECT;
	}

	/* a single datagram may carry several netlink messages */
	while (NLMSG_OK(hdr, len))
	{
		switch (hdr->nlmsg_type)
		{
			case XFRM_MSG_ACQUIRE:
				process_acquire(this, hdr);
				break;
			case XFRM_MSG_EXPIRE:
				process_expire(this, hdr);
				break;
			case XFRM_MSG_MIGRATE:
				process_migrate(this, hdr);
				break;
			case XFRM_MSG_MAPPING:
				process_mapping(this, hdr);
				break;
			default:
				DBG1(DBG_KNL, "received unknown event from xfrm event socket: %d", hdr->nlmsg_type);
				break;
		}
		hdr = NLMSG_NEXT(hdr, len);
	}
	return JOB_REQUEUE_DIRECT;
}
767
/**
 * Get an SPI for a specific protocol from the kernel.
 *
 * Sends an XFRM_MSG_ALLOCSPI request; the kernel reserves an SPI in
 * [min, max] and answers with an XFRM_MSG_NEWSA carrying the allocation.
 *
 * @param src		source address of the prospective SA
 * @param dst		destination address of the prospective SA
 * @param proto		IP protocol of the SA (ESP/AH/COMP)
 * @param min		minimum SPI value to allocate
 * @param max		maximum SPI value to allocate
 * @param reqid		reqid to associate with the reserved state
 * @param spi		receives the allocated SPI (network byte order)
 * @return			SUCCESS, or FAILED if no SPI was received
 */
static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
		host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
		u_int32_t reqid, u_int32_t *spi)
{
	netlink_buf_t request;
	struct nlmsghdr *hdr, *out;
	struct xfrm_userspi_info *userspi;
	u_int32_t received_spi = 0;
	size_t len;

	memset(&request, 0, sizeof(request));

	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST;
	hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));

	userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
	host2xfrm(src, &userspi->info.saddr);
	host2xfrm(dst, &userspi->info.id.daddr);
	userspi->info.id.proto = proto;
	userspi->info.mode = XFRM_MODE_TUNNEL;
	userspi->info.reqid = reqid;
	userspi->info.family = src->get_family(src);
	userspi->min = min;
	userspi->max = max;

	if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
	{
		/* walk the response: NEWSA carries the SPI, NLMSG_ERROR reports
		 * failure; any other type is skipped, DONE/handled types end the
		 * loop (note: default continues, the other cases break out) */
		hdr = out;
		while (NLMSG_OK(hdr, len))
		{
			switch (hdr->nlmsg_type)
			{
				case XFRM_MSG_NEWSA:
				{
					struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
					received_spi = usersa->id.spi;
					break;
				}
				case NLMSG_ERROR:
				{
					struct nlmsgerr *err = NLMSG_DATA(hdr);

					DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
						 strerror(-err->error), -err->error);
					break;
				}
				default:
					hdr = NLMSG_NEXT(hdr, len);
					continue;
				case NLMSG_DONE:
					break;
			}
			break;
		}
		/* out was allocated by send(), caller of send() frees it */
		free(out);
	}

	if (received_spi == 0)
	{
		return FAILED;
	}

	*spi = received_spi;
	return SUCCESS;
}
838
839 METHOD(kernel_ipsec_t, get_spi, status_t,
840 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
841 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
842 {
843 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
844
845 if (get_spi_internal(this, src, dst, protocol,
846 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
847 {
848 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
849 return FAILED;
850 }
851
852 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
853
854 return SUCCESS;
855 }
856
857 METHOD(kernel_ipsec_t, get_cpi, status_t,
858 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
859 u_int32_t reqid, u_int16_t *cpi)
860 {
861 u_int32_t received_spi = 0;
862
863 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
864
865 if (get_spi_internal(this, src, dst,
866 IPPROTO_COMP, 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
867 {
868 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
869 return FAILED;
870 }
871
872 *cpi = htons((u_int16_t)ntohl(received_spi));
873
874 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
875
876 return SUCCESS;
877 }
878
879 METHOD(kernel_ipsec_t, add_sa, status_t,
880 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
881 u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
882 u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
883 u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
884 u_int16_t cpi, bool encap, bool esn, bool inbound,
885 traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
886 {
887 netlink_buf_t request;
888 char *alg_name;
889 struct nlmsghdr *hdr;
890 struct xfrm_usersa_info *sa;
891 u_int16_t icv_size = 64;
892 status_t status = FAILED;
893
894 /* if IPComp is used, we install an additional IPComp SA. if the cpi is 0
895 * we are in the recursive call below */
896 if (ipcomp != IPCOMP_NONE && cpi != 0)
897 {
898 lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
899 add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark, tfc,
900 &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED, chunk_empty,
901 mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
902 ipcomp = IPCOMP_NONE;
903 /* use transport mode ESP SA, IPComp uses tunnel mode */
904 mode = MODE_TRANSPORT;
905 }
906
907 memset(&request, 0, sizeof(request));
908
909 if (mark.value)
910 {
911 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} "
912 "(mark %u/0x%8x)", ntohl(spi), reqid, mark.value, mark.mask);
913 }
914 else
915 {
916 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u}",
917 ntohl(spi), reqid);
918 }
919 hdr = (struct nlmsghdr*)request;
920 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
921 hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
922 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
923
924 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
925 host2xfrm(src, &sa->saddr);
926 host2xfrm(dst, &sa->id.daddr);
927 sa->id.spi = spi;
928 sa->id.proto = protocol;
929 sa->family = src->get_family(src);
930 sa->mode = mode2kernel(mode);
931 switch (mode)
932 {
933 case MODE_TUNNEL:
934 sa->flags |= XFRM_STATE_AF_UNSPEC;
935 break;
936 case MODE_BEET:
937 case MODE_TRANSPORT:
938 if(src_ts && dst_ts)
939 {
940 sa->sel = ts2selector(src_ts, dst_ts);
941 }
942 break;
943 default:
944 break;
945 }
946
947 sa->reqid = reqid;
948 sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
949 sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
950 sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
951 sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
952 /* we use lifetimes since added, not since used */
953 sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
954 sa->lft.hard_add_expires_seconds = lifetime->time.life;
955 sa->lft.soft_use_expires_seconds = 0;
956 sa->lft.hard_use_expires_seconds = 0;
957
958 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);
959
960 switch (enc_alg)
961 {
962 case ENCR_UNDEFINED:
963 /* no encryption */
964 break;
965 case ENCR_AES_CCM_ICV16:
966 case ENCR_AES_GCM_ICV16:
967 case ENCR_NULL_AUTH_AES_GMAC:
968 case ENCR_CAMELLIA_CCM_ICV16:
969 icv_size += 32;
970 /* FALL */
971 case ENCR_AES_CCM_ICV12:
972 case ENCR_AES_GCM_ICV12:
973 case ENCR_CAMELLIA_CCM_ICV12:
974 icv_size += 32;
975 /* FALL */
976 case ENCR_AES_CCM_ICV8:
977 case ENCR_AES_GCM_ICV8:
978 case ENCR_CAMELLIA_CCM_ICV8:
979 {
980 struct xfrm_algo_aead *algo;
981
982 alg_name = lookup_algorithm(encryption_algs, enc_alg);
983 if (alg_name == NULL)
984 {
985 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
986 encryption_algorithm_names, enc_alg);
987 goto failed;
988 }
989 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
990 encryption_algorithm_names, enc_alg, enc_key.len * 8);
991
992 rthdr->rta_type = XFRMA_ALG_AEAD;
993 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) + enc_key.len);
994 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
995 if (hdr->nlmsg_len > sizeof(request))
996 {
997 goto failed;
998 }
999
1000 algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
1001 algo->alg_key_len = enc_key.len * 8;
1002 algo->alg_icv_len = icv_size;
1003 strcpy(algo->alg_name, alg_name);
1004 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1005
1006 rthdr = XFRM_RTA_NEXT(rthdr);
1007 break;
1008 }
1009 default:
1010 {
1011 struct xfrm_algo *algo;
1012
1013 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1014 if (alg_name == NULL)
1015 {
1016 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1017 encryption_algorithm_names, enc_alg);
1018 goto failed;
1019 }
1020 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1021 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1022
1023 rthdr->rta_type = XFRMA_ALG_CRYPT;
1024 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
1025 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1026 if (hdr->nlmsg_len > sizeof(request))
1027 {
1028 goto failed;
1029 }
1030
1031 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1032 algo->alg_key_len = enc_key.len * 8;
1033 strcpy(algo->alg_name, alg_name);
1034 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1035
1036 rthdr = XFRM_RTA_NEXT(rthdr);
1037 }
1038 }
1039
1040 if (int_alg != AUTH_UNDEFINED)
1041 {
1042 alg_name = lookup_algorithm(integrity_algs, int_alg);
1043 if (alg_name == NULL)
1044 {
1045 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1046 integrity_algorithm_names, int_alg);
1047 goto failed;
1048 }
1049 DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
1050 integrity_algorithm_names, int_alg, int_key.len * 8);
1051
1052 if (int_alg == AUTH_HMAC_SHA2_256_128)
1053 {
1054 struct xfrm_algo_auth* algo;
1055
1056 /* the kernel uses SHA256 with 96 bit truncation by default,
1057 * use specified truncation size supported by newer kernels */
1058 rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
1059 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) + int_key.len);
1060
1061 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1062 if (hdr->nlmsg_len > sizeof(request))
1063 {
1064 goto failed;
1065 }
1066
1067 algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
1068 algo->alg_key_len = int_key.len * 8;
1069 algo->alg_trunc_len = 128;
1070 strcpy(algo->alg_name, alg_name);
1071 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1072 }
1073 else
1074 {
1075 struct xfrm_algo* algo;
1076
1077 rthdr->rta_type = XFRMA_ALG_AUTH;
1078 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);
1079
1080 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1081 if (hdr->nlmsg_len > sizeof(request))
1082 {
1083 goto failed;
1084 }
1085
1086 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1087 algo->alg_key_len = int_key.len * 8;
1088 strcpy(algo->alg_name, alg_name);
1089 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1090 }
1091 rthdr = XFRM_RTA_NEXT(rthdr);
1092 }
1093
1094 if (ipcomp != IPCOMP_NONE)
1095 {
1096 rthdr->rta_type = XFRMA_ALG_COMP;
1097 alg_name = lookup_algorithm(compression_algs, ipcomp);
1098 if (alg_name == NULL)
1099 {
1100 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1101 ipcomp_transform_names, ipcomp);
1102 goto failed;
1103 }
1104 DBG2(DBG_KNL, " using compression algorithm %N",
1105 ipcomp_transform_names, ipcomp);
1106
1107 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
1108 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1109 if (hdr->nlmsg_len > sizeof(request))
1110 {
1111 goto failed;
1112 }
1113
1114 struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1115 algo->alg_key_len = 0;
1116 strcpy(algo->alg_name, alg_name);
1117
1118 rthdr = XFRM_RTA_NEXT(rthdr);
1119 }
1120
1121 if (encap)
1122 {
1123 struct xfrm_encap_tmpl *tmpl;
1124
1125 rthdr->rta_type = XFRMA_ENCAP;
1126 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1127
1128 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1129 if (hdr->nlmsg_len > sizeof(request))
1130 {
1131 goto failed;
1132 }
1133
1134 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
1135 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1136 tmpl->encap_sport = htons(src->get_port(src));
1137 tmpl->encap_dport = htons(dst->get_port(dst));
1138 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1139 /* encap_oa could probably be derived from the
1140 * traffic selectors [rfc4306, p39]. In the netlink kernel implementation
1141 * pluto does the same as we do here but it uses encap_oa in the
1142 * pfkey implementation. BUT as /usr/src/linux/net/key/af_key.c indicates
1143 * the kernel ignores it anyway
1144 * -> does that mean that NAT-T encap doesn't work in transport mode?
1145 * No. The reason the kernel ignores NAT-OA is that it recomputes
1146 * (or, rather, just ignores) the checksum. If packets pass
1147 * the IPsec checks it marks them "checksum ok" so OA isn't needed. */
1148 rthdr = XFRM_RTA_NEXT(rthdr);
1149 }
1150
1151 if (mark.value)
1152 {
1153 struct xfrm_mark *mrk;
1154
1155 rthdr->rta_type = XFRMA_MARK;
1156 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1157
1158 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1159 if (hdr->nlmsg_len > sizeof(request))
1160 {
1161 goto failed;
1162 }
1163
1164 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1165 mrk->v = mark.value;
1166 mrk->m = mark.mask;
1167 rthdr = XFRM_RTA_NEXT(rthdr);
1168 }
1169
1170 if (tfc)
1171 {
1172 u_int32_t *tfcpad;
1173
1174 rthdr->rta_type = XFRMA_TFCPAD;
1175 rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));
1176
1177 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1178 if (hdr->nlmsg_len > sizeof(request))
1179 {
1180 goto failed;
1181 }
1182
1183 tfcpad = (u_int32_t*)RTA_DATA(rthdr);
1184 *tfcpad = tfc;
1185 rthdr = XFRM_RTA_NEXT(rthdr);
1186 }
1187
1188 if (protocol != IPPROTO_COMP)
1189 {
1190 if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
1191 {
1192 /* for ESN or larger replay windows we need the new
1193 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
1194 struct xfrm_replay_state_esn *replay;
1195
1196 rthdr->rta_type = XFRMA_REPLAY_ESN_VAL;
1197 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1198 (this->replay_window + 7) / 8);
1199
1200 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1201 if (hdr->nlmsg_len > sizeof(request))
1202 {
1203 goto failed;
1204 }
1205
1206 replay = (struct xfrm_replay_state_esn*)RTA_DATA(rthdr);
1207 /* bmp_len contains number uf __u32's */
1208 replay->bmp_len = this->replay_bmp;
1209 replay->replay_window = this->replay_window;
1210
1211 rthdr = XFRM_RTA_NEXT(rthdr);
1212 if (esn)
1213 {
1214 sa->flags |= XFRM_STATE_ESN;
1215 }
1216 }
1217 else
1218 {
1219 sa->replay_window = DEFAULT_REPLAY_WINDOW;
1220 }
1221 }
1222
1223 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1224 {
1225 if (mark.value)
1226 {
1227 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
1228 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1229 }
1230 else
1231 {
1232 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
1233 }
1234 goto failed;
1235 }
1236
1237 status = SUCCESS;
1238
1239 failed:
1240 memwipe(request, sizeof(request));
1241 return status;
1242 }
1243
1244 /**
1245 * Get the ESN replay state (i.e. sequence numbers) of an SA.
1246 *
1247 * Allocates into one the replay state structure we get from the kernel.
1248 */
1249 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1250 u_int32_t spi, u_int8_t protocol, host_t *dst,
1251 struct xfrm_replay_state_esn **replay_esn,
1252 struct xfrm_replay_state **replay)
1253 {
1254 netlink_buf_t request;
1255 struct nlmsghdr *hdr, *out = NULL;
1256 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1257 size_t len;
1258 struct rtattr *rta;
1259 size_t rtasize;
1260
1261 memset(&request, 0, sizeof(request));
1262
1263 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1264 ntohl(spi));
1265
1266 hdr = (struct nlmsghdr*)request;
1267 hdr->nlmsg_flags = NLM_F_REQUEST;
1268 hdr->nlmsg_type = XFRM_MSG_GETAE;
1269 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1270
1271 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1272 aevent_id->flags = XFRM_AE_RVAL;
1273
1274 host2xfrm(dst, &aevent_id->sa_id.daddr);
1275 aevent_id->sa_id.spi = spi;
1276 aevent_id->sa_id.proto = protocol;
1277 aevent_id->sa_id.family = dst->get_family(dst);
1278
1279 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1280 {
1281 hdr = out;
1282 while (NLMSG_OK(hdr, len))
1283 {
1284 switch (hdr->nlmsg_type)
1285 {
1286 case XFRM_MSG_NEWAE:
1287 {
1288 out_aevent = NLMSG_DATA(hdr);
1289 break;
1290 }
1291 case NLMSG_ERROR:
1292 {
1293 struct nlmsgerr *err = NLMSG_DATA(hdr);
1294 DBG1(DBG_KNL, "querying replay state from SAD entry failed: %s (%d)",
1295 strerror(-err->error), -err->error);
1296 break;
1297 }
1298 default:
1299 hdr = NLMSG_NEXT(hdr, len);
1300 continue;
1301 case NLMSG_DONE:
1302 break;
1303 }
1304 break;
1305 }
1306 }
1307
1308 if (out_aevent)
1309 {
1310 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1311 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1312 while (RTA_OK(rta, rtasize))
1313 {
1314 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1315 RTA_PAYLOAD(rta) == sizeof(**replay))
1316 {
1317 *replay = malloc(RTA_PAYLOAD(rta));
1318 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1319 break;
1320 }
1321 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1322 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1323 {
1324 *replay_esn = malloc(RTA_PAYLOAD(rta));
1325 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1326 break;
1327 }
1328 rta = RTA_NEXT(rta, rtasize);
1329 }
1330 }
1331 free(out);
1332 }
1333
1334 METHOD(kernel_ipsec_t, query_sa, status_t,
1335 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1336 u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
1337 {
1338 netlink_buf_t request;
1339 struct nlmsghdr *out = NULL, *hdr;
1340 struct xfrm_usersa_id *sa_id;
1341 struct xfrm_usersa_info *sa = NULL;
1342 status_t status = FAILED;
1343 size_t len;
1344
1345 memset(&request, 0, sizeof(request));
1346
1347 if (mark.value)
1348 {
1349 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%8x)",
1350 ntohl(spi), mark.value, mark.mask);
1351 }
1352 else
1353 {
1354 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x", ntohl(spi));
1355 }
1356 hdr = (struct nlmsghdr*)request;
1357 hdr->nlmsg_flags = NLM_F_REQUEST;
1358 hdr->nlmsg_type = XFRM_MSG_GETSA;
1359 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1360
1361 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1362 host2xfrm(dst, &sa_id->daddr);
1363 sa_id->spi = spi;
1364 sa_id->proto = protocol;
1365 sa_id->family = dst->get_family(dst);
1366
1367 if (mark.value)
1368 {
1369 struct xfrm_mark *mrk;
1370 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1371
1372 rthdr->rta_type = XFRMA_MARK;
1373 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1374 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1375 if (hdr->nlmsg_len > sizeof(request))
1376 {
1377 return FAILED;
1378 }
1379
1380 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1381 mrk->v = mark.value;
1382 mrk->m = mark.mask;
1383 }
1384
1385 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1386 {
1387 hdr = out;
1388 while (NLMSG_OK(hdr, len))
1389 {
1390 switch (hdr->nlmsg_type)
1391 {
1392 case XFRM_MSG_NEWSA:
1393 {
1394 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1395 break;
1396 }
1397 case NLMSG_ERROR:
1398 {
1399 struct nlmsgerr *err = NLMSG_DATA(hdr);
1400
1401 if (mark.value)
1402 {
1403 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1404 "(mark %u/0x%8x) failed: %s (%d)",
1405 ntohl(spi), mark.value, mark.mask,
1406 strerror(-err->error), -err->error);
1407 }
1408 else
1409 {
1410 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1411 "failed: %s (%d)", ntohl(spi),
1412 strerror(-err->error), -err->error);
1413 }
1414 break;
1415 }
1416 default:
1417 hdr = NLMSG_NEXT(hdr, len);
1418 continue;
1419 case NLMSG_DONE:
1420 break;
1421 }
1422 break;
1423 }
1424 }
1425
1426 if (sa == NULL)
1427 {
1428 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1429 }
1430 else
1431 {
1432 *bytes = sa->curlft.bytes;
1433 status = SUCCESS;
1434 }
1435 memwipe(out, len);
1436 free(out);
1437 return status;
1438 }
1439
1440 METHOD(kernel_ipsec_t, del_sa, status_t,
1441 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1442 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1443 {
1444 netlink_buf_t request;
1445 struct nlmsghdr *hdr;
1446 struct xfrm_usersa_id *sa_id;
1447
1448 /* if IPComp was used, we first delete the additional IPComp SA */
1449 if (cpi)
1450 {
1451 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1452 }
1453
1454 memset(&request, 0, sizeof(request));
1455
1456 if (mark.value)
1457 {
1458 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%8x)",
1459 ntohl(spi), mark.value, mark.mask);
1460 }
1461 else
1462 {
1463 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x", ntohl(spi));
1464 }
1465 hdr = (struct nlmsghdr*)request;
1466 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1467 hdr->nlmsg_type = XFRM_MSG_DELSA;
1468 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1469
1470 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1471 host2xfrm(dst, &sa_id->daddr);
1472 sa_id->spi = spi;
1473 sa_id->proto = protocol;
1474 sa_id->family = dst->get_family(dst);
1475
1476 if (mark.value)
1477 {
1478 struct xfrm_mark *mrk;
1479 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1480
1481 rthdr->rta_type = XFRMA_MARK;
1482 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1483 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1484 if (hdr->nlmsg_len > sizeof(request))
1485 {
1486 return FAILED;
1487 }
1488
1489 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1490 mrk->v = mark.value;
1491 mrk->m = mark.mask;
1492 }
1493
1494 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1495 {
1496 if (mark.value)
1497 {
1498 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1499 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1500 }
1501 else
1502 {
1503 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x", ntohl(spi));
1504 }
1505 return FAILED;
1506 }
1507 if (mark.value)
1508 {
1509 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%8x)",
1510 ntohl(spi), mark.value, mark.mask);
1511 }
1512 else
1513 {
1514 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x", ntohl(spi));
1515 }
1516 return SUCCESS;
1517 }
1518
1519 METHOD(kernel_ipsec_t, update_sa, status_t,
1520 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1521 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1522 bool old_encap, bool new_encap, mark_t mark)
1523 {
1524 netlink_buf_t request;
1525 u_char *pos;
1526 struct nlmsghdr *hdr, *out = NULL;
1527 struct xfrm_usersa_id *sa_id;
1528 struct xfrm_usersa_info *out_sa = NULL, *sa;
1529 size_t len;
1530 struct rtattr *rta;
1531 size_t rtasize;
1532 struct xfrm_encap_tmpl* tmpl = NULL;
1533 struct xfrm_replay_state *replay = NULL;
1534 struct xfrm_replay_state_esn *replay_esn = NULL;
1535 status_t status = FAILED;
1536
1537 /* if IPComp is used, we first update the IPComp SA */
1538 if (cpi)
1539 {
1540 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1541 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1542 }
1543
1544 memset(&request, 0, sizeof(request));
1545
1546 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1547
1548 /* query the existing SA first */
1549 hdr = (struct nlmsghdr*)request;
1550 hdr->nlmsg_flags = NLM_F_REQUEST;
1551 hdr->nlmsg_type = XFRM_MSG_GETSA;
1552 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1553
1554 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1555 host2xfrm(dst, &sa_id->daddr);
1556 sa_id->spi = spi;
1557 sa_id->proto = protocol;
1558 sa_id->family = dst->get_family(dst);
1559
1560 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1561 {
1562 hdr = out;
1563 while (NLMSG_OK(hdr, len))
1564 {
1565 switch (hdr->nlmsg_type)
1566 {
1567 case XFRM_MSG_NEWSA:
1568 {
1569 out_sa = NLMSG_DATA(hdr);
1570 break;
1571 }
1572 case NLMSG_ERROR:
1573 {
1574 struct nlmsgerr *err = NLMSG_DATA(hdr);
1575 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1576 strerror(-err->error), -err->error);
1577 break;
1578 }
1579 default:
1580 hdr = NLMSG_NEXT(hdr, len);
1581 continue;
1582 case NLMSG_DONE:
1583 break;
1584 }
1585 break;
1586 }
1587 }
1588 if (out_sa == NULL)
1589 {
1590 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1591 goto failed;
1592 }
1593
1594 get_replay_state(this, spi, protocol, dst, &replay_esn, &replay);
1595
1596 /* delete the old SA (without affecting the IPComp SA) */
1597 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1598 {
1599 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x", ntohl(spi));
1600 goto failed;
1601 }
1602
1603 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1604 ntohl(spi), src, dst, new_src, new_dst);
1605 /* copy over the SA from out to request */
1606 hdr = (struct nlmsghdr*)request;
1607 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1608 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1609 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1610 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1611 sa = NLMSG_DATA(hdr);
1612 sa->family = new_dst->get_family(new_dst);
1613
1614 if (!src->ip_equals(src, new_src))
1615 {
1616 host2xfrm(new_src, &sa->saddr);
1617 }
1618 if (!dst->ip_equals(dst, new_dst))
1619 {
1620 host2xfrm(new_dst, &sa->id.daddr);
1621 }
1622
1623 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1624 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1625 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1626 while(RTA_OK(rta, rtasize))
1627 {
1628 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1629 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1630 {
1631 if (rta->rta_type == XFRMA_ENCAP)
1632 { /* update encap tmpl */
1633 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1634 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1635 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1636 }
1637 memcpy(pos, rta, rta->rta_len);
1638 pos += RTA_ALIGN(rta->rta_len);
1639 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1640 }
1641 rta = RTA_NEXT(rta, rtasize);
1642 }
1643
1644 rta = (struct rtattr*)pos;
1645 if (tmpl == NULL && new_encap)
1646 { /* add tmpl if we are enabling it */
1647 rta->rta_type = XFRMA_ENCAP;
1648 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1649
1650 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1651 if (hdr->nlmsg_len > sizeof(request))
1652 {
1653 goto failed;
1654 }
1655
1656 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1657 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1658 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1659 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1660 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1661
1662 rta = XFRM_RTA_NEXT(rta);
1663 }
1664
1665 if (replay_esn)
1666 {
1667 rta->rta_type = XFRMA_REPLAY_ESN_VAL;
1668 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1669 this->replay_bmp);
1670
1671 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1672 if (hdr->nlmsg_len > sizeof(request))
1673 {
1674 goto failed;
1675 }
1676 memcpy(RTA_DATA(rta), replay_esn,
1677 sizeof(struct xfrm_replay_state_esn) + this->replay_bmp);
1678
1679 rta = XFRM_RTA_NEXT(rta);
1680 }
1681 else if (replay)
1682 {
1683 rta->rta_type = XFRMA_REPLAY_VAL;
1684 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
1685
1686 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1687 if (hdr->nlmsg_len > sizeof(request))
1688 {
1689 goto failed;
1690 }
1691 memcpy(RTA_DATA(rta), replay, sizeof(replay));
1692
1693 rta = XFRM_RTA_NEXT(rta);
1694 }
1695 else
1696 {
1697 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
1698 "with SPI %.8x", ntohl(spi));
1699 }
1700
1701 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1702 {
1703 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1704 goto failed;
1705 }
1706
1707 status = SUCCESS;
1708 failed:
1709 free(replay);
1710 free(replay_esn);
1711 memwipe(out, len);
1712 free(out);
1713
1714 return status;
1715 }
1716
/**
 * Install or update a policy in the SPD.
 *
 * Policies are reference-counted in this->policies: if an identical policy
 * (same selector, mark and direction) already exists, only its refcount is
 * increased and the kernel policy is refreshed via XFRM_MSG_UPDPOLICY.
 * For IPsec policies, one XFRMA_TMPL entry is appended per protocol in
 * use (IPComp, ESP, AH).  For tunnel-mode forward policies a source route
 * may additionally be installed and cached on the policy entry.
 */
METHOD(kernel_ipsec_t, add_policy, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
	policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
	mark_t mark, bool routed)
{
	policy_entry_t *current, *policy;
	bool found = FALSE;
	netlink_buf_t request;
	struct xfrm_userpolicy_info *policy_info;
	struct nlmsghdr *hdr;
	int i;

	/* create a policy */
	policy = malloc_thing(policy_entry_t);
	memset(policy, 0, sizeof(policy_entry_t));
	policy->sel = ts2selector(src_ts, dst_ts);
	policy->mark = mark.value & mark.mask;
	policy->direction = direction;

	/* find the policy, which matches EXACTLY */
	this->mutex->lock(this->mutex);
	current = this->policies->get(this->policies, policy);
	if (current)
	{
		/* use existing policy */
		current->refcount++;
		if (mark.value)
		{
			DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%8x) "
				 "already exists, increasing refcount",
				 src_ts, dst_ts, policy_dir_names, direction,
				 mark.value, mark.mask);
		}
		else
		{
			DBG2(DBG_KNL, "policy %R === %R %N "
				 "already exists, increasing refcount",
				 src_ts, dst_ts, policy_dir_names, direction);
		}
		free(policy);
		policy = current;
		found = TRUE;
	}
	else
	{	/* apply the new one, if we have no such policy */
		this->policies->put(this->policies, policy, policy);
		policy->refcount = 1;
	}

	if (mark.value)
	{
		DBG2(DBG_KNL, "adding policy %R === %R %N (mark %u/0x%8x)",
			 src_ts, dst_ts, policy_dir_names, direction,
			 mark.value, mark.mask);
	}
	else
	{
		DBG2(DBG_KNL, "adding policy %R === %R %N",
			 src_ts, dst_ts, policy_dir_names, direction);
	}

	/* UPDPOLICY refreshes an existing kernel policy, NEWPOLICY adds one */
	memset(&request, 0, sizeof(request));
	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	hdr->nlmsg_type = found ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));

	policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
	policy_info->sel = policy->sel;
	policy_info->dir = policy->direction;

	/* calculate priority based on selector size, small size = high prio */
	policy_info->priority = routed ? PRIO_LOW : PRIO_HIGH;
	policy_info->priority -= policy->sel.prefixlen_s;
	policy_info->priority -= policy->sel.prefixlen_d;
	policy_info->priority <<= 2; /* make some room for the two flags */
	policy_info->priority += policy->sel.sport_mask ||
							 policy->sel.dport_mask ? 0 : 2;
	policy_info->priority += policy->sel.proto ? 0 : 1;

	policy_info->action = type != POLICY_DROP ? XFRM_POLICY_ALLOW
											  : XFRM_POLICY_BLOCK;
	policy_info->share = XFRM_SHARE_ANY;
	this->mutex->unlock(this->mutex);

	/* policies don't expire */
	policy_info->lft.soft_byte_limit = XFRM_INF;
	policy_info->lft.soft_packet_limit = XFRM_INF;
	policy_info->lft.hard_byte_limit = XFRM_INF;
	policy_info->lft.hard_packet_limit = XFRM_INF;
	policy_info->lft.soft_add_expires_seconds = 0;
	policy_info->lft.hard_add_expires_seconds = 0;
	policy_info->lft.soft_use_expires_seconds = 0;
	policy_info->lft.hard_use_expires_seconds = 0;

	struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);

	if (type == POLICY_IPSEC)
	{
		/* append one xfrm_user_tmpl per protocol in use; the first gets
		 * the negotiated mode, the others use transport mode */
		struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
		struct {
			u_int8_t proto;
			bool use;
		} protos[] = {
			{ IPPROTO_COMP, sa->ipcomp.transform != IPCOMP_NONE },
			{ IPPROTO_ESP, sa->esp.use },
			{ IPPROTO_AH, sa->ah.use },
		};
		ipsec_mode_t proto_mode = sa->mode;

		rthdr->rta_type = XFRMA_TMPL;
		rthdr->rta_len = 0; /* actual length is set below */

		for (i = 0; i < countof(protos); i++)
		{
			if (!protos[i].use)
			{
				continue;
			}

			rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
			hdr->nlmsg_len += RTA_ALIGN(RTA_LENGTH(sizeof(struct xfrm_user_tmpl)));
			if (hdr->nlmsg_len > sizeof(request))
			{
				return FAILED;
			}

			tmpl->reqid = sa->reqid;
			tmpl->id.proto = protos[i].proto;
			tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
			tmpl->mode = mode2kernel(proto_mode);
			/* the IPComp SA is only installed for larger packets, so it
			 * is optional for inbound/forward policies */
			tmpl->optional = protos[i].proto == IPPROTO_COMP &&
							 direction != POLICY_OUT;
			tmpl->family = src->get_family(src);

			if (proto_mode == MODE_TUNNEL)
			{	/* only for tunnel mode */
				host2xfrm(src, &tmpl->saddr);
				host2xfrm(dst, &tmpl->id.daddr);
			}

			tmpl++;

			/* use transport mode for other SAs */
			proto_mode = MODE_TRANSPORT;
		}

		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (mark.value)
	{	/* policies are additionally keyed by mark, append it as attribute */
		struct xfrm_mark *mrk;

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));

		hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
	{
		DBG1(DBG_KNL, "unable to add policy %R === %R %N", src_ts, dst_ts,
			 policy_dir_names, direction);
		return FAILED;
	}

	/* install a route, if:
	 * - we are NOT updating a policy
	 * - this is a forward policy (to just get one for each child)
	 * - we are in tunnel/BEET mode
	 * - routing is not disabled via strongswan.conf
	 */
	if (policy->route == NULL && direction == POLICY_FWD &&
		sa->mode != MODE_TRANSPORT && this->install_routes)
	{
		route_entry_t *route = malloc_thing(route_entry_t);

		if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
				dst_ts, &route->src_ip) == SUCCESS)
		{
			/* get the nexthop to src (src as we are in POLICY_FWD).*/
			route->gateway = hydra->kernel_interface->get_nexthop(
										hydra->kernel_interface, src);
			/* install route via outgoing interface */
			route->if_name = hydra->kernel_interface->get_interface(
										hydra->kernel_interface, dst);
			route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
			memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
			route->prefixlen = policy->sel.prefixlen_s;

			if (route->if_name)
			{
				DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
					 src_ts, route->gateway, route->src_ip, route->if_name);
				switch (hydra->kernel_interface->add_route(
									hydra->kernel_interface, route->dst_net,
									route->prefixlen, route->gateway,
									route->src_ip, route->if_name))
				{
					default:
						DBG1(DBG_KNL, "unable to install source route for %H",
							 route->src_ip);
						/* FALL */
					case ALREADY_DONE:
						/* route exists, do not uninstall */
						route_entry_destroy(route);
						break;
					case SUCCESS:
						/* cache the installed route */
						policy->route = route;
						break;
				}
			}
			else
			{
				route_entry_destroy(route);
			}
		}
		else
		{
			free(route);
		}
	}
	return SUCCESS;
}
1952
1953 METHOD(kernel_ipsec_t, query_policy, status_t,
1954 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
1955 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
1956 u_int32_t *use_time)
1957 {
1958 netlink_buf_t request;
1959 struct nlmsghdr *out = NULL, *hdr;
1960 struct xfrm_userpolicy_id *policy_id;
1961 struct xfrm_userpolicy_info *policy = NULL;
1962 size_t len;
1963
1964 memset(&request, 0, sizeof(request));
1965
1966 if (mark.value)
1967 {
1968 DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%8x)",
1969 src_ts, dst_ts, policy_dir_names, direction,
1970 mark.value, mark.mask);
1971 }
1972 else
1973 {
1974 DBG2(DBG_KNL, "querying policy %R === %R %N", src_ts, dst_ts,
1975 policy_dir_names, direction);
1976 }
1977 hdr = (struct nlmsghdr*)request;
1978 hdr->nlmsg_flags = NLM_F_REQUEST;
1979 hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
1980 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
1981
1982 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
1983 policy_id->sel = ts2selector(src_ts, dst_ts);
1984 policy_id->dir = direction;
1985
1986 if (mark.value)
1987 {
1988 struct xfrm_mark *mrk;
1989 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
1990
1991 rthdr->rta_type = XFRMA_MARK;
1992 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1993
1994 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1995 if (hdr->nlmsg_len > sizeof(request))
1996 {
1997 return FAILED;
1998 }
1999
2000 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2001 mrk->v = mark.value;
2002 mrk->m = mark.mask;
2003 }
2004
2005 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
2006 {
2007 hdr = out;
2008 while (NLMSG_OK(hdr, len))
2009 {
2010 switch (hdr->nlmsg_type)
2011 {
2012 case XFRM_MSG_NEWPOLICY:
2013 {
2014 policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2015 break;
2016 }
2017 case NLMSG_ERROR:
2018 {
2019 struct nlmsgerr *err = NLMSG_DATA(hdr);
2020 DBG1(DBG_KNL, "querying policy failed: %s (%d)",
2021 strerror(-err->error), -err->error);
2022 break;
2023 }
2024 default:
2025 hdr = NLMSG_NEXT(hdr, len);
2026 continue;
2027 case NLMSG_DONE:
2028 break;
2029 }
2030 break;
2031 }
2032 }
2033
2034 if (policy == NULL)
2035 {
2036 DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
2037 policy_dir_names, direction);
2038 free(out);
2039 return FAILED;
2040 }
2041
2042 if (policy->curlft.use_time)
2043 {
2044 /* we need the monotonic time, but the kernel returns system time. */
2045 *use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
2046 }
2047 else
2048 {
2049 *use_time = 0;
2050 }
2051
2052 free(out);
2053 return SUCCESS;
2054 }
2055
/**
 * Remove a policy from the SPD.
 *
 * Policies are reference-counted: the kernel policy (and any cached source
 * route) is only removed once the last reference is released.  Returns
 * NOT_FOUND if no matching policy entry is tracked.
 */
METHOD(kernel_ipsec_t, del_policy, status_t,
	private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
	traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
	bool unrouted)
{
	policy_entry_t *current, policy, *to_delete = NULL;
	route_entry_t *route;
	netlink_buf_t request;
	struct nlmsghdr *hdr;
	struct xfrm_userpolicy_id *policy_id;

	if (mark.value)
	{
		DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x)",
			 src_ts, dst_ts, policy_dir_names, direction,
			 mark.value, mark.mask);
	}
	else
	{
		DBG2(DBG_KNL, "deleting policy %R === %R %N",
			 src_ts, dst_ts, policy_dir_names, direction);
	}

	/* create a policy */
	/* stack-allocated key used only for the hashtable lookup */
	memset(&policy, 0, sizeof(policy_entry_t));
	policy.sel = ts2selector(src_ts, dst_ts);
	policy.mark = mark.value & mark.mask;
	policy.direction = direction;

	/* find the policy */
	this->mutex->lock(this->mutex);
	current = this->policies->get(this->policies, &policy);
	if (current)
	{
		to_delete = current;
		if (--to_delete->refcount > 0)
		{
			/* is used by more SAs, keep in kernel */
			DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
			this->mutex->unlock(this->mutex);
			return SUCCESS;
		}
		/* remove if last reference */
		this->policies->remove(this->policies, to_delete);
	}
	this->mutex->unlock(this->mutex);
	if (!to_delete)
	{
		if (mark.value)
		{
			DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x) "
				 "failed, not found", src_ts, dst_ts, policy_dir_names,
				 direction, mark.value, mark.mask);
		}
		else
		{
			DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
				 src_ts, dst_ts, policy_dir_names, direction);
		}
		return NOT_FOUND;
	}

	/* build the DELPOLICY request from the removed entry's selector */
	memset(&request, 0, sizeof(request));

	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));

	policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
	policy_id->sel = to_delete->sel;
	policy_id->dir = direction;

	if (mark.value)
	{	/* policies are additionally keyed by mark, append it as attribute */
		struct xfrm_mark *mrk;
		struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
		hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	/* take over the cached route before releasing the entry; the request
	 * above already copied everything it needs from it */
	route = to_delete->route;
	free(to_delete);

	if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
	{
		if (mark.value)
		{
			DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
				 "(mark %u/0x%8x)", src_ts, dst_ts, policy_dir_names,
				 direction, mark.value, mark.mask);
		}
		else
		{
			DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
				 src_ts, dst_ts, policy_dir_names, direction);
		}
		return FAILED;
	}

	if (route)
	{	/* uninstall the source route that was installed with the policy */
		if (hydra->kernel_interface->del_route(hydra->kernel_interface,
				route->dst_net, route->prefixlen, route->gateway,
				route->src_ip, route->if_name) != SUCCESS)
		{
			DBG1(DBG_KNL, "error uninstalling route installed with "
				 "policy %R === %R %N", src_ts, dst_ts,
				 policy_dir_names, direction);
		}
		route_entry_destroy(route);
	}
	return SUCCESS;
}
2180
2181 METHOD(kernel_ipsec_t, bypass_socket, bool,
2182 private_kernel_netlink_ipsec_t *this, int fd, int family)
2183 {
2184 struct xfrm_userpolicy_info policy;
2185 u_int sol, ipsec_policy;
2186
2187 switch (family)
2188 {
2189 case AF_INET:
2190 sol = SOL_IP;
2191 ipsec_policy = IP_XFRM_POLICY;
2192 break;
2193 case AF_INET6:
2194 sol = SOL_IPV6;
2195 ipsec_policy = IPV6_XFRM_POLICY;
2196 break;
2197 default:
2198 return FALSE;
2199 }
2200
2201 memset(&policy, 0, sizeof(policy));
2202 policy.action = XFRM_POLICY_ALLOW;
2203 policy.sel.family = family;
2204
2205 policy.dir = XFRM_POLICY_OUT;
2206 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2207 {
2208 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2209 strerror(errno));
2210 return FALSE;
2211 }
2212 policy.dir = XFRM_POLICY_IN;
2213 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2214 {
2215 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2216 strerror(errno));
2217 return FALSE;
2218 }
2219 return TRUE;
2220 }
2221
2222 METHOD(kernel_ipsec_t, destroy, void,
2223 private_kernel_netlink_ipsec_t *this)
2224 {
2225 enumerator_t *enumerator;
2226 policy_entry_t *policy;
2227
2228 if (this->job)
2229 {
2230 this->job->cancel(this->job);
2231 }
2232 if (this->socket_xfrm_events > 0)
2233 {
2234 close(this->socket_xfrm_events);
2235 }
2236 DESTROY_IF(this->socket_xfrm);
2237 enumerator = this->policies->create_enumerator(this->policies);
2238 while (enumerator->enumerate(enumerator, &policy, &policy))
2239 {
2240 free(policy);
2241 }
2242 enumerator->destroy(enumerator);
2243 this->policies->destroy(this->policies);
2244 this->mutex->destroy(this->mutex);
2245 free(this);
2246 }
2247
2248 /*
2249 * Described in header.
2250 */
2251 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2252 {
2253 private_kernel_netlink_ipsec_t *this;
2254 struct sockaddr_nl addr;
2255 int fd;
2256
2257 INIT(this,
2258 .public = {
2259 .interface = {
2260 .get_spi = _get_spi,
2261 .get_cpi = _get_cpi,
2262 .add_sa = _add_sa,
2263 .update_sa = _update_sa,
2264 .query_sa = _query_sa,
2265 .del_sa = _del_sa,
2266 .add_policy = _add_policy,
2267 .query_policy = _query_policy,
2268 .del_policy = _del_policy,
2269 .bypass_socket = _bypass_socket,
2270 .destroy = _destroy,
2271 },
2272 },
2273 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2274 (hashtable_equals_t)policy_equals, 32),
2275 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2276 .install_routes = lib->settings->get_bool(lib->settings,
2277 "%s.install_routes", TRUE, hydra->daemon),
2278 .replay_window = lib->settings->get_int(lib->settings,
2279 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2280 );
2281
2282 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2283 (sizeof(u_int32_t) * 8);
2284
2285 if (streq(hydra->daemon, "pluto"))
2286 { /* no routes for pluto, they are installed via updown script */
2287 this->install_routes = FALSE;
2288 }
2289
2290 /* disable lifetimes for allocated SPIs in kernel */
2291 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2292 if (fd)
2293 {
2294 ignore_result(write(fd, "165", 3));
2295 close(fd);
2296 }
2297
2298 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2299 if (!this->socket_xfrm)
2300 {
2301 destroy(this);
2302 return NULL;
2303 }
2304
2305 memset(&addr, 0, sizeof(addr));
2306 addr.nl_family = AF_NETLINK;
2307
2308 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2309 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2310 if (this->socket_xfrm_events <= 0)
2311 {
2312 DBG1(DBG_KNL, "unable to create XFRM event socket");
2313 destroy(this);
2314 return NULL;
2315 }
2316 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2317 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2318 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2319 {
2320 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2321 destroy(this);
2322 return NULL;
2323 }
2324 this->job = callback_job_create_with_prio((callback_job_cb_t)receive_events,
2325 this, NULL, NULL, JOB_PRIO_CRITICAL);
2326 lib->processor->queue_job(lib->processor, (job_t*)this->job);
2327
2328 return &this->public;
2329 }
2330