Implemented Traffic Flow Confidentiality padding in kernel_interface
[strongswan.git] / src / libhydra / plugins / kernel_netlink / kernel_netlink_ipsec.c
1 /*
2 * Copyright (C) 2006-2010 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <utils/hashtable.h>
43 #include <processing/jobs/callback_job.h>
44
45 /** required for Linux 2.6.26 kernel and later */
46 #ifndef XFRM_STATE_AF_UNSPEC
47 #define XFRM_STATE_AF_UNSPEC 32
48 #endif
49
50 /** from linux/in.h */
51 #ifndef IP_XFRM_POLICY
52 #define IP_XFRM_POLICY 17
53 #endif
54
55 /* missing on uclibc */
56 #ifndef IPV6_XFRM_POLICY
57 #define IPV6_XFRM_POLICY 34
58 #endif /*IPV6_XFRM_POLICY*/
59
60 /** default priority of installed policies */
61 #define PRIO_LOW 1024
62 #define PRIO_HIGH 512
63
64 /**
65 * map the limit for bytes and packets to XFRM_INF per default
66 */
67 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
68
69 /**
70 * Create ORable bitfield of XFRM NL groups
71 */
72 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
73
74 /**
75 * returns a pointer to the first rtattr following the nlmsghdr *nlh and the
76 * 'usual' netlink data x like 'struct xfrm_usersa_info'
77 */
78 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + NLMSG_ALIGN(sizeof(x))))
79 /**
80 * returns a pointer to the next rtattr following rta.
81 * !!! do not use this to parse messages. use RTA_NEXT and RTA_OK instead !!!
82 */
83 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + RTA_ALIGN((rta)->rta_len)))
84 /**
85 * returns the total size of attached rta data
86 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
87 */
88 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
89
typedef struct kernel_algorithm_t kernel_algorithm_t;

/**
 * Mapping of an IKEv2 algorithm identifier to its Linux crypto API name.
 * Used as entry type for the lookup tables below.
 */
struct kernel_algorithm_t {
	/**
	 * Identifier specified in IKEv2 (value from the respective
	 * strongSwan algorithm enum)
	 */
	int ikev2;

	/**
	 * Name of the algorithm in the Linux crypto API
	 */
	char *name;
};
106
/**
 * String names for XFRM netlink message types, for logging.
 * Entries must stay in the kernel's XFRM_MSG_* numbering order.
 */
ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
	"XFRM_MSG_NEWSA",
	"XFRM_MSG_DELSA",
	"XFRM_MSG_GETSA",
	"XFRM_MSG_NEWPOLICY",
	"XFRM_MSG_DELPOLICY",
	"XFRM_MSG_GETPOLICY",
	"XFRM_MSG_ALLOCSPI",
	"XFRM_MSG_ACQUIRE",
	"XFRM_MSG_EXPIRE",
	"XFRM_MSG_UPDPOLICY",
	"XFRM_MSG_UPDSA",
	"XFRM_MSG_POLEXPIRE",
	"XFRM_MSG_FLUSHSA",
	"XFRM_MSG_FLUSHPOLICY",
	"XFRM_MSG_NEWAE",
	"XFRM_MSG_GETAE",
	"XFRM_MSG_REPORT",
	"XFRM_MSG_MIGRATE",
	"XFRM_MSG_NEWSADINFO",
	"XFRM_MSG_GETSADINFO",
	"XFRM_MSG_NEWSPDINFO",
	"XFRM_MSG_GETSPDINFO",
	"XFRM_MSG_MAPPING"
);
132
/**
 * String names for XFRM netlink attribute types, for logging.
 * Entries must stay in the kernel's XFRMA_* numbering order.
 */
ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_KMADDRESS,
	"XFRMA_UNSPEC",
	"XFRMA_ALG_AUTH",
	"XFRMA_ALG_CRYPT",
	"XFRMA_ALG_COMP",
	"XFRMA_ENCAP",
	"XFRMA_TMPL",
	"XFRMA_SA",
	"XFRMA_POLICY",
	"XFRMA_SEC_CTX",
	"XFRMA_LTIME_VAL",
	"XFRMA_REPLAY_VAL",
	"XFRMA_REPLAY_THRESH",
	"XFRMA_ETIMER_THRESH",
	"XFRMA_SRCADDR",
	"XFRMA_COADDR",
	"XFRMA_LASTUSED",
	"XFRMA_POLICY_TYPE",
	"XFRMA_MIGRATE",
	"XFRMA_ALG_AEAD",
	"XFRMA_KMADDRESS"
);
155
156 #define END_OF_LIST -1
157
/**
 * Algorithms for encryption.
 * Commented-out entries have no Linux crypto API counterpart; the table
 * is terminated by an END_OF_LIST entry (see lookup_algorithm()).
 */
static kernel_algorithm_t encryption_algs[] = {
/*	{ENCR_DES_IV64,				"***"				}, */
	{ENCR_DES,					"des"				},
	{ENCR_3DES,					"des3_ede"			},
/*	{ENCR_RC5,					"***"				}, */
/*	{ENCR_IDEA,					"***"				}, */
	{ENCR_CAST,					"cast128"			},
	{ENCR_BLOWFISH,				"blowfish"			},
/*	{ENCR_3IDEA,				"***"				}, */
/*	{ENCR_DES_IV32,				"***"				}, */
	{ENCR_NULL,					"cipher_null"		},
	{ENCR_AES_CBC,				"aes"				},
	{ENCR_AES_CTR,				"rfc3686(ctr(aes))"	},
	/* CCM/GCM variants share one crypto API name; the ICV length is
	 * passed separately via xfrm_algo_aead in add_sa() */
	{ENCR_AES_CCM_ICV8,			"rfc4309(ccm(aes))"	},
	{ENCR_AES_CCM_ICV12,		"rfc4309(ccm(aes))"	},
	{ENCR_AES_CCM_ICV16,		"rfc4309(ccm(aes))"	},
	{ENCR_AES_GCM_ICV8,			"rfc4106(gcm(aes))"	},
	{ENCR_AES_GCM_ICV12,		"rfc4106(gcm(aes))"	},
	{ENCR_AES_GCM_ICV16,		"rfc4106(gcm(aes))"	},
	{ENCR_NULL_AUTH_AES_GMAC,	"rfc4543(gcm(aes))"	},
	{ENCR_CAMELLIA_CBC,			"cbc(camellia)"		},
/*	{ENCR_CAMELLIA_CTR,			"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV8,	"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV12,	"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV16,	"***"				}, */
	{ENCR_SERPENT_CBC,			"serpent"			},
	{ENCR_TWOFISH_CBC,			"twofish"			},
	{END_OF_LIST,				NULL				}
};
190
/**
 * Algorithms for integrity protection.
 * NOTE(review): the bare names ("md5", "sha256") rely on the kernel's
 * default truncation for that hash (e.g. 96 bits for SHA-256, see the
 * XFRMA_ALG_AUTH_TRUNC handling in add_sa()); the explicit "hmac(...)"
 * names are used where a non-default truncation is requested — confirm
 * against the target kernel version.
 */
static kernel_algorithm_t integrity_algs[] = {
	{AUTH_HMAC_MD5_96,			"md5"				},
	{AUTH_HMAC_SHA1_96,			"sha1"				},
	{AUTH_HMAC_SHA2_256_96,		"sha256"			},
	{AUTH_HMAC_SHA2_256_128,	"hmac(sha256)"		},
	{AUTH_HMAC_SHA2_384_192,	"hmac(sha384)"		},
	{AUTH_HMAC_SHA2_512_256,	"hmac(sha512)"		},
/*	{AUTH_DES_MAC,				"***"				}, */
/*	{AUTH_KPDK_MD5,				"***"				}, */
	{AUTH_AES_XCBC_96,			"xcbc(aes)"			},
	{END_OF_LIST,				NULL				}
};
206
/**
 * Algorithms for IPComp, terminated by an END_OF_LIST entry.
 */
static kernel_algorithm_t compression_algs[] = {
/*	{IPCOMP_OUI,			"***"		}, */
	{IPCOMP_DEFLATE,		"deflate"	},
	{IPCOMP_LZS,			"lzs"		},
	{IPCOMP_LZJH,			"lzjh"		},
	{END_OF_LIST,			NULL		}
};
217
218 /**
219 * Look up a kernel algorithm name and its key size
220 */
221 static char* lookup_algorithm(kernel_algorithm_t *list, int ikev2)
222 {
223 while (list->ikev2 != END_OF_LIST)
224 {
225 if (list->ikev2 == ikev2)
226 {
227 return list->name;
228 }
229 list++;
230 }
231 return NULL;
232 }
233
typedef struct route_entry_t route_entry_t;

/**
 * Routing entry installed alongside a policy; owned by policy_entry_t.route
 * and released via route_entry_destroy().
 */
struct route_entry_t {
	/** Name of the interface the route is bound to (malloc'd) */
	char *if_name;

	/** Source ip of the route */
	host_t *src_ip;

	/** gateway for this route, may be NULL */
	host_t *gateway;

	/** Destination net */
	chunk_t dst_net;

	/** Destination net prefixlen */
	u_int8_t prefixlen;
};
255
/**
 * Destroy a route_entry_t object, releasing everything it owns.
 */
static void route_entry_destroy(route_entry_t *this)
{
	free(this->if_name);
	this->src_ip->destroy(this->src_ip);
	/* gateway is optional, may be NULL */
	DESTROY_IF(this->gateway);
	chunk_free(&this->dst_net);
	free(this);
}
267
typedef struct policy_entry_t policy_entry_t;

/**
 * Installed kernel policy.
 *
 * NOTE(review): policy_hash()/policy_equals() treat 'sel' and the
 * immediately following 'mark' as one contiguous key (sizeof(sel) +
 * sizeof(u_int32_t)); keep these two members adjacent.
 */
struct policy_entry_t {

	/** direction of this policy: in, out, forward */
	u_int8_t direction;

	/** parameters of installed policy */
	struct xfrm_selector sel;

	/** optional mark, hashed/compared together with sel */
	u_int32_t mark;

	/** associated route installed for this policy, may be NULL */
	route_entry_t *route;

	/** by how many CHILD_SA's this policy is used */
	u_int refcount;
};
290
/**
 * Hash function for policy_entry_t objects.
 *
 * Hashes the selector plus the mark in one pass: the chunk spans 'sel' and
 * the sizeof(u_int32_t) bytes that follow it, i.e. the 'mark' member.
 * NOTE(review): assumes no padding between 'sel' and 'mark' — confirm if
 * policy_entry_t is reordered.
 */
static u_int policy_hash(policy_entry_t *key)
{
	chunk_t chunk = chunk_create((void*)&key->sel,
							sizeof(struct xfrm_selector) + sizeof(u_int32_t));
	return chunk_hash(chunk);
}
300
/**
 * Equality function for policy_entry_t objects.
 *
 * Compares selector plus mark (same memory span as policy_hash()) and the
 * policy direction; 'route' and 'refcount' are not part of the key.
 */
static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
{
	return memeq(&key->sel, &other_key->sel,
				 sizeof(struct xfrm_selector) + sizeof(u_int32_t)) &&
		   key->direction == other_key->direction;
}
310
typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;

/**
 * Private variables and functions of kernel_netlink class.
 */
struct private_kernel_netlink_ipsec_t {
	/**
	 * Public part of the kernel_netlink_t object.
	 */
	kernel_netlink_ipsec_t public;

	/**
	 * mutex to lock access to various lists (e.g. the policies table)
	 */
	mutex_t *mutex;

	/**
	 * Hash table of installed policies (policy_entry_t), keyed by
	 * selector/mark/direction (see policy_hash/policy_equals)
	 */
	hashtable_t *policies;

	/**
	 * job receiving netlink events (runs receive_events())
	 */
	callback_job_t *job;

	/**
	 * Netlink xfrm socket (IPsec) for request/reply exchanges
	 */
	netlink_socket_t *socket_xfrm;

	/**
	 * netlink xfrm socket to receive acquire and expire events
	 */
	int socket_xfrm_events;

	/**
	 * whether to install routes along policies
	 */
	bool install_routes;
};
352
353 /**
354 * convert the general ipsec mode to the one defined in xfrm.h
355 */
356 static u_int8_t mode2kernel(ipsec_mode_t mode)
357 {
358 switch (mode)
359 {
360 case MODE_TRANSPORT:
361 return XFRM_MODE_TRANSPORT;
362 case MODE_TUNNEL:
363 return XFRM_MODE_TUNNEL;
364 case MODE_BEET:
365 return XFRM_MODE_BEET;
366 default:
367 return mode;
368 }
369 }
370
371 /**
372 * convert a host_t to a struct xfrm_address
373 */
374 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
375 {
376 chunk_t chunk = host->get_address(host);
377 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
378 }
379
380 /**
381 * convert a struct xfrm_address to a host_t
382 */
383 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
384 {
385 chunk_t chunk;
386
387 switch (family)
388 {
389 case AF_INET:
390 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
391 break;
392 case AF_INET6:
393 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
394 break;
395 default:
396 return NULL;
397 }
398 return host_create_from_chunk(family, chunk, ntohs(port));
399 }
400
/**
 * Convert a traffic selector address range to subnet and its mask.
 * Writes net_chunk.len bytes (4 for IPv4, 16 for IPv6) into *net;
 * the prefix length goes to *mask via ts->to_subnet().
 */
static void ts2subnet(traffic_selector_t* ts,
					  xfrm_address_t *net, u_int8_t *mask)
{
	host_t *net_host;
	chunk_t net_chunk;

	ts->to_subnet(ts, &net_host, mask);
	net_chunk = net_host->get_address(net_host);
	memcpy(net, net_chunk.ptr, net_chunk.len);
	net_host->destroy(net_host);
}
415
416 /**
417 * convert a traffic selector port range to port/portmask
418 */
419 static void ts2ports(traffic_selector_t* ts,
420 u_int16_t *port, u_int16_t *mask)
421 {
422 /* linux does not seem to accept complex portmasks. Only
423 * any or a specific port is allowed. We set to any, if we have
424 * a port range, or to a specific, if we have one port only.
425 */
426 u_int16_t from, to;
427
428 from = ts->get_from_port(ts);
429 to = ts->get_to_port(ts);
430
431 if (from == to)
432 {
433 *port = htons(from);
434 *mask = ~0;
435 }
436 else
437 {
438 *port = 0;
439 *mask = 0;
440 }
441 }
442
/**
 * Convert a pair of traffic_selectors to a xfrm_selector.
 * The address family is taken from the source selector; assumes src and
 * dst are of the same family.
 */
static struct xfrm_selector ts2selector(traffic_selector_t *src,
										traffic_selector_t *dst)
{
	struct xfrm_selector sel;

	memset(&sel, 0, sizeof(sel));
	sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
	/* src or dest proto may be "any" (0), use more restrictive one */
	sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
	ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
	ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
	ts2ports(dst, &sel.dport, &sel.dport_mask);
	ts2ports(src, &sel.sport, &sel.sport_mask);
	/* match on any interface/user */
	sel.ifindex = 0;
	sel.user = 0;

	return sel;
}
464
/**
 * Convert a xfrm_selector to a src|dst traffic_selector.
 *
 * @param sel	kernel selector to convert
 * @param src	TRUE to extract the source side, FALSE for the destination
 * @return		allocated traffic_selector_t, or NULL if the address family
 *				cannot be determined
 */
static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
{
	u_char *addr;
	u_int8_t prefixlen;
	u_int16_t port = 0;
	host_t *host = NULL;

	if (src)
	{
		addr = (u_char*)&sel->saddr;
		prefixlen = sel->prefixlen_s;
		/* a non-zero mask means a single port was selected */
		if (sel->sport_mask)
		{
			port = htons(sel->sport);
		}
	}
	else
	{
		addr = (u_char*)&sel->daddr;
		prefixlen = sel->prefixlen_d;
		if (sel->dport_mask)
		{
			port = htons(sel->dport);
		}
	}

	/* The Linux 2.6 kernel does not set the selector's family field,
	 * so as a kludge we additionally test the prefix length.
	 * NOTE(review): the fallback checks prefixlen_s even when converting
	 * the dst side — relies on src/dst sharing the address family. */
	if (sel->family == AF_INET || sel->prefixlen_s == 32)
	{
		host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
	}
	else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
	{
		host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
	}

	if (host)
	{
		/* host ownership passes to the created traffic selector */
		return traffic_selector_create_from_subnet(host, prefixlen,
												   sel->proto, port);
	}
	return NULL;
}
513
/**
 * Process a XFRM_MSG_ACQUIRE from kernel: the kernel requests an SA for a
 * policy without one. Extracts reqid and traffic selectors and forwards the
 * acquire to the kernel interface (which takes ownership of the selectors).
 */
static void process_acquire(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
{
	u_int32_t reqid = 0;
	int proto = 0;
	traffic_selector_t *src_ts, *dst_ts;
	struct xfrm_user_acquire *acquire;
	struct rtattr *rta;
	size_t rtasize;

	acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
	rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
	rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);

	DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");

	/* scan the attributes for the policy template holding reqid/proto */
	while (RTA_OK(rta, rtasize))
	{
		DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);

		if (rta->rta_type == XFRMA_TMPL)
		{
			struct xfrm_user_tmpl* tmpl;

			tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
			reqid = tmpl->reqid;
			proto = tmpl->id.proto;
		}
		rta = RTA_NEXT(rta, rtasize);
	}
	switch (proto)
	{
		case 0:
		case IPPROTO_ESP:
		case IPPROTO_AH:
			break;
		default:
			/* acquire for AH/ESP only, not for IPCOMP */
			return;
	}
	src_ts = selector2ts(&acquire->sel, TRUE);
	dst_ts = selector2ts(&acquire->sel, FALSE);

	hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
									 dst_ts);
}
562
/**
 * Process a XFRM_MSG_EXPIRE from kernel: an SA hit its soft (rekey) or
 * hard (delete) lifetime limit; notify the kernel interface accordingly.
 */
static void process_expire(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
{
	u_int8_t protocol;
	u_int32_t spi, reqid;
	struct xfrm_user_expire *expire;

	expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
	protocol = expire->state.id.proto;
	spi = expire->state.id.spi;
	reqid = expire->state.reqid;

	DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");

	/* only ESP/AH SAs belong to a CHILD_SA; ignore e.g. IPComp states */
	if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
	{
		DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
					  "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
		return;
	}

	/* expire->hard distinguishes hard (delete) from soft (rekey) expiry */
	hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
									spi, expire->hard != 0);
}
589
590 /**
591 * process a XFRM_MSG_MIGRATE from kernel
592 */
593 static void process_migrate(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
594 {
595 traffic_selector_t *src_ts, *dst_ts;
596 host_t *local = NULL, *remote = NULL;
597 host_t *old_src = NULL, *old_dst = NULL;
598 host_t *new_src = NULL, *new_dst = NULL;
599 struct xfrm_userpolicy_id *policy_id;
600 struct rtattr *rta;
601 size_t rtasize;
602 u_int32_t reqid = 0;
603 policy_dir_t dir;
604
605 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
606 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
607 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
608
609 DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");
610
611 src_ts = selector2ts(&policy_id->sel, TRUE);
612 dst_ts = selector2ts(&policy_id->sel, FALSE);
613 dir = (policy_dir_t)policy_id->dir;
614
615 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);
616
617 while (RTA_OK(rta, rtasize))
618 {
619 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
620 if (rta->rta_type == XFRMA_KMADDRESS)
621 {
622 struct xfrm_user_kmaddress *kmaddress;
623
624 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
625 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
626 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
627 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
628 }
629 else if (rta->rta_type == XFRMA_MIGRATE)
630 {
631 struct xfrm_user_migrate *migrate;
632
633 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
634 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
635 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
636 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
637 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
638 reqid = migrate->reqid;
639 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
640 old_src, old_dst, new_src, new_dst, reqid);
641 DESTROY_IF(old_src);
642 DESTROY_IF(old_dst);
643 DESTROY_IF(new_src);
644 DESTROY_IF(new_dst);
645 }
646 rta = RTA_NEXT(rta, rtasize);
647 }
648
649 if (src_ts && dst_ts && local && remote)
650 {
651 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
652 src_ts, dst_ts, dir, local, remote);
653 }
654 else
655 {
656 DESTROY_IF(src_ts);
657 DESTROY_IF(dst_ts);
658 DESTROY_IF(local);
659 DESTROY_IF(remote);
660 }
661 }
662
/**
 * Process a XFRM_MSG_MAPPING from kernel: a NAT mapping change was detected
 * for a UDP-encapsulated ESP SA; notify the kernel interface with the new
 * source address/port so the SA endpoints can be updated.
 */
static void process_mapping(private_kernel_netlink_ipsec_t *this,
							struct nlmsghdr *hdr)
{
	u_int32_t spi, reqid;
	struct xfrm_user_mapping *mapping;
	host_t *host;

	mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
	spi = mapping->id.spi;
	reqid = mapping->reqid;

	DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");

	/* mapping changes are only relevant for ESP (UDP encapsulation) */
	if (mapping->id.proto == IPPROTO_ESP)
	{
		host = xfrm2host(mapping->id.family, &mapping->new_saddr,
						 mapping->new_sport);
		if (host)
		{
			/* mapping() takes ownership of host */
			hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
											 spi, host);
		}
	}
}
690
/**
 * Receives events from kernel.
 *
 * Runs as a callback job: blocks on the event socket (cancelable only
 * during recvfrom()), dispatches each netlink message to the matching
 * process_*() handler and requeues itself.
 */
static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
{
	char response[1024];
	struct nlmsghdr *hdr = (struct nlmsghdr*)response;
	struct sockaddr_nl addr;
	socklen_t addr_len = sizeof(addr);
	int len;
	bool oldstate;

	/* enable thread cancellation only while blocked in recvfrom() */
	oldstate = thread_cancelability(TRUE);
	len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
				   (struct sockaddr*)&addr, &addr_len);
	thread_cancelability(oldstate);

	if (len < 0)
	{
		switch (errno)
		{
			case EINTR:
				/* interrupted, try again */
				return JOB_REQUEUE_DIRECT;
			case EAGAIN:
				/* no data ready, select again */
				return JOB_REQUEUE_DIRECT;
			default:
				DBG1(DBG_KNL, "unable to receive from xfrm event socket");
				/* avoid busy-looping on persistent errors */
				sleep(1);
				return JOB_REQUEUE_FAIR;
		}
	}

	/* nl_pid == 0 identifies the kernel as sender */
	if (addr.nl_pid != 0)
	{ /* not from kernel. not interested, try another one */
		return JOB_REQUEUE_DIRECT;
	}

	/* a single datagram may carry multiple netlink messages */
	while (NLMSG_OK(hdr, len))
	{
		switch (hdr->nlmsg_type)
		{
			case XFRM_MSG_ACQUIRE:
				process_acquire(this, hdr);
				break;
			case XFRM_MSG_EXPIRE:
				process_expire(this, hdr);
				break;
			case XFRM_MSG_MIGRATE:
				process_migrate(this, hdr);
				break;
			case XFRM_MSG_MAPPING:
				process_mapping(this, hdr);
				break;
			default:
				DBG1(DBG_KNL, "received unknown event from xfrm event socket: %d", hdr->nlmsg_type);
				break;
		}
		hdr = NLMSG_NEXT(hdr, len);
	}
	return JOB_REQUEUE_DIRECT;
}
754
/**
 * Get an SPI for a specific protocol from the kernel.
 *
 * Sends an XFRM_MSG_ALLOCSPI request; the kernel allocates an SPI in
 * [min, max] and creates a larval SA for it, answering with XFRM_MSG_NEWSA.
 *
 * @param src		source address of the SA
 * @param dst		destination address of the SA
 * @param proto		IP protocol of the SA (ESP/AH/COMP)
 * @param min		lower bound of the SPI range (host order)
 * @param max		upper bound of the SPI range (host order)
 * @param reqid		reqid to associate with the larval SA
 * @param spi		receives the allocated SPI (network order)
 * @return			SUCCESS, or FAILED if no SPI was received
 */
static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
	host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
	u_int32_t reqid, u_int32_t *spi)
{
	netlink_buf_t request;
	struct nlmsghdr *hdr, *out;
	struct xfrm_userspi_info *userspi;
	u_int32_t received_spi = 0;
	size_t len;

	memset(&request, 0, sizeof(request));

	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST;
	hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));

	userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
	host2xfrm(src, &userspi->info.saddr);
	host2xfrm(dst, &userspi->info.id.daddr);
	userspi->info.id.proto = proto;
	userspi->info.mode = XFRM_MODE_TUNNEL;
	userspi->info.reqid = reqid;
	userspi->info.family = src->get_family(src);
	userspi->min = min;
	userspi->max = max;

	if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
	{
		hdr = out;
		/* walk the reply: a NEWSA carries the allocated SPI, an error is
		 * logged; any other message type is skipped via the default case */
		while (NLMSG_OK(hdr, len))
		{
			switch (hdr->nlmsg_type)
			{
				case XFRM_MSG_NEWSA:
				{
					struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
					received_spi = usersa->id.spi;
					break;
				}
				case NLMSG_ERROR:
				{
					struct nlmsgerr *err = NLMSG_DATA(hdr);

					DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
						 strerror(-err->error), -err->error);
					break;
				}
				default:
					/* skip unrelated messages and keep scanning */
					hdr = NLMSG_NEXT(hdr, len);
					continue;
				case NLMSG_DONE:
					break;
			}
			/* NEWSA, ERROR and DONE all terminate the scan */
			break;
		}
		free(out);
	}

	if (received_spi == 0)
	{
		return FAILED;
	}

	*spi = received_spi;
	return SUCCESS;
}
825
826 METHOD(kernel_ipsec_t, get_spi, status_t,
827 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
828 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
829 {
830 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
831
832 if (get_spi_internal(this, src, dst, protocol,
833 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
834 {
835 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
836 return FAILED;
837 }
838
839 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
840
841 return SUCCESS;
842 }
843
/**
 * Get a CPI for IPComp: allocates an SPI in the 16-bit CPI range via
 * get_spi_internal() and converts its low 16 bits to a network-order CPI.
 */
METHOD(kernel_ipsec_t, get_cpi, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	u_int32_t reqid, u_int16_t *cpi)
{
	u_int32_t received_spi = 0;

	DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);

	/* CPIs are 16 bit; 0x100-0xEFFF leaves room below the well-known range */
	if (get_spi_internal(this, src, dst,
			IPPROTO_COMP, 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
	{
		DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
		return FAILED;
	}

	*cpi = htons((u_int16_t)ntohl(received_spi));

	DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);

	return SUCCESS;
}
865
/**
 * Install an SA in the kernel SAD via XFRM_MSG_NEWSA (outbound) or
 * XFRM_MSG_UPDSA (inbound, updates the larval SA created by get_spi).
 *
 * The request is assembled incrementally: the fixed xfrm_usersa_info is
 * followed by optional rtattrs (AEAD/crypt/auth algorithms, IPComp, UDP
 * encapsulation, mark, TFC padding). Each attribute append checks that the
 * message still fits into the request buffer.
 *
 * For IPComp SAs (ipcomp != IPCOMP_NONE, cpi != 0) the function first
 * recurses to install the IPComp SA in the requested mode, then installs
 * the ESP/AH SA in transport mode on top of it.
 */
METHOD(kernel_ipsec_t, add_sa, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
	u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
	u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
	u_int16_t cpi, bool encap, bool inbound,
	traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
{
	netlink_buf_t request;
	char *alg_name;
	struct nlmsghdr *hdr;
	struct xfrm_usersa_info *sa;
	/* ICV length in bits; 64 for the *_ICV8 AEAD variants, incremented
	 * by the fallthrough cases below for ICV12/ICV16 */
	u_int16_t icv_size = 64;

	/* if IPComp is used, we install an additional IPComp SA. if the cpi is 0
	 * we are in the recursive call below */
	if (ipcomp != IPCOMP_NONE && cpi != 0)
	{
		lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
		add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark, tfc,
			   &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED, chunk_empty,
			   mode, ipcomp, 0, FALSE, inbound, NULL, NULL);
		ipcomp = IPCOMP_NONE;
		/* use transport mode ESP SA, IPComp uses tunnel mode */
		mode = MODE_TRANSPORT;
	}

	memset(&request, 0, sizeof(request));

	if (mark.value)
	{
		DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} "
					  "(mark %u/0x%8x)", ntohl(spi), reqid, mark.value, mark.mask);
	}
	else
	{
		DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u}",
					  ntohl(spi), reqid);
	}
	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	/* UPDSA replaces the larval SA allocated for the inbound SPI */
	hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));

	sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
	host2xfrm(src, &sa->saddr);
	host2xfrm(dst, &sa->id.daddr);
	sa->id.spi = spi;
	sa->id.proto = protocol;
	sa->family = src->get_family(src);
	sa->mode = mode2kernel(mode);
	switch (mode)
	{
		case MODE_TUNNEL:
			/* allow IPv4-in-IPv6 and vice versa on this SA */
			sa->flags |= XFRM_STATE_AF_UNSPEC;
			break;
		case MODE_BEET:
		case MODE_TRANSPORT:
			/* restrict the SA to the negotiated selectors */
			if(src_ts && dst_ts)
			{
				sa->sel = ts2selector(src_ts, dst_ts);
			}
			break;
		default:
			break;
	}

	/* IPComp has no sequence numbers, so no replay window */
	sa->replay_window = (protocol == IPPROTO_COMP) ? 0 : 32;
	sa->reqid = reqid;
	sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
	sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
	sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
	sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
	/* we use lifetimes since added, not since used */
	sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
	sa->lft.hard_add_expires_seconds = lifetime->time.life;
	sa->lft.soft_use_expires_seconds = 0;
	sa->lft.hard_use_expires_seconds = 0;

	/* rthdr walks the attribute area after the fixed usersa_info */
	struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);

	switch (enc_alg)
	{
		case ENCR_UNDEFINED:
			/* no encryption */
			break;
		case ENCR_AES_CCM_ICV16:
		case ENCR_AES_GCM_ICV16:
		case ENCR_NULL_AUTH_AES_GMAC:
		case ENCR_CAMELLIA_CCM_ICV16:
			icv_size += 32;
			/* FALL */
		case ENCR_AES_CCM_ICV12:
		case ENCR_AES_GCM_ICV12:
		case ENCR_CAMELLIA_CCM_ICV12:
			icv_size += 32;
			/* FALL */
		case ENCR_AES_CCM_ICV8:
		case ENCR_AES_GCM_ICV8:
		case ENCR_CAMELLIA_CCM_ICV8:
		{
			/* combined-mode (AEAD) cipher: key and ICV length go into
			 * a single XFRMA_ALG_AEAD attribute */
			struct xfrm_algo_aead *algo;

			alg_name = lookup_algorithm(encryption_algs, enc_alg);
			if (alg_name == NULL)
			{
				DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
						 encryption_algorithm_names, enc_alg);
				return FAILED;
			}
			DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
				 encryption_algorithm_names, enc_alg, enc_key.len * 8);

			rthdr->rta_type = XFRMA_ALG_AEAD;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) + enc_key.len);
			hdr->nlmsg_len += rthdr->rta_len;
			if (hdr->nlmsg_len > sizeof(request))
			{
				return FAILED;
			}

			algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
			algo->alg_key_len = enc_key.len * 8;
			algo->alg_icv_len = icv_size;
			strcpy(algo->alg_name, alg_name);
			memcpy(algo->alg_key, enc_key.ptr, enc_key.len);

			rthdr = XFRM_RTA_NEXT(rthdr);
			break;
		}
		default:
		{
			/* regular cipher via XFRMA_ALG_CRYPT */
			struct xfrm_algo *algo;

			alg_name = lookup_algorithm(encryption_algs, enc_alg);
			if (alg_name == NULL)
			{
				DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
						 encryption_algorithm_names, enc_alg);
				return FAILED;
			}
			DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
				 encryption_algorithm_names, enc_alg, enc_key.len * 8);

			rthdr->rta_type = XFRMA_ALG_CRYPT;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
			hdr->nlmsg_len += rthdr->rta_len;
			if (hdr->nlmsg_len > sizeof(request))
			{
				return FAILED;
			}

			algo = (struct xfrm_algo*)RTA_DATA(rthdr);
			algo->alg_key_len = enc_key.len * 8;
			strcpy(algo->alg_name, alg_name);
			memcpy(algo->alg_key, enc_key.ptr, enc_key.len);

			rthdr = XFRM_RTA_NEXT(rthdr);
		}
	}

	if (int_alg != AUTH_UNDEFINED)
	{
		alg_name = lookup_algorithm(integrity_algs, int_alg);
		if (alg_name == NULL)
		{
			DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
					 integrity_algorithm_names, int_alg);
			return FAILED;
		}
		DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
			 integrity_algorithm_names, int_alg, int_key.len * 8);

		if (int_alg == AUTH_HMAC_SHA2_256_128)
		{
			struct xfrm_algo_auth* algo;

			/* the kernel uses SHA256 with 96 bit truncation by default,
			 * use specified truncation size supported by newer kernels */
			rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) + int_key.len);

			hdr->nlmsg_len += rthdr->rta_len;
			if (hdr->nlmsg_len > sizeof(request))
			{
				return FAILED;
			}

			algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
			algo->alg_key_len = int_key.len * 8;
			algo->alg_trunc_len = 128;
			strcpy(algo->alg_name, alg_name);
			memcpy(algo->alg_key, int_key.ptr, int_key.len);
		}
		else
		{
			struct xfrm_algo* algo;

			rthdr->rta_type = XFRMA_ALG_AUTH;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);

			hdr->nlmsg_len += rthdr->rta_len;
			if (hdr->nlmsg_len > sizeof(request))
			{
				return FAILED;
			}

			algo = (struct xfrm_algo*)RTA_DATA(rthdr);
			algo->alg_key_len = int_key.len * 8;
			strcpy(algo->alg_name, alg_name);
			memcpy(algo->alg_key, int_key.ptr, int_key.len);
		}
		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (ipcomp != IPCOMP_NONE)
	{
		rthdr->rta_type = XFRMA_ALG_COMP;
		alg_name = lookup_algorithm(compression_algs, ipcomp);
		if (alg_name == NULL)
		{
			DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
					 ipcomp_transform_names, ipcomp);
			return FAILED;
		}
		DBG2(DBG_KNL, " using compression algorithm %N",
			 ipcomp_transform_names, ipcomp);

		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		/* compression has no key */
		struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
		algo->alg_key_len = 0;
		strcpy(algo->alg_name, alg_name);

		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (encap)
	{
		/* UDP encapsulation for NAT traversal (ESP-in-UDP) */
		struct xfrm_encap_tmpl *tmpl;

		rthdr->rta_type = XFRMA_ENCAP;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));

		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
		tmpl->encap_type = UDP_ENCAP_ESPINUDP;
		tmpl->encap_sport = htons(src->get_port(src));
		tmpl->encap_dport = htons(dst->get_port(dst));
		memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
		/* encap_oa could probably be derived from the
		 * traffic selectors [rfc4306, p39]. In the netlink kernel implementation
		 * pluto does the same as we do here but it uses encap_oa in the
		 * pfkey implementation. BUT as /usr/src/linux/net/key/af_key.c indicates
		 * the kernel ignores it anyway
		 *   -> does that mean that NAT-T encap doesn't work in transport mode?
		 * No. The reason the kernel ignores NAT-OA is that it recomputes
		 * (or, rather, just ignores) the checksum. If packets pass
		 * the IPsec checks it marks them "checksum ok" so OA isn't needed. */
		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (mark.value)
	{
		struct xfrm_mark *mrk;

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));

		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (tfc)
	{
		/* Traffic Flow Confidentiality: pad outbound packets to tfc bytes */
		u_int32_t *tfcpad;

		rthdr->rta_type = XFRMA_TFCPAD;
		rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));

		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		tfcpad = (u_int32_t*)RTA_DATA(rthdr);
		*tfcpad = tfc;
		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
	{
		if (mark.value)
		{
			DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
						  "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
		}
		else
		{
			DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
		}
		return FAILED;
	}
	return SUCCESS;
}
1190
1191 /**
1192 * Get the replay state (i.e. sequence numbers) of an SA.
1193 */
1194 static status_t get_replay_state(private_kernel_netlink_ipsec_t *this,
1195 u_int32_t spi, u_int8_t protocol, host_t *dst,
1196 struct xfrm_replay_state *replay)
1197 {
1198 netlink_buf_t request;
1199 struct nlmsghdr *hdr, *out = NULL;
1200 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1201 size_t len;
1202 struct rtattr *rta;
1203 size_t rtasize;
1204
1205 memset(&request, 0, sizeof(request));
1206
1207 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x", ntohl(spi));
1208
1209 hdr = (struct nlmsghdr*)request;
1210 hdr->nlmsg_flags = NLM_F_REQUEST;
1211 hdr->nlmsg_type = XFRM_MSG_GETAE;
1212 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1213
1214 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1215 aevent_id->flags = XFRM_AE_RVAL;
1216
1217 host2xfrm(dst, &aevent_id->sa_id.daddr);
1218 aevent_id->sa_id.spi = spi;
1219 aevent_id->sa_id.proto = protocol;
1220 aevent_id->sa_id.family = dst->get_family(dst);
1221
1222 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1223 {
1224 hdr = out;
1225 while (NLMSG_OK(hdr, len))
1226 {
1227 switch (hdr->nlmsg_type)
1228 {
1229 case XFRM_MSG_NEWAE:
1230 {
1231 out_aevent = NLMSG_DATA(hdr);
1232 break;
1233 }
1234 case NLMSG_ERROR:
1235 {
1236 struct nlmsgerr *err = NLMSG_DATA(hdr);
1237 DBG1(DBG_KNL, "querying replay state from SAD entry failed: %s (%d)",
1238 strerror(-err->error), -err->error);
1239 break;
1240 }
1241 default:
1242 hdr = NLMSG_NEXT(hdr, len);
1243 continue;
1244 case NLMSG_DONE:
1245 break;
1246 }
1247 break;
1248 }
1249 }
1250
1251 if (out_aevent == NULL)
1252 {
1253 DBG1(DBG_KNL, "unable to query replay state from SAD entry with SPI %.8x",
1254 ntohl(spi));
1255 free(out);
1256 return FAILED;
1257 }
1258
1259 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1260 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1261 while(RTA_OK(rta, rtasize))
1262 {
1263 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1264 RTA_PAYLOAD(rta) == sizeof(struct xfrm_replay_state))
1265 {
1266 memcpy(replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1267 free(out);
1268 return SUCCESS;
1269 }
1270 rta = RTA_NEXT(rta, rtasize);
1271 }
1272
1273 DBG1(DBG_KNL, "unable to query replay state from SAD entry with SPI %.8x",
1274 ntohl(spi));
1275 free(out);
1276 return FAILED;
1277 }
1278
/**
 * Query the usage statistics of an SA.
 *
 * Sends an XFRM_MSG_GETSA request for the SA identified by spi/protocol/dst
 * (optionally constrained by an XFRMA_MARK attribute) and reports the byte
 * count from the kernel's current lifetime statistics.
 *
 * @param bytes		receives curlft.bytes of the SA on SUCCESS
 * @return			SUCCESS if the SA was found, FAILED otherwise
 */
METHOD(kernel_ipsec_t, query_sa, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
{
	netlink_buf_t request;
	struct nlmsghdr *out = NULL, *hdr;
	struct xfrm_usersa_id *sa_id;
	struct xfrm_usersa_info *sa = NULL;
	size_t len;

	memset(&request, 0, sizeof(request));

	if (mark.value)
	{
		DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%8x)",
					   ntohl(spi), mark.value, mark.mask);
	}
	else
	{
		DBG2(DBG_KNL, "querying SAD entry with SPI %.8x", ntohl(spi));
	}
	/* build the XFRM_MSG_GETSA request */
	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST;
	hdr->nlmsg_type = XFRM_MSG_GETSA;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));

	sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
	host2xfrm(dst, &sa_id->daddr);
	sa_id->spi = spi;
	sa_id->proto = protocol;
	sa_id->family = dst->get_family(dst);

	if (mark.value)
	{
		/* append the mark as an XFRMA_MARK attribute */
		struct xfrm_mark *mrk;
		struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{	/* request buffer too small to hold the attribute */
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
	{
		/* walk the response; only XFRM_MSG_NEWSA carries the SA info.
		 * "default" advances and continues, everything else exits the loop */
		hdr = out;
		while (NLMSG_OK(hdr, len))
		{
			switch (hdr->nlmsg_type)
			{
				case XFRM_MSG_NEWSA:
				{
					sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
					break;
				}
				case NLMSG_ERROR:
				{
					struct nlmsgerr *err = NLMSG_DATA(hdr);

					if (mark.value)
					{
						DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
									  "(mark %u/0x%8x) failed: %s (%d)",
									   ntohl(spi), mark.value, mark.mask,
									   strerror(-err->error), -err->error);
					}
					else
					{
						DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
									  "failed: %s (%d)", ntohl(spi),
									   strerror(-err->error), -err->error);
					}
					break;
				}
				default:
					hdr = NLMSG_NEXT(hdr, len);
					continue;
				case NLMSG_DONE:
					break;
			}
			break;
		}
	}

	if (sa == NULL)
	{
		DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
		free(out);
		return FAILED;
	}
	*bytes = sa->curlft.bytes;

	free(out);
	return SUCCESS;
}
1381
1382 METHOD(kernel_ipsec_t, del_sa, status_t,
1383 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1384 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1385 {
1386 netlink_buf_t request;
1387 struct nlmsghdr *hdr;
1388 struct xfrm_usersa_id *sa_id;
1389
1390 /* if IPComp was used, we first delete the additional IPComp SA */
1391 if (cpi)
1392 {
1393 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1394 }
1395
1396 memset(&request, 0, sizeof(request));
1397
1398 if (mark.value)
1399 {
1400 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%8x)",
1401 ntohl(spi), mark.value, mark.mask);
1402 }
1403 else
1404 {
1405 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x", ntohl(spi));
1406 }
1407 hdr = (struct nlmsghdr*)request;
1408 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1409 hdr->nlmsg_type = XFRM_MSG_DELSA;
1410 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1411
1412 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1413 host2xfrm(dst, &sa_id->daddr);
1414 sa_id->spi = spi;
1415 sa_id->proto = protocol;
1416 sa_id->family = dst->get_family(dst);
1417
1418 if (mark.value)
1419 {
1420 struct xfrm_mark *mrk;
1421 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1422
1423 rthdr->rta_type = XFRMA_MARK;
1424 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1425 hdr->nlmsg_len += rthdr->rta_len;
1426 if (hdr->nlmsg_len > sizeof(request))
1427 {
1428 return FAILED;
1429 }
1430
1431 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1432 mrk->v = mark.value;
1433 mrk->m = mark.mask;
1434 }
1435
1436 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1437 {
1438 if (mark.value)
1439 {
1440 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1441 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1442 }
1443 else
1444 {
1445 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x", ntohl(spi));
1446 }
1447 return FAILED;
1448 }
1449 if (mark.value)
1450 {
1451 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%8x)",
1452 ntohl(spi), mark.value, mark.mask);
1453 }
1454 else
1455 {
1456 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x", ntohl(spi));
1457 }
1458 return SUCCESS;
1459 }
1460
1461 METHOD(kernel_ipsec_t, update_sa, status_t,
1462 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1463 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1464 bool old_encap, bool new_encap, mark_t mark)
1465 {
1466 netlink_buf_t request;
1467 u_char *pos;
1468 struct nlmsghdr *hdr, *out = NULL;
1469 struct xfrm_usersa_id *sa_id;
1470 struct xfrm_usersa_info *out_sa = NULL, *sa;
1471 size_t len;
1472 struct rtattr *rta;
1473 size_t rtasize;
1474 struct xfrm_encap_tmpl* tmpl = NULL;
1475 bool got_replay_state = FALSE;
1476 struct xfrm_replay_state replay;
1477
1478 /* if IPComp is used, we first update the IPComp SA */
1479 if (cpi)
1480 {
1481 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1482 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1483 }
1484
1485 memset(&request, 0, sizeof(request));
1486
1487 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1488
1489 /* query the existing SA first */
1490 hdr = (struct nlmsghdr*)request;
1491 hdr->nlmsg_flags = NLM_F_REQUEST;
1492 hdr->nlmsg_type = XFRM_MSG_GETSA;
1493 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1494
1495 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1496 host2xfrm(dst, &sa_id->daddr);
1497 sa_id->spi = spi;
1498 sa_id->proto = protocol;
1499 sa_id->family = dst->get_family(dst);
1500
1501 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1502 {
1503 hdr = out;
1504 while (NLMSG_OK(hdr, len))
1505 {
1506 switch (hdr->nlmsg_type)
1507 {
1508 case XFRM_MSG_NEWSA:
1509 {
1510 out_sa = NLMSG_DATA(hdr);
1511 break;
1512 }
1513 case NLMSG_ERROR:
1514 {
1515 struct nlmsgerr *err = NLMSG_DATA(hdr);
1516 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1517 strerror(-err->error), -err->error);
1518 break;
1519 }
1520 default:
1521 hdr = NLMSG_NEXT(hdr, len);
1522 continue;
1523 case NLMSG_DONE:
1524 break;
1525 }
1526 break;
1527 }
1528 }
1529 if (out_sa == NULL)
1530 {
1531 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1532 free(out);
1533 return FAILED;
1534 }
1535
1536 /* try to get the replay state */
1537 if (get_replay_state(this, spi, protocol, dst, &replay) == SUCCESS)
1538 {
1539 got_replay_state = TRUE;
1540 }
1541
1542 /* delete the old SA (without affecting the IPComp SA) */
1543 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1544 {
1545 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x", ntohl(spi));
1546 free(out);
1547 return FAILED;
1548 }
1549
1550 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1551 ntohl(spi), src, dst, new_src, new_dst);
1552 /* copy over the SA from out to request */
1553 hdr = (struct nlmsghdr*)request;
1554 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1555 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1556 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1557 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1558 sa = NLMSG_DATA(hdr);
1559 sa->family = new_dst->get_family(new_dst);
1560
1561 if (!src->ip_equals(src, new_src))
1562 {
1563 host2xfrm(new_src, &sa->saddr);
1564 }
1565 if (!dst->ip_equals(dst, new_dst))
1566 {
1567 host2xfrm(new_dst, &sa->id.daddr);
1568 }
1569
1570 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1571 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1572 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1573 while(RTA_OK(rta, rtasize))
1574 {
1575 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1576 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1577 {
1578 if (rta->rta_type == XFRMA_ENCAP)
1579 { /* update encap tmpl */
1580 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1581 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1582 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1583 }
1584 memcpy(pos, rta, rta->rta_len);
1585 pos += RTA_ALIGN(rta->rta_len);
1586 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1587 }
1588 rta = RTA_NEXT(rta, rtasize);
1589 }
1590
1591 rta = (struct rtattr*)pos;
1592 if (tmpl == NULL && new_encap)
1593 { /* add tmpl if we are enabling it */
1594 rta->rta_type = XFRMA_ENCAP;
1595 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1596
1597 hdr->nlmsg_len += rta->rta_len;
1598 if (hdr->nlmsg_len > sizeof(request))
1599 {
1600 return FAILED;
1601 }
1602
1603 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1604 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1605 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1606 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1607 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1608
1609 rta = XFRM_RTA_NEXT(rta);
1610 }
1611
1612 if (got_replay_state)
1613 { /* copy the replay data if available */
1614 rta->rta_type = XFRMA_REPLAY_VAL;
1615 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
1616
1617 hdr->nlmsg_len += rta->rta_len;
1618 if (hdr->nlmsg_len > sizeof(request))
1619 {
1620 return FAILED;
1621 }
1622 memcpy(RTA_DATA(rta), &replay, sizeof(replay));
1623
1624 rta = XFRM_RTA_NEXT(rta);
1625 }
1626
1627 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1628 {
1629 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1630 free(out);
1631 return FAILED;
1632 }
1633 free(out);
1634
1635 return SUCCESS;
1636 }
1637
/**
 * Install (or update) a policy in the kernel SPD.
 *
 * Keeps a refcounted shadow entry in this->policies: if an exactly matching
 * policy already exists, only its refcount is increased and the kernel entry
 * is refreshed with XFRM_MSG_UPDPOLICY; otherwise a new entry is created and
 * installed with XFRM_MSG_NEWPOLICY. For IPsec policies an XFRMA_TMPL
 * attribute with one template per used protocol (IPComp/ESP/AH) is appended,
 * plus an optional XFRMA_MARK. For FWD policies in tunnel/BEET mode a source
 * route is installed (unless disabled via strongswan.conf).
 *
 * NOTE(review): on the "return FAILED" paths below, a freshly inserted
 * policy_entry_t stays in this->policies (and is not freed) — looks like a
 * leak/state inconsistency on these (buffer-overflow) error paths; verify
 * against upstream before changing.
 */
METHOD(kernel_ipsec_t, add_policy, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
	policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
	mark_t mark, bool routed)
{
	policy_entry_t *current, *policy;
	bool found = FALSE;
	netlink_buf_t request;
	struct xfrm_userpolicy_info *policy_info;
	struct nlmsghdr *hdr;
	int i;

	/* create a policy */
	policy = malloc_thing(policy_entry_t);
	memset(policy, 0, sizeof(policy_entry_t));
	policy->sel = ts2selector(src_ts, dst_ts);
	policy->mark = mark.value & mark.mask;
	policy->direction = direction;

	/* find the policy, which matches EXACTLY */
	this->mutex->lock(this->mutex);
	current = this->policies->get(this->policies, policy);
	if (current)
	{
		/* use existing policy */
		current->refcount++;
		if (mark.value)
		{
			DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%8x) "
						  "already exists, increasing refcount",
						   src_ts, dst_ts, policy_dir_names, direction,
						   mark.value, mark.mask);
		}
		else
		{
			DBG2(DBG_KNL, "policy %R === %R %N "
						  "already exists, increasing refcount",
						   src_ts, dst_ts, policy_dir_names, direction);
		}
		free(policy);
		policy = current;
		found = TRUE;
	}
	else
	{	/* apply the new one, if we have no such policy */
		this->policies->put(this->policies, policy, policy);
		policy->refcount = 1;
	}

	if (mark.value)
	{
		DBG2(DBG_KNL, "adding policy %R === %R %N (mark %u/0x%8x)",
					   src_ts, dst_ts, policy_dir_names, direction,
					   mark.value, mark.mask);
	}
	else
	{
		DBG2(DBG_KNL, "adding policy %R === %R %N",
					   src_ts, dst_ts, policy_dir_names, direction);
	}

	/* build the netlink request: UPD for existing, NEW for fresh policies */
	memset(&request, 0, sizeof(request));
	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	hdr->nlmsg_type = found ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));

	policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
	policy_info->sel = policy->sel;
	policy_info->dir = policy->direction;

	/* calculate priority based on selector size, small size = high prio */
	policy_info->priority = routed ? PRIO_LOW : PRIO_HIGH;
	policy_info->priority -= policy->sel.prefixlen_s;
	policy_info->priority -= policy->sel.prefixlen_d;
	policy_info->priority <<= 2; /* make some room for the two flags */
	policy_info->priority += policy->sel.sport_mask ||
							 policy->sel.dport_mask ? 0 : 2;
	policy_info->priority += policy->sel.proto ? 0 : 1;

	policy_info->action = type != POLICY_DROP ? XFRM_POLICY_ALLOW
											  : XFRM_POLICY_BLOCK;
	policy_info->share = XFRM_SHARE_ANY;
	this->mutex->unlock(this->mutex);

	/* policies don't expire */
	policy_info->lft.soft_byte_limit = XFRM_INF;
	policy_info->lft.soft_packet_limit = XFRM_INF;
	policy_info->lft.hard_byte_limit = XFRM_INF;
	policy_info->lft.hard_packet_limit = XFRM_INF;
	policy_info->lft.soft_add_expires_seconds = 0;
	policy_info->lft.hard_add_expires_seconds = 0;
	policy_info->lft.soft_use_expires_seconds = 0;
	policy_info->lft.hard_use_expires_seconds = 0;

	struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);

	if (type == POLICY_IPSEC)
	{
		/* one xfrm_user_tmpl per protocol in use, in the order
		 * IPComp -> ESP -> AH */
		struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
		struct {
			u_int8_t proto;
			bool use;
		} protos[] = {
			{ IPPROTO_COMP, sa->ipcomp.transform != IPCOMP_NONE },
			{ IPPROTO_ESP, sa->esp.use },
			{ IPPROTO_AH, sa->ah.use },
		};
		ipsec_mode_t proto_mode = sa->mode;

		rthdr->rta_type = XFRMA_TMPL;
		rthdr->rta_len = 0; /* actual length is set below */

		for (i = 0; i < countof(protos); i++)
		{
			if (!protos[i].use)
			{
				continue;
			}

			rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
			hdr->nlmsg_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
			if (hdr->nlmsg_len > sizeof(request))
			{	/* request buffer exhausted */
				return FAILED;
			}

			tmpl->reqid = sa->reqid;
			tmpl->id.proto = protos[i].proto;
			tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
			tmpl->mode = mode2kernel(proto_mode);
			/* inbound/forward IPComp is optional: small packets may arrive
			 * uncompressed */
			tmpl->optional = protos[i].proto == IPPROTO_COMP &&
							 direction != POLICY_OUT;
			tmpl->family = src->get_family(src);

			if (proto_mode == MODE_TUNNEL)
			{	/* only for tunnel mode */
				host2xfrm(src, &tmpl->saddr);
				host2xfrm(dst, &tmpl->id.daddr);
			}

			tmpl++;

			/* use transport mode for other SAs */
			proto_mode = MODE_TRANSPORT;
		}

		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (mark.value)
	{
		/* append the mark as an XFRMA_MARK attribute */
		struct xfrm_mark *mrk;

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));

		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
	{
		DBG1(DBG_KNL, "unable to add policy %R === %R %N", src_ts, dst_ts,
					   policy_dir_names, direction);
		return FAILED;
	}

	/* install a route, if:
	 * - we are NOT updating a policy
	 * - this is a forward policy (to just get one for each child)
	 * - we are in tunnel/BEET mode
	 * - routing is not disabled via strongswan.conf
	 */
	if (policy->route == NULL && direction == POLICY_FWD &&
		sa->mode != MODE_TRANSPORT && this->install_routes)
	{
		route_entry_t *route = malloc_thing(route_entry_t);

		if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
				dst_ts, &route->src_ip) == SUCCESS)
		{
			/* get the nexthop to src (src as we are in POLICY_FWD).*/
			route->gateway = hydra->kernel_interface->get_nexthop(
												hydra->kernel_interface, src);
			/* install route via outgoing interface */
			route->if_name = hydra->kernel_interface->get_interface(
												hydra->kernel_interface, dst);
			route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
			memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
			route->prefixlen = policy->sel.prefixlen_s;

			if (route->if_name)
			{
				DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
					 src_ts, route->gateway, route->src_ip, route->if_name);
				switch (hydra->kernel_interface->add_route(
									hydra->kernel_interface, route->dst_net,
									route->prefixlen, route->gateway,
									route->src_ip, route->if_name))
				{
					default:
						DBG1(DBG_KNL, "unable to install source route for %H",
							 route->src_ip);
						/* FALL */
					case ALREADY_DONE:
						/* route exists, do not uninstall */
						route_entry_destroy(route);
						break;
					case SUCCESS:
						/* cache the installed route */
						policy->route = route;
						break;
				}
			}
			else
			{
				route_entry_destroy(route);
			}
		}
		else
		{
			free(route);
		}
	}
	return SUCCESS;
}
1873
/**
 * Query the last-use time of a policy.
 *
 * Sends an XFRM_MSG_GETPOLICY request for the policy matching the given
 * traffic selectors/direction (optionally constrained by XFRMA_MARK) and
 * converts the kernel's wall-clock use time into the monotonic time base.
 *
 * @param use_time	receives the monotonic last-use time, 0 if never used
 * @return			SUCCESS if the policy was found, FAILED otherwise
 */
METHOD(kernel_ipsec_t, query_policy, status_t,
	private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
	traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
	u_int32_t *use_time)
{
	netlink_buf_t request;
	struct nlmsghdr *out = NULL, *hdr;
	struct xfrm_userpolicy_id *policy_id;
	struct xfrm_userpolicy_info *policy = NULL;
	size_t len;

	memset(&request, 0, sizeof(request));

	if (mark.value)
	{
		DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%8x)",
					   src_ts, dst_ts, policy_dir_names, direction,
					   mark.value, mark.mask);
	}
	else
	{
		DBG2(DBG_KNL, "querying policy %R === %R %N", src_ts, dst_ts,
					   policy_dir_names, direction);
	}

	/* build the XFRM_MSG_GETPOLICY request */
	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST;
	hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));

	policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
	policy_id->sel = ts2selector(src_ts, dst_ts);
	policy_id->dir = direction;

	if (mark.value)
	{
		/* append the mark as an XFRMA_MARK attribute */
		struct xfrm_mark *mrk;
		struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));

		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{	/* request buffer too small for the attribute */
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
	{
		/* only XFRM_MSG_NEWPOLICY carries the policy info; "default"
		 * advances to the next message, everything else exits the loop */
		hdr = out;
		while (NLMSG_OK(hdr, len))
		{
			switch (hdr->nlmsg_type)
			{
				case XFRM_MSG_NEWPOLICY:
				{
					policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
					break;
				}
				case NLMSG_ERROR:
				{
					struct nlmsgerr *err = NLMSG_DATA(hdr);
					DBG1(DBG_KNL, "querying policy failed: %s (%d)",
						 strerror(-err->error), -err->error);
					break;
				}
				default:
					hdr = NLMSG_NEXT(hdr, len);
					continue;
				case NLMSG_DONE:
					break;
			}
			break;
		}
	}

	if (policy == NULL)
	{
		DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
					   policy_dir_names, direction);
		free(out);
		return FAILED;
	}

	if (policy->curlft.use_time)
	{
		/* we need the monotonic time, but the kernel returns system time. */
		*use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
	}
	else
	{
		*use_time = 0;
	}

	free(out);
	return SUCCESS;
}
1976
/**
 * Delete a policy from the kernel SPD.
 *
 * Decrements the refcount of the matching shadow entry in this->policies;
 * the kernel policy (XFRM_MSG_DELPOLICY) and any cached source route are
 * only removed once the last reference is gone.
 *
 * @return	SUCCESS if deleted (or still referenced), NOT_FOUND if no
 *			matching entry is tracked, FAILED on kernel errors
 */
METHOD(kernel_ipsec_t, del_policy, status_t,
	private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
	traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
	bool unrouted)
{
	policy_entry_t *current, policy, *to_delete = NULL;
	route_entry_t *route;
	netlink_buf_t request;
	struct nlmsghdr *hdr;
	struct xfrm_userpolicy_id *policy_id;

	if (mark.value)
	{
		DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x)",
					   src_ts, dst_ts, policy_dir_names, direction,
					   mark.value, mark.mask);
	}
	else
	{
		DBG2(DBG_KNL, "deleting policy %R === %R %N",
					   src_ts, dst_ts, policy_dir_names, direction);
	}

	/* create a policy */
	memset(&policy, 0, sizeof(policy_entry_t));
	policy.sel = ts2selector(src_ts, dst_ts);
	policy.mark = mark.value & mark.mask;
	policy.direction = direction;

	/* find the policy */
	this->mutex->lock(this->mutex);
	current = this->policies->get(this->policies, &policy);
	if (current)
	{
		to_delete = current;
		if (--to_delete->refcount > 0)
		{
			/* is used by more SAs, keep in kernel */
			DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
			this->mutex->unlock(this->mutex);
			return SUCCESS;
		}
		/* remove if last reference */
		this->policies->remove(this->policies, to_delete);
	}
	this->mutex->unlock(this->mutex);
	if (!to_delete)
	{
		if (mark.value)
		{
			DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x) "
						  "failed, not found", src_ts, dst_ts, policy_dir_names,
						   direction, mark.value, mark.mask);
		}
		else
		{
			DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
						   src_ts, dst_ts, policy_dir_names, direction);
		}
		return NOT_FOUND;
	}

	/* build the XFRM_MSG_DELPOLICY request */
	memset(&request, 0, sizeof(request));

	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));

	policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
	policy_id->sel = to_delete->sel;
	policy_id->dir = direction;

	if (mark.value)
	{
		/* append the mark as an XFRMA_MARK attribute */
		struct xfrm_mark *mrk;
		struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{	/* NOTE(review): to_delete was already removed from the
			 * hashtable above but is not freed here — looks like a leak on
			 * this (practically unreachable) path; verify before changing */
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	/* keep the cached route before freeing the shadow entry */
	route = to_delete->route;
	free(to_delete);

	if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
	{
		if (mark.value)
		{
			DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
						  "(mark %u/0x%8x)", src_ts, dst_ts, policy_dir_names,
						   direction, mark.value, mark.mask);
		}
		else
		{
			DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
						   src_ts, dst_ts, policy_dir_names, direction);
		}
		return FAILED;
	}

	if (route)
	{
		/* uninstall the source route that was installed with the policy */
		if (hydra->kernel_interface->del_route(hydra->kernel_interface,
				route->dst_net, route->prefixlen, route->gateway,
				route->src_ip, route->if_name) != SUCCESS)
		{
			DBG1(DBG_KNL, "error uninstalling route installed with "
						  "policy %R === %R %N", src_ts, dst_ts,
						   policy_dir_names, direction);
		}
		route_entry_destroy(route);
	}
	return SUCCESS;
}
2101
2102 METHOD(kernel_ipsec_t, bypass_socket, bool,
2103 private_kernel_netlink_ipsec_t *this, int fd, int family)
2104 {
2105 struct xfrm_userpolicy_info policy;
2106 u_int sol, ipsec_policy;
2107
2108 switch (family)
2109 {
2110 case AF_INET:
2111 sol = SOL_IP;
2112 ipsec_policy = IP_XFRM_POLICY;
2113 break;
2114 case AF_INET6:
2115 sol = SOL_IPV6;
2116 ipsec_policy = IPV6_XFRM_POLICY;
2117 break;
2118 default:
2119 return FALSE;
2120 }
2121
2122 memset(&policy, 0, sizeof(policy));
2123 policy.action = XFRM_POLICY_ALLOW;
2124 policy.sel.family = family;
2125
2126 policy.dir = XFRM_POLICY_OUT;
2127 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2128 {
2129 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2130 strerror(errno));
2131 return FALSE;
2132 }
2133 policy.dir = XFRM_POLICY_IN;
2134 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2135 {
2136 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2137 strerror(errno));
2138 return FALSE;
2139 }
2140 return TRUE;
2141 }
2142
/**
 * Destroy the kernel interface instance.
 *
 * Cancels the event job, closes the XFRM event socket, destroys the netlink
 * socket, frees all tracked policy entries and releases the hashtable, mutex
 * and the object itself.
 */
METHOD(kernel_ipsec_t, destroy, void,
	private_kernel_netlink_ipsec_t *this)
{
	enumerator_t *enumerator;
	policy_entry_t *policy;

	if (this->job)
	{
		/* stop the receive_events callback job before tearing down sockets */
		this->job->cancel(this->job);
	}
	if (this->socket_xfrm_events > 0)
	{
		close(this->socket_xfrm_events);
	}
	DESTROY_IF(this->socket_xfrm);
	/* free all policy entries still tracked in the hashtable */
	enumerator = this->policies->create_enumerator(this->policies);
	while (enumerator->enumerate(enumerator, &policy, &policy))
	{
		free(policy);
	}
	enumerator->destroy(enumerator);
	this->policies->destroy(this->policies);
	this->mutex->destroy(this->mutex);
	free(this);
}
2168
2169 /*
2170 * Described in header.
2171 */
2172 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2173 {
2174 private_kernel_netlink_ipsec_t *this;
2175 struct sockaddr_nl addr;
2176 int fd;
2177
2178 INIT(this,
2179 .public = {
2180 .interface = {
2181 .get_spi = _get_spi,
2182 .get_cpi = _get_cpi,
2183 .add_sa = _add_sa,
2184 .update_sa = _update_sa,
2185 .query_sa = _query_sa,
2186 .del_sa = _del_sa,
2187 .add_policy = _add_policy,
2188 .query_policy = _query_policy,
2189 .del_policy = _del_policy,
2190 .bypass_socket = _bypass_socket,
2191 .destroy = _destroy,
2192 },
2193 },
2194 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2195 (hashtable_equals_t)policy_equals, 32),
2196 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2197 .install_routes = lib->settings->get_bool(lib->settings,
2198 "%s.install_routes", TRUE,
2199 hydra->daemon),
2200 );
2201
2202 if (streq(hydra->daemon, "pluto"))
2203 { /* no routes for pluto, they are installed via updown script */
2204 this->install_routes = FALSE;
2205 }
2206
2207 /* disable lifetimes for allocated SPIs in kernel */
2208 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2209 if (fd)
2210 {
2211 ignore_result(write(fd, "165", 3));
2212 close(fd);
2213 }
2214
2215 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2216 if (!this->socket_xfrm)
2217 {
2218 destroy(this);
2219 return NULL;
2220 }
2221
2222 memset(&addr, 0, sizeof(addr));
2223 addr.nl_family = AF_NETLINK;
2224
2225 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2226 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2227 if (this->socket_xfrm_events <= 0)
2228 {
2229 DBG1(DBG_KNL, "unable to create XFRM event socket");
2230 destroy(this);
2231 return NULL;
2232 }
2233 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2234 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2235 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2236 {
2237 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2238 destroy(this);
2239 return NULL;
2240 }
2241 this->job = callback_job_create((callback_job_cb_t)receive_events,
2242 this, NULL, NULL);
2243 lib->processor->queue_job(lib->processor, (job_t*)this->job);
2244
2245 return &this->public;
2246 }
2247