#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_tables.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

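/*
 * Fix up the TCP checksum after an address rewrite. The TCP checksum
 * covers a pseudo-header containing the IP addresses, so it must be
 * updated even though the TCP header and payload are otherwise
 * untouched.
 */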
static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);

	return 0;
}

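/*
 * Same for UDP, with one twist: a zero UDP checksum on IPv4 means "no
 * checksum", so only mangle it when a checksum is present (or still to
 * be computed via CHECKSUM_PARTIAL), and map a resulting zero to
 * CSUM_MANGLED_0.
 */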
static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

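/*
 * Dispatch the layer 4 checksum fixup by IP protocol; returns a
 * negative value on error, as the callers expect.
 */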
static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				  unsigned int thoff, __be32 addr,
				  __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
			return -1;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
			return -1;
		break;
	}

	return 0;
}

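/*
 * Source NAT: in the original direction, rewrite the source address to
 * the one the reply tuple carries as destination; on replies, rewrite
 * the destination address back. The IP header checksum is fixed up
 * here, the layer 4 checksum by the helpers above.
 */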
static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

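/*
 * Destination NAT: the mirror image of nf_flow_snat_ip(), rewriting
 * the destination address in the original direction and the source
 * address on replies.
 */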
static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

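/*
 * Apply the NAT mangling that the flow entry asks for, ports first and
 * then addresses, for both SNAT and DNAT.
 */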
static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			  enum flow_offload_tuple_dir dir)
{
	struct iphdr *iph = ip_hdr(skb);
	unsigned int thoff = iph->ihl * 4;

	if (flow->flags & FLOW_OFFLOAD_SNAT &&
	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;
	if (flow->flags & FLOW_OFFLOAD_DNAT &&
	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;

	return 0;
}

/* An IPv4 header without options is exactly sizeof(struct iphdr) long. */
static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

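/*
 * Build the lookup tuple from the packet. Fragments, packets carrying
 * IP options and protocols other than TCP and UDP are not handled here
 * and stay on the classic forwarding path.
 */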
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -1;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return -1;

	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;
	tuple->iifidx = dev->ifindex;

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
		return false;

	return true;
}

static bool nf_flow_exceeds_mtu(struct sk_buff *skb, const struct rtable *rt)
{
	u32 mtu;

	mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);

	return __nf_flow_exceeds_mtu(skb, mtu);
}

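/*
 * The flowtable hook: look the packet up in the flow table and, on a
 * hit, apply NAT, refresh the flow timeout, decrement the TTL and hand
 * the packet straight to the neighbour layer for transmission, thereby
 * bypassing the classic forwarding path. Anything that does not match,
 * or that exceeds the path MTU, falls back to the standard stack via
 * NF_ACCEPT.
 */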
static unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	const struct rtable *rt;
	struct iphdr *iph;
	__be32 nexthop;

	if (skb->protocol != htons(ETH_P_IP))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
	if (!outdev)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	rt = (const struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
	if (unlikely(nf_flow_exceeds_mtu(skb, rt)))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, sizeof(*iph)))
		return NF_DROP;

	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
	    nf_flow_nat_ip(flow, skb, dir) < 0)
		return NF_DROP;

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
	iph = ip_hdr(skb);
	ip_decrease_ttl(iph);

	skb->dev = outdev;
	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);

	return NF_STOLEN;
}

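/* The IPv4 flowtable type, registered with nf_tables on module load. */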
static struct nf_flowtable_type flowtable_ipv4 = {
	.family		= NFPROTO_IPV4,
	.params		= &nf_flow_offload_rhash_params,
	.gc		= nf_flow_offload_work_gc,
	.hook		= nf_flow_offload_ip_hook,
	.owner		= THIS_MODULE,
};

static int __init nf_flow_ipv4_module_init(void)
{
	nft_register_flowtable_type(&flowtable_ipv4);

	return 0;
}

static void __exit nf_flow_ipv4_module_exit(void)
{
	nft_unregister_flowtable_type(&flowtable_ipv4);
}

module_init(nf_flow_ipv4_module_init);
module_exit(nf_flow_ipv4_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
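/*
 * For reference, a ruleset along the following lines (illustrative nft
 * syntax, not part of this file) would push forwarded TCP and UDP
 * flows into a flowtable backed by this type:
 *
 *	table ip x {
 *		flowtable f {
 *			hook ingress priority 0; devices = { eth0, eth1 };
 *		}
 *		chain forward {
 *			type filter hook forward priority 0;
 *			ip protocol { tcp, udp } flow offload @f
 *		}
 *	}
 */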