当使用ip route add/del添加或者删除路由时,通过触发netlink发送信息到各协议路由系统注册的netlink处理函数,如add时调用函数为inet_rtm_newroute。ECMP(Equal Cost Multi Path,等价多路径)指在IP交换网络中存在到达同一目的地址的多条不同路径,且每条路径消耗的资源(cost)相同。内核配置了CONFIG_IP_ROUTE_MULTIPATH时,IP层在收到去往该目的地址的IP报文时,会根据配置的策略通过不同的路径均衡转发出去,从而达到负载均衡的目的。
路由初始化主要函数为:
//路由缓存初始化
/*
 * ip_rt_init - IPv4 routing / route-cache initialization, run once at boot.
 * Sets up the dst-entry slab caches and per-CPU counters, allocates the
 * route-cache hash table, initializes the device and FIB subsystems,
 * schedules the periodic garbage-collection worker, and registers the
 * RTM_GETROUTE netlink handler plus sysctl/pernet ops.
 */
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
	/* Route-based classifier: 256 accounting slots per CPU. */
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct),
				    __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif
	/* Slab cache for route-cache entries (struct rtable). */
	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Blackhole routes share the same slab cache. */
	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	/* Per-CPU dst entry counters for the normal ops... */
	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	/* ...and for the blackhole ops. */
	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	/* Allocate the route-cache hash table; size scales with RAM
	 * unless overridden by the rhash_entries boot parameter. */
	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(totalram_pages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					rhash_entries ? 0 : 512 * 1024);
	/* Zero the buckets and initialize the per-bucket locks. */
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	/* GC threshold and the maximum number of cached routes. */
	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;

	/* Device/inet init: registers notifier chains, creates the
	 * in_ifaddr (alias) cache. */
	devinet_init();
	/* FIB init: netlink route commands, pernet tables, notifiers. */
	ip_fib_init();

	/* Schedule the periodic route-cache GC worker; first run is
	 * randomized within one gc interval to avoid synchronization. */
	INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
	expires_ljiffies = jiffies;
	schedule_delayed_work(&expires_work,
		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init(ip_rt_max_size);
#endif
	/* Netlink handler for route queries (RTM_GETROUTE). */
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	return rc;
}
当使用ip route add/del添加或者删除路由时,通过触发netlink发送信息到各协议路由系统注册的netlink处理函数,如add时调用函数为inet_rtm_newroute。
/*
 * ip_fib_init - FIB (forwarding information base) initialization.
 * Registers the netlink route add/delete/dump handlers, the per-net
 * FIB subsystem, device/address notifier handlers and the slab caches
 * used by the FIB trie.
 */
void __init ip_fib_init(void)
{
	/* Netlink handlers for route add, delete and dump commands. */
	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);

	/* Initialize per-namespace routing tables and route cache. */
	register_pernet_subsys(&fib_net_ops);

	/* Notifier-chain handlers: listen for events from other
	 * subsystems (device state and address changes). */
	register_netdevice_notifier(&fib_netdev_notifier);
	register_inetaddr_notifier(&fib_inetaddr_notifier);

	/* Slab caches used by the FIB trie. */
	fib_trie_init();
}
inet_rtm_getroute是上文ip_rt_init中为RTM_GETROUTE注册的netlink处理函数,用于响应用户态的路由查询请求(例如ip route get命令)。而协议栈自身发包时(例如sys_socketcall()-->sys_connect()-->inet_stream_connect()-->tcp_v4_connect()-->ip_route_connect()),则直接调用ip_route_output_*系列函数完成路由查找;两条路径最终都通过fib_lookup查询路由表。
inet_rtm_getroute分配sk的路由信息,通过fib_lookup查询该dip的路由信息,最终由函数ip_mkroute_input创建路由缓存项。
/*
 * inet_rtm_getroute - netlink RTM_GETROUTE request handler (excerpt).
 * Builds a flowi4 key from the request attributes, then performs either
 * an input-path lookup (when an input interface iif is given) or an
 * output-path lookup, attaching the resulting route to the reply skb.
 * NOTE(review): excerpt — leading/trailing parts are elided ("...").
 */
int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
{
...
	/* Dummy skb carrying the addresses/ports from the query. */
	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
	if (!skb)
		return -ENOBUFS;

	/* Fill the flow key from the netlink attributes. */
	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;
	if (sport)
		fl4.fl4_sport = sport;
	if (dport)
		fl4.fl4_dport = dport;
	fl4.flowi4_proto = ip_proto;

	rcu_read_lock();

	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_rcu;
		}

		fl4.flowi4_iif = iif; /* for rt_fill_info */
		skb->dev = dev;
		skb->mark = mark;
		/* Input-path lookup: route this skb as if it had been
		 * received on dev, and fetch the routing result. */
		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
					 dev, &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		/* Output-path lookup (locally generated traffic). */
		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
		else
			skb_dst_set(skb, &rt->dst);
	}
...
}
/* Input-path route lookup under RCU protection (excerpt). */
int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		       u8 tos, struct net_device *dev, struct fib_result *res)
{
...
	/* Slow path: consult the FIB and build a route for this packet. */
	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}

/*
 * ip_route_input_slow - full FIB lookup and route construction for a
 * received packet (excerpt).
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev, struct fib_result *res)
{
...
	/* Look up the destination in the FIB; failure means no route
	 * (host unreachable if this device does not forward). */
	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

make_route:
	/* Create the routing cache entry; multipath nexthop selection
	 * happens inside ip_mkroute_input(). */
	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
...
}
如果在上述fib_lookup流程中查找到该dip包含多个路径,则由函数fib_multipath_hash计算hash值,之后,函数fib_select_multipath通过hash值选择其中的某个下一跳。
/*
 * ip_mkroute_input - build the input route for a received packet.
 * When the matched fib_info carries several next hops (ECMP) and
 * CONFIG_IP_ROUTE_MULTIPATH is set, one next hop is first selected by
 * flow hash; the routing cache entry is then created for the result.
 */
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos,
			    struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct fib_info *fi = res->fi;

	/* Multiple next hops: choose one by flow hash before routing. */
	if (fi && fi->fib_nhs > 1) {
		int mp_hash = fib_multipath_hash(fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, mp_hash);
	}
#endif
	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
路径数量判断:
如果fib_info的成员nh(下一跳对象)有值,则根据它获得路径的数量:若该下一跳对象为组(group)且是多路径(mpath)组,返回组内路径数量,否则返回1。如果nh为空,则直接使用fib_info结构成员fib_nhs的数量值。
/*
 * fib_info_num_path - number of paths (next hops) of a fib_info.
 * An attached nexthop object, when present, is authoritative for the
 * path count; otherwise the legacy fib_nhs field is used.
 */
static inline unsigned int fib_info_num_path(const struct fib_info *fi)
{
	return unlikely(fi->nh) ? nexthop_num_path(fi->nh) : fi->fib_nhs;
}
/*
 * nexthop_num_path - number of paths represented by a nexthop object.
 * Only a multipath group contributes more than one path; a plain
 * nexthop, or a non-multipath group, counts as a single path.
 */
static inline unsigned int nexthop_num_path(const struct nexthop *nh)
{
	if (nh->is_group) {
		struct nh_group *grp = rcu_dereference_rtnl(nh->nh_grp);

		if (grp->mpath)
			return grp->num_nh;
	}

	return 1;
}
proc文件中fib_multipath_hash_policy参数用于指定路径选择时使用的hash策略。
# cat /proc/sys/net/ipv4/fib_multipath_hash_policy
0:基于三层头部数据做hash
1:基于四层hash
/*
 * fib_multipath_hash - compute the flow hash used for nexthop selection.
 * The policy (L3 vs L4) comes from the per-netns sysctl
 * /proc/sys/net/ipv4/fib_multipath_hash_policy.
 * NOTE(review): excerpt — the bodies of "case 0" (L3 hash) and "case 1"
 * (L4 hash) are elided here and quoted separately below, so this
 * fragment is not syntactically complete as shown.
 */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	struct flow_keys hash_keys;
	u32 mhash;

	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
	case 0 :
		break;
	case 1:
		break;
	/* Fold the collected keys into a hash; top bit is discarded so
	 * the result fits the signed comparisons in fib_select_multipath. */
	mhash = flow_hash_from_keys(&hash_keys);
	return mhash >> 1;
}
如果哈希策略fib_multipath_hash_policy值为0,使用流结构fl4中保存的源和目的IP地址,但是,如果skb有值,将使用函数ip_multipath_l3_keys获取源和目的IP地址,对于ICMP报文,此函数将得到内部IP头部中的IP地址信息;即,hash = sip xor dip。
	/*
	 * Policy 0 (L3): hash on source/destination IPv4 addresses.
	 * With an skb (forwarding path) the addresses are taken from the
	 * packet via ip_multipath_l3_keys(); otherwise from the flow key.
	 */
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;

/*
 * ip_multipath_l3_keys - extract the L3 addresses used for hashing.
 * For ICMP error packets the addresses of the *inner* (offending) IP
 * header are used instead of the outer ones, so the ICMP error follows
 * the same path as the flow it refers to.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	/* Non-ICMP traffic: just use the outer header. */
	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	/* Fragments other than the first carry no ICMP header. */
	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	/* Only ICMP error types embed the original IP header. */
	if (icmph->type != ICMP_DEST_UNREACH &&
	    icmph->type != ICMP_REDIRECT &&
	    icmph->type != ICMP_TIME_EXCEEDED &&
	    icmph->type != ICMP_PARAMETERPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	/* hash input = sip/dip of the chosen (outer or inner) header. */
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
当哈希策略值为1时,根据skb是否为空,有以下两种处理。如果skb有值,并且已经计算了四层的哈希值,这里直接使用此值;否则,解析skb得到四层数据,包括:源和目的地址,源和目的端口号以及协议号。如果skb为空,则由流结构参数fl4中获取上述四层信息。
	/* Policy 1 (L4): hash on the 5-tuple (addresses, ports, proto). */
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			/* Dissect the packet once if the caller did not. */
			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}

			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			/* No packet available: take the 5-tuple from fl4. */
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		break;
	}
3.2 fib_select_multipath-->路由出口选择
proc系统下的fib_multipath_use_neigh的值用于表示是否根据邻居表的状态选择路径,默认为0,表示不使用邻居表状态信息。
# cat /proc/sys/net/ipv4/fib_multipath_use_neigh
遍历所有的下一跳,如果没有开启fib_multipath_use_neigh,判断hash值是否小于等于当前下一跳的fib_nh_upper_bound值,为真则在结果中记录下当前下一跳的索引值和相关信息。在开启fib_multipath_use_neigh的情况下,将先通过函数fib_good_nh来判断是否为可用的下一跳。fib_nh_upper_bound的值不仅与自身下一跳地址的权重相关,而且与当前路由的其它下一跳地址的权重也相关(由fib_rebalance计算)。在下一跳地址数组中,fib_nh_upper_bound的值由小到大排列,被设置RTNH_F_DEAD标记的下一跳的fib_nh_upper_bound值为-1,不会被选择。
/*
 * fib_select_multipath - pick one nexthop of a multipath route by hash.
 * If fib_multipath_use_neigh is enabled, nexthops whose neighbour state
 * looks unusable are skipped (fib_good_nh) and the first usable one is
 * remembered as a fallback. A nexthop is then chosen by comparing the
 * flow hash against the precomputed nh_upper_bound thresholds, which
 * fib_rebalance() derives from the relative nexthop weights;
 * RTNH_F_DEAD entries get a bound of -1 and are never selected.
 */
void fib_select_multipath(struct fib_result *res, int hash)
{
	struct fib_info *fi = res->fi;
	struct net *net = fi->fib_net;
	bool first = false;

	for_nexthops(fi) {
		if (net->ipv4.sysctl_fib_multipath_use_neigh) {
			/* Skip nexthops with an unusable neighbour. */
			if (!fib_good_nh(nh))
				continue;
			/* Remember the first good nexthop as fallback in
			 * case hash exceeds every remaining bound. */
			if (!first) {
				res->nh_sel = nhsel;
				first = true;
			}
		}

		/* Bounds grow monotonically: first bound >= hash wins. */
		if (hash > atomic_read(&nh->nh_upper_bound))
			continue;

		res->nh_sel = nhsel;
		return;
	} endfor_nexthops(fi);
}