Skip to content

Commit 638b2a6

Browse files
jpirko authored and davem330 committed
net: move netdev_pick_tx and dependencies to net/core/dev.c
next to its user. It has no relation to flow_dissector, so it makes no sense to have it in flow_dissector.c. Signed-off-by: Jiri Pirko <jiri@resnulli.us> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 5605c76 commit 638b2a6

File tree

2 files changed

+78
-78
lines changed

2 files changed

+78
-78
lines changed

net/core/dev.c

Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2936,6 +2936,84 @@ int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
29362936
}
29372937
EXPORT_SYMBOL(dev_loopback_xmit);
29382938

2939+
/*
 * get_xps_queue - look up a tx queue via the XPS (Transmit Packet Steering)
 * CPU-to-queue map.
 *
 * Returns a valid queue index for @skb, or -1 when no XPS mapping applies
 * (no map installed, the mapped index is out of range, or the kernel was
 * built without CONFIG_XPS).
 */
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		/* sender_cpu is stored 1-based (0 means "unset", see
		 * netdev_pick_tx), hence the -1 when indexing the map.
		 */
		map = rcu_dereference(
		    dev_maps->cpu_map[skb->sender_cpu - 1]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				/* Spread flows over the mapped queues by
				 * scaling the skb hash into [0, map->len).
				 */
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			/* Map may be stale relative to a queue-count change;
			 * reject indices the device no longer has.
			 */
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
2968+
2969+
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
2970+
{
2971+
struct sock *sk = skb->sk;
2972+
int queue_index = sk_tx_queue_get(sk);
2973+
2974+
if (queue_index < 0 || skb->ooo_okay ||
2975+
queue_index >= dev->real_num_tx_queues) {
2976+
int new_index = get_xps_queue(dev, skb);
2977+
if (new_index < 0)
2978+
new_index = skb_tx_hash(dev, skb);
2979+
2980+
if (queue_index != new_index && sk &&
2981+
rcu_access_pointer(sk->sk_dst_cache))
2982+
sk_tx_queue_set(sk, new_index);
2983+
2984+
queue_index = new_index;
2985+
}
2986+
2987+
return queue_index;
2988+
}
2989+
2990+
/*
 * netdev_pick_tx - select the netdev_queue an skb will be transmitted on.
 * @dev:        device the skb is being sent out of
 * @skb:        buffer to transmit
 * @accel_priv: opaque accelerator context passed through to the driver's
 *              ndo_select_queue; when set, the driver's choice is trusted
 *              and not capped here
 *
 * Records the chosen index in the skb's queue mapping and returns the
 * corresponding tx queue.
 */
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	/* Stamp the sending CPU (1-based; 0 means "unset") so XPS lookups
	 * such as get_xps_queue() can index the per-CPU map.
	 */
	if (skb->sender_cpu == 0)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	/* Single-queue devices always use index 0; no selection needed. */
	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		/* Let the driver pick when it implements ndo_select_queue,
		 * handing it __netdev_pick_tx as the generic fallback.
		 */
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		/* Clamp to the device's real queue count, but only when no
		 * accelerator context overrides the generic bounds check.
		 */
		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
3016+
29393017
/**
29403018
* __dev_queue_xmit - transmit a buffer
29413019
* @skb: buffer to transmit

net/core/flow_dissector.c

Lines changed: 0 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -431,81 +431,3 @@ u32 skb_get_poff(const struct sk_buff *skb)
431431

432432
return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
433433
}
434-
435-
/*
 * get_xps_queue - look up a tx queue via the XPS (Transmit Packet Steering)
 * CPU-to-queue map.
 *
 * Returns a valid queue index for @skb, or -1 when no XPS mapping applies
 * (no map installed, the mapped index is out of range, or the kernel was
 * built without CONFIG_XPS).
 */
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		/* sender_cpu is stored 1-based (0 means "unset", see
		 * netdev_pick_tx), hence the -1 when indexing the map.
		 */
		map = rcu_dereference(
		    dev_maps->cpu_map[skb->sender_cpu - 1]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				/* Spread flows over the mapped queues by
				 * scaling the skb hash into [0, map->len).
				 */
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			/* Map may be stale relative to a queue-count change;
			 * reject indices the device no longer has.
			 */
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
464-
465-
/*
 * __netdev_pick_tx - default tx queue selection for devices without a
 * driver-provided ndo_select_queue.
 *
 * Reuses the queue index cached on the socket when it is still valid and
 * reordering must be avoided; otherwise picks a queue via XPS (falling back
 * to skb_tx_hash) and refreshes the socket cache.
 */
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	/* Recompute when there is no valid cached index, the skb permits
	 * out-of-order delivery, or the cache is out of range.
	 */
	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		/* Only cache the new index on sockets holding a dst
		 * reference; skip the write when the choice is unchanged.
		 */
		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
485-
486-
/*
 * netdev_pick_tx - select the netdev_queue an skb will be transmitted on.
 * @dev:        device the skb is being sent out of
 * @skb:        buffer to transmit
 * @accel_priv: opaque accelerator context passed through to the driver's
 *              ndo_select_queue; when set, the driver's choice is trusted
 *              and not capped here
 *
 * Records the chosen index in the skb's queue mapping and returns the
 * corresponding tx queue.
 */
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	/* Stamp the sending CPU (1-based; 0 means "unset") so XPS lookups
	 * such as get_xps_queue() can index the per-CPU map.
	 */
	if (skb->sender_cpu == 0)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	/* Single-queue devices always use index 0; no selection needed. */
	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		/* Let the driver pick when it implements ndo_select_queue,
		 * handing it __netdev_pick_tx as the generic fallback.
		 */
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		/* Clamp to the device's real queue count, but only when no
		 * accelerator context overrides the generic bounds check.
		 */
		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

0 commit comments

Comments
 (0)