// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>

#include "page_pool_priv.h"
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

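	/* Restart needs the full queue management API: per-queue memory
	 * alloc/free, stop/start, and the size of the driver's per-queue
	 * memory blob.
	 */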
	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
		return -EOPNOTSUPP;

	netdev_assert_locked(dev);

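	/* Scratch areas the driver fills in: one for the queue state being
	 * created, one to park the state of the queue being torn down.
	 */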
	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

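	/* Allocate the new queue's resources before touching the live queue,
	 * so an allocation failure leaves the running queue untouched.
	 */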
	err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
	if (err)
		goto err_free_old_mem;

	err = page_pool_check_memory_provider(dev, rxq);
	if (err)
		goto err_free_new_queue_mem;

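	/* Only a running device has an active queue to stop and restart;
	 * otherwise the new memory is merely validated and released below.
	 */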
	if (netif_running(dev)) {
		err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);
		if (err)
			goto err_free_new_queue_mem;

		err = qops->ndo_queue_start(dev, new_mem, rxq_idx);
		if (err)
			goto err_start_queue;
	} else {
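		/* Device is down: swap the pointers so the freshly allocated
		 * memory is released through the common free path below.
		 */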
		swap(new_mem, old_mem);
	}

	qops->ndo_queue_mem_free(dev, old_mem);
	kvfree(old_mem);
	kvfree(new_mem);

	return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * changed any of the queue configuration, and there is not much we can
	 * do to recover from a failure here.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
	if (qops->ndo_queue_start(dev, old_mem, rxq_idx)) {
		WARN(1,
		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
		     rxq_idx);
		qops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	qops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
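
/* A driver opts into queue restarts by implementing the queue management
 * API. Illustrative sketch only; the "foo" names are hypothetical:
 *
 *	static const struct netdev_queue_mgmt_ops foo_queue_mgmt_ops = {
 *		.ndo_queue_mem_size	= sizeof(struct foo_queue_mem),
 *		.ndo_queue_mem_alloc	= foo_queue_mem_alloc,
 *		.ndo_queue_mem_free	= foo_queue_mem_free,
 *		.ndo_queue_start	= foo_queue_start,
 *		.ndo_queue_stop		= foo_queue_stop,
 *	};
 *
 * with dev->queue_mgmt_ops pointing at it before the device is registered.
 */

/* Bind a page pool memory provider (e.g. io_uring zero-copy rx or devmem
 * TCP) to an rx queue and restart the queue so it fills from the provider.
 * Caller must hold the netdev instance lock.
 */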
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		      const struct pp_memory_provider_params *p,
		      struct netlink_ext_ack *extack)
{
	struct netdev_rx_queue *rxq;
	int ret;

	if (!netdev_need_ops_lock(dev))
		return -EOPNOTSUPP;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}
	rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);

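	/* Memory providers hand out buffers the kernel cannot read (user or
	 * device memory), so the NIC must be splitting headers from payload:
	 * tcp-data-split enabled, with no length threshold.
	 */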
	if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
		NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
		return -EINVAL;
	}

	if (dev->cfg->hds_thresh) {
		NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
		return -EINVAL;
	}

	if (dev_xdp_prog_count(dev)) {
		NL_SET_ERR_MSG(extack, "unable to bind custom memory provider to device with XDP program attached");
		return -EEXIST;
	}

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_ops) {
		NL_SET_ERR_MSG(extack, "designated queue already has a memory provider bound");
		return -EEXIST;
	}
#ifdef CONFIG_XDP_SOCKETS
	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}
#endif

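	/* Commit the provider and restart the queue; on failure, roll the
	 * binding back so the queue keeps running on regular pages.
	 */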
	rxq->mp_params = *p;
	ret = netdev_rx_queue_restart(dev, rxq_idx);
	if (ret) {
		rxq->mp_params.mp_ops = NULL;
		rxq->mp_params.mp_priv = NULL;
	}

	return ret;
}

int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		    struct pp_memory_provider_params *p)
{
	int ret;

	netdev_lock(dev);
	ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
	netdev_unlock(dev);

	return ret;
}
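
/* Unbind a memory provider from the queue and restart it on regular page
 * allocations; the mirror image of __net_mp_open_rxq(). Caller must hold
 * the netdev instance lock.
 */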
void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
			const struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;
	int err;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went through shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

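	/* Clear the binding before restarting so the queue comes back on
	 * regular pages; tolerate -ENETDOWN, which means the device is down
	 * and the queue will be reinitialized cleanly on its next open.
	 */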
	rxq->mp_params.mp_ops = NULL;
	rxq->mp_params.mp_priv = NULL;
	err = netdev_rx_queue_restart(dev, ifq_idx);
	WARN_ON(err && err != -ENETDOWN);
}

void net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	netdev_lock(dev);
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	netdev_unlock(dev);
}