sfc_ef100: implement ndo_open/close and EVQ probing
Channels are probed, but actual event handling is still stubbed out.

A stub implementation of check_caps is needed because ptp.c will call into it
from efx_ptp_use_mac_tx_timestamps() to decide if it wants TXQs.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
committed by David S. Miller
parent 2200e6d92e
commit 965b549f3c
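The hunks shown below are from ef100_netdev.c; the check_caps stub that the commit message refers to is not visible in them. A minimal sketch of what such a stub might look like, assuming the flag/offset signature used by the other sfc nic_type check_caps hooks (the function name and the choice to report no capabilities are illustrative, not taken from this diff):

/* Hypothetical sketch, not part of the hunks below: a no-op check_caps
 * hook.  Reporting no capabilities means efx_ptp_use_mac_tx_timestamps()
 * sees no MAC TX timestamping support and so does not request PTP TXQs.
 */
static unsigned int ef100_check_caps(const struct efx_nic *efx,
                                     u8 flag, u32 offset)
{
        /* Capability flags have not been fetched from the MC yet */
        return 0;
}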
@@ -29,6 +29,147 @@ static void ef100_update_name(struct efx_nic *efx)
        strcpy(efx->name, efx->net_dev->name);
}

static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis)
{
        /* EF100 uses a single TXQ per channel, as all checksum offloading
         * is configured in the TX descriptor, and there is no TX Pacer for
         * HIGHPRI queues.
         */
        unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels;
        unsigned int rx_vis = efx->n_rx_channels;
        unsigned int min_vis, max_vis;

        EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1);

        tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel;

        max_vis = max(rx_vis, tx_vis);
        /* Currently don't handle resource starvation and only accept
         * our maximum needs and no less.
         */
        min_vis = max_vis;

        return efx_mcdi_alloc_vis(efx, min_vis, max_vis,
                                  NULL, allocated_vis);
}

static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
{
        unsigned int uc_mem_map_size;
        void __iomem *membase;

        efx->max_vis = max_vis;
        uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride);

        /* Extend the original UC mapping of the memory BAR */
        membase = ioremap(efx->membase_phys, uc_mem_map_size);
        if (!membase) {
                netif_err(efx, probe, efx->net_dev,
                          "could not extend memory BAR to %x\n",
                          uc_mem_map_size);
                return -ENOMEM;
        }
        iounmap(efx->membase);
        efx->membase = membase;
        return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int ef100_net_stop(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
                  raw_smp_processor_id());

        netif_stop_queue(net_dev);
        efx_stop_all(efx);
        efx_disable_interrupts(efx);
        efx_clear_interrupt_affinity(efx);
        efx_nic_fini_interrupt(efx);
        efx_fini_napi(efx);
        efx_remove_channels(efx);
        efx_mcdi_free_vis(efx);
        efx_remove_interrupts(efx);

        return 0;
}

/* Context: process, rtnl_lock() held. */
static int ef100_net_open(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        unsigned int allocated_vis;
        int rc;

        ef100_update_name(efx);
        netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n",
                  raw_smp_processor_id());

        rc = efx_check_disabled(efx);
        if (rc)
                goto fail;

        rc = efx_probe_interrupts(efx);
        if (rc)
                goto fail;

        rc = efx_set_channels(efx);
        if (rc)
                goto fail;

        rc = efx_mcdi_free_vis(efx);
        if (rc)
                goto fail;

        rc = ef100_alloc_vis(efx, &allocated_vis);
        if (rc)
                goto fail;

        rc = efx_probe_channels(efx);
        if (rc)
                return rc;

        rc = ef100_remap_bar(efx, allocated_vis);
        if (rc)
                goto fail;

        efx_init_napi(efx);

        rc = efx_nic_init_interrupt(efx);
        if (rc)
                goto fail;
        efx_set_interrupt_affinity(efx);

        rc = efx_enable_interrupts(efx);
        if (rc)
                goto fail;

        /* in case the MC rebooted while we were stopped, consume the change
         * to the warm reboot count
         */
        (void) efx_mcdi_poll_reboot(efx);

        efx_start_all(efx);

        /* Link state detection is normally event-driven; we have
         * to poll now because we could have missed a change
         */
        mutex_lock(&efx->mac_lock);
        if (efx_mcdi_phy_poll(efx))
                efx_link_status_changed(efx);
        mutex_unlock(&efx->mac_lock);

        return 0;

fail:
        ef100_net_stop(net_dev);
        return rc;
}

/* Initiate a packet transmission. We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
@@ -64,6 +205,8 @@ err:
}

static const struct net_device_ops ef100_netdev_ops = {
        .ndo_open = ef100_net_open,
        .ndo_stop = ef100_net_stop,
        .ndo_start_xmit = ef100_hard_start_xmit,
};
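For context, the ptp.c caller named in the commit message looks roughly like the sketch below (paraphrased for illustration, not part of this diff). efx_has_cap() wraps the nic_type check_caps hook, so a stub that reports no capabilities makes this test false on EF100 for now.

/* Paraphrase of the caller in drivers/net/ethernet/sfc/ptp.c; with a
 * check_caps stub that reports no capabilities this returns false, so
 * PTP does not ask for MAC-timestamping TXQs on EF100 yet.
 */
static bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
{
        return efx_has_cap(efx, TX_MAC_TIMESTAMPING);
}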