net: dpaa: avoid on-stack arrays of NR_CPUS elements
[ Upstream commit 555a05d84ca2c587e2d4777006e2c2fb3dfbd91d ]

The dpaa-eth driver is written for PowerPC and Arm SoCs which have 1-24 CPUs. It depends on CONFIG_NR_CPUS having a reasonably small value in Kconfig. Otherwise, there are 2 functions which allocate on-stack arrays of NR_CPUS elements, and these can quickly explode in size, leading to warnings such as:

drivers/net/ethernet/freescale/dpaa/dpaa_eth.c:3280:12: warning: stack frame size (16664) exceeds limit (2048) in 'dpaa_eth_probe' [-Wframe-larger-than]

The problem is twofold:

- Reducing the array size to the boot-time num_possible_cpus() (rather than the compile-time NR_CPUS) creates a variable-length array, which should be avoided in the Linux kernel.

- Using NR_CPUS as an array size makes the driver blow up in stack consumption with generic, as opposed to hand-crafted, .config files.

A simple solution is to use dynamic allocation for num_possible_cpus() elements (aka a small number determined at runtime).

Link: https://lore.kernel.org/all/202406261920.l5pzM1rj-lkp@intel.com/
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Breno Leitao <leitao@debian.org>
Acked-by: Madalin Bucur <madalin.bucur@oss.nxp.com>
Link: https://patch.msgid.link/20240713225336.1746343-2-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
88dfdf7438
commit
a78199809a
2 changed files with 24 additions and 6 deletions
|
@@ -908,14 +908,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
|
|||
}
|
||||
}
|
||||
|
||||
static void dpaa_fq_setup(struct dpaa_priv *priv,
|
||||
const struct dpaa_fq_cbs *fq_cbs,
|
||||
struct fman_port *tx_port)
|
||||
static int dpaa_fq_setup(struct dpaa_priv *priv,
|
||||
const struct dpaa_fq_cbs *fq_cbs,
|
||||
struct fman_port *tx_port)
|
||||
{
|
||||
int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
|
||||
const cpumask_t *affine_cpus = qman_affine_cpus();
|
||||
u16 channels[NR_CPUS];
|
||||
struct dpaa_fq *fq;
|
||||
u16 *channels;
|
||||
|
||||
channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
|
||||
if (!channels)
|
||||
return -ENOMEM;
|
||||
|
||||
for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
|
||||
channels[num_portals++] = qman_affine_channel(cpu);
|
||||
|
@@ -974,6 +978,10 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
kfree(channels);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
|
||||
|
@@ -3015,7 +3023,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
|
|||
*/
|
||||
dpaa_eth_add_channel(priv->channel, &pdev->dev);
|
||||
|
||||
dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
|
||||
err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
|
||||
if (err)
|
||||
goto free_dpaa_bps;
|
||||
|
||||
/* Create a congestion group for this netdev, with
|
||||
* dynamically-allocated CGR ID.
|
||||
|
|
|
@@ -537,12 +537,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
|
|||
struct ethtool_coalesce *c)
|
||||
{
|
||||
const cpumask_t *cpus = qman_affine_cpus();
|
||||
bool needs_revert[NR_CPUS] = {false};
|
||||
struct qman_portal *portal;
|
||||
u32 period, prev_period;
|
||||
u8 thresh, prev_thresh;
|
||||
bool *needs_revert;
|
||||
int cpu, res;
|
||||
|
||||
needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
|
||||
if (!needs_revert)
|
||||
return -ENOMEM;
|
||||
|
||||
period = c->rx_coalesce_usecs;
|
||||
thresh = c->rx_max_coalesced_frames;
|
||||
|
||||
|
@@ -565,6 +569,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
|
|||
needs_revert[cpu] = true;
|
||||
}
|
||||
|
||||
kfree(needs_revert);
|
||||
|
||||
return 0;
|
||||
|
||||
revert_values:
|
||||
|
@@ -578,6 +584,8 @@ revert_values:
|
|||
qman_dqrr_set_ithresh(portal, prev_thresh);
|
||||
}
|
||||
|
||||
kfree(needs_revert);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue