Commit 28576760 authored by Linus Torvalds

Merge tag 'overflow-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull overflow updates from Kees Cook:
 "This adds the new overflow checking helpers and adds them to the
  2-factor argument allocators. And this adds the saturating size
  helpers and does a treewide replacement for the struct_size() usage.
  Additionally this adds the overflow testing modules to make sure
  everything works.

  I'm still working on the treewide replacements for allocators with
  "simple" multiplied arguments:

     *alloc(a * b, ...) -> *alloc_array(a, b, ...)

  and

     *zalloc(a * b, ...) -> *calloc(a, b, ...)

  as well as the more complex cases, but that's separable from this
  portion of the series. I expect to have the rest sent before -rc1
  closes; there are a lot of messy cases to clean up.

  Summary:

   - Introduce arithmetic overflow test helper functions (Rasmus)

   - Use overflow helpers in 2-factor allocators (Kees, Rasmus)

   - Introduce overflow test module (Rasmus, Kees)

   - Introduce saturating size helper functions (Matthew, Kees)

   - Treewide use of struct_size() for allocators (Kees)"
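
For context, the check_*_overflow() helpers named above return true when the operation overflows and write the (possibly wrapped) result through their last argument, using the compiler's __builtin_*_overflow() primitives when available and falling back to open-coded checks otherwise. A minimal sketch of how a 2-factor allocator uses them after this series (simplified; the in-tree kmalloc_array() additionally special-cases compile-time-constant arguments):

     static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
     {
             size_t bytes;

             /* Refuse the allocation instead of silently wrapping n * size. */
             if (unlikely(check_mul_overflow(n, size, &bytes)))
                     return NULL;
             return kmalloc(bytes, flags);
     }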

* tag 'overflow-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  treewide: Use struct_size() for devm_kmalloc() and friends
  treewide: Use struct_size() for vmalloc()-family
  treewide: Use struct_size() for kmalloc()-family
  device: Use overflow helpers for devm_kmalloc()
  mm: Use overflow helpers in kvmalloc()
  mm: Use overflow helpers in kmalloc_array*()
  test_overflow: Add memory allocation overflow tests
  overflow.h: Add allocation size calculation helpers
  test_overflow: Report test failures
  test_overflow: macrofy some more, do more tests for free
  lib: add runtime test of check_*_overflow functions
  compiler.h: enable builtin overflow checkers and add fallback code
parents 5eb6eed7 0ed2dd03
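
Most of the diff below converts open-coded allocation sizes of the form sizeof(*p) + n * sizeof(*p->elem) into struct_size(p, elem, n) from the new overflow.h. Conceptually, struct_size() computes the same byte count but saturates to SIZE_MAX when the multiply or add would overflow, so the allocator is asked for an impossible size and returns NULL instead of handing back an undersized buffer. A rough sketch of the semantics (illustrative only; the real struct_size() is a type-generic macro, and struct_size_like() is a made-up name for this sketch):

     /* Illustrative helper, not the kernel macro. */
     static inline size_t struct_size_like(size_t header, size_t elem, size_t n)
     {
             size_t bytes;

             if (check_mul_overflow(n, elem, &bytes) ||
                 check_add_overflow(bytes, header, &bytes))
                     return SIZE_MAX;    /* saturate so the allocation fails */
             return bytes;
     }

For example, kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL) in the clk hunks below stands in for kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL), which could wrap silently for large counts.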
@@ -500,8 +500,8 @@ int af_alg_alloc_tsgl(struct sock *sk)
         sg = sgl->sg;
     if (!sg || sgl->cur >= MAX_SGL_ENTS) {
-        sgl = sock_kmalloc(sk, sizeof(*sgl) +
-                              sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
+        sgl = sock_kmalloc(sk,
+                           struct_size(sgl, sg, (MAX_SGL_ENTS + 1)),
                            GFP_KERNEL);
         if (!sgl)
             return -ENOMEM;
......
@@ -84,9 +84,14 @@ static struct devres_group * node_to_group(struct devres_node *node)
 static __always_inline struct devres * alloc_dr(dr_release_t release,
                                                 size_t size, gfp_t gfp, int nid)
 {
-    size_t tot_size = sizeof(struct devres) + size;
+    size_t tot_size;
     struct devres *dr;

+    /* We must catch any near-SIZE_MAX cases that could overflow. */
+    if (unlikely(check_add_overflow(sizeof(struct devres), size,
+                                    &tot_size)))
+        return NULL;
+
     dr = kmalloc_node_track_caller(tot_size, gfp, nid);
     if (unlikely(!dr))
         return NULL;
......
@@ -40,8 +40,10 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
     if (IS_ERR(reg))
         return PTR_ERR(reg);
-    onecell = devm_kmalloc(dev, sizeof(*onecell) + sizeof(*onecell->hws) *
-                           BCM2835_AUX_CLOCK_COUNT, GFP_KERNEL);
+    onecell = devm_kmalloc(dev,
+                           struct_size(onecell, hws,
+                                       BCM2835_AUX_CLOCK_COUNT),
+                           GFP_KERNEL);
     if (!onecell)
         return -ENOMEM;
     onecell->num = BCM2835_AUX_CLOCK_COUNT;
......
@@ -2147,8 +2147,8 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
     size_t i;
     int ret;
-    cprman = devm_kzalloc(dev, sizeof(*cprman) +
-                          sizeof(*cprman->onecell.hws) * asize,
+    cprman = devm_kzalloc(dev,
+                          struct_size(cprman, onecell.hws, asize),
                           GFP_KERNEL);
     if (!cprman)
         return -ENOMEM;
......
@@ -197,8 +197,8 @@ void __init iproc_asiu_setup(struct device_node *node,
     if (WARN_ON(!asiu))
         return;
-    asiu->clk_data = kzalloc(sizeof(*asiu->clk_data->hws) * num_clks +
-                             sizeof(*asiu->clk_data), GFP_KERNEL);
+    asiu->clk_data = kzalloc(struct_size(asiu->clk_data, hws, num_clks),
+                             GFP_KERNEL);
     if (WARN_ON(!asiu->clk_data))
         goto err_clks;
     asiu->clk_data->num = num_clks;
......
@@ -744,8 +744,7 @@ void iproc_pll_clk_setup(struct device_node *node,
     if (WARN_ON(!pll))
         return;
-    clk_data = kzalloc(sizeof(*clk_data->hws) * num_clks +
-                       sizeof(*clk_data), GFP_KERNEL);
+    clk_data = kzalloc(struct_size(clk_data, hws, num_clks), GFP_KERNEL);
     if (WARN_ON(!clk_data))
         goto err_clk_data;
     clk_data->num = num_clks;
......
@@ -509,8 +509,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
     u8 avpll_flags = 0;
     int n, ret;
-    clk_data = kzalloc(sizeof(*clk_data) +
-                       sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL);
+    clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
     if (!clk_data)
         return;
     clk_data->num = MAX_CLKS;
......
@@ -295,8 +295,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
     struct clk_hw **hws;
     int n, ret;
-    clk_data = kzalloc(sizeof(*clk_data) +
-                       sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL);
+    clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
     if (!clk_data)
         return;
     clk_data->num = MAX_CLKS;
......
@@ -273,8 +273,7 @@ static void __init asm9260_acc_init(struct device_node *np)
     int n;
     u32 accuracy = 0;
-    clk_data = kzalloc(sizeof(*clk_data) +
-                       sizeof(*clk_data->hws) * MAX_CLKS, GFP_KERNEL);
+    clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
     if (!clk_data)
         return;
     clk_data->num = MAX_CLKS;
......
@@ -627,8 +627,8 @@ static void __init aspeed_cc_init(struct device_node *np)
     if (!scu_base)
         return;
-    aspeed_clk_data = kzalloc(sizeof(*aspeed_clk_data) +
-                              sizeof(*aspeed_clk_data->hws) * ASPEED_NUM_CLKS,
+    aspeed_clk_data = kzalloc(struct_size(aspeed_clk_data, hws,
+                                          ASPEED_NUM_CLKS),
                               GFP_KERNEL);
     if (!aspeed_clk_data)
         return;
......
@@ -54,8 +54,8 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
     if (!base)
         return ERR_PTR(-ENOMEM);
-    clps711x_clk = kzalloc(sizeof(*clps711x_clk) +
-                           sizeof(*clps711x_clk->clk_data.hws) * CLPS711X_CLK_MAX,
+    clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws,
+                                       CLPS711X_CLK_MAX),
                            GFP_KERNEL);
     if (!clps711x_clk)
         return ERR_PTR(-ENOMEM);
......
@@ -25,8 +25,8 @@ static void __init efm32gg_cmu_init(struct device_node *np)
     void __iomem *base;
     struct clk_hw **hws;
-    clk_data = kzalloc(sizeof(*clk_data) +
-                       sizeof(*clk_data->hws) * CMU_MAX_CLKS, GFP_KERNEL);
+    clk_data = kzalloc(struct_size(clk_data, hws, CMU_MAX_CLKS),
+                       GFP_KERNEL);
     if (!clk_data)
         return;
......
@@ -399,8 +399,8 @@ static void __init gemini_cc_init(struct device_node *np)
     int ret;
     int i;
-    gemini_clk_data = kzalloc(sizeof(*gemini_clk_data) +
-                              sizeof(*gemini_clk_data->hws) * GEMINI_NUM_CLKS,
+    gemini_clk_data = kzalloc(struct_size(gemini_clk_data, hws,
+                                          GEMINI_NUM_CLKS),
                               GFP_KERNEL);
     if (!gemini_clk_data)
         return;
......
@@ -147,8 +147,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
     if (!s2mps11_clks)
         return -ENOMEM;
-    clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data) +
-                            sizeof(*clk_data->hws) * S2MPS11_CLKS_NUM,
+    clk_data = devm_kzalloc(&pdev->dev,
+                            struct_size(clk_data, hws, S2MPS11_CLKS_NUM),
                             GFP_KERNEL);
     if (!clk_data)
         return -ENOMEM;
......
@@ -137,8 +137,8 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
         return -EINVAL;
     }
-    clk_data = devm_kzalloc(dev, sizeof(*clk_data) +
-                            sizeof(*clk_data->hws) * count, GFP_KERNEL);
+    clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
+                            GFP_KERNEL);
     if (!clk_data)
         return -ENOMEM;
......
@@ -1201,8 +1201,7 @@ static void __init stm32h7_rcc_init(struct device_node *np)
     const char *hse_clk, *lse_clk, *i2s_clk;
     struct regmap *pdrm;
-    clk_data = kzalloc(sizeof(*clk_data) +
-                       sizeof(*clk_data->hws) * STM32H7_MAX_CLKS,
+    clk_data = kzalloc(struct_size(clk_data, hws, STM32H7_MAX_CLKS),
                        GFP_KERNEL);
     if (!clk_data)
         return;
......
@@ -2060,8 +2060,7 @@ static int stm32_rcc_init(struct device_node *np,
     max_binding = data->maxbinding;
-    clk_data = kzalloc(sizeof(*clk_data) +
-                       sizeof(*clk_data->hws) * max_binding,
+    clk_data = kzalloc(struct_size(clk_data, hws, max_binding),
                        GFP_KERNEL);
     if (!clk_data)
         return -ENOMEM;
......
@@ -650,8 +650,8 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
     struct da8xx_usb0_clk48 *usb0;
     struct da8xx_usb1_clk48 *usb1;
-    clk_data = devm_kzalloc(dev, sizeof(*clk_data) + 2 *
-                            sizeof(*clk_data->hws), GFP_KERNEL);
+    clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, 2),
+                            GFP_KERNEL);
     if (!clk_data)
         return -ENOMEM;
......
@@ -667,8 +667,9 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
     if (!driver_data)
         return -ENOMEM;
-    driver_data->hw_data = devm_kzalloc(dev, sizeof(*driver_data->hw_data) +
-                                        sizeof(*driver_data->hw_data->hws) * num_periph,
+    driver_data->hw_data = devm_kzalloc(dev,
+                                        struct_size(driver_data->hw_data,
+                                                    hws, num_periph),
                                         GFP_KERNEL);
     if (!driver_data->hw_data)
         return -ENOMEM;
......
@@ -91,8 +91,8 @@ static int armada_3700_tbg_clock_probe(struct platform_device *pdev)
     void __iomem *reg;
     int i, ret;
-    hw_tbg_data = devm_kzalloc(&pdev->dev, sizeof(*hw_tbg_data)
-                               + sizeof(*hw_tbg_data->hws) * NUM_TBG,
+    hw_tbg_data = devm_kzalloc(&pdev->dev,
+                               struct_size(hw_tbg_data, hws, NUM_TBG),
                                GFP_KERNEL);
     if (!hw_tbg_data)
         return -ENOMEM;
......
@@ -239,8 +239,7 @@ static int spmi_pmic_clkdiv_probe(struct platform_device *pdev)
     if (!nclks)
         return -EINVAL;
-    cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*cc->clks) * nclks,
-                      GFP_KERNEL);
+    cc = devm_kzalloc(dev, struct_size(cc, clks, nclks), GFP_KERNEL);
     if (!cc)
         return -ENOMEM;
     cc->nclks = nclks;
......
@@ -149,8 +149,8 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
     epll = ERR_PTR(-ENODEV);
     clk_data = devm_kzalloc(dev,
-                            sizeof(*clk_data) +
-                            sizeof(*clk_data->hws) * EXYNOS_AUDSS_MAX_CLKS,
+                            struct_size(clk_data, hws,
+                                        EXYNOS_AUDSS_MAX_CLKS),
                             GFP_KERNEL);
     if (!clk_data)
         return -ENOMEM;
......
@@ -61,8 +61,7 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
     int ret;
     int i;
-    clkout = kzalloc(sizeof(*clkout) +
-                     sizeof(*clkout->data.hws) * EXYNOS_CLKOUT_NR_CLKS,
+    clkout = kzalloc(struct_size(clkout, data.hws, EXYNOS_CLKOUT_NR_CLKS),
                      GFP_KERNEL);
     if (!clkout)
         return;
......
@@ -5505,8 +5505,8 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
     info = of_device_get_match_data(dev);
-    data = devm_kzalloc(dev, sizeof(*data) +
-                        sizeof(*data->ctx.clk_data.hws) * info->nr_clk_ids,
+    data = devm_kzalloc(dev,
+                        struct_size(data, ctx.clk_data.hws, info->nr_clk_ids),
                         GFP_KERNEL);
     if (!data)
         return -ENOMEM;
......
@@ -247,8 +247,9 @@ static int s3c24xx_dclk_probe(struct platform_device *pdev)
     struct clk_hw **clk_table;
     int ret, i;
-    s3c24xx_dclk = devm_kzalloc(&pdev->dev, sizeof(*s3c24xx_dclk) +
-                                sizeof(*s3c24xx_dclk->clk_data.hws) * DCLK_MAX_CLKS,
+    s3c24xx_dclk = devm_kzalloc(&pdev->dev,
+                                struct_size(s3c24xx_dclk, clk_data.hws,
+                                            DCLK_MAX_CLKS),
                                 GFP_KERNEL);
     if (!s3c24xx_dclk)
         return -ENOMEM;
......
@@ -81,8 +81,7 @@ static int s5pv210_audss_clk_probe(struct platform_device *pdev)
     }
     clk_data = devm_kzalloc(&pdev->dev,
-                            sizeof(*clk_data) +
-                            sizeof(*clk_data->hws) * AUDSS_MAX_CLKS,
+                            struct_size(clk_data, hws, AUDSS_MAX_CLKS),
                             GFP_KERNEL);
     if (!clk_data)
......
@@ -594,7 +594,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
     if (!count)
         return ERR_PTR(-EINVAL);
-    dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
+    dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL);
     if (!dev_dax)
         return ERR_PTR(-ENOMEM);
......
@@ -1499,8 +1499,7 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
     for (i = 0; i < sba->max_req; i++) {
         req = devm_kzalloc(sba->dev,
-                           sizeof(*req) +
-                           sba->max_cmd_per_req * sizeof(req->cmds[0]),
+                           struct_size(req, cmds, sba->max_cmd_per_req),
                            GFP_KERNEL);
         if (!req) {
             ret = -ENOMEM;
......
@@ -1074,8 +1074,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
         return NULL;
     }
-    edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
-                    GFP_ATOMIC);
+    edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC);
     if (!edesc)
         return NULL;
@@ -1192,8 +1191,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
         nslots = 2;
     }
-    edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
-                    GFP_ATOMIC);
+    edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
     if (!edesc)
         return NULL;
@@ -1315,8 +1313,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
         }
     }
-    edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
-                    GFP_ATOMIC);
+    edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
     if (!edesc)
         return NULL;
......
@@ -309,7 +309,7 @@ static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
         return NULL;
     }
-    d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+    d = kzalloc(struct_size(d, sg, sg_len), GFP_ATOMIC);
     if (!d)
         return NULL;
......
@@ -1305,8 +1305,8 @@ static int nbpf_probe(struct platform_device *pdev)
     cfg = of_device_get_match_data(dev);
     num_channels = cfg->num_channels;
-    nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
-                        sizeof(nbpf->chan[0]), GFP_KERNEL);
+    nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels),
+                        GFP_KERNEL);
     if (!nbpf)
         return -ENOMEM;
......
@@ -917,7 +917,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
     }
     /* Now allocate and setup the descriptor. */
-    d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
+    d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
     if (!d)
         return NULL;
......
@@ -557,7 +557,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
         }
     }
-    txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
+    txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC);
     if (!txd) {
         dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
         return NULL;
@@ -627,7 +627,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
     if (sglen == 0)
         return NULL;
-    txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
+    txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC);
     if (!txd) {
         dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
         return NULL;
......
@@ -269,7 +269,7 @@ static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len,
     struct usb_dmac_desc *desc;
     unsigned long flags;
-    desc = kzalloc(sizeof(*desc) + sg_len * sizeof(desc->sg[0]), gfp);
+    desc = kzalloc(struct_size(desc, sg, sg_len), gfp);
     if (!desc)
         return -ENOMEM;
......
@@ -805,8 +805,8 @@ static int sprd_dma_probe(struct platform_device *pdev)
         return ret;
     }
-    sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev) +
-                        sizeof(*dma_chn) * chn_count,
+    sdev = devm_kzalloc(&pdev->dev,
+                        struct_size(sdev, channels, chn_count),
                         GFP_KERNEL);
     if (!sdev)
         return -ENOMEM;
......
@@ -112,8 +112,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
 {
     struct fw_node *node;
-    node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
-                   GFP_ATOMIC);
+    node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
     if (node == NULL)
         return NULL;
......
@@ -371,8 +371,7 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
         return ret;
     nregs = uniphier_gpio_get_nbanks(ngpios) * 2 + 3;
-    priv = devm_kzalloc(dev,
-                        sizeof(*priv) + sizeof(priv->saved_vals[0]) * nregs,
+    priv = devm_kzalloc(dev, struct_size(priv, saved_vals, nregs),
                         GFP_KERNEL);
     if (!priv)
         return -ENOMEM;
......
@@ -4023,8 +4023,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
     if (count < 0)
         return ERR_PTR(count);
-    descs = kzalloc(sizeof(*descs) + sizeof(descs->desc[0]) * count,
-                    GFP_KERNEL);
+    descs = kzalloc(struct_size(descs, desc, count), GFP_KERNEL);
     if (!descs)
         return ERR_PTR(-ENOMEM);
......
@@ -144,8 +144,7 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
     struct nvkm_ramht *ramht;
     int ret, i;
-    if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
-                                    (size >> 3) * sizeof(*ramht->data))))
+    if (!(ramht = *pramht = vzalloc(struct_size(ramht, data, (size >> 3)))))
         return -ENOMEM;
     ramht->device = device;
......
@@ -779,8 +779,8 @@ nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
     sdom = spec;
     while (sdom->signal_nr) {
-        dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
-                      sizeof(*dom->signal), GFP_KERNEL);
+        dom = kzalloc(struct_size(dom, signal, sdom->signal_nr),
+                      GFP_KERNEL);
         if (!dom)
             return -ENOMEM;
......
@@ -132,7 +132,7 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
     num_locks = i * 32; /* actual number of locks in this device */
-    bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
+    bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL);
     if (!bank) {
         ret = -ENOMEM;
         goto iounmap_base;
......
@@ -62,8 +62,10 @@ static int sirf_hwspinlock_probe(struct platform_device *pdev)
     if (!pdev->dev.of_node)
         return -ENODEV;
-    hwspin = devm_kzalloc(&pdev->dev, sizeof(*hwspin) +
-                          sizeof(*hwlock) * HW_SPINLOCK_NUMBER, GFP_KERNEL);
+    hwspin = devm_kzalloc(&pdev->dev,
+                          struct_size(hwspin, bank.lock,
+                                      HW_SPINLOCK_NUMBER),
+                          GFP_KERNEL);
     if (!hwspin)
         return -ENOMEM;
......
@@ -119,7 +119,7 @@ static int u8500_hsem_probe(struct platform_device *pdev)
     /* clear all interrupts */
     writel(0xFFFF, io_base + HSEM_ICRALL);
-    bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
+    bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL);
     if (!bank) {
         ret = -ENOMEM;
         goto iounmap_base;
......
@@ -1157,8 +1157,9 @@ static void ib_cache_update(struct ib_device *device,
         goto err;
     }
-    pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
-                         sizeof *pkey_cache->table, GFP_KERNEL);
+    pkey_cache = kmalloc(struct_size(pkey_cache, table,
+                                     tprops->pkey_tbl_len),
+                         GFP_KERNEL);
     if (!pkey_cache)
         goto err;
......
@@ -4298,8 +4298,8 @@ static void cm_add_one(struct ib_device *ib_device)
     int count = 0;
     u8 i;
-    cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
-                     ib_device->phys_port_cnt, GFP_KERNEL);
+    cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
+                     GFP_KERNEL);
     if (!cm_dev)
         return;
......
@@ -813,7 +813,7 @@ static void mcast_add_one(struct ib_device *device)
     int i;
     int count = 0;
-    dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
+    dev = kmalloc(struct_size(dev, port, device->phys_port_cnt),
                   GFP_KERNEL);
     if (!dev)
         return;
......
@@ -2756,8 +2756,8 @@ static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
     struct ib_uflow_resources *resources;
     resources =
-        kmalloc(sizeof(*resources) +
-                num_specs * sizeof(*resources->collection), GFP_KERNEL);
+        kmalloc(struct_size(resources, collection, num_specs),
+                GFP_KERNEL);
     if (!resources)
         return NULL;
......
@@ -297,8 +297,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
     if (max_attr_buckets >= 0)
         num_attr_buckets = max_attr_buckets + 1;
-    method = kzalloc(sizeof(*method) +
-                     num_attr_buckets * sizeof(*method->attr_buckets),
+    method = kzalloc(struct_size(method, attr_buckets, num_attr_buckets),
                      GFP_KERNEL);
     if (!method)
         return ERR_PTR(-ENOMEM);
@@ -446,9 +445,9 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
     if (max_method_buckets >= 0)
         num_method_buckets = max_method_buckets + 1;
-    object = kzalloc(sizeof(*object) +
-                     num_method_buckets *
-                     sizeof(*object->method_buckets), GFP_KERNEL);
+    object = kzalloc(struct_size(object, method_buckets,
+                                 num_method_buckets),
+                     GFP_KERNEL);
     if (!object)
         return ERR_PTR(-ENOMEM);
@@ -469,8 +468,8 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
         if (methods_max_bucket < 0)
             continue;
-        hash = kzalloc(sizeof(*hash) +
-                       sizeof(*hash->methods) * (methods_max_bucket + 1),
+        hash = kzalloc(struct_size(hash, methods,
+                                   methods_max_bucket + 1),
                        GFP_KERNEL);
         if (!hash) {
             res = -ENOMEM;
@@ -579,8 +578,8 @@ struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
     if (max_object_buckets >= 0)
         num_objects_buckets = max_object_buckets + 1;
-    root_spec = kzalloc(sizeof(*root_spec) +
-                        num_objects_buckets * sizeof(*root_spec->object_buckets),
+    root_spec = kzalloc(struct_size(root_spec, object_buckets,
+                                    num_objects_buckets),
                         GFP_KERNEL);
     if (!root_spec)
         return ERR_PTR(-ENOMEM);
@@ -603,8 +602,8 @@ struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
         if (objects_max_bucket < 0)
             continue;
-        hash = kzalloc(sizeof(*hash) +
-                       sizeof(*hash->objects) * (objects_max_bucket + 1),
+        hash = kzalloc(struct_size(hash, objects,
+                                   objects_max_bucket + 1),
                        GFP_KERNEL);
         if (!hash) {
            res = -ENOMEM;
......
@@ -367,7 +367,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
     obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
     num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
-    table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
+    table = kmalloc(struct_size(table, icm, num_icm), GFP_KERNEL);
     if (!table)
         return NULL;
@@ -529,7 +529,7 @@ struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
         return NULL;
     npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
-    db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
+    db_tab = kmalloc(struct_size(db_tab, page, npages), GFP_KERNEL);
     if (!db_tab)
         return ERR_PTR(-ENOMEM);
......
@@ -283,7 +283,7 @@ static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
     /* Allocate struct plus pointers to first level page tables. */
     m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
-    mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
+    mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL);
     if (!mr)
         goto bail;
@@ -730,7 +730,7 @@ struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
     /* Allocate struct plus pointers to first level page tables. */
     m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
-    fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
+    fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL