Systemd/src/basic/mempool.c
Lennart Poettering da6053d0a7 tree-wide: be more careful with the type of array sizes
Previously we were a bit sloppy with the index and size types of arrays:
we'd regularly use unsigned. While I don't think this ever resulted in
real issues, I think we should be more careful there and follow a
stricter regime: unless there's a strong reason not to, size_t should be
used for array sizes and indexes. Any allocations we do will ultimately
use size_t anyway, and converting back and forth between unsigned and
size_t will always be a source of problems.

Note that on 32-bit machines "unsigned" and "size_t" are equivalent, and
on 64-bit machines our arrays shouldn't grow that large anyway; if they
do, we have a problem. We usually have protections against that kind of
overly large allocation, but not so much against the overflows that can
precede it, hence let's add them.
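
The following sketch is not from the commit; the function and parameter
names are made up purely to illustrate the overflow the last sentence
refers to. With unsigned operands the multiplication is performed in
32-bit arithmetic even on 64-bit targets, so it can wrap before the
result is widened for the allocator:

    #include <stddef.h>
    #include <stdlib.h>

    /* May wrap: unsigned * unsigned is a 32-bit multiply, and only the
     * (possibly truncated) result is converted to size_t for malloc(). */
    void *alloc_array_unsigned(unsigned n, unsigned item_size) {
            return malloc(n * item_size);
    }

    /* Full-width multiply on LP64 targets: the overflow window is far smaller. */
    void *alloc_array_size(size_t n, size_t item_size) {
            return malloc(n * item_size);
    }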

So yeah, it's a story of the current code already being "good enough",
but I think some extra type hygiene is better.

This patch tries to be comprehensive, but it probably isn't and I missed
a few cases; we can cover those later as we notice them. Among smaller
fixes, this changes:

1. strv_length()'s return type becomes size_t (see the sketch after this list)

2. the size of the unit file changes array becomes size_t

3. DNS answer and query array sizes become size_t
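
As a minimal illustrative sketch of item 1 (the real declaration lives
in strv.h; this standalone helper is not systemd code), counting the
entries of a NULL-terminated string array with size_t instead of
unsigned looks like this:

    #include <stddef.h>

    /* Illustrative re-implementation only, named to avoid clashing with
     * the real strv_length(). */
    static size_t example_strv_length(char * const *l) {
            size_t n = 0;

            if (!l)
                    return 0;
            for (; *l; l++)
                    n++;
            return n;
    }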

Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=76745
2018-04-27 14:29:06 +02:00


/* SPDX-License-Identifier: LGPL-2.1+ */
/***
  This file is part of systemd.

  Copyright 2010-2014 Lennart Poettering
  Copyright 2014 Michal Schmidt
***/

#include <stdint.h>
#include <stdlib.h>

#include "macro.h"
#include "mempool.h"
#include "util.h"

/* A contiguous slab carrying n_tiles tiles; slabs are chained into a singly-linked list. */
struct pool {
        struct pool *next;
        size_t n_tiles;
        size_t n_used;
};

void* mempool_alloc_tile(struct mempool *mp) {
        size_t i;

        /* When a tile is released we add it to the list and simply
         * place the next pointer at its offset 0. */

        assert(mp->tile_size >= sizeof(void*));
        assert(mp->at_least > 0);

        if (mp->freelist) {
                void *r;

                /* Pop a previously freed tile off the freelist. */
                r = mp->freelist;
                mp->freelist = * (void**) mp->freelist;
                return r;
        }

        if (_unlikely_(!mp->first_pool) ||
            _unlikely_(mp->first_pool->n_used >= mp->first_pool->n_tiles)) {
                size_t size, n;
                struct pool *p;

                /* Grow: double the tile count of the newest pool (but allocate at
                 * least 'at_least' tiles), round the allocation up to a full page,
                 * then recompute how many tiles actually fit into it. */
                n = mp->first_pool ? mp->first_pool->n_tiles : 0;
                n = MAX(mp->at_least, n * 2);
                size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*mp->tile_size);
                n = (size - ALIGN(sizeof(struct pool))) / mp->tile_size;

                p = malloc(size);
                if (!p)
                        return NULL;

                p->next = mp->first_pool;
                p->n_tiles = n;
                p->n_used = 0;

                mp->first_pool = p;
        }

        i = mp->first_pool->n_used++;

        return ((uint8_t*) mp->first_pool) + ALIGN(sizeof(struct pool)) + i*mp->tile_size;
}

void* mempool_alloc0_tile(struct mempool *mp) {
        void *p;

        p = mempool_alloc_tile(mp);
        if (p)
                memzero(p, mp->tile_size);
        return p;
}

void mempool_free_tile(struct mempool *mp, void *p) {
        * (void**) p = mp->freelist;
        mp->freelist = p;
}

#ifdef VALGRIND

void mempool_drop(struct mempool *mp) {
        struct pool *p = mp->first_pool;
        while (p) {
                struct pool *n;

                n = p->next;
                free(p);
                p = n;
        }
}

#endif
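
Below is a hypothetical usage sketch, not part of mempool.c. The field
names of struct mempool (tile_size, at_least, freelist, first_pool) are
taken from the references in the code above, but the direct designated
initializer shown here is only an assumption; real callers set pools up
via the helpers in mempool.h.

    #include "mempool.h"

    /* Tiles must be at least pointer-sized, see the assert in
     * mempool_alloc_tile(). */
    struct item {
            void *data;
            int value;
    };

    /* Assumed initialization: tile_size/at_least as referenced above,
     * freelist and first_pool implicitly NULL. */
    static struct mempool item_pool = {
            .tile_size = sizeof(struct item),
            .at_least = 8,
    };

    static struct item* item_new(void) {
            return mempool_alloc0_tile(&item_pool); /* zeroed tile */
    }

    static void item_free(struct item *i) {
            if (i)
                    mempool_free_tile(&item_pool, i); /* back onto the freelist */
    }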