2020-11-09 05:23:58 +01:00
|
|
|
/* SPDX-License-Identifier: LGPL-2.1-or-later */
|
2015-10-27 03:01:06 +01:00
|
|
|
#pragma once
|
|
|
|
|
|
|
|
#include <alloca.h>
|
2015-11-30 21:43:37 +01:00
|
|
|
#include <stddef.h>
|
2015-10-27 03:01:06 +01:00
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
|
|
|
|
|
|
|
#include "macro.h"
|
|
|
|
|
2019-02-22 13:07:00 +01:00
|
|
|
#if HAS_FEATURE_MEMORY_SANITIZER
|
|
|
|
# include <sanitizer/msan_interface.h>
|
|
|
|
#endif
|
|
|
|
|
2018-11-27 08:33:28 +01:00
|
|
|
/* Generic destructor callback type, signature-compatible with free(), for APIs
 * that take a user-supplied deallocator. */
typedef void (*free_func_t)(void *p);

/* If for some reason more than 4M are allocated on the stack, let's abort immediately. It's better than
 * proceeding and smashing the stack limits. Note that by default RLIMIT_STACK is 8M on Linux. */
#define ALLOCA_MAX (4U*1024U*1024U)
|
|
|
|
|
2015-10-27 03:01:06 +01:00
|
|
|
/* Allocates an array of 'n' items of type 't' on the heap, with overflow checking
 * of the total size. Returns NULL on overflow or allocation failure. */
#define new(t, n) ((t*) malloc_multiply(sizeof(t), (n)))

/* Like new(), but zero-initializes. calloc() does the overflow checking itself;
 * n == 0 is rounded up to 1 so a successful return is never NULL. */
#define new0(t, n) ((t*) calloc((n) ?: 1, sizeof(t)))
|
2015-10-27 03:01:06 +01:00
|
|
|
|
2019-01-26 15:42:35 +01:00
|
|
|
/* Allocates an array of 'n' items of type 't' on the stack via alloca(). Since
 * alloca() cannot report failure, overflow of the total size or exceeding
 * ALLOCA_MAX trips an assert() instead of returning NULL. Allocates at least
 * one byte so that n == 0 still yields a distinct valid pointer. */
#define newa(t, n)                                                      \
        ({                                                              \
                size_t _n_ = (n); /* parenthesized: guard against low-precedence argument expressions */ \
                assert(!size_multiply_overflow(sizeof(t), _n_));        \
                assert(sizeof(t)*_n_ <= ALLOCA_MAX);                    \
                (t*) alloca((sizeof(t)*_n_) ?: 1);                      \
        })
|
|
|
|
|
2019-01-26 15:42:35 +01:00
|
|
|
/* Like newa(), but zero-initializes the stack array via alloca0(). Asserts if
 * the total size would overflow or exceed ALLOCA_MAX. */
#define newa0(t, n)                                                     \
        ({                                                              \
                size_t _n_ = (n); /* parenthesized: guard against low-precedence argument expressions */ \
                assert(!size_multiply_overflow(sizeof(t), _n_));        \
                assert(sizeof(t)*_n_ <= ALLOCA_MAX);                    \
                (t*) alloca0((sizeof(t)*_n_) ?: 1);                     \
        })
|
2015-10-27 03:01:06 +01:00
|
|
|
|
|
|
|
/* Duplicates an array of 'n' items of type 't' from 'p' onto the heap, with
 * overflow checking. Returns NULL on overflow or allocation failure. */
#define newdup(t, p, n) ((t*) memdup_multiply(p, sizeof(t), (n)))

/* Like newdup(), but the copy is followed by one extra zeroed byte (e.g. a
 * string NUL terminator). */
#define newdup_suffix0(t, p, n) ((t*) memdup_suffix0_multiply(p, sizeof(t), (n)))

/* Like malloc(), but zero-initializes, and allocates at least one byte so that
 * malloc0(0) still returns a valid non-NULL pointer. */
#define malloc0(n) (calloc(1, (n) ?: 1))
|
2015-10-27 03:01:06 +01:00
|
|
|
|
|
|
|
/* free()s the given pointer (NULL is fine) and returns NULL, so a pointer
 * variable can be freed and reset in a single expression:
 *
 *         foo = mfree(foo);
 */
static inline void *mfree(void *p) {
        free(p);
        return NULL;
}
|
|
|
|
|
2016-10-17 01:23:35 +02:00
|
|
|
/* Frees 'a', moves ownership of 'b' into 'a' and resets 'b' to NULL.
 * Evaluates to 0 so it can be combined with a "return" statement.
 * NOTE(review): both 'a' and 'b' are expanded more than once — do not pass
 * expressions with side effects. */
#define free_and_replace(a, b)                  \
        ({                                      \
                free(a);                        \
                (a) = (b);                      \
                (b) = NULL;                     \
                0;                              \
        })
|
|
|
|
|
2015-10-27 03:01:06 +01:00
|
|
|
/* Duplicates the memory block 'p' of size 'l' on the heap; returns NULL on allocation failure. */
void* memdup(const void *p, size_t l) _alloc_(2);
/* Like memdup(), but the returned buffer carries one extra zero byte after the copied data. */
void* memdup_suffix0(const void *p, size_t l); /* We can't use _alloc_() here, since we return a buffer one byte larger than the specified size */
|
2015-10-27 03:01:06 +01:00
|
|
|
|
2018-10-15 12:05:54 +02:00
|
|
|
/* Duplicates the memory block 'p' of size 'l' on the stack. Asserts (rather
 * than failing) if 'l' exceeds ALLOCA_MAX. Evaluates to a pointer to the copy:
 * memcpy() returns its destination, which is the last expression here. */
#define memdupa(p, l)                           \
        ({                                      \
                void *_q_;                      \
                size_t _l_ = (l); /* parenthesized: guard against low-precedence argument expressions */ \
                assert(_l_ <= ALLOCA_MAX);      \
                _q_ = alloca(_l_ ?: 1);         \
                memcpy(_q_, p, _l_);            \
        })
|
|
|
|
|
|
|
|
/* Like memdupa(), but the stack copy is followed by one zero byte (e.g. as a
 * string NUL terminator). Asserts if 'l' exceeds ALLOCA_MAX. Evaluates to a
 * pointer to the copy (memcpy() returns its destination). */
#define memdupa_suffix0(p, l)                           \
        ({                                              \
                void *_q_;                              \
                size_t _l_ = (l); /* parenthesized: guard against low-precedence argument expressions */ \
                assert(_l_ <= ALLOCA_MAX);              \
                _q_ = alloca(_l_ + 1);                  \
                ((uint8_t*) _q_)[_l_] = 0;              \
                memcpy(_q_, p, _l_);                    \
        })
|
|
|
|
|
2015-10-27 03:01:06 +01:00
|
|
|
/* Destructor helper for use with _cleanup_: receives the address of a pointer
 * variable and frees whatever it points to. */
static inline void freep(void *p) {
        void **pp = (void**) p;

        free(*pp);
}
|
|
|
|
|
|
|
|
/* Variable attribute that free()s the pointer automatically when it goes out of scope. */
#define _cleanup_free_ _cleanup_(freep)
|
|
|
|
|
2016-02-16 18:51:43 +01:00
|
|
|
/* Returns true if size*need would overflow size_t. Wrapped in _unlikely_()
 * since callers treat overflow as an exceptional allocation-failure path. */
static inline bool size_multiply_overflow(size_t size, size_t need) {
        return _unlikely_(need != 0 && size > (SIZE_MAX / need));
}
|
|
|
|
|
|
|
|
/* Like malloc(), but allocates size*need bytes with explicit overflow checking.
 * Returns NULL if the multiplication would overflow size_t. Allocates at least
 * one byte, so success is never reported as NULL even when size*need == 0. */
_malloc_ _alloc_(1, 2) static inline void *malloc_multiply(size_t size, size_t need) {
        size_t total;

        if (size_multiply_overflow(size, need))
                return NULL;

        total = size * need;
        return malloc(total > 0 ? total : 1);
}
|
|
|
|
|
2018-02-26 21:20:00 +01:00
|
|
|
#if !HAVE_REALLOCARRAY
/* Fallback for systems lacking reallocarray(3): realloc() to need*size bytes
 * with explicit overflow checking. Returns NULL on overflow (the original
 * allocation 'p' stays valid). Allocates at least one byte. */
_alloc_(2, 3) static inline void *reallocarray(void *p, size_t need, size_t size) {
        if (size_multiply_overflow(size, need))
                return NULL;

        return realloc(p, size * need ?: 1);
}
#endif
|
2015-10-27 03:01:06 +01:00
|
|
|
|
2016-02-16 18:51:43 +01:00
|
|
|
/* Duplicates an array of 'need' items of 'size' bytes each from 'p', with
 * overflow checking. Returns NULL on overflow or allocation failure. */
_alloc_(2, 3) static inline void *memdup_multiply(const void *p, size_t size, size_t need) {
        if (size_multiply_overflow(size, need))
                return NULL;

        return memdup(p, need * size);
}
|
|
|
|
|
2019-07-12 09:06:28 +02:00
|
|
|
/* Like memdup_multiply(), but appends one extra zero byte after the copied
 * data (via memdup_suffix0()). Note that we can't decorate this function with
 * _alloc_() since the returned memory area is one byte larger than the product
 * of its parameters. */
static inline void *memdup_suffix0_multiply(const void *p, size_t size, size_t need) {
        if (size_multiply_overflow(size, need))
                return NULL;

        return memdup_suffix0(p, need * size);
}
|
|
|
|
|
2015-10-27 03:01:06 +01:00
|
|
|
/* Grows the buffer *p (currently *allocated elements of 'size' bytes each) so
 * it can hold at least 'need' elements, updating *allocated. Returns the
 * (possibly relocated) buffer or NULL on failure — see alloc-util.c for the
 * growth policy (definition not in this header). */
void* greedy_realloc(void **p, size_t *allocated, size_t need, size_t size);
/* Like greedy_realloc(); presumably zero-initializes newly grown space — confirm in alloc-util.c. */
void* greedy_realloc0(void **p, size_t *allocated, size_t need, size_t size);
|
|
|
|
|
|
|
|
/* Convenience wrapper around greedy_realloc() that derives the element size
 * from the array's element type. */
#define GREEDY_REALLOC(array, allocated, need)                          \
        greedy_realloc((void**) &(array), &(allocated), (need), sizeof((array)[0]))

/* As GREEDY_REALLOC, but dispatches to greedy_realloc0(). */
#define GREEDY_REALLOC0(array, allocated, need)                         \
        greedy_realloc0((void**) &(array), &(allocated), (need), sizeof((array)[0]))
|
|
|
|
|
|
|
|
/* Like alloca(), but zero-initializes the memory. Asserts if 'n' exceeds
 * ALLOCA_MAX; allocates at least one byte so n == 0 still yields a valid
 * pointer. Evaluates to the allocation (memset() returns its destination). */
#define alloca0(n)                                      \
        ({                                              \
                char *_new_;                            \
                size_t _len_ = (n); /* parenthesized: guard against low-precedence argument expressions */ \
                assert(_len_ <= ALLOCA_MAX);            \
                _new_ = alloca(_len_ ?: 1);             \
                (void *) memset(_new_, 0, _len_);       \
        })
|
|
|
|
|
|
|
|
/* It's not clear what alignment glibc/gcc alloca() guarantee, hence provide a guaranteed safe version */
/* 'align' must be a power of two. NOTE(review): the extra _mask_ padding bytes
 * may push the actual alloca() size slightly past ALLOCA_MAX — pre-existing
 * behavior, kept as-is. */
#define alloca_align(size, align)                                       \
        ({                                                              \
                void *_ptr_;                                            \
                size_t _mask_ = (align) - 1;                            \
                size_t _size_ = (size); /* parenthesized: guard against low-precedence argument expressions */ \
                assert(_size_ <= ALLOCA_MAX);                           \
                _ptr_ = alloca((_size_ + _mask_) ?: 1);                 \
                (void*)(((uintptr_t)_ptr_ + _mask_) & ~_mask_);         \
        })
|
|
|
|
|
|
|
|
/* Like alloca_align(), but zero-initializes the aligned allocation. Evaluates
 * to the aligned pointer (memset() returns its destination). */
#define alloca0_align(size, align)                                      \
        ({                                                              \
                void *_new_;                                            \
                size_t _xsize_ = (size);                                \
                _new_ = alloca_align(_xsize_, (align));                 \
                (void*)memset(_new_, 0, _xsize_);                       \
        })
|
2018-03-22 16:53:26 +01:00
|
|
|
|
2019-03-20 10:33:23 +01:00
|
|
|
/* Takes inspiration from Rust's Option::take() method: reads and returns a pointer, but at the same time
|
|
|
|
* resets it to NULL. See: https://doc.rust-lang.org/std/option/enum.Option.html#method.take */
|
2018-03-22 16:53:26 +01:00
|
|
|
/* Evaluates to the current value of 'ptr' while resetting 'ptr' itself to
 * NULL, i.e. transfers ownership of the pointer to the caller. */
#define TAKE_PTR(ptr)                           \
        ({                                      \
                typeof(ptr) _ptr_ = (ptr);      \
                (ptr) = NULL;                   \
                _ptr_;                          \
        })
|
2019-02-22 13:07:00 +01:00
|
|
|
|
|
|
|
/* Wrapper for MemorySanitizer's __msan_unpoison(), marking region 'r' of size
 * 's' as initialized; compiles to nothing when MSan is not enabled. */
#if HAS_FEATURE_MEMORY_SANITIZER
#  define msan_unpoison(r, s) __msan_unpoison(r, s)
#else
#  define msan_unpoison(r, s)
#endif
|