Merge branch 'cleanup-assembly' into 'master'

Remove runtime assembly generation

See merge request glvnd/libglvnd!202
This commit is contained in:
Kyle Brenneman 2019-12-05 18:13:17 +00:00
commit fbcce35d97
17 changed files with 111 additions and 485 deletions

View file

@ -45,9 +45,6 @@ void __attribute__((constructor)) __libGLInit(void)
void _init(void)
#endif
{
// Fix up the static GL entrypoints, if necessary
entry_init_public();
__glDispatchInit();
// Register these entrypoints with GLdispatch so they can be overwritten at
@ -62,6 +59,7 @@ void _fini(void)
#endif
{
// Unregister the GLdispatch entrypoints
stub_cleanup();
__glDispatchUnregisterStubCallbacks(patchStubId);
__glDispatchFini();
}

View file

@ -35,9 +35,6 @@ typedef void (*mapi_func)(void);
extern const int entry_type;
extern const int entry_stub_size;
void
entry_init_public(void);
/**
* Returns the address of an entrypoint.
*
@ -54,14 +51,14 @@ mapi_func
entry_get_public(int index);
/**
* Restores the original code for a dispatch stub. This is used to unpatch the
* dispatch stubs after a vendor library patched them.
*
* \param index The index of the dispatch stub.
* \param slot The slot in the dispatch table that the stub should call
* through.
* Saves and returns a copy of all of the entrypoints.
*/
void entry_generate_default_code(int index, int slot);
void *entry_save_entrypoints(void);
/**
* Restores the entrypoints that were saved with entry_save_entrypoints.
*/
void entry_restore_entrypoints(void *saved);
/**
* Called before starting entrypoint patching.

View file

@ -99,37 +99,6 @@
"3:\n\t" \
".xword " slot " * 8\n\t" /* size of (void *) */
/*
* Bytecode for STUB_ASM_CODE()
*/
static const uint32_t ENTRY_TEMPLATE[] =
{
0xa9bf03e1, // <ENTRY>: stp x1, x0, [sp,#-16]!
0x58000240, // <ENTRY+4>: ldr x0, <ENTRY+76>
0xf9400000, // <ENTRY+8>: ldr x0, [x0]
0xb40000a0, // <ENTRY+12>: cbz x0, <ENTRY+32>
0x58000261, // <ENTRY+16>: ldr x1, <ENTRY+92>
0xf8616810, // <ENTRY+20>: ldr x16, [x0,x1]
0xa8c103e1, // <ENTRY+24>: ldp x1, x0, [sp],#16
0xd61f0200, // <ENTRY+28>: br x16
0xf81f0ffe, // <ENTRY+32>: str x30, [sp,#-16]!
0xa9bf1be7, // <ENTRY+36>: stp x7, x6, [sp,#-16]!
0xa9bf13e5, // <ENTRY+40>: stp x5, x4, [sp,#-16]!
0xa9bf0be3, // <ENTRY+44>: stp x3, x2, [sp,#-16]!
0x58000120, // <ENTRY+48>: ldr x0, <ENTRY+84>
0xd63f0000, // <ENTRY+52>: blr x0
0xa8c10be3, // <ENTRY+56>: ldp x3, x2, [sp],#16
0xa8c113e5, // <ENTRY+60>: ldp x5, x4, [sp],#16
0xa8c11be7, // <ENTRY+64>: ldp x7, x6, [sp],#16
0xf84107fe, // <ENTRY+68>: ldr x30, [sp],#16
0x17fffff2, // <ENTRY+72>: b <ENTRY+16>
// Offsets that need to be patched
0x00000000, 0x00000000, // <ENTRY+76>: _glapi_Current
0x00000000, 0x00000000, // <ENTRY+84>: _glapi_get_current
0x00000000, 0x00000000, // <ENTRY+92>: slot * sizeof(void*)
};
__asm__(".section wtext,\"ax\"\n"
".balign " U_STRINGIFY(GLDISPATCH_PAGE_SIZE) "\n"
".globl public_entry_start\n"
@ -148,23 +117,3 @@ __asm__(".balign " U_STRINGIFY(GLDISPATCH_PAGE_SIZE) "\n"
const int entry_type = __GLDISPATCH_STUB_AARCH64;
const int entry_stub_size = ENTRY_STUB_ALIGN;
// The offsets in ENTRY_TEMPLATE that need to be patched.
static const int TEMPLATE_OFFSET_CURRENT_TABLE = sizeof(ENTRY_TEMPLATE) - 3*8;
static const int TEMPLATE_OFFSET_CURRENT_TABLE_GET = sizeof(ENTRY_TEMPLATE) - 2*8;
static const int TEMPLATE_OFFSET_SLOT = sizeof(ENTRY_TEMPLATE) - 8;
// Writes a freshly generated AArch64 dispatch stub for entrypoint (index)
// that jumps through dispatch-table slot (slot).  Copies the ENTRY_TEMPLATE
// machine code into the stub, then patches the three trailing 64-bit
// literals that the template loads: the addresses of _glapi_Current and
// _glapi_get_current, and the slot's byte offset within the table.
void entry_generate_default_code(int index, int slot)
{
// Each stub occupies entry_stub_size bytes starting at public_entry_start.
char *entry = (char *) (public_entry_start + (index * entry_stub_size));
memcpy(entry, ENTRY_TEMPLATE, sizeof(ENTRY_TEMPLATE));
// Patch the slot number and whatever addresses need to be patched.
*((uint64_t *)(entry + TEMPLATE_OFFSET_SLOT)) = (uint64_t)(slot * sizeof(mapi_func));
*((uint64_t *)(entry + TEMPLATE_OFFSET_CURRENT_TABLE)) =
(uint64_t)_glapi_Current;
*((uint64_t *)(entry + TEMPLATE_OFFSET_CURRENT_TABLE_GET)) =
(uint64_t)_glapi_get_current;
// The instruction cache must be synchronized after writing code at runtime.
// See http://community.arm.com/groups/processors/blog/2010/02/17/caches-and-self-modifying-code
__builtin___clear_cache(entry, entry + sizeof(ENTRY_TEMPLATE));
}

View file

@ -120,40 +120,6 @@ __asm__(".syntax unified\n\t");
"3:\n\t" \
".word " slot "\n\t"
/*
* This template is used to generate new dispatch stubs at runtime. It's
* functionally equivalent to the code in STUB_ASM_CODE(), but not identical.
* The difference between the two is that STUB_ASM_CODE has to be position
* independent, so it has to go through the GOT and PLT to get the addresses of
* _glapi_Current and _glapi_get_current. In the generated stubs, we can just
* plug the addresses in directly.
*/
static const uint16_t ENTRY_TEMPLATE[] =
{
0xb40f, // push {r0-r3}
0xf8df, 0x0028, // ldr r0, 1f
0x6800, // ldr r0, [r0]
0x2800, // cmp r0, #0
0xbf08, // it eq
0xe008, // beq 10f
0x4909, // 11: ldr r1, 3f
0xf04f, 0x0204, // mov r2, #4
0xfb01, 0xf102, // mul r1, r1, r2
0xf850, 0xc001, // ldr ip, [r0, +r1]
0xbc0f, // pop {r0-r3}
0x4760, // bx ip
0xb500, // 10: push {lr}
0x4803, // ldr r0, 2f
0x4780, // blx r0
0xf85d, 0xeb04, // pop {lr}
0xe7f0, // b 11b
// Offsets that need to be patched
0x0000, 0x0000, // 1: .word _glapi_Current
0x0000, 0x0000, // 2: .word _glapi_get_current
0x0000, 0x0000, // 3: .word " slot "
};
__asm__(".section wtext,\"ax\"\n"
".balign " U_STRINGIFY(GLDISPATCH_PAGE_SIZE) "\n"
".syntax unified\n"
@ -180,32 +146,6 @@ __asm__(".arm\n\t");
const int entry_type = __GLDISPATCH_STUB_ARMV7_THUMB;
const int entry_stub_size = ENTRY_STUB_ALIGN;
static const int TEMPLATE_OFFSET_CURRENT_TABLE = sizeof(ENTRY_TEMPLATE) - 3*4;
static const int TEMPLATE_OFFSET_CURRENT_TABLE_GET = sizeof(ENTRY_TEMPLATE) - 2*4;
static const int TEMPLATE_OFFSET_SLOT = sizeof(ENTRY_TEMPLATE) - 4;
// One-time initialization for the public entrypoints.  On this platform
// there is nothing to patch up front; this only verifies at compile time
// that the runtime code template fits within a single stub slot.
void
entry_init_public(void)
{
STATIC_ASSERT(sizeof(ENTRY_TEMPLATE) <= ENTRY_STUB_ALIGN);
}
// Writes a freshly generated ARMv7 dispatch stub for entrypoint (index)
// that jumps through dispatch-table slot (slot).  Copies ENTRY_TEMPLATE
// into the stub, then patches the trailing literals: the raw slot index
// (the template multiplies it by 4 itself) and the addresses of
// _glapi_Current and _glapi_get_current.
void entry_generate_default_code(int index, int slot)
{
char *entry = (char *) (public_entry_start + (index * entry_stub_size));
memcpy(entry, ENTRY_TEMPLATE, sizeof(ENTRY_TEMPLATE));
*((uint32_t *)(entry + TEMPLATE_OFFSET_SLOT)) = slot;
*((uint32_t *)(entry + TEMPLATE_OFFSET_CURRENT_TABLE)) =
(uint32_t)_glapi_Current;
*((uint32_t *)(entry + TEMPLATE_OFFSET_CURRENT_TABLE_GET)) =
(uint32_t)_glapi_get_current;
// The instruction cache must be synchronized after writing code at runtime.
// See http://community.arm.com/groups/processors/blog/2010/02/17/caches-and-self-modifying-code
__builtin___clear_cache(entry, entry + sizeof(ENTRY_TEMPLATE));
}
// Note: The rest of these functions could also be used for ARMv7 TLS stubs,
// once those are implemented.

View file

@ -78,3 +78,59 @@ void *entry_get_patch_address(int index)
{
return (void *) (public_entry_start + (index * entry_stub_size));
}
/**
 * Makes a heap-allocated copy of the entire entrypoint code section,
 * from public_entry_start up to public_entry_end.
 *
 * \return A malloc'ed buffer holding the current stub code, or NULL if
 * allocation fails.  The caller owns the buffer and must free it.
 */
void *entry_save_entrypoints(void)
{
    uintptr_t begin = (uintptr_t) public_entry_start;
    uintptr_t end = (uintptr_t) public_entry_end;
    void *copy = malloc(end - begin);

    if (copy == NULL) {
        return NULL;
    }
    memcpy(copy, public_entry_start, end - begin);
    return copy;
}
#if defined(USE_ARMV7_ASM) || defined(USE_AARCH64_ASM)
// Flushes the caches covering the whole entrypoint section after its code
// has been rewritten, so stale instructions are not executed.
static void InvalidateCache(void)
{
// See http://community.arm.com/groups/processors/blog/2010/02/17/caches-and-self-modifying-code
__builtin___clear_cache(public_entry_start, public_entry_end);
}
#elif defined(USE_PPC64LE_ASM)
// On POWER, flush the data cache and then invalidate the instruction cache
// over the whole entrypoint section, one 32-bit word at a time.
static void InvalidateCache(void)
{
// Note: We might be able to get away with only invalidating each cache
// block, instead of every single 32-bit increment. If that works, we'd
// need to query the AT_DCACHEBSIZE and AT_ICACHEBSIZE values at runtime
// with getauxval(3).
size_t dataBlockSize = 4;
size_t instructionBlockSize = 4;
char *ptr;
for (ptr = public_entry_start;
(uintptr_t) ptr < (uintptr_t) public_entry_end;
ptr += dataBlockSize) {
__asm__ __volatile__("dcbst 0, %0" : : "r" (ptr));
}
__asm__ __volatile__("sync");
for (ptr = public_entry_start;
(uintptr_t) ptr < (uintptr_t) public_entry_end;
ptr += instructionBlockSize) {
__asm__ __volatile__("icbi 0, %0" : : "r" (ptr));
}
__asm__ __volatile__("isync");
}
#else
// Other architectures need no explicit cache maintenance here.
static void InvalidateCache(void)
{
// Nothing to do here.
}
#endif
/**
 * Copies a buffer returned by entry_save_entrypoints back over the
 * entrypoint code section, then flushes the CPU caches where required.
 *
 * \param saved The buffer returned by entry_save_entrypoints.
 */
void entry_restore_entrypoints(void *saved)
{
    const size_t length = ((uintptr_t) public_entry_end)
            - ((uintptr_t) public_entry_start);

    memcpy(public_entry_start, saved, length);
    InvalidateCache();
}

View file

@ -53,16 +53,6 @@ entry_current_get(void)
const int entry_type = __GLDISPATCH_STUB_UNKNOWN;
const int entry_stub_size = 0;
void
entry_init_public(void)
{
}
void entry_generate_default_code(int index, int slot)
{
assert(0);
}
mapi_func
entry_get_public(int index)
{
@ -91,3 +81,14 @@ void *entry_get_patch_address(int index)
assert(!"This should never be called");
return NULL;
}
// Stub for builds without assembly entrypoints: entrypoint patching is
// never enabled in that configuration, so this must be unreachable.
void *entry_save_entrypoints(void)
{
assert(!"This should never be called");
return NULL;
}
// Stub for builds without assembly entrypoints: entrypoint patching is
// never enabled in that configuration, so this must be unreachable.
void entry_restore_entrypoints(void *saved)
{
assert(!"This should never be called");
}

View file

@ -51,10 +51,6 @@
* each entrypoint to force switching to Thumb mode.
*/
void entry_init_public(void)
{
}
mapi_func entry_get_public(int index)
{
return (mapi_func)(public_entry_start + (index * entry_stub_size));

View file

@ -82,53 +82,15 @@ __asm__(".balign " U_STRINGIFY(GLDISPATCH_PAGE_SIZE) "\n"
__asm__(".text\n");
__asm__("x86_64_current_tls:\n\t"
"movq _glapi_tls_Current@GOTTPOFF(%rip), %rax\n\t"
"ret");
extern uint64_t
x86_64_current_tls();
const int entry_stub_size = ENTRY_STUB_ALIGN;
#ifdef __ILP32__
const int entry_type = __GLDISPATCH_STUB_X32;
static const unsigned char ENTRY_TEMPLATE[] = {
0x64, 0x44, 0x8b, 0x1c, 0x25, 0x00, 0x00, 0x00, 0x00, // movl %fs:0, %r11d
0x67, 0x45, 0x8b, 0x9b, 0x34, 0x12, 0x00, 0x00, // movl 0x1234(%r11d), %r11d
0x41, 0xff, 0xe3, // jmp *%r11
};
static const unsigned int TLS_ADDR_OFFSET = 5;
static const unsigned int SLOT_OFFSET = 13;
#else // __ILP32__
const int entry_type = __GLDISPATCH_STUB_X86_64;
static const unsigned char ENTRY_TEMPLATE[] = {
0x64, 0x4c, 0x8b, 0x1c, 0x25, 0x00, 0x00, 0x00, 0x00, // movq %fs:0, %r11
0x41, 0xff, 0xa3, 0x34, 0x12, 0x00, 0x00, // jmp *0x1234(%r11)
};
static const unsigned int TLS_ADDR_OFFSET = 5;
static const unsigned int SLOT_OFFSET = 12;
#endif // __ILP32__
// Writes a freshly generated x86-64/x32 TLS dispatch stub for entrypoint
// (index): copies ENTRY_TEMPLATE into the stub and patches in the TLS
// offset of the current dispatch table and the slot's byte offset.
void entry_generate_default_code(int index, int slot)
{
char *entry = (char *) (public_entry_start + (index * entry_stub_size));
uint64_t tls_addr;
// The template must fit within one stub slot.
STATIC_ASSERT(ENTRY_STUB_ALIGN >= sizeof(ENTRY_TEMPLATE));
assert(slot >= 0);
// TLS offset of the current-dispatch pointer, read via the
// x86_64_current_tls assembly helper.
tls_addr = x86_64_current_tls();
memcpy(entry, ENTRY_TEMPLATE, sizeof(ENTRY_TEMPLATE));
// Patch the immediates in the copied code.
*((unsigned int *) &entry[TLS_ADDR_OFFSET]) = (unsigned int) tls_addr;
*((unsigned int *) &entry[SLOT_OFFSET]) = (unsigned int) (slot * sizeof(mapi_func));
}

View file

@ -102,42 +102,3 @@ __asm__(".text\n");
const int entry_type = __GLDISPATCH_STUB_X86_64;
const int entry_stub_size = ENTRY_STUB_ALIGN;
static const unsigned char ENTRY_TEMPLATE[] =
{
// <ENTRY+0>: movabs ENTRY_CURRENT_TABLE, %rax
0x48, 0xa1, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x48, 0x85, 0xc0, // <ENTRY+10>: test %rax,%rax
0x75, 0x1c, // <ENTRY+13>: jne <ENTRY+43>
// <ENTRY+15>: movabs $_glapi_get_current, %rax
0x48, 0xb8, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80,
0x57, // <ENTRY+25>: push %rdi
0x56, // <ENTRY+26>: push %rsi
0x52, // <ENTRY+27>: push %rdx
0x51, // <ENTRY+28>: push %rcx
0x41, 0x50, // <ENTRY+29>: push %r8
0x41, 0x51, // <ENTRY+31>: push %r9
0xff, 0xd0, // <ENTRY+33>: callq *%rax
0x41, 0x59, // <ENTRY+35>: pop %r9
0x41, 0x58, // <ENTRY+37>: pop %r8
0x59, // <ENTRY+39>: pop %rcx
0x5a, // <ENTRY+40>: pop %rdx
0x5e, // <ENTRY+41>: pop %rsi
0x5f, // <ENTRY+42>: pop %rdi
0xff, 0xa0, 0x00, 0x00, 0x00, 0x00 // <ENTRY+43:> jmpq *SLOT(%rax)
};
// These are the offsets in ENTRY_TEMPLATE of the values that we have to patch.
static const int TEMPLATE_OFFSET_CURRENT_TABLE = 2;
static const int TEMPLATE_OFFSET_CURRENT_TABLE_GET = 17;
static const int TEMPLATE_OFFSET_SLOT = 45;
// Writes a freshly generated x86-64 dispatch stub for entrypoint (index):
// copies ENTRY_TEMPLATE into the stub and patches in the addresses of
// _glapi_Current and _glapi_get_current plus the slot's byte offset.
void entry_generate_default_code(int index, int slot)
{
char *entry = (char *) (public_entry_start + (index * entry_stub_size));
memcpy(entry, ENTRY_TEMPLATE, sizeof(ENTRY_TEMPLATE));
*((uint32_t *) (entry + TEMPLATE_OFFSET_SLOT)) = slot * sizeof(mapi_func);
*((uintptr_t *) (entry + TEMPLATE_OFFSET_CURRENT_TABLE)) = (uintptr_t) _glapi_Current;
*((uintptr_t *) (entry + TEMPLATE_OFFSET_CURRENT_TABLE_GET)) = (uintptr_t) _glapi_get_current;
}

View file

@ -74,34 +74,6 @@ __asm__(".balign " U_STRINGIFY(GLDISPATCH_PAGE_SIZE) "\n"
__asm__(".text\n");
__asm__("x86_current_tls:\n\t"
".balign " U_STRINGIFY(ENTRY_STUB_ALIGN) "\n"
"call 1f\n"
"1:\n\t"
"popl %eax\n\t"
"addl $_GLOBAL_OFFSET_TABLE_+[.-1b], %eax\n\t"
"movl _glapi_tls_Current@GOTNTPOFF(%eax), %eax\n\t"
"ret");
extern uint32_t
x86_current_tls();
const int entry_type = __GLDISPATCH_STUB_X86;
const int entry_stub_size = ENTRY_STUB_ALIGN;
static const unsigned char ENTRY_TEMPLATE[] =
{
0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, /* movl %gs:0x0, %eax */
0xff, 0xa0, 0x34, 0x12, 0x00, 0x00, /* jmp *0x1234(%eax) */
};
static const int TEMPLATE_OFFSET_TLS_OFFSET = 2;
static const int TEMPLATE_OFFSET_SLOT = 8;
// Writes a freshly generated x86 TLS dispatch stub for entrypoint (index):
// copies ENTRY_TEMPLATE into the stub and patches in the TLS offset
// returned by the x86_current_tls helper and the slot's byte offset.
void entry_generate_default_code(int index, int slot)
{
char *entry = (char *) (public_entry_start + (index * entry_stub_size));
memcpy(entry, ENTRY_TEMPLATE, sizeof(ENTRY_TEMPLATE));
*((uint32_t *) (entry + TEMPLATE_OFFSET_TLS_OFFSET)) = x86_current_tls();
*((uint32_t *) (entry + TEMPLATE_OFFSET_SLOT)) = (uint32_t) (slot * sizeof(mapi_func));
}

View file

@ -84,41 +84,3 @@ __asm__(".text\n");
const int entry_type = __GLDISPATCH_STUB_X86;
const int entry_stub_size = ENTRY_STUB_ALIGN;
// Note that the generated stubs are simpler than the assembly stubs above.
// For the generated stubs, we can patch in the addresses of _glapi_Current and
// _glapi_get_current, so we don't need to go through the GOT and PLT lookups.
static const unsigned char ENTRY_TEMPLATE[] =
{
0xa1, 0x40, 0x30, 0x20, 0x10, // <ENTRY>: mov _glapi_Current, %eax
0x85, 0xc0, // <ENTRY+5>: test %eax, %eax
0x74, 0x06, // <ENTRY+7>: je <ENTRY+15>
0xff, 0xa0, 0x40, 0x30, 0x20, 0x10, // <ENTRY+9>: jmp *slot(%eax)
0xe8, 0x40, 0x30, 0x20, 0x10, // <ENTRY+15>: call _glapi_get_current
0xff, 0xa0, 0x40, 0x30, 0x20, 0x10, // <ENTRY+20>: jmp *slot(%eax)
};
// These are the offsets in ENTRY_TEMPLATE of the values that we have to patch.
static const int TEMPLATE_OFFSET_CURRENT_TABLE = 1;
static const int TEMPLATE_OFFSET_CURRENT_TABLE_GET = 16;
static const int TEMPLATE_OFFSET_CURRENT_TABLE_GET_RELATIVE = 20;
static const int TEMPLATE_OFFSET_SLOT1 = 11;
static const int TEMPLATE_OFFSET_SLOT2 = 22;
// Writes a freshly generated x86 dispatch stub for entrypoint (index):
// copies ENTRY_TEMPLATE into the stub, patches the slot byte offset in
// both jmp instructions and the absolute address of _glapi_Current, and
// computes the PC-relative displacement for the CALL to _glapi_get_current.
void entry_generate_default_code(int index, int slot)
{
char *entry = (char *) (public_entry_start + (index * entry_stub_size));
uintptr_t getTableOffset;
memcpy(entry, ENTRY_TEMPLATE, sizeof(ENTRY_TEMPLATE));
*((uint32_t *) (entry + TEMPLATE_OFFSET_SLOT1)) = slot * sizeof(mapi_func);
*((uint32_t *) (entry + TEMPLATE_OFFSET_SLOT2)) = slot * sizeof(mapi_func);
*((uintptr_t *) (entry + TEMPLATE_OFFSET_CURRENT_TABLE)) = (uintptr_t) _glapi_Current;
// Calculate the offset to patch for the CALL instruction to
// _glapi_get_current.  CALL displacements are relative to the address of
// the following instruction, hence the ..._RELATIVE offset.
getTableOffset = (uintptr_t) _glapi_get_current;
getTableOffset -= (((uintptr_t) entry) + TEMPLATE_OFFSET_CURRENT_TABLE_GET_RELATIVE);
*((uintptr_t *) (entry + TEMPLATE_OFFSET_CURRENT_TABLE_GET)) = getTableOffset;
}

View file

@ -41,14 +41,13 @@ void
_glapi_init(void)
{
u_current_init();
entry_init_public();
}
void
_glapi_destroy(void)
{
u_current_destroy();
stub_cleanup_dynamic();
stub_cleanup();
}
void

View file

@ -35,6 +35,10 @@
#include "table.h"
#include "utils_misc.h"
#if !defined(STATIC_DISPATCH_ONLY)
static void stub_cleanup_dynamic(void);
#endif
struct mapi_stub {
/*!
* The name of the stub function.
@ -44,6 +48,8 @@ struct mapi_stub {
int slot;
};
static void *savedEntrypoints = NULL;
/* define public_stubs */
#define MAPI_TMP_PUBLIC_STUBS
#include "mapi_tmp.h"
@ -84,6 +90,16 @@ stub_find_public(const char *name)
}
}
// Frees the saved entrypoint copy (if any) and, when dynamic stubs are
// built in, tears down the dynamic stub state as well.  Intended to be
// called when the library is unloaded.
void stub_cleanup(void)
{
// free(NULL) is a no-op, so no guard is needed here.
free(savedEntrypoints);
savedEntrypoints = NULL;
#if !defined(STATIC_DISPATCH_ONLY)
stub_cleanup_dynamic();
#endif
}
#if !defined(STATIC_DISPATCH_ONLY)
static char *dynamic_stub_names[MAPI_TABLE_NUM_DYNAMIC];
static int num_dynamic_stubs;
@ -200,11 +216,20 @@ static int stub_allow_override(void)
static GLboolean stubStartPatch(void)
{
assert(savedEntrypoints == NULL);
if (!stub_allow_override()) {
return GL_FALSE;
}
savedEntrypoints = entry_save_entrypoints();
if (savedEntrypoints == NULL) {
return GL_FALSE;
}
if (!entry_patch_start()) {
free(savedEntrypoints);
savedEntrypoints = NULL;
return GL_FALSE;
}
@ -218,20 +243,13 @@ static void stubFinishPatch(void)
static void stubRestoreFuncsInternal(void)
{
int i;
assert(savedEntrypoints != NULL);
assert(stub_allow_override());
for (i = 0; i < ARRAY_LEN(public_stubs); i++) {
entry_generate_default_code(i, public_stubs[i].slot);
}
#if !defined(STATIC_DISPATCH_ONLY)
for (i=0; i<num_dynamic_stubs; i++) {
int slot = ARRAY_LEN(public_stubs) + i;
entry_generate_default_code(slot, slot);
}
#endif
entry_restore_entrypoints(savedEntrypoints);
free(savedEntrypoints);
savedEntrypoints = NULL;
}
static GLboolean stubRestoreFuncs(void)

View file

@ -31,15 +31,14 @@
#include "entry.h"
#include "glapi.h"
#if !defined(STATIC_DISPATCH_ONLY)
/**
* Frees any memory that was allocated for any dynamic stub functions.
* Frees any memory that was allocated for the stub functions.
*
* This should only be called when the library is unloaded, since any generated
* functions won't work after this.
* This should only be called when the library is unloaded.
*/
void stub_cleanup_dynamic(void);
void stub_cleanup(void);
#if !defined(STATIC_DISPATCH_ONLY)
int
stub_find_public(const char *name);

View file

@ -43,9 +43,6 @@ void __attribute__((constructor)) __libGLInit(void)
void _init(void)
#endif
{
// Fix up the static GL entrypoints, if necessary
entry_init_public();
__glDispatchInit();
// Register these entrypoints with GLdispatch so they can be
@ -60,6 +57,7 @@ void _fini(void)
#endif
{
// Unregister the GLdispatch entrypoints
stub_cleanup();
__glDispatchUnregisterStubCallbacks(patchStubId);
__glDispatchFini();
}

View file

@ -39,33 +39,6 @@
#include <fcntl.h>
#include <assert.h>
#define TEMP_FILENAME_ARRAY_SIZE 4
/**
* Populates \p dirs with a NULL-terminated list of directories to use to open
* a temp file.
*
* \param[out] dirs An array with at least \p TEMP_FILENAME_ARRAY_SIZE elements.
*/
static void GetTempDirs(const char **dirs);
/**
* Creates a temp file.
*
* The file will be created with mkstemp(3), then immediately unlinked so that
* it doesn't risk leaving any clutter behind.
*
* \param tempdir The directory to create the file in.
* \return A file descriptor, or -1 on error.
*/
static int OpenTempFile(const char *tempdir);
/**
* Allocates executable memory by mapping a file.
*/
static int AllocExecPagesFile(int fd, size_t size, void **writePtr, void **execPtr);
static int AllocExecPagesAnonymous(size_t size, void **writePtr, void **execPtr);
int glvnd_asprintf(char **strp, const char *fmt, ...)
{
va_list args;
@ -117,139 +90,6 @@ int glvnd_vasprintf(char **strp, const char *fmt, va_list args)
return ret;
}
/**
 * Allocates \p size bytes of executable memory.
 *
 * Returns a read/write view in \p *writePtr and a read/exec view of the
 * same memory in \p *execPtr; the two may be the same pointer if the
 * anonymous-mapping fallback is used.
 *
 * \return Zero on success, non-zero on failure.
 */
int AllocExecPages(size_t size, void **writePtr, void **execPtr)
{
const char *dirs[TEMP_FILENAME_ARRAY_SIZE];
int i;
*writePtr = NULL;
*execPtr = NULL;
// Try to allocate the memory by creating a file and mapping it twice.
// This follows Ulrich Drepper's recommendation for allocating executable
// memory:
// http://www.akkadia.org/drepper/selinux-mem.html
GetTempDirs(dirs);
for (i=0; dirs[i] != NULL; i++) {
int fd = OpenTempFile(dirs[i]);
if (fd >= 0) {
int rv = AllocExecPagesFile(fd, size, writePtr, execPtr);
// The mappings outlive the descriptor, so close it either way.
close(fd);
if (rv == 0) {
return 0;
}
}
}
// Using a file failed, so fall back to trying to create a single anonymous
// mapping.
return AllocExecPagesAnonymous(size, writePtr, execPtr);
}
/**
 * Releases the mappings returned by AllocExecPages.
 *
 * If both pointers refer to the same mapping, it is unmapped only once.
 * NULL pointers are ignored.
 */
void FreeExecPages(size_t size, void *writePtr, void *execPtr)
{
    const int sameMapping = (execPtr == writePtr);

    if (writePtr != NULL) {
        munmap(writePtr, size);
    }
    if (!sameMapping && execPtr != NULL) {
        munmap(execPtr, size);
    }
}
// Creates and opens an anonymous temp file in (tempdir).  The file is
// either created with O_TMPFILE or unlinked right after mkstemp, so no
// name is left behind on disk.  Returns a file descriptor, or -1 on error.
int OpenTempFile(const char *tempdir)
{
int fd = -1;
#if defined(O_TMPFILE)
// If it's available, then try creating a file with O_TMPFILE first.
fd = open(tempdir, O_RDWR | O_TMPFILE | O_EXCL, S_IRUSR | S_IWUSR);
#endif // defined(O_TMPFILE)
if (fd < 0)
{
// If O_TMPFILE wasn't available or wasn't supported, then try mkstemp
// instead.
char *templateName = NULL;
if (glvnd_asprintf(&templateName, "%s/.glvndXXXXXX", tempdir) < 0) {
return -1;
}
fd = mkstemp(templateName);
if (fd >= 0) {
// Unlink the file so that we don't leave any clutter behind.
unlink(templateName);
}
free(templateName);
templateName = NULL;
}
// Make sure we can still use the file after it's unlinked.
if (fd >= 0) {
struct stat sb;
if (fstat(fd, &sb) != 0) {
close(fd);
fd = -1;
}
}
return fd;
}
// Grows the file (fd) to (size) bytes and maps it twice: once read/exec
// into *execPtr and once read/write into *writePtr, so the same code can
// be written through one view and executed through the other.
// Returns 0 on success, -1 on error (with both pointers left NULL).
int AllocExecPagesFile(int fd, size_t size, void **writePtr, void **execPtr)
{
if (ftruncate(fd, size) != 0) {
return -1;
}
*execPtr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
if (*execPtr == MAP_FAILED) {
*execPtr = NULL;
return -1;
}
*writePtr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (*writePtr == MAP_FAILED) {
// Undo the first mapping so we don't leak it.
munmap(*execPtr, size);
*execPtr = *writePtr = NULL;
return -1;
}
return 0;
}
/**
 * Fallback allocator: creates one anonymous read/write/exec mapping and
 * returns it through both \p writePtr and \p execPtr.
 *
 * \return Zero on success, -1 if the mapping fails.
 */
int AllocExecPagesAnonymous(size_t size, void **writePtr, void **execPtr)
{
    void *mapping = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (mapping == MAP_FAILED) {
        return -1;
    }
    // A single mapping serves as both the writable and executable view.
    *writePtr = mapping;
    *execPtr = mapping;
    return 0;
}
/**
 * Fills \p dirs with a NULL-terminated list of candidate directories for
 * creating a temp file, in priority order: $TMPDIR, $HOME, then "/tmp".
 *
 * \param[out] dirs An array with at least TEMP_FILENAME_ARRAY_SIZE entries.
 */
void GetTempDirs(const char **dirs)
{
    int n = 0;

    // Don't use the environment variables if we're running as setuid.
    if (getuid() == geteuid()) {
        const char *fromEnv[2];
        int i;

        fromEnv[0] = getenv("TMPDIR");
        fromEnv[1] = getenv("HOME");
        for (i = 0; i < 2; i++) {
            if (fromEnv[i] != NULL) {
                dirs[n++] = fromEnv[i];
            }
        }
    }
    dirs[n++] = "/tmp";
    dirs[n] = NULL;
}
void glvnd_byte_swap16(uint16_t* array, const size_t size)
{
int i;

View file

@ -68,28 +68,6 @@ int glvnd_asprintf(char **strp, const char *fmt, ...);
*/
int glvnd_vasprintf(char **strp, const char *fmt, va_list args);
/**
* Allocates executable memory.
*
* To avoid having the same page be both writable and executable, this function
* returns two pointers to the same mapping, one read/write and one
* read/execute.
*
* Depending on the system, \p writePtr and \p execPtr may return the same
* pointer, mapped as read/write/execute.
*
* \param size The number of bytes to allocate.
* \param[out] writePtr Returns a pointer to the read/write mapping.
* \param[out] execPtr Returns a pointer to the read/exec mapping.
* \return Zero on success, non-zero on error.
*/
int AllocExecPages(size_t size, void **writePtr, void **execPtr);
/**
* Frees the pages allocated from \p allocExecMem.
*/
void FreeExecPages(size_t size, void *writePtr, void *execPtr);
/*!
* Swaps the bytes of an array.
*