From b317855c643050da7c7f047082de9a8b04270172 Mon Sep 17 00:00:00 2001
From: Matt Staveley-Taylor
Date: Thu, 24 Oct 2024 14:48:56 +0200
Subject: [PATCH] meta: add clang-format

---
 .clang-format                    |   33 +
 .github/workflows/pre-commit.yml |   11 +
 .pre-commit-config.yaml          |   14 +
 README.md                        |    4 +
 options/rtld/generic/linker.cpp  | 1160 +++++++++++++++++-------
 5 files changed, 725 insertions(+), 497 deletions(-)
 create mode 100644 .clang-format
 create mode 100644 .github/workflows/pre-commit.yml
 create mode 100644 .pre-commit-config.yaml

diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000000..ab874bd548
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,33 @@
+---
+BasedOnStyle: LLVM
+Language: Cpp
+
+ColumnLimit: 100
+IndentWidth: 4
+TabWidth: 4
+UseTab: Always
+
+ContinuationIndentWidth: 4
+ConstructorInitializerIndentWidth: 4
+
+AlignAfterOpenBracket: BlockIndent
+
+# Put function names + args on their own lines
+AlwaysBreakAfterReturnType: All
+
+# Put all initializers onto their own lines.
+PackConstructorInitializers: CurrentLine
+
+# Put all function arguments on their own lines.
+BinPackArguments: false
+BinPackParameters: false
+
+# If control statements are wrapped to multiple lines, we want the { to be on the next line
+# to improve readability. Otherwise, it can happen that the line containing the { gets the same
+# indentation level as the control statement's body.
+BreakBeforeBraces: Custom
+BraceWrapping:
+  AfterControlStatement: MultiLine
+
+# Format braces as {1, 2, 3}, not as { 1, 2, 3 }.
+Cpp11BracedListStyle: true
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
new file mode 100644
index 0000000000..cc35be6b01
--- /dev/null
+++ b/.github/workflows/pre-commit.yml
@@ -0,0 +1,11 @@
+name: pre-commit
+
+on: [merge_group, pull_request]
+
+jobs:
+  pre-commit:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v3
+      - uses: pre-commit/action@v3.0.1
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000..57cb12ff68
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,14 @@
+fail_fast: false
+repos:
+  - repo: https://github.com/pocc/pre-commit-hooks
+    rev: 336fdd7c3cab698ead0b1c95157b9e74d3906b62
+    hooks:
+      - id: clang-format
+        additional_dependencies: [clang-format==19.1.3]
+        args: ["-i"]
+        exclude: >
+          (?x)^(
+            options/ansi/musl-generic-math/.*|
+            options/ansi/generic/complex/.*|
+            options/posix/musl-generic-regex/.*
+          )
diff --git a/README.md b/README.md
index 12db9505bc..c459f9fa03 100644
--- a/README.md
+++ b/README.md
@@ -49,6 +49,10 @@ The type of library to be built (static, shared, or both) is controlled by meson
 
 We also support building with `-Db_sanitize=undefined` to use UBSan inside mlibc. Note that this does not enable UBSan for external applications which link against `libc.so`, but it can be useful during development to detect internal bugs (e.g when adding new sysdeps).
 
+## Running pre-commit hooks
+
+To format your code before submitting a PR, you should install [`pre-commit`](https://pre-commit.com/). Then run `pre-commit install` to install the git hook, which runs each time you commit. Alternatively, you can run `pre-commit run -a` to format all files manually.
+
 ## Running Tests
 
 The `mlibc` test suite can be run under a Linux host.
To do this, first install a set of kernel headers (as described [here](https://docs.kernel.org/kbuild/headers_install.html)), then run from the project root: diff --git a/options/rtld/generic/linker.cpp b/options/rtld/generic/linker.cpp index ec11f1b92b..0ba5c0aa9d 100644 --- a/options/rtld/generic/linker.cpp +++ b/options/rtld/generic/linker.cpp @@ -4,19 +4,18 @@ // keep a list of optional generic relocation types enum { - R_OFFSET = (uintptr_t) -1, + R_OFFSET = (uintptr_t)-1, }; - +#include #include #include +#include #include #include -#include #include +#include #include -#include -#include #include "elf.hpp" #include "linker.hpp" @@ -45,13 +44,15 @@ constexpr inline uintptr_t tlsOffsetFromTp = 0; constexpr inline bool tlsAboveTp = true; constexpr inline ptrdiff_t tlsOffsetFromTp = -0x7000; #else -# error Unknown architecture +#error Unknown architecture #endif extern DebugInterface globalDebugInterface; extern uintptr_t __stack_chk_guard; -extern frg::manual_box> libraryPaths; +extern frg::manual_box< + frg::small_vector> + libraryPaths; extern frg::manual_box> preloads; #if MLIBC_STATIC_BUILD @@ -73,17 +74,18 @@ size_t tlsMaxAlignment = 16; // part of the global scope is considered for symbol resolution. uint64_t rtsCounter = 2; -bool trySeek(int fd, int64_t offset) { +bool +trySeek(int fd, int64_t offset) { off_t noff; return mlibc::sys_seek(fd, offset, SEEK_SET, &noff) == 0; } -bool tryReadExactly(int fd, void *data, size_t length) { +bool +tryReadExactly(int fd, void *data, size_t length) { size_t offset = 0; - while(offset < length) { + while (offset < length) { ssize_t chunk; - if(mlibc::sys_read(fd, reinterpret_cast(data) + offset, - length - offset, &chunk)) + if (mlibc::sys_read(fd, reinterpret_cast(data) + offset, length - offset, &chunk)) return false; __ensure(chunk > 0); offset += chunk; @@ -92,12 +94,14 @@ bool tryReadExactly(int fd, void *data, size_t length) { return true; } -void closeOrDie(int fd) { - if(mlibc::sys_close(fd)) +void +closeOrDie(int fd) { + if (mlibc::sys_close(fd)) __ensure(!"sys_close() failed"); } -uintptr_t alignUp(uintptr_t address, size_t align) { +uintptr_t +alignUp(uintptr_t address, size_t align) { return (address + align - 1) & ~(align - 1); } @@ -106,17 +110,23 @@ uintptr_t alignUp(uintptr_t address, size_t align) { // -------------------------------------------------------- ObjectRepository::ObjectRepository() -: loadedObjects{getAllocator()}, - _nameMap{frg::hash{}, getAllocator()}, - _destructQueue{getAllocator()} {} - -SharedObject *ObjectRepository::injectObjectFromDts(frg::string_view name, - frg::string path, uintptr_t base_address, - elf_dyn *dynamic, uint64_t rts) { + : loadedObjects{getAllocator()}, + _nameMap{frg::hash{}, getAllocator()}, + _destructQueue{getAllocator()} {} + +SharedObject * +ObjectRepository::injectObjectFromDts( + frg::string_view name, + frg::string path, + uintptr_t base_address, + elf_dyn *dynamic, + uint64_t rts +) { __ensure(!findLoadedObject(name)); - auto object = frg::construct(getAllocator(), - name.data(), std::move(path), false, globalScope.get(), rts); + auto object = frg::construct( + getAllocator(), name.data(), std::move(path), false, globalScope.get(), rts + ); object->baseAddress = base_address; object->dynamic = dynamic; _parseDynamic(object); @@ -127,14 +137,21 @@ SharedObject *ObjectRepository::injectObjectFromDts(frg::string_view name, return object; } -SharedObject *ObjectRepository::injectObjectFromPhdrs(frg::string_view name, - frg::string path, void *phdr_pointer, - size_t 
phdr_entry_size, size_t num_phdrs, void *entry_pointer, - uint64_t rts) { +SharedObject * +ObjectRepository::injectObjectFromPhdrs( + frg::string_view name, + frg::string path, + void *phdr_pointer, + size_t phdr_entry_size, + size_t num_phdrs, + void *entry_pointer, + uint64_t rts +) { __ensure(!findLoadedObject(name)); - auto object = frg::construct(getAllocator(), - name.data(), std::move(path), true, globalScope.get(), rts); + auto object = frg::construct( + getAllocator(), name.data(), std::move(path), true, globalScope.get(), rts + ); _fetchFromPhdrs(object, phdr_pointer, phdr_entry_size, num_phdrs, entry_pointer); _parseDynamic(object); @@ -144,25 +161,32 @@ SharedObject *ObjectRepository::injectObjectFromPhdrs(frg::string_view name, return object; } -SharedObject *ObjectRepository::injectStaticObject(frg::string_view name, - frg::string path, void *phdr_pointer, - size_t phdr_entry_size, size_t num_phdrs, void *entry_pointer, - uint64_t rts) { +SharedObject * +ObjectRepository::injectStaticObject( + frg::string_view name, + frg::string path, + void *phdr_pointer, + size_t phdr_entry_size, + size_t num_phdrs, + void *entry_pointer, + uint64_t rts +) { __ensure(!findLoadedObject(name)); - auto object = frg::construct(getAllocator(), - name.data(), std::move(path), true, globalScope.get(), rts); + auto object = frg::construct( + getAllocator(), name.data(), std::move(path), true, globalScope.get(), rts + ); _fetchFromPhdrs(object, phdr_pointer, phdr_entry_size, num_phdrs, entry_pointer); #if MLIBC_STATIC_BUILD - object->initArray = reinterpret_cast(__init_array_start); - object->initArraySize = static_cast((uintptr_t)__init_array_end - - (uintptr_t)__init_array_start); - object->finiArray = reinterpret_cast(__fini_array_start); - object->finiArraySize = static_cast((uintptr_t)__fini_array_end - - (uintptr_t)__fini_array_start); - object->preInitArray = reinterpret_cast(__preinit_array_start); - object->preInitArraySize = static_cast((uintptr_t)__preinit_array_end - - (uintptr_t)__preinit_array_start); + object->initArray = reinterpret_cast(__init_array_start); + object->initArraySize = + static_cast((uintptr_t)__init_array_end - (uintptr_t)__init_array_start); + object->finiArray = reinterpret_cast(__fini_array_start); + object->finiArraySize = + static_cast((uintptr_t)__fini_array_end - (uintptr_t)__fini_array_start); + object->preInitArray = reinterpret_cast(__preinit_array_start); + object->preInitArraySize = + static_cast((uintptr_t)__preinit_array_end - (uintptr_t)__preinit_array_start); #endif _addLoadedObject(object); @@ -170,14 +194,16 @@ SharedObject *ObjectRepository::injectStaticObject(frg::string_view name, return object; } -frg::expected ObjectRepository::requestObjectWithName(frg::string_view name, - SharedObject *origin, Scope *localScope, bool createScope, uint64_t rts) { +frg::expected +ObjectRepository::requestObjectWithName( + frg::string_view name, SharedObject *origin, Scope *localScope, bool createScope, uint64_t rts +) { if (auto obj = findLoadedObject(name)) return obj; - auto tryToOpen = [&] (const char *path) { + auto tryToOpen = [&](const char *path) { int fd; - if(auto x = mlibc::sys_open(path, O_RDONLY, 0, &fd); x) { + if (auto x = mlibc::sys_open(path, O_RDONLY, 0, &fd); x) { return -1; } return fd; @@ -185,8 +211,8 @@ frg::expected ObjectRepository::requestObjectWithNa // TODO(arsen): this process can probably undergo heavy optimization, by // preprocessing the rpath only once on parse - auto processRpath = [&] (frg::string_view path) { - frg::string 
sPath { getAllocator() }; + auto processRpath = [&](frg::string_view path) { + frg::string sPath{getAllocator()}; if (path.starts_with("$ORIGIN")) { frg::string_view dirname = origin->path; auto lastsl = dirname.find_last('/'); @@ -195,10 +221,10 @@ frg::expected ObjectRepository::requestObjectWithNa } else { dirname = "."; } - sPath = frg::string{ getAllocator(), dirname }; + sPath = frg::string{getAllocator(), dirname}; sPath += path.sub_string(7, path.size() - 7); } else { - sPath = frg::string{ getAllocator(), path }; + sPath = frg::string{getAllocator(), path}; } if (sPath[sPath.size() - 1] != '/') { sPath += '/'; @@ -209,16 +235,16 @@ frg::expected ObjectRepository::requestObjectWithNa int fd = tryToOpen(sPath.data()); if (logRpath && fd >= 0) mlibc::infoLogger() << "rtld: found in rpath" << frg::endlog; - return frg::tuple { fd, std::move(sPath) }; + return frg::tuple{fd, std::move(sPath)}; }; - frg::string chosenPath { getAllocator() }; + frg::string chosenPath{getAllocator()}; int fd = -1; if (origin && origin->runPath) { size_t start = 0; size_t idx = 0; - frg::string_view rpath { origin->runPath }; - auto next = [&] () { + frg::string_view rpath{origin->runPath}; + auto next = [&]() { idx = rpath.find_first(':', start); if (idx == (size_t)-1) idx = rpath.size(); @@ -245,18 +271,19 @@ frg::expected ObjectRepository::requestObjectWithNa mlibc::infoLogger() << "rtld: no rpath set for object" << frg::endlog; } - for(size_t i = 0; i < libraryPaths->size() && fd == -1; i++) { + for (size_t i = 0; i < libraryPaths->size() && fd == -1; i++) { auto ldPath = (*libraryPaths)[i]; auto path = frg::string{getAllocator(), ldPath} + '/' + name; - if(logLdPath) - mlibc::infoLogger() << "rtld: Trying to load " << name << " from ldpath " << ldPath << "/" << frg::endlog; + if (logLdPath) + mlibc::infoLogger() << "rtld: Trying to load " << name << " from ldpath " << ldPath + << "/" << frg::endlog; fd = tryToOpen(path.data()); - if(fd >= 0) { + if (fd >= 0) { chosenPath = std::move(path); break; } } - if(fd == -1) + if (fd == -1) return LinkerError::notFound; if (createScope) { @@ -268,12 +295,13 @@ frg::expected ObjectRepository::requestObjectWithNa __ensure(localScope != nullptr); - auto object = frg::construct(getAllocator(), - name.data(), std::move(chosenPath), false, localScope, rts); + auto object = frg::construct( + getAllocator(), name.data(), std::move(chosenPath), false, localScope, rts + ); auto result = _fetchFromFile(object, fd); closeOrDie(fd); - if(!result) { + if (!result) { frg::destruct(getAllocator(), object); return result.error(); } @@ -286,8 +314,10 @@ frg::expected ObjectRepository::requestObjectWithNa return object; } -frg::expected ObjectRepository::requestObjectAtPath(frg::string_view path, - Scope *localScope, bool createScope, uint64_t rts) { +frg::expected +ObjectRepository::requestObjectAtPath( + frg::string_view path, Scope *localScope, bool createScope, uint64_t rts +) { // TODO: Support SONAME correctly. 
auto lastSlash = path.find_last('/') + 1; auto name = path; @@ -306,19 +336,20 @@ frg::expected ObjectRepository::requestObjectAtPath __ensure(localScope != nullptr); - auto object = frg::construct(getAllocator(), - name.data(), path.data(), false, localScope, rts); + auto object = frg::construct( + getAllocator(), name.data(), path.data(), false, localScope, rts + ); frg::string no_prefix(getAllocator(), path); int fd; - if(mlibc::sys_open((no_prefix + '\0').data(), O_RDONLY, 0, &fd)) { + if (mlibc::sys_open((no_prefix + '\0').data(), O_RDONLY, 0, &fd)) { frg::destruct(getAllocator(), object); return LinkerError::notFound; } auto result = _fetchFromFile(object, fd); closeOrDie(fd); - if(!result) { + if (!result) { frg::destruct(getAllocator(), object); return result.error(); } @@ -331,12 +362,13 @@ frg::expected ObjectRepository::requestObjectAtPath return object; } -SharedObject *ObjectRepository::findCaller(void *addr) { +SharedObject * +ObjectRepository::findCaller(void *addr) { uintptr_t target = reinterpret_cast(addr); for (auto [name, object] : _nameMap) { // Search all PT_LOAD segments for the specified address. - for(size_t j = 0; j < object->phdrCount; j++) { + for (size_t j = 0; j < object->phdrCount; j++) { auto phdr = (elf_phdr *)((uintptr_t)object->phdrPointer + j * object->phdrEntrySize); if (phdr->p_type == PT_LOAD) { uintptr_t start = object->baseAddress + phdr->p_vaddr; @@ -350,7 +382,8 @@ SharedObject *ObjectRepository::findCaller(void *addr) { return nullptr; } -SharedObject *ObjectRepository::findLoadedObject(frg::string_view name) { +SharedObject * +ObjectRepository::findLoadedObject(frg::string_view name) { auto it = _nameMap.get(name); if (it) return *it; @@ -365,13 +398,16 @@ SharedObject *ObjectRepository::findLoadedObject(frg::string_view name) { return nullptr; } -void ObjectRepository::addObjectToDestructQueue(SharedObject *object) { +void +ObjectRepository::addObjectToDestructQueue(SharedObject *object) { _destructQueue.push(object); } -void doDestruct(SharedObject *object); +void +doDestruct(SharedObject *object); -void ObjectRepository::destructObjects() { +void +ObjectRepository::destructObjects() { while (_destructQueue.size() > 0) { auto top = _destructQueue.top(); doDestruct(top); @@ -383,13 +419,19 @@ void ObjectRepository::destructObjects() { // ObjectRepository: Fetching methods. // -------------------------------------------------------- -void ObjectRepository::_fetchFromPhdrs(SharedObject *object, void *phdr_pointer, - size_t phdr_entry_size, size_t phdr_count, void *entry_pointer) { +void +ObjectRepository::_fetchFromPhdrs( + SharedObject *object, + void *phdr_pointer, + size_t phdr_entry_size, + size_t phdr_count, + void *entry_pointer +) { __ensure(object->isMainObject); object->phdrPointer = phdr_pointer; object->phdrEntrySize = phdr_entry_size; object->phdrCount = phdr_count; - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Loading " << object->name << frg::endlog; // Note: the entry pointer is absolute and not relative to the base address. 
@@ -399,16 +441,16 @@ void ObjectRepository::_fetchFromPhdrs(SharedObject *object, void *phdr_pointer, frg::optional tls_offset; // segments are already mapped, so we just have to find the dynamic section - for(size_t i = 0; i < phdr_count; i++) { + for (size_t i = 0; i < phdr_count; i++) { auto phdr = (elf_phdr *)((uintptr_t)phdr_pointer + i * phdr_entry_size); - switch(phdr->p_type) { + switch (phdr->p_type) { case PT_PHDR: // Determine the executable's base address (in the PIE case) by comparing // the PHDR segment's load address against it's address in the ELF file. object->baseAddress = reinterpret_cast(phdr_pointer) - phdr->p_vaddr; - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Executable is loaded at " - << (void *)object->baseAddress << frg::endlog; + << (void *)object->baseAddress << frg::endlog; break; case PT_DYNAMIC: dynamic_offset = phdr->p_vaddr; @@ -421,52 +463,48 @@ void ObjectRepository::_fetchFromPhdrs(SharedObject *object, void *phdr_pointer, break; case PT_INTERP: object->interpreterPath = frg::string{ - (char*)(object->baseAddress + phdr->p_vaddr), - getAllocator() + (char *)(object->baseAddress + phdr->p_vaddr), getAllocator() }; } break; default: - //FIXME warn about unknown phdrs + // FIXME warn about unknown phdrs break; } } - if(dynamic_offset) + if (dynamic_offset) object->dynamic = (elf_dyn *)(object->baseAddress + *dynamic_offset); - if(tls_offset) + if (tls_offset) object->tlsImagePtr = (void *)(object->baseAddress + *tls_offset); } - -frg::expected ObjectRepository::_fetchFromFile(SharedObject *object, int fd) { +frg::expected +ObjectRepository::_fetchFromFile(SharedObject *object, int fd) { __ensure(!object->isMainObject); // read the elf file header elf_ehdr ehdr; - if(!tryReadExactly(fd, &ehdr, sizeof(elf_ehdr))) + if (!tryReadExactly(fd, &ehdr, sizeof(elf_ehdr))) return LinkerError::fileTooShort; - if(ehdr.e_ident[0] != 0x7F - || ehdr.e_ident[1] != 'E' - || ehdr.e_ident[2] != 'L' - || ehdr.e_ident[3] != 'F') + if (ehdr.e_ident[0] != 0x7F || ehdr.e_ident[1] != 'E' || ehdr.e_ident[2] != 'L' || + ehdr.e_ident[3] != 'F') return LinkerError::notElf; - if((ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) - || ehdr.e_machine != ELF_MACHINE - || ehdr.e_ident[EI_CLASS] != ELF_CLASS) + if ((ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) || ehdr.e_machine != ELF_MACHINE || + ehdr.e_ident[EI_CLASS] != ELF_CLASS) return LinkerError::wrongElfType; // read the elf program headers auto phdr_buffer = (char *)getAllocator().allocate(ehdr.e_phnum * ehdr.e_phentsize); - if(!phdr_buffer) + if (!phdr_buffer) return LinkerError::outOfMemory; - if(!trySeek(fd, ehdr.e_phoff)) { + if (!trySeek(fd, ehdr.e_phoff)) { getAllocator().deallocate(phdr_buffer, ehdr.e_phnum * ehdr.e_phentsize); return LinkerError::invalidProgramHeader; } - if(!tryReadExactly(fd, phdr_buffer, ehdr.e_phnum * ehdr.e_phentsize)) { + if (!tryReadExactly(fd, phdr_buffer, ehdr.e_phnum * ehdr.e_phentsize)) { getAllocator().deallocate(phdr_buffer, ehdr.e_phnum * ehdr.e_phentsize); return LinkerError::invalidProgramHeader; } @@ -479,14 +517,14 @@ frg::expected ObjectRepository::_fetchFromFile(SharedObject * constexpr size_t hugeSize = 0x200000; uintptr_t highest_address = 0; - for(int i = 0; i < ehdr.e_phnum; i++) { + for (int i = 0; i < ehdr.e_phnum; i++) { auto phdr = (elf_phdr *)(phdr_buffer + i * ehdr.e_phentsize); - if(phdr->p_type != PT_LOAD) + if (phdr->p_type != PT_LOAD) continue; auto limit = phdr->p_vaddr + phdr->p_memsz; - if(limit > highest_address) + if (limit > highest_address) 
highest_address = limit; } @@ -497,14 +535,20 @@ frg::expected ObjectRepository::_fetchFromFile(SharedObject * #if MLIBC_MMAP_ALLOCATE_DSO void *mappedAddr = nullptr; - if (mlibc::sys_vm_map(nullptr, - highest_address - object->baseAddress, PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0, &mappedAddr)) { + if (mlibc::sys_vm_map( + nullptr, + highest_address - object->baseAddress, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS, + -1, + 0, + &mappedAddr + )) + { mlibc::infoLogger() << "sys_vm_map failed when allocating address space for DSO \"" - << object->name << "\"" - << ", base " << (void *)object->baseAddress - << ", requested " << (highest_address - object->baseAddress) << " bytes" - << frg::endlog; + << object->name << "\"" + << ", base " << (void *)object->baseAddress << ", requested " + << (highest_address - object->baseAddress) << " bytes" << frg::endlog; getAllocator().deallocate(phdr_buffer, ehdr.e_phnum * ehdr.e_phentsize); return LinkerError::outOfMemory; } @@ -515,18 +559,18 @@ frg::expected ObjectRepository::_fetchFromFile(SharedObject * libraryBase += (highest_address + (hugeSize - 1)) & ~(hugeSize - 1); #endif - if(verbose || logBaseAddresses) - mlibc::infoLogger() << "rtld: Loading " << object->name - << " at " << (void *)object->baseAddress << frg::endlog; + if (verbose || logBaseAddresses) + mlibc::infoLogger() << "rtld: Loading " << object->name << " at " + << (void *)object->baseAddress << frg::endlog; // Load all segments. constexpr size_t pageSize = 0x1000; - for(int i = 0; i < ehdr.e_phnum; i++) { + for (int i = 0; i < ehdr.e_phnum; i++) { auto phdr = (elf_phdr *)(phdr_buffer + i * ehdr.e_phentsize); - if(phdr->p_type == PT_LOAD) { + if (phdr->p_type == PT_LOAD) { size_t misalign = phdr->p_vaddr & (pageSize - 1); - if(!phdr->p_memsz) + if (!phdr->p_memsz) continue; __ensure(phdr->p_memsz >= phdr->p_filesz); @@ -539,71 +583,89 @@ frg::expected ObjectRepository::_fetchFromFile(SharedObject * auto total_map_size = (phdr->p_memsz + misalign + pageSize - 1) & ~(pageSize - 1); int prot = 0; - if(phdr->p_flags & PF_R) + if (phdr->p_flags & PF_R) prot |= PROT_READ; - if(phdr->p_flags & PF_W) + if (phdr->p_flags & PF_W) prot |= PROT_WRITE; - if(phdr->p_flags & PF_X) + if (phdr->p_flags & PF_X) prot |= PROT_EXEC; - #if MLIBC_MAP_DSO_SEGMENTS - void *map_pointer; - if(mlibc::sys_vm_map(reinterpret_cast(map_address), - backed_map_size, prot | PROT_WRITE, - MAP_PRIVATE | MAP_FIXED, fd, phdr->p_offset - misalign, &map_pointer)) - __ensure(!"sys_vm_map failed"); - if(total_map_size > backed_map_size) - if(mlibc::sys_vm_map(reinterpret_cast(map_address + backed_map_size), - total_map_size - backed_map_size, prot | PROT_WRITE, - MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0, &map_pointer)) - __ensure(!"sys_vm_map failed"); - - if(mlibc::sys_vm_readahead) - if(mlibc::sys_vm_readahead(reinterpret_cast(map_address), - backed_map_size)) - mlibc::infoLogger() << "mlibc: sys_vm_readahead() failed in ld.so" - << frg::endlog; - - // Clear the trailing area at the end of the backed mapping. - // We do not clear the leading area; programs are not supposed to access it. 
- memset(reinterpret_cast(map_address + misalign + phdr->p_filesz), - 0, phdr->p_memsz - phdr->p_filesz); - #else - (void)backed_map_size; - - void *map_pointer; - if(mlibc::sys_vm_map(reinterpret_cast(map_address), - total_map_size, prot | PROT_WRITE, - MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0, &map_pointer)) +#if MLIBC_MAP_DSO_SEGMENTS + void *map_pointer; + if (mlibc::sys_vm_map( + reinterpret_cast(map_address), + backed_map_size, + prot | PROT_WRITE, + MAP_PRIVATE | MAP_FIXED, + fd, + phdr->p_offset - misalign, + &map_pointer + )) + __ensure(!"sys_vm_map failed"); + if (total_map_size > backed_map_size) + if (mlibc::sys_vm_map( + reinterpret_cast(map_address + backed_map_size), + total_map_size - backed_map_size, + prot | PROT_WRITE, + MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, + -1, + 0, + &map_pointer + )) __ensure(!"sys_vm_map failed"); - __ensure(trySeek(fd, phdr->p_offset)); - __ensure(tryReadExactly(fd, reinterpret_cast(map_address) + misalign, - phdr->p_filesz)); - #endif + if (mlibc::sys_vm_readahead) + if (mlibc::sys_vm_readahead(reinterpret_cast(map_address), backed_map_size)) + mlibc::infoLogger() + << "mlibc: sys_vm_readahead() failed in ld.so" << frg::endlog; + + // Clear the trailing area at the end of the backed mapping. + // We do not clear the leading area; programs are not supposed to access it. + memset( + reinterpret_cast(map_address + misalign + phdr->p_filesz), + 0, + phdr->p_memsz - phdr->p_filesz + ); +#else + (void)backed_map_size; + + void *map_pointer; + if (mlibc::sys_vm_map( + reinterpret_cast(map_address), + total_map_size, + prot | PROT_WRITE, + MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, + -1, + 0, + &map_pointer + )) + __ensure(!"sys_vm_map failed"); + + __ensure(trySeek(fd, phdr->p_offset)); + __ensure( + tryReadExactly(fd, reinterpret_cast(map_address) + misalign, phdr->p_filesz) + ); +#endif // Take care of removing superfluous permissions. 
- if(mlibc::sys_vm_protect && ((prot & PROT_WRITE) == 0)) - if(mlibc::sys_vm_protect(map_pointer, total_map_size, prot)) + if (mlibc::sys_vm_protect && ((prot & PROT_WRITE) == 0)) + if (mlibc::sys_vm_protect(map_pointer, total_map_size, prot)) mlibc::infoLogger() << "mlibc: sys_vm_protect() failed in ld.so" << frg::endlog; - }else if(phdr->p_type == PT_TLS) { + } else if (phdr->p_type == PT_TLS) { object->tlsSegmentSize = phdr->p_memsz; object->tlsAlignment = phdr->p_align; object->tlsImageSize = phdr->p_filesz; object->tlsImagePtr = (void *)(object->baseAddress + phdr->p_vaddr); - }else if(phdr->p_type == PT_DYNAMIC) { + } else if (phdr->p_type == PT_DYNAMIC) { object->dynamic = (elf_dyn *)(object->baseAddress + phdr->p_vaddr); - }else if(phdr->p_type == PT_INTERP - || phdr->p_type == PT_PHDR - || phdr->p_type == PT_NOTE - || phdr->p_type == PT_RISCV_ATTRIBUTES - || phdr->p_type == PT_GNU_EH_FRAME - || phdr->p_type == PT_GNU_RELRO - || phdr->p_type == PT_GNU_STACK - || phdr->p_type == PT_GNU_PROPERTY) { + } else if (phdr->p_type == PT_INTERP || phdr->p_type == PT_PHDR || + phdr->p_type == PT_NOTE || phdr->p_type == PT_RISCV_ATTRIBUTES || + phdr->p_type == PT_GNU_EH_FRAME || phdr->p_type == PT_GNU_RELRO || + phdr->p_type == PT_GNU_STACK || phdr->p_type == PT_GNU_PROPERTY) + { // ignore the phdr - }else{ - mlibc::panicLogger() << "Unexpected PHDR type 0x" - << frg::hex_fmt(phdr->p_type) << " in DSO " << object->name << frg::endlog; + } else { + mlibc::panicLogger() << "Unexpected PHDR type 0x" << frg::hex_fmt(phdr->p_type) + << " in DSO " << object->name << frg::endlog; } } @@ -614,10 +676,11 @@ frg::expected ObjectRepository::_fetchFromFile(SharedObject * // ObjectRepository: Parsing methods. // -------------------------------------------------------- -void ObjectRepository::_parseDynamic(SharedObject *object) { - if(!object->dynamic) +void +ObjectRepository::_parseDynamic(SharedObject *object) { + if (!object->dynamic) mlibc::infoLogger() << "ldso: Object '" << object->name - << "' does not have a dynamic section" << frg::endlog; + << "' does not have a dynamic section" << frg::endlog; __ensure(object->dynamic); // Fix up these offsets to addresses after the loop, since the @@ -627,9 +690,9 @@ void ObjectRepository::_parseDynamic(SharedObject *object) { bool runpath_found = false; frg::optional soname_offset; - for(size_t i = 0; object->dynamic[i].d_tag != DT_NULL; i++) { + for (size_t i = 0; object->dynamic[i].d_tag != DT_NULL; i++) { elf_dyn *dynamic = &object->dynamic[i]; - switch(dynamic->d_tag) { + switch (dynamic->d_tag) { // handle hash table, symbol table and string table case DT_HASH: object->hashStyle = HashStyle::systemV; @@ -652,8 +715,7 @@ void ObjectRepository::_parseDynamic(SharedObject *object) { break; // handle lazy relocation table case DT_PLTGOT: - object->globalOffsetTable = (void **)(object->baseAddress - + dynamic->d_un.d_ptr); + object->globalOffsetTable = (void **)(object->baseAddress + dynamic->d_un.d_ptr); break; case DT_JMPREL: object->lazyRelocTableOffset = dynamic->d_un.d_ptr; @@ -662,9 +724,9 @@ void ObjectRepository::_parseDynamic(SharedObject *object) { object->lazyTableSize = dynamic->d_un.d_val; break; case DT_PLTREL: - if(dynamic->d_un.d_val == DT_RELA) { + if (dynamic->d_un.d_val == DT_RELA) { object->lazyExplicitAddend = true; - }else{ + } else { __ensure(dynamic->d_un.d_val == DT_REL); object->lazyExplicitAddend = false; } @@ -677,11 +739,11 @@ void ObjectRepository::_parseDynamic(SharedObject *object) { object->eagerBinding = true; break; case 
DT_FLAGS: { - if(dynamic->d_un.d_val & DF_SYMBOLIC) + if (dynamic->d_un.d_val & DF_SYMBOLIC) object->symbolicResolution = true; - if(dynamic->d_un.d_val & DF_STATIC_TLS) + if (dynamic->d_un.d_val & DF_STATIC_TLS) object->haveStaticTls = true; - if(dynamic->d_un.d_val & DF_BIND_NOW) + if (dynamic->d_un.d_val & DF_BIND_NOW) object->eagerBinding = true; auto ignored = DF_BIND_NOW | DF_SYMBOLIC | DF_STATIC_TLS; @@ -689,24 +751,24 @@ void ObjectRepository::_parseDynamic(SharedObject *object) { // Work around https://sourceware.org/bugzilla/show_bug.cgi?id=24673. ignored |= DF_TEXTREL; #else - if(dynamic->d_un.d_val & DF_TEXTREL) + if (dynamic->d_un.d_val & DF_TEXTREL) mlibc::panicLogger() << "\e[31mrtld: DF_TEXTREL is unimplemented" << frg::endlog; #endif - if(dynamic->d_un.d_val & ~ignored) - mlibc::infoLogger() << "\e[31mrtld: DT_FLAGS(" << frg::hex_fmt{dynamic->d_un.d_val & ~ignored} - << ") is not implemented correctly!\e[39m" - << frg::endlog; + if (dynamic->d_un.d_val & ~ignored) + mlibc::infoLogger() + << "\e[31mrtld: DT_FLAGS(" << frg::hex_fmt{dynamic->d_un.d_val & ~ignored} + << ") is not implemented correctly!\e[39m" << frg::endlog; } break; case DT_FLAGS_1: - if(dynamic->d_un.d_val & DF_1_NOW) + if (dynamic->d_un.d_val & DF_1_NOW) object->eagerBinding = true; // The DF_1_PIE flag is informational only. It is used by e.g file(1). // The DF_1_NODELETE flag has a similar effect to RTLD_NODELETE, both of which we // ignore because we don't implement dlclose(). - if(dynamic->d_un.d_val & ~(DF_1_NOW | DF_1_PIE | DF_1_NODELETE)) - mlibc::infoLogger() << "\e[31mrtld: DT_FLAGS_1(" << frg::hex_fmt{dynamic->d_un.d_val} - << ") is not implemented correctly!\e[39m" - << frg::endlog; + if (dynamic->d_un.d_val & ~(DF_1_NOW | DF_1_PIE | DF_1_NODELETE)) + mlibc::infoLogger() + << "\e[31mrtld: DT_FLAGS_1(" << frg::hex_fmt{dynamic->d_un.d_val} + << ") is not implemented correctly!\e[39m" << frg::endlog; break; case DT_RPATH: if (runpath_found) { @@ -719,19 +781,19 @@ void ObjectRepository::_parseDynamic(SharedObject *object) { runpath_offset = dynamic->d_un.d_val; break; case DT_INIT: - if(dynamic->d_un.d_ptr != 0) + if (dynamic->d_un.d_ptr != 0) object->initPtr = (InitFuncPtr)(object->baseAddress + dynamic->d_un.d_ptr); break; case DT_FINI: - if(dynamic->d_un.d_ptr != 0) + if (dynamic->d_un.d_ptr != 0) object->finiPtr = (InitFuncPtr)(object->baseAddress + dynamic->d_un.d_ptr); break; case DT_INIT_ARRAY: - if(dynamic->d_un.d_ptr != 0) + if (dynamic->d_un.d_ptr != 0) object->initArray = (InitFuncPtr *)(object->baseAddress + dynamic->d_un.d_ptr); break; case DT_FINI_ARRAY: - if(dynamic->d_un.d_ptr != 0) + if (dynamic->d_un.d_ptr != 0) object->finiArray = (InitFuncPtr *)(object->baseAddress + dynamic->d_un.d_ptr); break; case DT_INIT_ARRAYSZ: @@ -741,7 +803,7 @@ void ObjectRepository::_parseDynamic(SharedObject *object) { object->finiArraySize = dynamic->d_un.d_val; break; case DT_PREINIT_ARRAY: - if(dynamic->d_un.d_ptr != 0) { + if (dynamic->d_un.d_ptr != 0) { // Only the main object is allowed pre-initializers. 
__ensure(object->isMainObject); object->preInitArray = (InitFuncPtr *)(object->baseAddress + dynamic->d_un.d_ptr); @@ -764,51 +826,64 @@ void ObjectRepository::_parseDynamic(SharedObject *object) { break; // ignore unimportant tags case DT_NEEDED: // we handle this later - case DT_RELA: case DT_RELASZ: case DT_RELAENT: case DT_RELACOUNT: - case DT_REL: case DT_RELSZ: case DT_RELENT: case DT_RELCOUNT: - case DT_RELR: case DT_RELRSZ: case DT_RELRENT: + case DT_RELA: + case DT_RELASZ: + case DT_RELAENT: + case DT_RELACOUNT: + case DT_REL: + case DT_RELSZ: + case DT_RELENT: + case DT_RELCOUNT: + case DT_RELR: + case DT_RELRSZ: + case DT_RELRENT: case DT_VERSYM: - case DT_VERDEF: case DT_VERDEFNUM: - case DT_VERNEED: case DT_VERNEEDNUM: + case DT_VERDEF: + case DT_VERDEFNUM: + case DT_VERNEED: + case DT_VERNEEDNUM: #ifdef __riscv case DT_TEXTREL: // Work around https://sourceware.org/bugzilla/show_bug.cgi?id=24673. #endif break; - case DT_TLSDESC_PLT: case DT_TLSDESC_GOT: + case DT_TLSDESC_PLT: + case DT_TLSDESC_GOT: break; default: // Ignore unknown entries in the os-specific area as we don't use them. - if(dynamic->d_tag < DT_LOOS || dynamic->d_tag > DT_HIOS) { - mlibc::panicLogger() << "Unexpected dynamic entry " - << (void *)dynamic->d_tag << " in object" << frg::endlog; + if (dynamic->d_tag < DT_LOOS || dynamic->d_tag > DT_HIOS) { + mlibc::panicLogger() << "Unexpected dynamic entry " << (void *)dynamic->d_tag + << " in object" << frg::endlog; } } } - if(runpath_offset) { - object->runPath = reinterpret_cast(object->baseAddress - + object->stringTableOffset + *runpath_offset); + if (runpath_offset) { + object->runPath = reinterpret_cast( + object->baseAddress + object->stringTableOffset + *runpath_offset + ); } - if(soname_offset) { - object->soName = reinterpret_cast(object->baseAddress - + object->stringTableOffset + *soname_offset); + if (soname_offset) { + object->soName = reinterpret_cast( + object->baseAddress + object->stringTableOffset + *soname_offset + ); } } -void ObjectRepository::_discoverDependencies(SharedObject *object, - Scope *localScope, uint64_t rts) { - if(object->isMainObject) { - for(auto preload : *preloads) { +void +ObjectRepository::_discoverDependencies(SharedObject *object, Scope *localScope, uint64_t rts) { + if (object->isMainObject) { + for (auto preload : *preloads) { frg::expected libraryResult; if (preload.find_first('/') == size_t(-1)) { libraryResult = requestObjectWithName(preload, object, globalScope.get(), false, 1); } else { libraryResult = requestObjectAtPath(preload, globalScope.get(), false, 1); } - if(!libraryResult) + if (!libraryResult) mlibc::panicLogger() << "rtld: Could not load preload " << preload << frg::endlog; - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Preloading " << preload << frg::endlog; object->dependencies.push_back(libraryResult.value()); @@ -816,23 +891,24 @@ void ObjectRepository::_discoverDependencies(SharedObject *object, } // Load required dynamic libraries. 
- for(size_t i = 0; object->dynamic[i].d_tag != DT_NULL; i++) { + for (size_t i = 0; object->dynamic[i].d_tag != DT_NULL; i++) { elf_dyn *dynamic = &object->dynamic[i]; - if(dynamic->d_tag != DT_NEEDED) + if (dynamic->d_tag != DT_NEEDED) continue; - const char *library_str = (const char *)(object->baseAddress - + object->stringTableOffset + dynamic->d_un.d_val); + const char *library_str = + (const char *)(object->baseAddress + object->stringTableOffset + dynamic->d_un.d_val); - auto library = requestObjectWithName(frg::string_view{library_str}, - object, localScope, false, rts); - if(!library) + auto library = + requestObjectWithName(frg::string_view{library_str}, object, localScope, false, rts); + if (!library) mlibc::panicLogger() << "Could not satisfy dependency " << library_str << frg::endlog; object->dependencies.push(library.value()); } } -void ObjectRepository::_addLoadedObject(SharedObject *object) { +void +ObjectRepository::_addLoadedObject(SharedObject *object) { _nameMap.insert(object->name, object); loadedObjects.push_back(object); } @@ -841,58 +917,95 @@ void ObjectRepository::_addLoadedObject(SharedObject *object) { // SharedObject // -------------------------------------------------------- -SharedObject::SharedObject(const char *name, frg::string path, - bool is_main_object, Scope *local_scope, uint64_t object_rts) - : name(name, getAllocator()), path(std::move(path)), - interpreterPath(getAllocator()), soName(nullptr), - isMainObject(is_main_object), objectRts(object_rts), inLinkMap(false), - baseAddress(0), localScope(local_scope), dynamic(nullptr), - globalOffsetTable(nullptr), entry(nullptr), tlsSegmentSize(0), - tlsAlignment(0), tlsImageSize(0), tlsImagePtr(nullptr), - tlsInitialized(false), hashTableOffset(0), symbolTableOffset(0), - stringTableOffset(0), lazyRelocTableOffset(0), lazyTableSize(0), - lazyExplicitAddend(false), symbolicResolution(false), - eagerBinding(false), haveStaticTls(false), - dependencies(getAllocator()), tlsModel(TlsModel::null), - tlsOffset(0), globalRts(0), wasLinked(false), - scheduledForInit(false), onInitStack(false), - wasInitialized(false) { } - -SharedObject::SharedObject(const char *name, const char *path, - bool is_main_object, Scope *localScope, uint64_t object_rts) - : SharedObject(name, - frg::string { path, getAllocator() }, - is_main_object, localScope, object_rts) {} - -void processLateRelocation(Relocation rel) { +SharedObject::SharedObject( + const char *name, + frg::string path, + bool is_main_object, + Scope *local_scope, + uint64_t object_rts +) + : name(name, getAllocator()), + path(std::move(path)), + interpreterPath(getAllocator()), + soName(nullptr), + isMainObject(is_main_object), + objectRts(object_rts), + inLinkMap(false), + baseAddress(0), + localScope(local_scope), + dynamic(nullptr), + globalOffsetTable(nullptr), + entry(nullptr), + tlsSegmentSize(0), + tlsAlignment(0), + tlsImageSize(0), + tlsImagePtr(nullptr), + tlsInitialized(false), + hashTableOffset(0), + symbolTableOffset(0), + stringTableOffset(0), + lazyRelocTableOffset(0), + lazyTableSize(0), + lazyExplicitAddend(false), + symbolicResolution(false), + eagerBinding(false), + haveStaticTls(false), + dependencies(getAllocator()), + tlsModel(TlsModel::null), + tlsOffset(0), + globalRts(0), + wasLinked(false), + scheduledForInit(false), + onInitStack(false), + wasInitialized(false) {} + +SharedObject::SharedObject( + const char *name, const char *path, bool is_main_object, Scope *localScope, uint64_t object_rts +) + : SharedObject( + name, + frg::string{path, 
getAllocator()}, + is_main_object, + localScope, + object_rts + ) {} + +void +processLateRelocation(Relocation rel) { // resolve the symbol if there is a symbol frg::optional p; - if(rel.symbol_index()) { - auto symbol = (elf_sym *)(rel.object()->baseAddress + rel.object()->symbolTableOffset - + rel.symbol_index() * sizeof(elf_sym)); + if (rel.symbol_index()) { + auto symbol = (elf_sym *)(rel.object()->baseAddress + rel.object()->symbolTableOffset + + rel.symbol_index() * sizeof(elf_sym)); ObjectSymbol r(rel.object(), symbol); - p = Scope::resolveGlobalOrLocal(*globalScope, rel.object()->localScope, - r.getString(), rel.object()->objectRts, Scope::resolveCopy); + p = Scope::resolveGlobalOrLocal( + *globalScope, + rel.object()->localScope, + r.getString(), + rel.object()->objectRts, + Scope::resolveCopy + ); } - switch(rel.type()) { + switch (rel.type()) { case R_COPY: __ensure(p); memcpy(rel.destination(), (void *)p->virtualAddress(), p->symbol()->st_size); break; -// TODO: R_IRELATIVE also exists on other architectures but will likely need a different implementation. +// TODO: R_IRELATIVE also exists on other architectures but will likely need a different +// implementation. #if defined(__x86_64__) || defined(__i386__) case R_IRELATIVE: { uintptr_t addr = rel.object()->baseAddress + rel.addend_rel(); - auto* fn = reinterpret_cast(addr); + auto *fn = reinterpret_cast(addr); rel.relocate(fn()); } break; #elif defined(__aarch64__) case R_IRELATIVE: { uintptr_t addr = rel.object()->baseAddress + rel.addend_rel(); - auto* fn = reinterpret_cast(addr); + auto *fn = reinterpret_cast(addr); // TODO: the function should get passed AT_HWCAP value. rel.relocate(fn(0)); } break; @@ -903,17 +1016,18 @@ void processLateRelocation(Relocation rel) { } } -void processLateRelocations(SharedObject *object) { +void +processLateRelocations(SharedObject *object) { frg::optional rel_offset; frg::optional rel_length; frg::optional rela_offset; frg::optional rela_length; - for(size_t i = 0; object->dynamic[i].d_tag != DT_NULL; i++) { + for (size_t i = 0; object->dynamic[i].d_tag != DT_NULL; i++) { elf_dyn *dynamic = &object->dynamic[i]; - switch(dynamic->d_tag) { + switch (dynamic->d_tag) { case DT_REL: rel_offset = dynamic->d_un.d_ptr; break; @@ -935,66 +1049,68 @@ void processLateRelocations(SharedObject *object) { } } - if(rela_offset && rela_length) { - for(size_t offset = 0; offset < *rela_length; offset += sizeof(elf_rela)) { + if (rela_offset && rela_length) { + for (size_t offset = 0; offset < *rela_length; offset += sizeof(elf_rela)) { auto reloc = (elf_rela *)(object->baseAddress + *rela_offset + offset); auto r = Relocation(object, reloc); processLateRelocation(r); } - } else if(rel_offset && rel_length) { - for(size_t offset = 0; offset < *rel_length; offset += sizeof(elf_rel)) { + } else if (rel_offset && rel_length) { + for (size_t offset = 0; offset < *rel_length; offset += sizeof(elf_rel)) { auto reloc = (elf_rel *)(object->baseAddress + *rel_offset + offset); auto r = Relocation(object, reloc); processLateRelocation(r); } - }else{ + } else { __ensure(!rela_offset && !rela_length); __ensure(!rel_offset && !rel_length); } } -void doInitialize(SharedObject *object) { +void +doInitialize(SharedObject *object) { __ensure(object->wasLinked); __ensure(!object->wasInitialized); - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Initialize " << object->name << frg::endlog; - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Running DT_INIT function" << frg::endlog; - if(object->initPtr != 
nullptr) + if (object->initPtr != nullptr) object->initPtr(); - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Running DT_INIT_ARRAY functions" << frg::endlog; __ensure((object->initArraySize % sizeof(InitFuncPtr)) == 0); - for(size_t i = 0; i < object->initArraySize / sizeof(InitFuncPtr); i++) + for (size_t i = 0; i < object->initArraySize / sizeof(InitFuncPtr); i++) object->initArray[i](); - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Object initialization complete" << frg::endlog; object->wasInitialized = true; } -void doDestruct(SharedObject *object) { - if(!object->wasInitialized || object->wasDestroyed) +void +doDestruct(SharedObject *object) { + if (!object->wasInitialized || object->wasDestroyed) return; - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Destruct " << object->name << frg::endlog; - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Running DT_FINI_ARRAY functions" << frg::endlog; __ensure((object->finiArraySize % sizeof(InitFuncPtr)) == 0); - for(size_t i = object->finiArraySize / sizeof(InitFuncPtr); i > 0; i--) + for (size_t i = object->finiArraySize / sizeof(InitFuncPtr); i > 0; i--) object->finiArray[i - 1](); - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Running DT_FINI function" << frg::endlog; - if(object->finiPtr != nullptr) + if (object->finiPtr != nullptr) object->finiPtr(); - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Object destruction complete" << frg::endlog; object->wasDestroyed = true; } @@ -1003,14 +1119,16 @@ void doDestruct(SharedObject *object) { // RuntimeTlsMap // -------------------------------------------------------- -RuntimeTlsMap::RuntimeTlsMap() -: initialPtr{0}, initialLimit{0}, indices{getAllocator()} { } +RuntimeTlsMap::RuntimeTlsMap() : initialPtr{0}, initialLimit{0}, indices{getAllocator()} {} -void initTlsObjects(Tcb *tcb, const frg::vector &objects, bool checkInitialized) { +void +initTlsObjects( + Tcb *tcb, const frg::vector &objects, bool checkInitialized +) { // Initialize TLS segments that follow the static model. 
- for(auto object : objects) { - if(object->tlsModel == TlsModel::initial) { - if(checkInitialized && object->tlsInitialized) + for (auto object : objects) { + if (object->tlsModel == TlsModel::initial) { + if (checkInitialized && object->tlsInitialized) continue; char *tcb_ptr = reinterpret_cast(tcb); @@ -1024,8 +1142,9 @@ void initTlsObjects(Tcb *tcb, const frg::vector memcpy(tls_ptr, object->tlsImagePtr, object->tlsImageSize); if (verbose) { - mlibc::infoLogger() << "rtld: wrote tls image at " << (void *)tls_ptr - << ", size = 0x" << frg::hex_fmt{object->tlsSegmentSize} << frg::endlog; + mlibc::infoLogger() + << "rtld: wrote tls image at " << (void *)tls_ptr << ", size = 0x" + << frg::hex_fmt{object->tlsSegmentSize} << frg::endlog; } if (checkInitialized) @@ -1034,7 +1153,8 @@ void initTlsObjects(Tcb *tcb, const frg::vector } } -Tcb *allocateTcb() { +Tcb * +allocateTcb() { size_t tlsInitialSize = runtimeTlsMap->initialLimit; // To make sure that both the TCB and TLS data are sufficiently aligned, allocate @@ -1069,10 +1189,10 @@ Tcb *allocateTcb() { __ensure((tcbAddress & (alignof(Tcb) - 1)) == 0); if (verbose) { - mlibc::infoLogger() << "rtld: tcb allocated at " << (void *)tcbAddress - << ", size = 0x" << frg::hex_fmt{sizeof(Tcb)} << frg::endlog; - mlibc::infoLogger() << "rtld: tls allocated at " << (void *)tlsAddress - << ", size = 0x" << frg::hex_fmt{tlsInitialSize} << frg::endlog; + mlibc::infoLogger() << "rtld: tcb allocated at " << (void *)tcbAddress << ", size = 0x" + << frg::hex_fmt{sizeof(Tcb)} << frg::endlog; + mlibc::infoLogger() << "rtld: tls allocated at " << (void *)tlsAddress << ", size = 0x" + << frg::hex_fmt{tlsInitialSize} << frg::endlog; } Tcb *tcb_ptr = new ((char *)tcbAddress) Tcb; @@ -1083,17 +1203,19 @@ Tcb *allocateTcb() { tcb_ptr->didExit = 0; tcb_ptr->isJoinable = 1; memset(&tcb_ptr->returnValue, 0, sizeof(tcb_ptr->returnValue)); - tcb_ptr->localKeys = frg::construct>(getAllocator()); + tcb_ptr->localKeys = + frg::construct>(getAllocator()); tcb_ptr->dtvSize = runtimeTlsMap->indices.size(); tcb_ptr->dtvPointers = frg::construct_n(getAllocator(), runtimeTlsMap->indices.size()); memset(tcb_ptr->dtvPointers, 0, sizeof(void *) * runtimeTlsMap->indices.size()); - for(size_t i = 0; i < runtimeTlsMap->indices.size(); ++i) { + for (size_t i = 0; i < runtimeTlsMap->indices.size(); ++i) { auto object = runtimeTlsMap->indices[i]; - if(object->tlsModel != TlsModel::initial) + if (object->tlsModel != TlsModel::initial) continue; if constexpr (tlsAboveTp) { - tcb_ptr->dtvPointers[i] = reinterpret_cast(tcb_ptr) + sizeof(Tcb) + object->tlsOffset; + tcb_ptr->dtvPointers[i] = + reinterpret_cast(tcb_ptr) + sizeof(Tcb) + object->tlsOffset; } else { tcb_ptr->dtvPointers[i] = reinterpret_cast(tcb_ptr) + object->tlsOffset; } @@ -1102,11 +1224,12 @@ Tcb *allocateTcb() { return tcb_ptr; } -void *accessDtv(SharedObject *object) { +void * +accessDtv(SharedObject *object) { Tcb *tcb_ptr = mlibc::get_current_tcb(); // We might need to reallocate the DTV. - if(object->tlsIndex >= tcb_ptr->dtvSize) { + if (object->tlsIndex >= tcb_ptr->dtvSize) { // TODO: need to protect runtimeTlsMap against concurrent access. auto ndtv = frg::construct_n(getAllocator(), runtimeTlsMap->indices.size()); memset(ndtv, 0, sizeof(void *) * runtimeTlsMap->indices.size()); @@ -1117,7 +1240,7 @@ void *accessDtv(SharedObject *object) { } // We might need to fill in a new DTV entry. 
- if(!tcb_ptr->dtvPointers[object->tlsIndex]) { + if (!tcb_ptr->dtvPointers[object->tlsIndex]) { __ensure(object->tlsModel == TlsModel::dynamic); auto buffer = getAllocator().allocate(object->tlsSegmentSize); @@ -1127,15 +1250,16 @@ void *accessDtv(SharedObject *object) { tcb_ptr->dtvPointers[object->tlsIndex] = buffer; if (verbose) { - mlibc::infoLogger() << "rtld: accessDtv wrote tls image at " << buffer - << ", size = 0x" << frg::hex_fmt{object->tlsSegmentSize} << frg::endlog; + mlibc::infoLogger() << "rtld: accessDtv wrote tls image at " << buffer << ", size = 0x" + << frg::hex_fmt{object->tlsSegmentSize} << frg::endlog; } } return (void *)((char *)tcb_ptr->dtvPointers[object->tlsIndex] + TLS_DTV_OFFSET); } -void *tryAccessDtv(SharedObject *object) { +void * +tryAccessDtv(SharedObject *object) { Tcb *tcb_ptr = mlibc::get_current_tcb(); if (object->tlsIndex >= tcb_ptr->dtvSize) @@ -1151,15 +1275,17 @@ void *tryAccessDtv(SharedObject *object) { // -------------------------------------------------------- ObjectSymbol::ObjectSymbol(SharedObject *object, const elf_sym *symbol) -: _object(object), _symbol(symbol) { } + : _object(object), + _symbol(symbol) {} -const char *ObjectSymbol::getString() { +const char * +ObjectSymbol::getString() { __ensure(_symbol->st_name != 0); - return (const char *)(_object->baseAddress - + _object->stringTableOffset + _symbol->st_name); + return (const char *)(_object->baseAddress + _object->stringTableOffset + _symbol->st_name); } -uintptr_t ObjectSymbol::virtualAddress() { +uintptr_t +ObjectSymbol::virtualAddress() { auto bind = ELF_ST_BIND(_symbol->st_info); __ensure(bind == STB_GLOBAL || bind == STB_WEAK || bind == STB_GNU_UNIQUE); __ensure(_symbol->st_shndx != SHN_UNDEF); @@ -1170,13 +1296,14 @@ uintptr_t ObjectSymbol::virtualAddress() { // Scope // -------------------------------------------------------- -uint32_t elf64Hash(frg::string_view string) { +uint32_t +elf64Hash(frg::string_view string) { uint32_t h = 0, g; - for(size_t i = 0; i < string.size(); ++i) { + for (size_t i = 0; i < string.size(); ++i) { h = (h << 4) + (uint32_t)string[i]; g = h & 0xF0000000; - if(g) + if (g) h ^= g >> 24; h &= 0x0FFFFFFF; } @@ -1184,22 +1311,24 @@ uint32_t elf64Hash(frg::string_view string) { return h; } -uint32_t gnuHash(frg::string_view string) { - uint32_t h = 5381; - for(size_t i = 0; i < string.size(); ++i) - h = (h << 5) + h + string[i]; - return h; +uint32_t +gnuHash(frg::string_view string) { + uint32_t h = 5381; + for (size_t i = 0; i < string.size(); ++i) + h = (h << 5) + h + string[i]; + return h; } // TODO: move this to some namespace or class? -frg::optional resolveInObject(SharedObject *object, frg::string_view string) { +frg::optional +resolveInObject(SharedObject *object, frg::string_view string) { // Checks if the symbol can be used to satisfy the dependency. 
- auto eligible = [&] (ObjectSymbol cand) { - if(cand.symbol()->st_shndx == SHN_UNDEF) + auto eligible = [&](ObjectSymbol cand) { + if (cand.symbol()->st_shndx == SHN_UNDEF) return false; auto bind = ELF_ST_BIND(cand.symbol()->st_info); - if(bind != STB_GLOBAL && bind != STB_WEAK && bind != STB_GNU_UNIQUE) + if (bind != STB_GLOBAL && bind != STB_WEAK && bind != STB_GNU_UNIQUE) return false; return true; @@ -1211,17 +1340,20 @@ frg::optional resolveInObject(SharedObject *object, frg::string_vi auto bucket = elf64Hash(string) % num_buckets; auto index = hash_table[2 + bucket]; - while(index != 0) { - ObjectSymbol cand{object, (elf_sym *)(object->baseAddress - + object->symbolTableOffset + index * sizeof(elf_sym))}; - if(eligible(cand) && frg::string_view{cand.getString()} == string) + while (index != 0) { + ObjectSymbol cand{ + object, + (elf_sym *)(object->baseAddress + object->symbolTableOffset + + index * sizeof(elf_sym)) + }; + if (eligible(cand) && frg::string_view{cand.getString()} == string) return cand; index = hash_table[2 + num_buckets + index]; } return frg::optional{}; - }else{ + } else { __ensure(object->hashStyle == HashStyle::gnu); struct GnuTable { @@ -1231,15 +1363,16 @@ frg::optional resolveInObject(SharedObject *object, frg::string_vi uint32_t bloomShift; }; - auto hash_table = reinterpret_cast(object->baseAddress - + object->hashTableOffset); - auto buckets = reinterpret_cast(object->baseAddress - + object->hashTableOffset + sizeof(GnuTable) - + hash_table->bloomSize * sizeof(elf_addr)); - auto chains = reinterpret_cast(object->baseAddress - + object->hashTableOffset + sizeof(GnuTable) - + hash_table->bloomSize * sizeof(elf_addr) - + hash_table->nBuckets * sizeof(uint32_t)); + auto hash_table = + reinterpret_cast(object->baseAddress + object->hashTableOffset); + auto buckets = reinterpret_cast( + object->baseAddress + object->hashTableOffset + sizeof(GnuTable) + + hash_table->bloomSize * sizeof(elf_addr) + ); + auto chains = reinterpret_cast( + object->baseAddress + object->hashTableOffset + sizeof(GnuTable) + + hash_table->bloomSize * sizeof(elf_addr) + hash_table->nBuckets * sizeof(uint32_t) + ); // TODO: Use the bloom filter. @@ -1247,29 +1380,32 @@ frg::optional resolveInObject(SharedObject *object, frg::string_vi auto hash = gnuHash(string); auto index = buckets[hash % hash_table->nBuckets]; - if(!index) + if (!index) return frg::optional{}; - while(true) { + while (true) { // chains[] contains an array of hashes, parallel to the symbol table. auto chash = chains[index - hash_table->symbolOffset]; if ((chash & ~1) == (hash & ~1)) { - ObjectSymbol cand{object, (elf_sym *)(object->baseAddress - + object->symbolTableOffset + index * sizeof(elf_sym))}; - if(eligible(cand) && frg::string_view{cand.getString()} == string) + ObjectSymbol cand{ + object, + (elf_sym *)(object->baseAddress + object->symbolTableOffset + + index * sizeof(elf_sym)) + }; + if (eligible(cand) && frg::string_view{cand.getString()} == string) return cand; } // If we hit the end of the chain, the symbol is not present. - if(chash & 1) + if (chash & 1) return frg::optional{}; index++; } } } -frg::optional Scope::_resolveNext(frg::string_view string, - SharedObject *target) { +frg::optional +Scope::_resolveNext(frg::string_view string, SharedObject *target) { // Skip objects until we find the target, and only look for symbols after that. 
size_t i; for (i = 0; i < _objects.size(); i++) { @@ -1278,26 +1414,27 @@ frg::optional Scope::_resolveNext(frg::string_view string, } if (i == _objects.size()) { - mlibc::infoLogger() << "rtld: object passed to Scope::resolveAfter was not found" << frg::endlog; + mlibc::infoLogger() << "rtld: object passed to Scope::resolveAfter was not found" + << frg::endlog; return frg::optional(); } for (i = i + 1; i < _objects.size(); i++) { - if(_objects[i]->isMainObject) + if (_objects[i]->isMainObject) continue; frg::optional p = resolveInObject(_objects[i], string); - if(p) + if (p) return p; } return frg::optional(); } -Scope::Scope(bool isGlobal) -: isGlobal{isGlobal}, _objects(getAllocator()) { } +Scope::Scope(bool isGlobal) : isGlobal{isGlobal}, _objects(getAllocator()) {} -void Scope::appendObject(SharedObject *object) { +void +Scope::appendObject(SharedObject *object) { // Don't insert duplicates. for (auto obj : _objects) { if (obj == object) @@ -1307,41 +1444,49 @@ void Scope::appendObject(SharedObject *object) { _objects.push(object); } -frg::optional Scope::resolveGlobalOrLocal(Scope &globalScope, - Scope *localScope, frg::string_view string, uint64_t skipRts, ResolveFlags flags) { +frg::optional +Scope::resolveGlobalOrLocal( + Scope &globalScope, + Scope *localScope, + frg::string_view string, + uint64_t skipRts, + ResolveFlags flags +) { auto sym = globalScope.resolveSymbol(string, skipRts, flags | skipGlobalAfterRts); - if(!sym && localScope) + if (!sym && localScope) sym = localScope->resolveSymbol(string, skipRts, flags | skipGlobalAfterRts); return sym; } -frg::optional Scope::resolveGlobalOrLocalNext(Scope &globalScope, - Scope *localScope, frg::string_view string, SharedObject *origin) { +frg::optional +Scope::resolveGlobalOrLocalNext( + Scope &globalScope, Scope *localScope, frg::string_view string, SharedObject *origin +) { auto sym = globalScope._resolveNext(string, origin); - if(!sym && localScope) { + if (!sym && localScope) { sym = localScope->_resolveNext(string, origin); } return sym; } // TODO: let this return uintptr_t -frg::optional Scope::resolveSymbol(frg::string_view string, - uint64_t skipRts, ResolveFlags flags) { +frg::optional +Scope::resolveSymbol(frg::string_view string, uint64_t skipRts, ResolveFlags flags) { for (auto object : _objects) { - if((flags & resolveCopy) && object->isMainObject) + if ((flags & resolveCopy) && object->isMainObject) continue; - if((flags & skipGlobalAfterRts) && object->globalRts > skipRts) { + if ((flags & skipGlobalAfterRts) && object->globalRts > skipRts) { // globalRts should be monotone increasing for objects in the global scope, // so as an optimization we can break early here. // TODO: If we implement DT_SYMBOLIC, this assumption fails. 
- if(isGlobal) + if (isGlobal) break; else continue; } frg::optional p = resolveInObject(object, string); - if(p) + if (p) return p; } @@ -1353,20 +1498,24 @@ frg::optional Scope::resolveSymbol(frg::string_view string, // -------------------------------------------------------- Loader::Loader(Scope *scope, SharedObject *mainExecutable, bool is_initial_link, uint64_t rts) -: _mainExecutable{mainExecutable}, _loadScope{scope}, _isInitialLink{is_initial_link}, - _linkRts{rts}, _linkBfs{getAllocator()}, _initQueue{getAllocator()} { } - -void Loader::_buildLinkBfs(SharedObject *root) { + : _mainExecutable{mainExecutable}, + _loadScope{scope}, + _isInitialLink{is_initial_link}, + _linkRts{rts}, + _linkBfs{getAllocator()}, + _initQueue{getAllocator()} {} + +void +Loader::_buildLinkBfs(SharedObject *root) { __ensure(_linkBfs.size() == 0); struct Token {}; - using Set = frg::hash_map, MemoryAllocator>; + using Set = frg::hash_map, MemoryAllocator>; Set set{frg::hash{}, getAllocator()}; _linkBfs.push(root); // Loop over indices (not iterators) here: We are adding elements in the loop! - for(size_t i = 0; i < _linkBfs.size(); i++) { + for (size_t i = 0; i < _linkBfs.size(); i++) { auto current = _linkBfs[i]; // At this point the object is loaded and we can fill in its debug struct, @@ -1390,12 +1539,13 @@ void Loader::_buildLinkBfs(SharedObject *root) { } } -void Loader::linkObjects(SharedObject *root) { +void +Loader::linkObjects(SharedObject *root) { _buildLinkBfs(root); _buildTlsMaps(); // Promote objects to the desired scope. - for(auto object : _linkBfs) { + for (auto object : _linkBfs) { if (object->globalRts == 0 && _loadScope->isGlobal) object->globalRts = _linkRts; @@ -1403,72 +1553,73 @@ void Loader::linkObjects(SharedObject *root) { } // Process regular relocations. - for(auto object : _linkBfs) { + for (auto object : _linkBfs) { // Some objects have already been linked before. - if(object->objectRts < _linkRts) + if (object->objectRts < _linkRts) continue; - if(object->dynamic == nullptr) + if (object->dynamic == nullptr) continue; - if(verbose) + if (verbose) mlibc::infoLogger() << "rtld: Linking " << object->name << frg::endlog; __ensure(!object->wasLinked); // TODO: Support this. - if(object->symbolicResolution) + if (object->symbolicResolution) mlibc::infoLogger() << "\e[31mrtld: DT_SYMBOLIC is not implemented correctly!\e[39m" - << frg::endlog; + << frg::endlog; _processStaticRelocations(object); _processLazyRelocations(object); } // Process copy relocations. - for(auto object : _linkBfs) { - if(!object->isMainObject) + for (auto object : _linkBfs) { + if (!object->isMainObject) continue; // Some objects have already been linked before. 
- if(object->objectRts < _linkRts) + if (object->objectRts < _linkRts) continue; - if(object->dynamic == nullptr) + if (object->dynamic == nullptr) continue; processLateRelocations(object); } - for(auto object : _linkBfs) { + for (auto object : _linkBfs) { object->wasLinked = true; - if(object->inLinkMap) + if (object->inLinkMap) continue; - auto linkMap = reinterpret_cast(globalDebugInterface.head); + auto linkMap = reinterpret_cast(globalDebugInterface.head); object->linkMap.prev = linkMap; object->linkMap.next = linkMap->next; - if(linkMap->next) + if (linkMap->next) linkMap->next->prev = &(object->linkMap); linkMap->next = &(object->linkMap); object->inLinkMap = true; } } -void Loader::_buildTlsMaps() { - if(_isInitialLink) { +void +Loader::_buildTlsMaps() { + if (_isInitialLink) { __ensure(runtimeTlsMap->initialPtr == 0); __ensure(runtimeTlsMap->initialLimit == 0); __ensure(!_linkBfs.empty()); __ensure(_linkBfs.front()->isMainObject); - for(auto object : _linkBfs) { + for (auto object : _linkBfs) { __ensure(object->tlsModel == TlsModel::null); - if(object->tlsSegmentSize == 0) + if (object->tlsSegmentSize == 0) continue; // Allocate an index for the object. @@ -1479,7 +1630,7 @@ void Loader::_buildTlsMaps() { if constexpr (tlsAboveTp) { size_t misalign = runtimeTlsMap->initialPtr & (object->tlsAlignment - 1); - if(misalign) + if (misalign) runtimeTlsMap->initialPtr += object->tlsAlignment - misalign; object->tlsOffset = runtimeTlsMap->initialPtr; @@ -1488,26 +1639,26 @@ void Loader::_buildTlsMaps() { runtimeTlsMap->initialPtr += object->tlsSegmentSize; size_t misalign = runtimeTlsMap->initialPtr & (object->tlsAlignment - 1); - if(misalign) + if (misalign) runtimeTlsMap->initialPtr += object->tlsAlignment - misalign; object->tlsOffset = -runtimeTlsMap->initialPtr; } - if(verbose) - mlibc::infoLogger() << "rtld: TLS of " << object->name - << " mapped to 0x" << frg::hex_fmt{object->tlsOffset} - << ", size: " << object->tlsSegmentSize - << ", alignment: " << object->tlsAlignment << frg::endlog; + if (verbose) + mlibc::infoLogger() + << "rtld: TLS of " << object->name << " mapped to 0x" + << frg::hex_fmt{object->tlsOffset} << ", size: " << object->tlsSegmentSize + << ", alignment: " << object->tlsAlignment << frg::endlog; } // Reserve some additional space for future libraries. runtimeTlsMap->initialLimit = runtimeTlsMap->initialPtr + 64; - }else{ - for(auto object : _linkBfs) { - if(object->tlsModel != TlsModel::null) + } else { + for (auto object : _linkBfs) { + if (object->tlsModel != TlsModel::null) continue; - if(object->tlsSegmentSize == 0) + if (object->tlsSegmentSize == 0) continue; // Allocate an index for the object. @@ -1516,12 +1667,12 @@ void Loader::_buildTlsMaps() { // There are some libraries (e.g. Mesa) that require static TLS even though // they expect to be dynamically loaded. 
- if(object->haveStaticTls) { + if (object->haveStaticTls) { object->tlsModel = TlsModel::initial; if constexpr (tlsAboveTp) { size_t misalign = runtimeTlsMap->initialPtr & (object->tlsAlignment - 1); - if(misalign) + if (misalign) runtimeTlsMap->initialPtr += object->tlsAlignment - misalign; object->tlsOffset = runtimeTlsMap->initialPtr; @@ -1530,29 +1681,31 @@ void Loader::_buildTlsMaps() { runtimeTlsMap->initialPtr += object->tlsSegmentSize; size_t misalign = runtimeTlsMap->initialPtr & (object->tlsAlignment - 1); - if(misalign) + if (misalign) runtimeTlsMap->initialPtr += object->tlsAlignment - misalign; object->tlsOffset = -runtimeTlsMap->initialPtr; } - if(runtimeTlsMap->initialPtr > runtimeTlsMap->initialLimit) - mlibc::panicLogger() << "rtld: Static TLS space exhausted while while" - " allocating TLS for " << object->name << frg::endlog; + if (runtimeTlsMap->initialPtr > runtimeTlsMap->initialLimit) + mlibc::panicLogger() << "rtld: Static TLS space exhausted while while" + " allocating TLS for " + << object->name << frg::endlog; - if(verbose) - mlibc::infoLogger() << "rtld: TLS of " << object->name - << " mapped to 0x" << frg::hex_fmt{object->tlsOffset} - << ", size: " << object->tlsSegmentSize - << ", alignment: " << object->tlsAlignment << frg::endlog; - }else{ + if (verbose) + mlibc::infoLogger() + << "rtld: TLS of " << object->name << " mapped to 0x" + << frg::hex_fmt{object->tlsOffset} << ", size: " << object->tlsSegmentSize + << ", alignment: " << object->tlsAlignment << frg::endlog; + } else { object->tlsModel = TlsModel::dynamic; } } } } -void Loader::initObjects(ObjectRepository *repository) { +void +Loader::initObjects(ObjectRepository *repository) { initTlsObjects(mlibc::get_current_tcb(), _linkBfs, true); if (_mainExecutable && _mainExecutable->preInitArray) { @@ -1562,19 +1715,19 @@ void Loader::initObjects(ObjectRepository *repository) { __ensure(_mainExecutable->isMainObject); __ensure(!_mainExecutable->wasInitialized); __ensure((_mainExecutable->preInitArraySize % sizeof(InitFuncPtr)) == 0); - for(size_t i = 0; i < _mainExecutable->preInitArraySize / sizeof(InitFuncPtr); i++) + for (size_t i = 0; i < _mainExecutable->preInitArraySize / sizeof(InitFuncPtr); i++) _mainExecutable->preInitArray[i](); } // Convert the breadth-first representation to a depth-first post-order representation, // so that every object is initialized *after* its dependencies. - for(auto object : _linkBfs) { - if(!object->scheduledForInit) + for (auto object : _linkBfs) { + if (!object->scheduledForInit) _scheduleInit(object); } - for(auto object : _initQueue) { - if(!object->wasInitialized) { + for (auto object : _initQueue) { + if (!object->wasInitialized) { doInitialize(object); repository->addObjectToDestructQueue(object); } @@ -1582,7 +1735,8 @@ void Loader::initObjects(ObjectRepository *repository) { } // TODO: Use an explicit vector to reduce stack usage to O(1)? -void Loader::_scheduleInit(SharedObject *object) { +void +Loader::_scheduleInit(SharedObject *object) { // Here we detect cyclic dependencies. 
__ensure(!object->onInitStack); object->onInitStack = true; @@ -1590,8 +1744,8 @@ void Loader::_scheduleInit(SharedObject *object) { __ensure(!object->scheduledForInit); object->scheduledForInit = true; - for(size_t i = 0; i < object->dependencies.size(); i++) { - if(!object->dependencies[i]->scheduledForInit) + for (size_t i = 0; i < object->dependencies.size(); i++) { + if (!object->dependencies[i]->scheduledForInit) _scheduleInit(object->dependencies[i]); } @@ -1599,32 +1753,34 @@ void Loader::_scheduleInit(SharedObject *object) { object->onInitStack = false; } -void Loader::_processRelocations(Relocation &rel) { +void +Loader::_processRelocations(Relocation &rel) { // copy and irelative relocations have to be performed after all other relocations - if(rel.type() == R_COPY || rel.type() == R_IRELATIVE) + if (rel.type() == R_COPY || rel.type() == R_IRELATIVE) return; // resolve the symbol if there is a symbol frg::optional p; - if(rel.symbol_index()) { - auto symbol = (elf_sym *)(rel.object()->baseAddress + rel.object()->symbolTableOffset - + rel.symbol_index() * sizeof(elf_sym)); + if (rel.symbol_index()) { + auto symbol = (elf_sym *)(rel.object()->baseAddress + rel.object()->symbolTableOffset + + rel.symbol_index() * sizeof(elf_sym)); ObjectSymbol r(rel.object(), symbol); - p = Scope::resolveGlobalOrLocal(*globalScope, rel.object()->localScope, - r.getString(), rel.object()->objectRts, 0); - if(!p) { - if(ELF_ST_BIND(symbol->st_info) != STB_WEAK) - mlibc::panicLogger() << "Unresolved load-time symbol " - << r.getString() << " in object " << rel.object()->name << frg::endlog; - - if(verbose) - mlibc::infoLogger() << "rtld: Unresolved weak load-time symbol " - << r.getString() << " in object " << rel.object()->name << frg::endlog; + p = Scope::resolveGlobalOrLocal( + *globalScope, rel.object()->localScope, r.getString(), rel.object()->objectRts, 0 + ); + if (!p) { + if (ELF_ST_BIND(symbol->st_info) != STB_WEAK) + mlibc::panicLogger() << "Unresolved load-time symbol " << r.getString() + << " in object " << rel.object()->name << frg::endlog; + + if (verbose) + mlibc::infoLogger() << "rtld: Unresolved weak load-time symbol " << r.getString() + << " in object " << rel.object()->name << frg::endlog; } } - switch(rel.type()) { + switch (rel.type()) { case R_NONE: break; @@ -1660,13 +1816,13 @@ void Loader::_processRelocations(Relocation &rel) { // sets the first `sizeof(uintptr_t)` bytes of `struct __abi_tls_entry` // this means that we can just use the `SharedObject *` to resolve whatever we need __ensure(!rel.addend_rel()); - if(rel.symbol_index()) { + if (rel.symbol_index()) { __ensure(p); rel.relocate(elf_addr(p->object())); - }else{ - if(stillSlightlyVerbose) + } else { + if (stillSlightlyVerbose) mlibc::infoLogger() << "rtld: Warning: TLS_DTPMOD64 with no symbol in object " - << rel.object()->name << frg::endlog; + << rel.object()->name << frg::endlog; rel.relocate(elf_addr(rel.object())); } } break; @@ -1679,23 +1835,24 @@ void Loader::_processRelocations(Relocation &rel) { uintptr_t off = rel.addend_rel(); ssize_t tls_offset = 0; - if(rel.symbol_index()) { + if (rel.symbol_index()) { __ensure(p); - if(p->object()->tlsModel != TlsModel::initial) - mlibc::panicLogger() << "rtld: In object " << rel.object()->name - << ": Static TLS relocation to symbol " << p->getString() - << " in dynamically loaded object " - << p->object()->name << frg::endlog; + if (p->object()->tlsModel != TlsModel::initial) + mlibc::panicLogger() + << "rtld: In object " << rel.object()->name + << ": Static TLS 
relocation to symbol " << p->getString() + << " in dynamically loaded object " << p->object()->name << frg::endlog; off += p->symbol()->st_value; tls_offset = p->object()->tlsOffset; - }else{ - if(stillSlightlyVerbose) + } else { + if (stillSlightlyVerbose) mlibc::infoLogger() << "rtld: Warning: TPOFF64 with no symbol" - " in object " << rel.object()->name << frg::endlog; - if(rel.object()->tlsModel != TlsModel::initial) + " in object " + << rel.object()->name << frg::endlog; + if (rel.object()->tlsModel != TlsModel::initial) mlibc::panicLogger() << "rtld: In object " << rel.object()->name - << ": Static TLS relocation to dynamically loaded object " - << rel.object()->name << frg::endlog; + << ": Static TLS relocation to dynamically loaded object " + << rel.object()->name << frg::endlog; tls_offset = rel.object()->tlsOffset; } @@ -1703,12 +1860,12 @@ void Loader::_processRelocations(Relocation &rel) { rel.relocate(off); } break; default: - mlibc::panicLogger() << "Unexpected relocation type " - << (void *) rel.type() << frg::endlog; + mlibc::panicLogger() << "Unexpected relocation type " << (void *)rel.type() << frg::endlog; } } -void Loader::_processStaticRelocations(SharedObject *object) { +void +Loader::_processStaticRelocations(SharedObject *object) { frg::optional rela_offset; frg::optional rela_length; @@ -1718,10 +1875,10 @@ void Loader::_processStaticRelocations(SharedObject *object) { frg::optional relr_offset; frg::optional relr_length; - for(size_t i = 0; object->dynamic[i].d_tag != DT_NULL; i++) { + for (size_t i = 0; object->dynamic[i].d_tag != DT_NULL; i++) { elf_dyn *dynamic = &object->dynamic[i]; - switch(dynamic->d_tag) { + switch (dynamic->d_tag) { case DT_RELA: rela_offset = dynamic->d_un.d_ptr; break; @@ -1752,19 +1909,19 @@ void Loader::_processStaticRelocations(SharedObject *object) { } } - if(rela_offset && rela_length) { + if (rela_offset && rela_length) { __ensure(!rel_offset && !rel_length); - for(size_t offset = 0; offset < *rela_length; offset += sizeof(elf_rela)) { + for (size_t offset = 0; offset < *rela_length; offset += sizeof(elf_rela)) { auto reloc = (elf_rela *)(object->baseAddress + *rela_offset + offset); auto r = Relocation(object, reloc); _processRelocations(r); } - }else if(rel_offset && rel_length) { + } else if (rel_offset && rel_length) { __ensure(!rela_offset && !rela_length); - for(size_t offset = 0; offset < *rel_length; offset += sizeof(elf_rel)) { + for (size_t offset = 0; offset < *rel_length; offset += sizeof(elf_rel)) { auto reloc = (elf_rel *)(object->baseAddress + *rel_offset + offset); auto r = Relocation(object, reloc); @@ -1772,26 +1929,27 @@ void Loader::_processStaticRelocations(SharedObject *object) { } } - if(relr_offset && relr_length) { + if (relr_offset && relr_length) { elf_addr *addr = nullptr; - for(size_t offset = 0; offset < *relr_length; offset += sizeof(elf_relr)) { + for (size_t offset = 0; offset < *relr_length; offset += sizeof(elf_relr)) { auto entry = *(elf_relr *)(object->baseAddress + *relr_offset + offset); // Even entry indicates the beginning address. - if(!(entry & 1)) { + if (!(entry & 1)) { addr = (elf_addr *)(object->baseAddress + entry); __ensure(addr); *addr++ += object->baseAddress; - }else { - // Odd entry indicates entry is a bitmap of the subsequent locations to be relocated. + } else { + // Odd entry indicates entry is a bitmap of the subsequent locations to be + // relocated. - // The first bit of an entry is always a marker about whether the entry is an address or a bitmap, - // discard it. 
+ // The first bit of an entry is always a marker about whether the entry is an + // address or a bitmap, discard it. entry >>= 1; - for(int i = 0; entry; ++i) { - if(entry & 1) { + for (int i = 0; entry; ++i) { + if (entry & 1) { addr[i] += object->baseAddress; } entry >>= 1; @@ -1806,33 +1964,36 @@ void Loader::_processStaticRelocations(SharedObject *object) { // TODO: TLSDESC relocations aren't aarch64/x86_64 specific #if defined(__aarch64__) || defined(__x86_64__) -extern "C" void *__mlibcTlsdescStatic(void *); -extern "C" void *__mlibcTlsdescDynamic(void *); +extern "C" void * +__mlibcTlsdescStatic(void *); +extern "C" void * +__mlibcTlsdescDynamic(void *); #endif -void Loader::_processLazyRelocations(SharedObject *object) { - if(object->globalOffsetTable == nullptr) { +void +Loader::_processLazyRelocations(SharedObject *object) { + if (object->globalOffsetTable == nullptr) { __ensure(object->lazyRelocTableOffset == 0); return; } object->globalOffsetTable[1] = object; object->globalOffsetTable[2] = (void *)&pltRelocateStub; - if(!object->lazyTableSize) + if (!object->lazyTableSize) return; // adjust the addresses of JUMP_SLOT relocations __ensure(object->lazyExplicitAddend.has_value()); size_t rel_size = (*object->lazyExplicitAddend) ? sizeof(elf_rela) : sizeof(elf_rel); - for(size_t offset = 0; offset < object->lazyTableSize; offset += rel_size) { + for (size_t offset = 0; offset < object->lazyTableSize; offset += rel_size) { elf_info type; elf_info symbol_index; uintptr_t rel_addr; uintptr_t addend [[maybe_unused]] = 0; - if(*object->lazyExplicitAddend) { + if (*object->lazyExplicitAddend) { auto reloc = (elf_rela *)(object->baseAddress + object->lazyRelocTableOffset + offset); type = ELF_R_TYPE(reloc->r_info); symbol_index = ELF_R_SYM(reloc->r_info); @@ -1847,25 +2008,29 @@ void Loader::_processLazyRelocations(SharedObject *object) { switch (type) { case R_JUMP_SLOT: - if(eagerBinding) { - auto symbol = (elf_sym *)(object->baseAddress + object->symbolTableOffset - + symbol_index * sizeof(elf_sym)); + if (eagerBinding) { + auto symbol = (elf_sym *)(object->baseAddress + object->symbolTableOffset + + symbol_index * sizeof(elf_sym)); ObjectSymbol r(object, symbol); - auto p = Scope::resolveGlobalOrLocal(*globalScope, object->localScope, r.getString(), object->objectRts, 0); + auto p = Scope::resolveGlobalOrLocal( + *globalScope, object->localScope, r.getString(), object->objectRts, 0 + ); - if(!p) { - if(ELF_ST_BIND(symbol->st_info) != STB_WEAK) - mlibc::panicLogger() << "rtld: Unresolved JUMP_SLOT symbol " - << r.getString() << " in object " << object->name << frg::endlog; - - if(verbose) - mlibc::infoLogger() << "rtld: Unresolved weak JUMP_SLOT symbol " - << r.getString() << " in object " << object->name << frg::endlog; + if (!p) { + if (ELF_ST_BIND(symbol->st_info) != STB_WEAK) + mlibc::panicLogger() + << "rtld: Unresolved JUMP_SLOT symbol " << r.getString() + << " in object " << object->name << frg::endlog; + + if (verbose) + mlibc::infoLogger() + << "rtld: Unresolved weak JUMP_SLOT symbol " << r.getString() + << " in object " << object->name << frg::endlog; *((uintptr_t *)rel_addr) = 0; - }else{ + } else { *((uintptr_t *)rel_addr) = p->virtualAddress(); } - }else{ + } else { *((uintptr_t *)rel_addr) += object->baseAddress; } break; @@ -1884,15 +2049,17 @@ void Loader::_processLazyRelocations(SharedObject *object) { SharedObject *target = nullptr; if (symbol_index) { - auto symbol = (elf_sym *)(object->baseAddress + object->symbolTableOffset - + symbol_index * sizeof(elf_sym)); + 
auto symbol = (elf_sym *)(object->baseAddress + object->symbolTableOffset + + symbol_index * sizeof(elf_sym)); ObjectSymbol r(object, symbol); - auto p = Scope::resolveGlobalOrLocal(*globalScope, object->localScope, r.getString(), object->objectRts, 0); + auto p = Scope::resolveGlobalOrLocal( + *globalScope, object->localScope, r.getString(), object->objectRts, 0 + ); if (!p) { __ensure(ELF_ST_BIND(symbol->st_info) != STB_WEAK); - mlibc::panicLogger() << "rtld: Unresolved TLSDESC for symbol " - << r.getString() << " in object " << object->name << frg::endlog; + mlibc::panicLogger() << "rtld: Unresolved TLSDESC for symbol " << r.getString() + << " in object " << object->name << frg::endlog; } else { target = p->object(); if (p->symbol()) @@ -1935,4 +2102,3 @@ void Loader::_processLazyRelocations(SharedObject *object) { } } } -
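
Reviewer note, appended after the diff so the hunks above remain applicable as-is: the reformatted _buildTlsMaps() hunks encode the static-TLS placement rule for the two layouts the file distinguishes, TLS above the thread pointer (align the cursor first, positive offset) versus TLS below it (reserve the segment first, negative offset). The following is a minimal standalone sketch of that rule only; placeTlsBlock, cursor, and the plain integer types are illustrative names, not mlibc's.

#include <stddef.h>
#include <stdint.h>

// Returns the TP-relative offset for one TLS block and advances the allocation cursor.
// tlsAboveTp == true  (e.g. aarch64, riscv64): align first, offset is positive.
// tlsAboveTp == false (e.g. x86_64):           reserve first, offset is negative.
ptrdiff_t
placeTlsBlock(uintptr_t &cursor, size_t segmentSize, size_t alignment, bool tlsAboveTp) {
	auto alignUp = [&] {
		size_t misalign = cursor & (alignment - 1);
		if (misalign)
			cursor += alignment - misalign;
	};

	if (tlsAboveTp) {
		alignUp();
		ptrdiff_t offset = static_cast<ptrdiff_t>(cursor);
		cursor += segmentSize;
		return offset;
	}

	cursor += segmentSize;
	alignUp();
	return -static_cast<ptrdiff_t>(cursor);
}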
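
Reviewer note, likewise not part of the applied diff: the comments reflowed in _processStaticRelocations() describe the packed DT_RELR encoding (an even entry carries a load-base-relative address, an odd entry is a bitmap selecting which of the following words also need the base added). A minimal sketch of that decoding under the usual ELF semantics; applyRelr, base, and the plain uintptr_t types are illustrative and stand in for mlibc's elf_relr/elf_addr typedefs.

#include <stddef.h>
#include <stdint.h>

// Applies the relative relocations described by a DT_RELR table for an object loaded at `base`.
void
applyRelr(uintptr_t base, const uintptr_t *relr, size_t count) {
	uintptr_t *where = nullptr;
	for (size_t i = 0; i < count; i++) {
		uintptr_t entry = relr[i];
		if (!(entry & 1)) {
			// Even entry: the (base-relative) address of the next word to relocate.
			where = reinterpret_cast<uintptr_t *>(base + entry);
			*where++ += base;
		} else {
			// Odd entry: a bitmap. After discarding the marker bit, bit n selects the
			// n-th word following the last relocated address.
			uintptr_t bits = entry >> 1;
			for (size_t n = 0; bits; n++, bits >>= 1) {
				if (bits & 1)
					where[n] += base;
			}
			// Each bitmap entry covers the next (8 * sizeof(entry) - 1) words.
			where += 8 * sizeof(uintptr_t) - 1;
		}
	}
}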