diff --git a/CMakeLists.txt b/CMakeLists.txt index b65fea498..9cc6dcb8b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,6 +5,8 @@ # # Please read The Ultimate Guide to CMake: # https://rix0r.nl/blog/2015/08/13/cmake-guide/ +set(CMAKE_VERBOSE_MAKEFILE on) + cmake_minimum_required(VERSION 3.12 FATAL_ERROR) if (${CMAKE_VERSION} GREATER_EQUAL "3.24.0") diff --git a/README.md b/README.md index bde4adfb9..1ccd420f4 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,8 @@ Open Enclave SDK ================ +PSL version of OpenEnclave with EDMM support copied from https://github.com/mingweishih/openenclave. + [![Bors enabled](https://bors.tech/images/badge_small.svg)](https://app.bors.tech/repositories/21855) [![Join the chat at https://gitter.im/openenclave/community](https://badges.gitter.im/openenclave/community.svg)](https://gitter.im/openenclave/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) diff --git a/enclave/core/CMakeLists.txt b/enclave/core/CMakeLists.txt index 71a1f6f06..f074567ff 100644 --- a/enclave/core/CMakeLists.txt +++ b/enclave/core/CMakeLists.txt @@ -62,6 +62,8 @@ if (OE_SGX) sgx/backtrace.c sgx/calls.c sgx/cpuid.c + sgx/edmm.S + sgx/edmm.c sgx/enter.S sgx/entropy.c sgx/errno.c @@ -97,50 +99,47 @@ if (OE_SGX) # Switch compiler from MSVC to Clang for compiling enclave libraries if (WIN32 AND MSVC) - find_program(CLANG_C_COMPILER - NAMES clang - PATHS "C:/Program Files/LLVM/bin" - NO_DEFAULT_PATH - REQUIRED) + find_program( + CLANG_C_COMPILER + NAMES clang + PATHS "C:/Program Files/LLVM/bin" + NO_DEFAULT_PATH REQUIRED) execute_process( COMMAND ${CLANG_C_COMPILER} --version OUTPUT_VARIABLE CLANG_C_COMPILER_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE) - if (CLANG_C_COMPILER_VERSION MATCHES "clang version ([0-9]+\.[0-9]+\.[0-9]+)") + if (CLANG_C_COMPILER_VERSION MATCHES + "clang version ([0-9]+\.[0-9]+\.[0-9]+)") set(CLANG_C_COMPILER_VERSION ${CMAKE_MATCH_1}) else () message( - FATAL_ERROR - "Could not determine Clang version from ${CLANG_C_COMPILER}" + FATAL_ERROR "Could not determine Clang version from ${CLANG_C_COMPILER}" ) endif () set(CMAKE_C_COMPILER ${CLANG_C_COMPILER}) set(CMAKE_C_COMPILER_VERSION ${CLANG_C_COMPILER_VERSION}) message( STATUS - "${CMAKE_C_COMPILER} (version: ${CMAKE_C_COMPILER_VERSION}) will be used to compile enclave libraries" + "${CMAKE_C_COMPILER} (version: ${CMAKE_C_COMPILER_VERSION}) will be used to compile enclave libraries" ) - if (CMAKE_C_COMPILER_VERSION VERSION_LESS 10 - OR CMAKE_C_COMPILER_VERSION VERSION_GREATER 11.99) - message( - WARNING "Open Enclave officially supports Clang 11 and 10 only, " - "but your Clang version (${CMAKE_C_COMPILER_VERSION}) " - "is older or newer than that. Build problems may occur.") + if (CMAKE_C_COMPILER_VERSION VERSION_LESS 10 OR CMAKE_C_COMPILER_VERSION + VERSION_GREATER 11.99) + message(WARNING "Open Enclave officially supports Clang 11 and 10 only, " + "but your Clang version (${CMAKE_C_COMPILER_VERSION}) " + "is older or newer than that. Build problems may occur.") endif () endif () # Adding "-Wno-frame-address" because, for clang-11 and above, usage of - # __builtin_frame_address() with a non-zero argument is triggering + # __builtin_frame_address() with a non-zero argument is triggering # -Wframe-address. - # -Wno-frame-address is not used for clang-10 as it does not emit such + # -Wno-frame-address is not used for clang-10 as it does not emit such # an error, and does not support the option. 
if (CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 11) set(C_CALLS_CFLAGS "-Wno-frame-address") endif () - set_source_files_properties( - sgx/calls.c - PROPERTIES - COMPILE_OPTIONS "${C_CALLS_CFLAGS}") + set_source_files_properties(sgx/calls.c PROPERTIES COMPILE_OPTIONS + "${C_CALLS_CFLAGS}") # To avoid the `unused-command-line-argument` warning, which we treat as an # error, we explicitly turn off the warning when compiling these assembly @@ -241,28 +240,26 @@ list(APPEND NEEDS_STDC_NAMES ${MUSL_SRC_DIR}/prng/rand.c list(APPEND W_NO_CONVERSION ${MUSL_SRC_DIR}/prng/rand.c) -set_property( - SOURCE ${W_NO_CONVERSION} - APPEND_STRING - PROPERTY COMPILE_FLAGS "-Wno-conversion") +set_property(SOURCE ${W_NO_CONVERSION} APPEND_STRING PROPERTY COMPILE_FLAGS + "-Wno-conversion") -set_property( - SOURCE ${NEEDS_STDC_NAMES} - APPEND_STRING - PROPERTY COMPILE_FLAGS " -I${CORELIBC_INCLUDES}") -set_property( - SOURCE ${NEEDS_STDC_NAMES} - APPEND - PROPERTY COMPILE_DEFINITIONS OE_NEED_STDC_NAMES) +set_property(SOURCE ${NEEDS_STDC_NAMES} APPEND_STRING + PROPERTY COMPILE_FLAGS " -I${CORELIBC_INCLUDES}") +set_property(SOURCE ${NEEDS_STDC_NAMES} APPEND PROPERTY COMPILE_DEFINITIONS + OE_NEED_STDC_NAMES) maybe_build_using_clangw(oecore) add_enclave_dependencies(oecore core_trusted_edl) if (OE_SGX) add_enclave_dependencies(oecore platform_trusted_edl) + enclave_include_directories( + oecore PRIVATE ${PROJECT_SOURCE_DIR}/include/openenclave/corelibc) endif () enclave_include_directories(oecore PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) +enclave_include_directories(oecore PRIVATE + ${PROJECT_SOURCE_DIR}/enclave/core/sgx/include) enclave_link_libraries(oecore PUBLIC oe_includes) if (OE_TRUSTZONE) @@ -345,10 +342,8 @@ if (OE_SGX) ${MUSL_SRC_DIR}/string/x86_64/memmove.s ${MUSL_SRC_DIR}/string/x86_64/memset.s) - set_property( - SOURCE ${DEFAULT_VISIBILITY} - APPEND_STRING - PROPERTY COMPILE_FLAGS " -fvisibility=default") + set_property(SOURCE ${DEFAULT_VISIBILITY} APPEND_STRING + PROPERTY COMPILE_FLAGS " -fvisibility=default") endif () enclave_compile_options(oecore INTERFACE $<$:-nostdinc++>) diff --git a/enclave/core/sgx/asmdefs.h b/enclave/core/sgx/asmdefs.h index 84e499772..400455030 100644 --- a/enclave/core/sgx/asmdefs.h +++ b/enclave/core/sgx/asmdefs.h @@ -12,6 +12,9 @@ #define ENCLU_EGETKEY 1 #define ENCLU_EENTER 2 #define ENCLU_EEXIT 4 +#define ENCLU_EACCEPT 5 +#define ENCLU_EMODPE 6 +#define ENCLU_EACCEPTCOPY 7 #define PAGE_SIZE 4096 #define STATIC_STACK_SIZE 8 * 100 diff --git a/enclave/core/sgx/edmm.S b/enclave/core/sgx/edmm.S new file mode 100644 index 000000000..2ed08318c --- /dev/null +++ b/enclave/core/sgx/edmm.S @@ -0,0 +1,69 @@ +// Copyright (c) Open Enclave SDK contributors. +// Licensed under the MIT License. + +#include "asmdefs.h" +#include "asmcommon.inc" + +//============================================================================== +// This file implements the necessary wrappers of Intel SGX primitives required +// by the Enclave Memory Manager (emm) library that supports the Enclave Dynamic +// Memory Management (EDMM) feature on SGX2 CPUs. +//============================================================================== + +.globl do_eaccept +.type do_eaccept, @function +do_eaccept: +.cfi_startproc + + push %rbx + push %rcx + movq %rdi, %rbx + movq %rsi, %rcx + + // Execute EACCEPT. 
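+ // Register contract, per the Intel SGX ISA: RAX carries the ENCLU + // leaf number, RBX the address of the aligned SECINFO structure, and + // RCX the effective address of the target EPC page; the error code + // (0 on success) comes back in RAX and becomes this function's return + // value. RBX (callee-saved) and RCX are preserved around the call.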
+ movq $ENCLU_EACCEPT, %rax + ENCLU + + pop %rcx + pop %rbx + + ret +.cfi_endproc + +.globl do_eacceptcopy +.type do_eacceptcopy, @function +do_eacceptcopy: +.cfi_startproc + push %rbx + push %rcx + movq %rdi, %rbx + movq %rsi, %rcx + + // Execute EACCEPTCOPY + movq $ENCLU_EACCEPTCOPY, %rax + ENCLU + + pop %rcx + pop %rbx + + ret +.cfi_endproc + +.globl do_emodpe +.type do_emodpe, @function +do_emodpe: +.cfi_startproc + push %rbx + push %rcx + movq %rdi, %rbx + movq %rsi, %rcx + + // Execute EMODPE + movq $ENCLU_EMODPE, %rax + ENCLU + + pop %rcx + pop %rbx + + ret +.cfi_endproc diff --git a/enclave/core/sgx/edmm.c b/enclave/core/sgx/edmm.c new file mode 100644 index 000000000..fe1fe58a7 --- /dev/null +++ b/enclave/core/sgx/edmm.c @@ -0,0 +1,191 @@ +// Copyright (c) Open Enclave SDK contributors. +// Licensed under the MIT License. + +#include +#include +#include +#include +#include +#include + +#define _sgx_mm_mutex _oe_pthread_mutex + +#include +#include +#include +#include "emm_private.h" +#include "platform_t.h" +#include "sgx_mm_rt_abstraction.h" + +#define OE_MMAN_PAGE_NUMBER 0x4000 + +typedef struct _oe_sgx_enclave_layout +{ + uint64_t address; + uint64_t size; + uint64_t type; + uint64_t permission; +} oe_sgx_enclave_layout_t; + +static sgx_mm_pfhandler_t _emm_handler_wrapper; + +bool sgx_mm_is_within_enclave(const void* addr, size_t size) +{ + return oe_is_within_enclave(addr, size); +} + +sgx_mm_mutex* sgx_mm_mutex_create(void) +{ + sgx_mm_mutex* m = (sgx_mm_mutex*)oe_malloc(sizeof(sgx_mm_mutex)); + + if (!m) + { + OE_TRACE_ERROR("sgx_mm_mutex_create failed\n"); + oe_abort(); + } + + oe_pthread_mutex_init(m, NULL); + + return m; +} + +int sgx_mm_mutex_lock(sgx_mm_mutex* mutex) +{ + return oe_pthread_mutex_lock(mutex); +} + +int sgx_mm_mutex_unlock(sgx_mm_mutex* mutex) +{ + return oe_pthread_mutex_unlock(mutex); +} + +int sgx_mm_mutex_destroy(sgx_mm_mutex* mutex) +{ + int ret = oe_pthread_mutex_destroy(mutex); + + oe_free(mutex); + + return ret; +} + +int sgx_mm_alloc_ocall(uint64_t addr, size_t length, int page_type, int flags) +{ + int ret; + + if (oe_sgx_mm_alloc_ocall(&ret, addr, length, page_type, flags) != OE_OK) + { + OE_TRACE_ERROR("oe_sgx_mm_alloc_ocall failed\n"); + oe_abort(); + } + + return ret; +} + +int sgx_mm_modify_ocall( + uint64_t addr, + size_t length, + int flags_from, + int flags_to) +{ + int ret; + + if (oe_sgx_mm_modify_ocall(&ret, addr, length, flags_from, flags_to) != + OE_OK) + { + OE_TRACE_ERROR("oe_sgx_mm_modify_ocall failed\n"); + oe_abort(); + } + + return ret; +} + +static uint64_t _oe_emm_handler(oe_exception_record_t* record) +{ + if (record->code == OE_EXCEPTION_PAGE_FAULT && _emm_handler_wrapper) + { + sgx_pfinfo pfinfo = {0}; + pfinfo.maddr = record->faulting_address; + memcpy(&pfinfo.pfec, &record->error_code, sizeof(uint32_t)); + _emm_handler_wrapper(&pfinfo); + } + + return OE_EXCEPTION_CONTINUE_EXECUTION; +} + +bool sgx_mm_register_pfhandler(sgx_mm_pfhandler_t pfhandler) +{ + if (oe_add_vectored_exception_handler(false, _oe_emm_handler) != OE_OK) + return false; + + _emm_handler_wrapper = pfhandler; + + return true; +} + +bool sgx_mm_unregister_pfhandler(sgx_mm_pfhandler_t pfhandler) +{ + (void)pfhandler; + oe_remove_vectored_exception_handler(_oe_emm_handler); + _emm_handler_wrapper = NULL; + return true; +} + +/* No longer needed */ +#if 0 +void _oe_register_emm_layout() +{ + size_t entries_count = __oe_get_layout_entries_size() / sizeof(oe_sgx_enclave_layout_t); + + oe_sgx_enclave_layout_t* layout_entries = 
(oe_sgx_enclave_layout_t*)__oe_get_layout_entries_base(); + + OE_TRACE_INFO("enclave range: 0x%lx - 0x%lx\n", + (uint64_t)__oe_get_enclave_base_address(), + (uint64_t)__oe_get_enclave_base_address() + __oe_get_enclave_size()); + + for (size_t i = 0; i < entries_count; i++) + { + if (!layout_entries[i].address) + break; + + OE_TRACE_INFO("register layout: #%zu addr=0x%lx, size=%zu, type: %zu, permission: %zu\n", + i, layout_entries[i].address, layout_entries[i].size, layout_entries[i].type, layout_entries[i].permission); + + int ret = mm_init_ema((void*)layout_entries[i].address, + OE_PAGE_SIZE, + (int)layout_entries[i].type, + (int)layout_entries[i].permission, + NULL, + NULL); + if (ret != 0) + { + OE_TRACE_ERROR("mm_init_ema failed: ret=%d, addr=0x%lx\n", ret, layout_entries[i].address); + } + } +} +#endif + +static void _oe_sgx_emm_init() +{ + uint64_t user_end = + (uint64_t)__oe_get_enclave_base_address() + __oe_get_enclave_size(); + // TODO: change this to use OE_MMPAGE + uint64_t user_start = user_end - OE_PAGE_SIZE * OE_MMAN_PAGE_NUMBER; + + // printf("emm range 0x%lx - 0x%lx (size: %zu)", + // user_start, + // user_end, + // user_end - user_start); + + OE_TRACE_INFO( + "emm range 0x%lx - 0x%lx (size: %zu)", + user_start, + user_end, + user_end - user_start); + sgx_mm_init(user_start, user_end); +} + +void oe_emm_init() +{ + OE_TRACE_ERROR("emm init..."); + _oe_sgx_emm_init(); +} diff --git a/enclave/core/sgx/edmm/bit_array.c b/enclave/core/sgx/edmm/bit_array.c new file mode 100644 index 000000000..5f55e9632 --- /dev/null +++ b/enclave/core/sgx/edmm/bit_array.c @@ -0,0 +1,389 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include +#include +#include +#include +#include + +#include "bit_array_imp.h" +#include "emalloc.h" + +#define NUM_OF_BYTES(nbits) (ROUND_TO((nbits), 8) >> 3) +#define TEST_BIT(A, p) ((A)[((p) / 8)] & ((uint8_t)(1 << ((p) % 8)))) +#define SET_BIT(A, p) ((A)[((p) / 8)] |= ((uint8_t)(1 << ((p) % 8)))) + +// Create a new bit array to track the status of 'num' of bits. +// The contents of the data is uninitialized. +bit_array* bit_array_new(size_t num_of_bits) +{ + if (num_of_bits == 0) + return NULL; + + if (ROUND_TO((num_of_bits), 8) < num_of_bits) + return NULL; + + size_t n_bytes = NUM_OF_BYTES(num_of_bits); + bit_array* ba = (bit_array*)emalloc(sizeof(bit_array)); + if (!ba) + return NULL; + ba->n_bytes = n_bytes; + ba->n_bits = num_of_bits; + ba->data = (uint8_t*)emalloc(n_bytes); + if (!ba->data) + { + efree(ba); + return NULL; + } + return ba; +} + +// Create a new bit array to track the status of 'num' of bits. +// All the tracked bits are set (value 1). +bit_array* bit_array_new_set(size_t num_of_bits) +{ + bit_array* ba = bit_array_new(num_of_bits); + if (!ba) + return NULL; + + memset(ba->data, 0xFF, ba->n_bytes); + return ba; +} + +// Create a new bit array to track the status of 'num' of bits. +// All the tracked bits are reset (value 0). +bit_array* bit_array_new_reset(size_t num_of_bits) +{ + bit_array* ba = bit_array_new(num_of_bits); + if (!ba) + return NULL; + + memset(ba->data, 0, ba->n_bytes); + return ba; +} + +// Delete the bit_array 'ba' and the data it owns +void bit_array_delete(bit_array* ba) +{ + efree(ba->data); + efree(ba); +} + +// Returns whether the bit at position 'pos' is set +bool bit_array_test(bit_array* ba, size_t pos) +{ + return TEST_BIT(ba->data, pos); +} +uint8_t set_mask(size_t start, size_t bits_to_set) +{ + assert(start < 8); + assert(bits_to_set <= 8); + assert(start + bits_to_set <= 8); + return (uint8_t)(((1 << bits_to_set) - 1) << start); +} +bool bit_array_test_range(bit_array* ba, size_t pos, size_t len) +{ + size_t byte_index = pos / 8; + size_t bit_index = pos % 8; + size_t bits_in_first_byte = 8 - bit_index; + + if (len <= bits_in_first_byte) + { + uint8_t mask = set_mask(bit_index, len); + if ((ba->data[byte_index] & mask) != mask) + { + return false; + } + return true; + } + + uint8_t mask = set_mask(bit_index, bits_in_first_byte); + if ((ba->data[byte_index] & mask) != mask) + { + return false; + } + + size_t bits_remain = len - bits_in_first_byte; + while (bits_remain >= 8) + { + if (ba->data[++byte_index] != 0xFF) + { + return false; + } + bits_remain -= 8; + } + + // handle last several bits + if (bits_remain > 0) + { + mask = set_mask(0, bits_remain); + if ((ba->data[++byte_index] & mask) != mask) + { + return false; + } + } + + return true; +} + +bool bit_array_test_range_any(bit_array* ba, size_t pos, size_t len) +{ + size_t byte_index = pos / 8; + size_t bit_index = pos % 8; + size_t bits_in_first_byte = 8 - bit_index; + + if (len <= bits_in_first_byte) + { + uint8_t mask = set_mask(bit_index, len); + if ((ba->data[byte_index] & mask)) + { + return true; + } + return false; + } + + uint8_t mask = set_mask(bit_index, bits_in_first_byte); + if ((ba->data[byte_index] & mask)) + { + return true; + } + + size_t bits_remain = len - bits_in_first_byte; + while (bits_remain >= 8) + { + if (ba->data[++byte_index]) + { + return true; + } + bits_remain -= 8; + } + + // handle last several bits + if (bits_remain > 0) + { + mask = set_mask(0, bits_remain); + if ((ba->data[++byte_index] & mask)) + { + return true; + } + } + return 
false; +} + +// Set the bit at 'pos' +void bit_array_set(bit_array* ba, size_t pos) +{ + SET_BIT(ba->data, pos); +} + +void bit_array_set_range(bit_array* ba, size_t pos, size_t len) +{ + size_t byte_index = pos / 8; + size_t bit_index = pos % 8; + size_t bits_in_first_byte = 8 - bit_index; + + if (len <= bits_in_first_byte) + { + uint8_t mask = set_mask(bit_index, len); + ba->data[byte_index] |= mask; + return; + } + + uint8_t mask = set_mask(bit_index, bits_in_first_byte); + ba->data[byte_index] |= mask; + size_t bits_remain = len - bits_in_first_byte; + while (bits_remain >= 8) + { + ba->data[++byte_index] = 0xFF; + bits_remain -= 8; + } + + // handle last several bits + if (bits_remain > 0) + { + mask = set_mask(0, bits_remain); + ba->data[++byte_index] |= mask; + } + + return; +} + +// Set all the bits +void bit_array_set_all(bit_array* ba) +{ + memset(ba->data, 0xFF, ba->n_bytes); +} + +uint8_t clear_mask(size_t start, size_t bits_to_clear) +{ + return (uint8_t)(~set_mask(start, bits_to_clear)); +} + +void bit_array_reset_range(bit_array* ba, size_t pos, size_t len) +{ + size_t byte_index = pos / 8; + size_t bit_index = pos % 8; + size_t bits_in_first_byte = 8 - bit_index; + + if (len <= bits_in_first_byte) + { + uint8_t mask = clear_mask(bit_index, len); + ba->data[byte_index] &= mask; + return; + } + + uint8_t mask = clear_mask(bit_index, bits_in_first_byte); + ba->data[byte_index] &= mask; + + size_t bits_remain = len - bits_in_first_byte; + while (bits_remain >= 8) + { + ba->data[++byte_index] = 0; + bits_remain -= 8; + } + + // handle last several bits + if (bits_remain > 0) + { + mask = clear_mask(0, bits_remain); + ba->data[++byte_index] &= mask; + } + + return; +} + +// Clear all the bits +void bit_array_reset_all(bit_array* ba) +{ + memset(ba->data, 0, ba->n_bytes); +} + +// Reset the bit_array 'ba' to track the new 'data', which has 'num' of bits. 
+void bit_array_reattach(bit_array* ba, size_t num_of_bits, uint8_t* data) +{ + if (ba->data) + { + efree(ba->data); + } + + size_t n_bytes = NUM_OF_BYTES(num_of_bits); + ba->n_bytes = n_bytes; + ba->n_bits = num_of_bits; + ba->data = data; +} + +// Split the bit array at 'pos' +int bit_array_split( + bit_array* ba, + size_t pos, + bit_array** new_lower, + bit_array** new_higher) +{ + // not actually a split + if (pos == 0) + { + *new_lower = NULL; + *new_higher = ba; + return 0; + } + + // not actually a split + if (pos >= ba->n_bits) + { + *new_lower = ba; + *new_higher = NULL; + return 0; + } + + size_t byte_index = pos / 8; + uint8_t bit_index = pos % 8; + + size_t l_bits = (byte_index << 3) + bit_index; + size_t l_bytes = NUM_OF_BYTES(l_bits); + size_t r_bits = ba->n_bits - l_bits; + + // new data for bit_array of lower pages + uint8_t* data = (uint8_t*)emalloc(l_bytes); + if (!data) + return ENOMEM; + size_t i; + for (i = 0; i < byte_index; ++i) + { + data[i] = ba->data[i]; + } + + if (bit_index > 0) + { + uint8_t tmp = ba->data[i] & (uint8_t)((1 << bit_index) - 1); + data[i] = tmp; + } + + // new bit_array for higher pages + bit_array* ba2 = bit_array_new(r_bits); + if (!ba2) + { + efree(data); + return ENOMEM; + } + + size_t bits_remain = r_bits; + size_t curr_byte = byte_index; + size_t dst_byte = 0; + uint8_t u1 = 0, u2 = 0; + + while (bits_remain >= 8) + { + u1 = (uint8_t)(ba->data[curr_byte++] >> bit_index); + u2 = (uint8_t)(ba->data[curr_byte] << (8 - bit_index)); + ba2->data[dst_byte++] = u1 | u2; + bits_remain -= 8; + } + + if (bits_remain > (uint8_t)(8 - bit_index)) + { + u1 = (uint8_t)(ba->data[curr_byte++] >> bit_index); + u2 = (uint8_t)(ba->data[curr_byte] << (8 - bit_index)); + ba2->data[dst_byte] = u1 | u2; + ; + } + else if (bits_remain > 0) + { + u1 = (uint8_t)(ba->data[curr_byte] >> bit_index); + ba2->data[dst_byte] = u1; + } + + bit_array_reattach(ba, l_bits, data); + + *new_lower = ba; + *new_higher = ba2; + return 0; +} diff --git a/enclave/core/sgx/edmm/ema.c b/enclave/core/sgx/edmm/ema.c new file mode 100644 index 000000000..09443fc9c --- /dev/null +++ b/enclave/core/sgx/edmm/ema.c @@ -0,0 +1,1397 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include + +#include "bit_array.h" +#include "ema_imp.h" +#include "emalloc.h" +#include "helloworld_t.h" +#include "sgx_mm.h" +#include "sgx_mm_primitives.h" +#include "sgx_mm_rt_abstraction.h" + +/* State flags */ +#define SGX_EMA_STATE_PENDING 0x8UL +#define SGX_EMA_STATE_MODIFIED 0x10UL +#define SGX_EMA_STATE_PR 0x20UL +#define UNUSED(x) ((void)(x)) + +struct ema_root_ +{ + ema_t* guard; +}; + +extern size_t mm_user_base; +extern size_t mm_user_end; + +static bool is_within_user_range(size_t start, size_t size) +{ + if (start + size < start) + return false; + return start >= mm_user_base && start + size <= mm_user_end; +} + +static bool is_within_rts_range(size_t start, size_t size) +{ + if (start + size < start) + return false; + return start >= mm_user_end || start + size <= mm_user_base; +} + +ema_t rts_ema_guard = {.next = &rts_ema_guard, .prev = &rts_ema_guard}; +ema_root_t g_rts_ema_root = {.guard = &rts_ema_guard}; + +ema_t user_ema_guard = {.next = &user_ema_guard, .prev = &user_ema_guard}; +ema_root_t g_user_ema_root = {.guard = &user_ema_guard}; + +#ifdef TEST +static void dump_ema_node(ema_t* node, size_t index) +{ + printf("------ node #%lu ------\n", index); + printf("start:\t0x%lX\n", node->start_addr); + printf("size:\t0x%lX\n", node->size); +} + +void dump_ema_root(ema_root_t* root) +{ + ema_t* node = root->guard->next; + size_t index = 0; + + while (node != root->guard) + { + dump_ema_node(node, index++); + node = node->next; + } +} + +#endif +void destroy_ema_root(ema_root_t* root) +{ + ema_t* node = root->guard->next; + size_t index = 0; + + while (node != root->guard) + { + index++; + ema_t* next = node->next; + ema_destroy(node); + node = next; + } +#if 0 + printf("Destroy %lu nodes on the root\n", index); +#endif +} + +#ifdef TEST +size_t ema_base(ema_t* node) +{ + return node->start_addr; +} + +size_t ema_size(ema_t* node) +{ + return node->size; +} +#endif +#ifndef NDEBUG +ema_t* ema_next(ema_t* node) +{ + return node->next; +} +#endif + +uint32_t get_ema_alloc_flags(ema_t* node) +{ + return node->alloc_flags; +} + +uint64_t get_ema_si_flags(ema_t* node) +{ + return node->si_flags; +} + +sgx_enclave_fault_handler_t ema_fault_handler(ema_t* node, void** private_data) +{ + if (private_data) + *private_data = node->priv; + return node->handler; +} + +static void ema_clone(ema_t* dst, ema_t* src) +{ + memcpy((void*)dst, (void*)src, sizeof(ema_t)); +} + +static bool ema_lower_than_addr(ema_t* ema, size_t addr) +{ + return ((ema->start_addr + ema->size) <= addr); +} + +static bool ema_higher_than_addr(ema_t* ema, size_t addr) +{ + return (ema->start_addr >= addr); +} + +static bool ema_overlap_addr(const ema_t* ema, size_t addr) +{ + if ((addr >= ema->start_addr) && (addr < ema->start_addr + ema->size)) + return true; + return false; +} + +int ema_set_eaccept_full(ema_t* node) +{ + if (!node->eaccept_map) + { + node->eaccept_map = bit_array_new_set((node->size) >> 
SGX_PAGE_SHIFT); + if (!node->eaccept_map) + return ENOMEM; + else + return 0; + } + else + bit_array_set_all(node->eaccept_map); + return 0; +} + +int ema_clear_eaccept_full(ema_t* node) +{ + if (!node->eaccept_map) + { + node->eaccept_map = bit_array_new_reset((node->size) >> SGX_PAGE_SHIFT); + if (!node->eaccept_map) + return ENOMEM; + else + return 0; + } + else + bit_array_reset_all(node->eaccept_map); + return 0; +} + +int ema_set_eaccept(ema_t* node, size_t start, size_t end) +{ + if (!node) + { + return EINVAL; + } + + assert(start >= node->start_addr); + assert(end <= node->start_addr + node->size); + size_t pos_begin = (start - node->start_addr) >> SGX_PAGE_SHIFT; + size_t pos_end = (end - node->start_addr) >> SGX_PAGE_SHIFT; + + // update eaccept bit map + if (!node->eaccept_map) + { + node->eaccept_map = bit_array_new_reset((node->size) >> SGX_PAGE_SHIFT); + if (!node->eaccept_map) + return ENOMEM; + } + bit_array_set_range(node->eaccept_map, pos_begin, pos_end - pos_begin); + return 0; +} + +bool ema_page_committed(ema_t* ema, size_t addr) +{ + assert(!(addr % SGX_PAGE_SIZE)); + if (!ema->eaccept_map) + { + return false; + } + + return bit_array_test( + ema->eaccept_map, (addr - ema->start_addr) >> SGX_PAGE_SHIFT); +} + +// search for a node whose address range contains 'addr' +ema_t* search_ema(ema_root_t* root, size_t addr) +{ + for (ema_t* node = root->guard->next; node != root->guard; + node = node->next) + { + if (ema_overlap_addr(node, addr)) + { + return node; + } + } + return NULL; +} + +// insert 'new_node' before 'node' +ema_t* insert_ema(ema_t* new_node, ema_t* node) +{ + new_node->prev = node->prev; + new_node->next = node; + node->prev->next = new_node; + node->prev = new_node; + return new_node; +} + +static void replace_ema(ema_t* new_node, ema_t* old_node) +{ + old_node->prev->next = new_node; + old_node->next->prev = new_node; + new_node->next = old_node->next; + new_node->prev = old_node->prev; +} + +// Remove the 'node' from the list +static ema_t* remove_ema(ema_t* node) +{ + if (!node) + return node; + + // Sanity check pointers for corruption + if ((node->prev->next != node) || (node->next->prev != node)) + { + abort(); + } + + node->prev->next = node->next; + node->next->prev = node->prev; + return node; +} + +void push_back_ema(ema_root_t* root, ema_t* node) +{ + insert_ema(node, root->guard); +} + +// search for a range of nodes containing addresses within [start, end) +// 'ema_begin' will hold the first ema that has an address higher than / equal to +// 'start'; 'ema_end' will hold the node immediately following the last ema that has +// an address lower than / equal to 'end' +int search_ema_range( + ema_root_t* root, + size_t start, + size_t end, + ema_t** ema_begin, + ema_t** ema_end) +{ + ema_t* node = root->guard->next; + + // find the first node that has addr >= 'start' + while ((node != root->guard) && ema_lower_than_addr(node, start)) + { + node = node->next; + } + + // empty list or all nodes are beyond [start, end) + if ((node == root->guard) || ema_higher_than_addr(node, end)) + { + *ema_begin = NULL; + *ema_end = NULL; + return -1; + } + + *ema_begin = node; + + // find the last node that has addr <= 'end' + while ((node != root->guard) && (!ema_higher_than_addr(node, end))) + { + node = node->next; + } + *ema_end = node; + + return 0; +} + +// We just split here; emalloc_free will merge and reuse unused blocks +int ema_split(ema_t* ema, size_t addr, bool new_lower, ema_t** ret_node) +{ + // this check is only needed for UT; + // in real usage in this file, addr always overlaps
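+ // (The EACCEPT bitmap is split along with the node via bit_array_split + // below, so both halves keep an accurate committed-page record.)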
+#ifdef TEST + if (!ema_overlap_addr(ema, addr) || !ret_node) + { + return EINVAL; + } +#else + assert(ema_overlap_addr(ema, addr)); + assert(ret_node); +#endif + + ema_t* new_node = (ema_t*)emalloc(sizeof(ema_t)); + if (!new_node) + { + return ENOMEM; + } + + bit_array *low = NULL, *high = NULL; + if (ema->eaccept_map) + { + size_t pos = (addr - ema->start_addr) >> SGX_PAGE_SHIFT; + int ret = bit_array_split(ema->eaccept_map, pos, &low, &high); + if (ret) + { + efree(new_node); + return ret; + } + } + + // the caller does not need to free new_node, as it is inserted + // and managed in the root when this returns + ema_clone(new_node, ema); + + ema_t *lo_ema = NULL, *hi_ema = NULL; + if (new_lower) + { + // new node for lower address + lo_ema = new_node; + hi_ema = ema; + insert_ema(new_node, ema); + } + else + { + lo_ema = ema; + hi_ema = new_node; + insert_ema(new_node, ema->next); + } + + size_t start = ema->start_addr; + size_t size = ema->size; + + lo_ema->start_addr = start; + lo_ema->size = addr - start; + hi_ema->start_addr = addr; + hi_ema->size = size - lo_ema->size; + + if (ema->eaccept_map) + { + lo_ema->eaccept_map = low; + hi_ema->eaccept_map = high; + } + *ret_node = new_node; + return 0; +} + +int ema_split_ex(ema_t* ema, size_t start, size_t end, ema_t** new_node) +{ + ema_t* node = ema; + ema_t* tmp_node; + if (start > node->start_addr) + { + int ret = ema_split(node, start, false, &tmp_node); + if (ret) + return ret; + if (tmp_node) + node = tmp_node; + } + tmp_node = NULL; + if (end < (node->start_addr + node->size)) + { + int ret = ema_split(node, end, true, &tmp_node); + if (ret) + return ret; + if (tmp_node) + node = tmp_node; + } + *new_node = node; + return 0; +} + +static size_t ema_aligned_end(ema_t* ema, size_t align) +{ + size_t curr_end = ema->start_addr + ema->size; + curr_end = ROUND_TO(curr_end, align); + return curr_end; +} + +// Find a free region of at least 'size' bytes on the given root; it does +// not matter where the region starts +bool find_free_region( + ema_root_t* root, + size_t size, + uint64_t align, + size_t* addr, + ema_t** next_ema) +{ + bool is_rts = (root == &g_rts_ema_root); + ema_t* ema_begin = root->guard->next; + ema_t* ema_end = root->guard; + + *next_ema = NULL; + *addr = 0; + + // no ema node on the root + if (ema_begin == ema_end) + { + size_t tmp = 0; + if (is_rts) + { + bool found = false; + if (mm_user_base >= size) + { + tmp = TRIM_TO(mm_user_base - size, align); + found = sgx_mm_is_within_enclave((void*)tmp, size); + } + if (!found) + { + tmp = ROUND_TO(mm_user_end, align); + found = tmp + size >= tmp && // No integer overflow + sgx_mm_is_within_enclave((void*)tmp, size); + } + if (!found) + return false; + assert(is_within_rts_range(tmp, size)); + + *addr = tmp; + *next_ema = ema_end; + return true; + } + else + { + tmp = ROUND_TO(mm_user_base, align); + if (is_within_user_range(tmp, size)) + { + *addr = tmp; + *next_ema = ema_end; + return true; + } + // the aligned base does not fit in the user range; report + // failure rather than returning true with *addr unset + return false; + } + } + + // iterate over the ema nodes + ema_t* curr = ema_begin; + ema_t* next = curr->next; + + while (next != ema_end) + { + size_t curr_end = ema_aligned_end(curr, align); + if (curr_end <= next->start_addr) + { + size_t free_size = next->start_addr - curr_end; + if (free_size >= size) + { + if (!is_rts || is_within_rts_range(curr_end, size)) + { + *next_ema = next; + *addr = curr_end; + return true; + } + } + } + curr = next; + next = curr->next; + } + + // check the region higher than the last ema node + size_t tmp = ema_aligned_end(curr, align); + if (sgx_mm_is_within_enclave((void*)tmp, size)) + { + if ((is_rts && is_within_rts_range(tmp, size)) || + (!is_rts && is_within_user_range(tmp, size))) + { + *next_ema = next; + *addr = tmp; + return true; + } + } + + // check the region lower than the first ema node + if (ema_begin->start_addr < size) + return false; + + tmp = TRIM_TO(ema_begin->start_addr - size, align); + if (!is_rts) + { + if (is_within_user_range(tmp, size)) + { + *addr = tmp; + *next_ema = ema_begin; + return true; + } + } + else if (sgx_mm_is_within_enclave((void*)tmp, size)) + { + if (is_within_rts_range(tmp, size)) + { + *addr = tmp; + *next_ema = ema_begin; + return true; + } + } + + return false; +} + +bool find_free_region_at( + ema_root_t* root, + size_t addr, + size_t size, + ema_t** next_ema) +{ + if (!sgx_mm_is_within_enclave((void*)(addr), size)) + { + *next_ema = NULL; + return false; + } + bool is_rts = (root == &g_rts_ema_root); + if ((is_rts && !is_within_rts_range(addr, size)) || + (!is_rts && !is_within_user_range(addr, size))) + { + *next_ema = NULL; + return false; + } + + ema_t* node = root->guard->next; + while (node != root->guard) + { + if (node->start_addr >= (addr + size)) + { + *next_ema = node; + return true; + } + if (addr >= (node->start_addr + node->size)) + { + node = node->next; + } + else + { + break; + } + } + if (node == root->guard) + { + *next_ema = node; + return true; + } + + *next_ema = NULL; + return false; +} + +ema_t* ema_new( + size_t addr, + size_t size, + uint32_t alloc_flags, + uint64_t si_flags, + sgx_enclave_fault_handler_t handler, + void* private_data, + ema_t* next_ema) +{ + // allocate a temp on the stack, which is already allocated, i.e., + // stack expansion won't create new nodes recursively. + ema_t tmp = { + .start_addr = addr, + .size = size, + .alloc_flags = alloc_flags, + .si_flags = si_flags, + .eaccept_map = NULL, + .handler = handler, + .priv = private_data, + .next = NULL, + .prev = NULL, + }; + + // ensure region [start, start+size) is in the list so emalloc won't use it.
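+ // (emalloc may recursively call sgx_mm_alloc to grow its own reserve, + // which could otherwise pick this exact range; linking the stack-resident + // placeholder first closes that window, and replace_ema below swaps the + // placeholder for the heap-allocated node.)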
+ insert_ema(&tmp, next_ema); + ema_t* node = (ema_t*)emalloc(sizeof(ema_t)); + if (node) + { + *node = tmp; + replace_ema(node, &tmp); + return node; + } + else + { + remove_ema(&tmp); + return NULL; + } +} + +void ema_destroy(ema_t* ema) +{ + remove_ema(ema); + if (ema->eaccept_map) + { + bit_array_delete(ema->eaccept_map); + } + efree(ema); +} + +static int eaccept_range_forward(const sec_info_t* si, size_t start, size_t end) +{ + while (start < end) + { + if (do_eaccept(si, start)) + abort(); + start += SGX_PAGE_SIZE; + } + return 0; +} + +static int eaccept_range_backward( + const sec_info_t* si, + size_t start, + size_t end) +{ + assert(start < end); + do + { + end -= SGX_PAGE_SIZE; + if (do_eaccept(si, end)) + abort(); + } while (end > start); + return 0; +} + +int do_commit(size_t start, size_t size, uint64_t si_flags, bool grow_up) +{ + sec_info_t si SGX_SECINFO_ALIGN = {si_flags | SGX_EMA_STATE_PENDING, 0}; + int ret = -1; + + if (grow_up) + { + ret = eaccept_range_backward(&si, start, start + size); + } + else + { + ret = eaccept_range_forward(&si, start, start + size); + } + + return ret; +} + +int ema_do_commit(ema_t* node, size_t start, size_t end) +{ + // Only RESERVE region has no bit map allocated. + assert(node->eaccept_map); + size_t real_start = MAX(start, node->start_addr); + size_t real_end = MIN(end, node->start_addr + node->size); + + sec_info_t si SGX_SECINFO_ALIGN = { + SGX_EMA_PAGE_TYPE_REG | SGX_EMA_PROT_READ_WRITE | SGX_EMA_STATE_PENDING, + 0}; + + for (size_t addr = real_start; addr < real_end; addr += SGX_PAGE_SIZE) + { + size_t pos = (addr - node->start_addr) >> SGX_PAGE_SHIFT; + // only commit for uncommitted page + if (!bit_array_test(node->eaccept_map, pos)) + { + int ret = do_eaccept(&si, addr); + if (ret != 0) + { + return ret; + } + bit_array_set(node->eaccept_map, pos); + } + } + + return 0; +} + +static int ema_can_commit(ema_t* first, ema_t* last, size_t start, size_t end) +{ + ema_t* curr = first; + size_t prev_end = first->start_addr; + while (curr != last) + { + if (prev_end != curr->start_addr) // there is a gap + return EINVAL; + + if (!(curr->si_flags & (SGX_EMA_PROT_WRITE))) + return EACCES; + + if (!(curr->si_flags & (SGX_EMA_PAGE_TYPE_REG))) + return EACCES; + + if ((curr->alloc_flags & (SGX_EMA_RESERVE))) + return EACCES; + + prev_end = curr->start_addr + curr->size; + curr = curr->next; + } + if (prev_end < end) + return EINVAL; + return 0; +} + +int ema_do_commit_loop(ema_t* first, ema_t* last, size_t start, size_t end) +{ + int ret = ema_can_commit(first, last, start, end); + if (ret) + return ret; + + ema_t *curr = first, *next = NULL; + + while (curr != last) + { + next = curr->next; + ret = ema_do_commit(curr, start, end); + if (ret != 0) + { + return ret; + } + curr = next; + } + return ret; +} + +static int ema_do_uncommit_real( + ema_t* node, + size_t real_start, + size_t real_end, + int prot) +{ + int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; + uint32_t alloc_flags = node->alloc_flags & SGX_EMA_ALLOC_FLAGS_MASK; + + // ignore if ema is in reserved state + if (alloc_flags & SGX_EMA_RESERVE) + { + return 0; + } + + // Only RESERVE region has no bit map allocated. 
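+ // Uncommit proceeds per committed block in three steps: (1) + // sgx_mm_modify_ocall requests a type change to SGX_EMA_PAGE_TYPE_TRIM, + // (2) the enclave EACCEPTs every trimmed page, and (3) a second + // sgx_mm_modify_ocall notifies the untrusted runtime that the trim was + // accepted so the pages can actually be removed.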
+ assert(node->eaccept_map); + + sec_info_t si SGX_SECINFO_ALIGN = { + SGX_EMA_PAGE_TYPE_TRIM | SGX_EMA_STATE_MODIFIED, 0}; + + while (real_start < real_end) + { + size_t block_start = real_start; + while (block_start < real_end) + { + size_t pos = (block_start - node->start_addr) >> SGX_PAGE_SHIFT; + if (bit_array_test(node->eaccept_map, pos)) + { + break; + } + else + { + block_start += SGX_PAGE_SIZE; + } + } + if (block_start == real_end) + break; + + size_t block_end = block_start + SGX_PAGE_SIZE; + while (block_end < real_end) + { + size_t pos = (block_end - node->start_addr) >> SGX_PAGE_SHIFT; + if (bit_array_test(node->eaccept_map, pos)) + { + block_end += SGX_PAGE_SIZE; + } + else + break; + } + assert(block_end > block_start); + // only for committed page + size_t block_length = block_end - block_start; + int ret = sgx_mm_modify_ocall( + block_start, + block_length, + prot | type, + prot | SGX_EMA_PAGE_TYPE_TRIM); + if (ret != 0) + { + return EFAULT; + } + + ret = eaccept_range_forward(&si, block_start, block_end); + if (ret != 0) + { + return ret; + } + bit_array_reset_range( + node->eaccept_map, + (block_start - node->start_addr) >> SGX_PAGE_SHIFT, + block_length >> SGX_PAGE_SHIFT); + // eaccept trim notify + ret = sgx_mm_modify_ocall( + block_start, + block_length, + prot | SGX_EMA_PAGE_TYPE_TRIM, + prot | SGX_EMA_PAGE_TYPE_TRIM); + if (ret) + return EFAULT; + + real_start = block_end; + } + return 0; +} + +int ema_do_uncommit(ema_t* node, size_t start, size_t end) +{ + size_t real_start = MAX(start, node->start_addr); + size_t real_end = MIN(end, node->start_addr + node->size); + int prot = node->si_flags & SGX_EMA_PROT_MASK; + if (prot == SGX_EMA_PROT_NONE) // need READ for trimming + ema_modify_permissions(node, start, end, SGX_EMA_PROT_READ); + return ema_do_uncommit_real(node, real_start, real_end, prot); +} +static int ema_can_uncommit(ema_t* first, ema_t* last, size_t start, size_t end) +{ + ema_t* curr = first; + size_t prev_end = first->start_addr; + while (curr != last) + { + if (prev_end != curr->start_addr) // there is a gap + return EINVAL; + + if ((curr->alloc_flags & (SGX_EMA_RESERVE))) + return EACCES; + + prev_end = curr->start_addr + curr->size; + curr = curr->next; + } + if (prev_end < end) + return EINVAL; + return 0; +} + +int ema_do_uncommit_loop(ema_t* first, ema_t* last, size_t start, size_t end) +{ + int ret = ema_can_uncommit(first, last, start, end); + if (ret) + return ret; + + ema_t *curr = first, *next = NULL; + while (curr != last) + { + next = curr->next; + ret = ema_do_uncommit(curr, start, end); + if (ret != 0) + { + return ret; + } + curr = next; + } + return ret; +} + +int ema_do_dealloc(ema_t* node, size_t start, size_t end) +{ + int alloc_flag = node->alloc_flags & SGX_EMA_ALLOC_FLAGS_MASK; + size_t real_start = MAX(start, node->start_addr); + size_t real_end = MIN(end, node->start_addr + node->size); + int prot = node->si_flags & SGX_EMA_PROT_MASK; + ema_t* tmp_node = NULL; + int ret = EFAULT; + + if (alloc_flag & SGX_EMA_RESERVE) + { + goto split_and_destroy; + } + + // Only RESERVE region has no bit map allocated. 
+ assert(node->eaccept_map); + if (prot == SGX_EMA_PROT_NONE) // need READ for trimming + ema_modify_permissions(node, start, end, SGX_EMA_PROT_READ); + // clear protections flag for dealloc + ret = ema_do_uncommit_real(node, real_start, real_end, SGX_EMA_PROT_NONE); + if (ret != 0) + return ret; + +split_and_destroy: + // potential ema split + if (real_start > node->start_addr) + { + ret = ema_split(node, real_start, false, &tmp_node); + if (ret) + return ret; + assert(tmp_node); + node = tmp_node; + } + + tmp_node = NULL; + if (real_end < (node->start_addr + node->size)) + { + ret = ema_split(node, real_end, true, &tmp_node); + if (ret) + return ret; + assert(tmp_node); + node = tmp_node; + } + + ema_destroy(node); + return 0; +} + +int ema_do_dealloc_loop(ema_t* first, ema_t* last, size_t start, size_t end) +{ + int ret = 0; + ema_t *curr = first, *next = NULL; + + while (curr != last) + { + next = curr->next; + ret = ema_do_dealloc(curr, start, end); + if (ret != 0) + { + return ret; + } + curr = next; + } + return ret; +} + +// change the type of the page to TCS +int ema_change_to_tcs(ema_t* node, size_t addr) +{ + int prot = node->si_flags & SGX_EMA_PROT_MASK; + int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; + + // page need to be already committed + if (!ema_page_committed(node, addr)) + { + return EACCES; + } + + if (type == SGX_EMA_PAGE_TYPE_TCS) + { + return 0; // already committed to TCS type + } + + if (prot != SGX_EMA_PROT_READ_WRITE) + return EACCES; + if (type != SGX_EMA_PAGE_TYPE_REG) + return EACCES; + + int ret = sgx_mm_modify_ocall( + addr, SGX_PAGE_SIZE, prot | type, prot | SGX_EMA_PAGE_TYPE_TCS); + if (ret != 0) + { + return EFAULT; + } + printf("[ema_change_to_tcs] before\n"); + + sec_info_t si SGX_SECINFO_ALIGN = { + SGX_EMA_PAGE_TYPE_TCS | SGX_EMA_STATE_MODIFIED, 0}; + if (do_eaccept(&si, addr) != 0) + { + printf("[ema_change_to_tcs] abort\n"); + abort(); + } + printf("[ema_change_to_tcs] before\n"); + + // operation succeeded, update ema node: state update, split + ema_t* tcs = NULL; + ret = ema_split_ex(node, addr, addr + SGX_PAGE_SIZE, &tcs); + printf("[ema_change_to_tcs] before\n"); + if (ret) + return ret; + assert(tcs); // ema_split_ex should not return NULL if node!=NULL + + tcs->si_flags = (tcs->si_flags & (uint64_t)(~SGX_EMA_PAGE_TYPE_MASK) & + (uint64_t)(~SGX_EMA_PROT_MASK)) | + SGX_EMA_PAGE_TYPE_TCS | SGX_EMA_PROT_NONE; + return ret; +} + +int ema_modify_permissions(ema_t* node, size_t start, size_t end, int new_prot) +{ + int prot = node->si_flags & SGX_EMA_PROT_MASK; + int type = node->si_flags & SGX_EMA_PAGE_TYPE_MASK; + if (prot == new_prot) + return 0; + + size_t real_start = MAX(start, node->start_addr); + size_t real_end = MIN(end, node->start_addr + node->size); + + int ret = sgx_mm_modify_ocall( + real_start, real_end - real_start, prot | type, new_prot | type); + if (ret != 0) + { + return EFAULT; + } + + sec_info_t si SGX_SECINFO_ALIGN = { + (uint64_t)new_prot | SGX_EMA_PAGE_TYPE_REG | SGX_EMA_STATE_PR, 0}; + + for (size_t page = real_start; page < real_end; page += SGX_PAGE_SIZE) + { + if ((new_prot | prot) != prot) + do_emodpe(&si, page); + + // new permission is RWX, no EMODPR needed in untrusted part, hence no + // EACCEPT + if ((new_prot & (SGX_EMA_PROT_WRITE | SGX_EMA_PROT_EXEC)) != + (SGX_EMA_PROT_WRITE | SGX_EMA_PROT_EXEC)) + { + ret = do_eaccept(&si, page); + if (ret) + return ret; + } + } + + // all involved pages complete permission change, deal with potential + // ema node split and update permission state + if (real_start > 
node->start_addr) + { + ema_t* tmp_node = NULL; + ret = ema_split(node, real_start, false, &tmp_node); + if (ret) + return ret; + assert(tmp_node); + node = tmp_node; + } + + if (real_end < (node->start_addr + node->size)) + { + ema_t* tmp_node = NULL; + ret = ema_split(node, real_end, true, &tmp_node); + if (ret) + return ret; + assert(tmp_node); + node = tmp_node; + } + + // 'node' is the ema node to update permission for + node->si_flags = + (node->si_flags & (uint64_t)(~SGX_EMA_PROT_MASK)) | (uint64_t)new_prot; + if (new_prot == SGX_EMA_PROT_NONE) + { // do mprotect if target is PROT_NONE + ret = sgx_mm_modify_ocall( + real_start, + real_end - real_start, + type | SGX_EMA_PROT_NONE, + type | SGX_EMA_PROT_NONE); + if (ret) + ret = EFAULT; + } + return ret; +} + +static int ema_can_modify_permissions( + ema_t* first, + ema_t* last, + size_t start, + size_t end) +{ + ema_t* curr = first; + size_t prev_end = first->start_addr; + while (curr != last) + { + if (prev_end != curr->start_addr) // there is a gap + return EINVAL; + + if (!(curr->si_flags & (SGX_EMA_PAGE_TYPE_REG))) + return EACCES; + + if ((curr->alloc_flags & (SGX_EMA_RESERVE))) + return EACCES; + + size_t real_start = MAX(start, curr->start_addr); + size_t real_end = MIN(end, curr->start_addr + curr->size); + + size_t pos_begin = (real_start - curr->start_addr) >> SGX_PAGE_SHIFT; + size_t pos_end = (real_end - curr->start_addr) >> SGX_PAGE_SHIFT; + if (!curr->eaccept_map || + !bit_array_test_range( + curr->eaccept_map, pos_begin, pos_end - pos_begin)) + { + return EINVAL; + } + + prev_end = curr->start_addr + curr->size; + curr = curr->next; + } + if (prev_end < end) + return EINVAL; + return 0; +} + +static int ema_modify_permissions_loop_nocheck( + ema_t* first, + ema_t* last, + size_t start, + size_t end, + int prot) +{ + int ret = 0; + ema_t *curr = first, *next = NULL; + while (curr != last) + { + next = curr->next; + ret = ema_modify_permissions(curr, start, end, prot); + if (ret != 0) + { + return ret; + } + curr = next; + } + return ret; +} + +int ema_modify_permissions_loop( + ema_t* first, + ema_t* last, + size_t start, + size_t end, + int prot) +{ + int ret = ema_can_modify_permissions(first, last, start, end); + if (ret) + return ret; + + return ema_modify_permissions_loop_nocheck(first, last, start, end, prot); +} + +static int ema_can_commit_data( + ema_t* first, + ema_t* last, + size_t start, + size_t end) +{ + ema_t* curr = first; + size_t prev_end = first->start_addr; + while (curr != last) + { + if (prev_end != curr->start_addr) // there is a gap + return EINVAL; + + if (!(curr->si_flags & (SGX_EMA_PROT_WRITE))) + return EACCES; + + if (!(curr->si_flags & (SGX_EMA_PAGE_TYPE_REG))) + return EACCES; + + if ((curr->alloc_flags & (SGX_EMA_RESERVE))) + return EACCES; + + if (!(curr->alloc_flags & (SGX_EMA_COMMIT_ON_DEMAND))) + return EINVAL; + + if (curr->eaccept_map) + { + size_t real_start = MAX(start, curr->start_addr); + size_t real_end = MIN(end, curr->start_addr + curr->size); + size_t pos_begin = + (real_start - curr->start_addr) >> SGX_PAGE_SHIFT; + size_t pos_end = (real_end - curr->start_addr) >> SGX_PAGE_SHIFT; + + if (bit_array_test_range_any( + curr->eaccept_map, pos_begin, pos_end - pos_begin)) + return EACCES; + } + prev_end = curr->start_addr + curr->size; + curr = curr->next; + } + if (prev_end < end) + return EINVAL; + return 0; +} + +int ema_do_commit_data( + ema_t* node, + size_t start, + size_t end, + uint8_t* data, + int prot) +{ + size_t addr = start; + size_t src = (size_t)data; + sec_info_t 
si SGX_SECINFO_ALIGN = { + (uint64_t)prot | SGX_EMA_PAGE_TYPE_REG, 0}; + + while (addr < end) + { + int ret = do_eacceptcopy(&si, addr, src); + if (ret != 0) + { + return EFAULT; + } + addr += SGX_PAGE_SIZE; + src += SGX_PAGE_SIZE; + } + return ema_set_eaccept(node, start, end); +} + +int ema_do_commit_data_loop( + ema_t* first, + ema_t* last, + size_t start, + size_t end, + uint8_t* data, + int prot) +{ + int ret = 0; + ret = ema_can_commit_data(first, last, start, end); + if (ret) + return ret; + + ema_t* curr = first; + while (curr != last) + { // there is no split in this loop + size_t real_start = MAX(start, curr->start_addr); + size_t real_end = MIN(end, curr->start_addr + curr->size); + uint8_t* real_data = data + real_start - start; + ret = ema_do_commit_data(curr, real_start, real_end, real_data, prot); + if (ret != 0) + { + return ret; + } + curr = curr->next; + } + + ret = ema_modify_permissions_loop_nocheck(first, last, start, end, prot); + return ret; +} + +ema_t* ema_realloc_from_reserve_range( + ema_t* first, + ema_t* last, + size_t start, + size_t end, + uint32_t alloc_flags, + uint64_t si_flags, + sgx_enclave_fault_handler_t handler, + void* private_data) +{ + assert(first != NULL); + assert(last != NULL); + ema_t* curr = first; + assert(first->start_addr < end); + assert(last->prev->start_addr + last->prev->size > start); + // fail on any nodes not reserve or any gaps + size_t prev_end = first->start_addr; + while (curr != last) + { + // do not touch internal reserve. + if (!can_erealloc(curr)) + return NULL; + if (prev_end != curr->start_addr) // there is a gap + return NULL; + if (curr->alloc_flags & SGX_EMA_RESERVE) + { + prev_end = curr->start_addr + curr->size; + curr = curr->next; + } + else + return NULL; + } + + int ret = 0; + // Splitting nodes may add more emalloc reserve nodes. + // Those can be appended and move the "guard" which + // could be the last node + // We track the the last inclusive node. + ema_t* last_inclusive = last->prev; + if (start > first->start_addr) + { + ema_t* ofirst = first; + ret = ema_split(first, start, false, &first); + if (ret) + return NULL; + // old first was split, we need update last_inclusive + // if the old first was also the last_inclusive + if (ofirst == last_inclusive) + last_inclusive = first; + } + + if (end < last_inclusive->start_addr + last_inclusive->size) + { + ret = ema_split(last_inclusive, end, false, &last); + if (ret) + return NULL; + } + else + last = last_inclusive->next; + + assert(first->alloc_flags & SGX_EMA_RESERVE); + assert(!first->eaccept_map); + + curr = first; + while (curr != last) + { + ema_t* next = curr->next; + ema_destroy(curr); + curr = next; + } + + ema_t* new_node = ema_new( + start, end - start, alloc_flags, si_flags, handler, private_data, last); + return new_node; +} + +int ema_do_alloc(ema_t* node) +{ + uint32_t alloc_flags = node->alloc_flags; + if (alloc_flags & SGX_EMA_RESERVE) + { + printf("early return\n"); + return 0; + } + + size_t tmp_addr = node->start_addr; + size_t size = node->size; + + int ret = sgx_mm_alloc_ocall( + tmp_addr, + size, + (int)(node->si_flags & SGX_EMA_PAGE_TYPE_MASK), + (int)alloc_flags); + if (ret) + { + ret = EFAULT; + return ret; + } + + if (alloc_flags & SGX_EMA_COMMIT_NOW) + { + int grow_up = (alloc_flags & SGX_EMA_GROWSDOWN) ? 
0 : 1; + ret = do_commit(tmp_addr, size, node->si_flags, grow_up); + if (ret) + { + return ret; + } + } + + if (alloc_flags & SGX_EMA_COMMIT_NOW) + ret = ema_set_eaccept_full(node); + else + ret = ema_clear_eaccept_full(node); + + return ret; +} diff --git a/enclave/core/sgx/edmm/emalloc.c b/enclave/core/sgx/edmm/emalloc.c new file mode 100644 index 000000000..56859c309 --- /dev/null +++ b/enclave/core/sgx/edmm/emalloc.c @@ -0,0 +1,593 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#include "emalloc.h" + +#include +#include +#include +#include + +#include "ema.h" // SGX_PAGE_SIZE +#include "sgx_mm.h" // sgx_mm_alloc +/* + * This file implements a simple allocator for EMM internal memory. + * It maintains a list of reserves, dynamically added on + * demand using sgx_mm_alloc recursively when a reserve runs below + * a threshold. + */ + +/** + * Emalloc uses a two-level reserve scheme for allocation. + * The meta reserve statically defined here is only used to allocate memory + * for EMAs tracking the "reserve" areas used by emalloc. This is to ensure + * we always have memory for bookkeeping of the reserves. Memory for regular + * EMAs is allocated out of those reserves. + * + * How many regular EMAs can we allocate with the following meta reserve size? + * + * A regular or reserve EMA takes a fixed 112 bytes of allocation for the + * ema_t and bit_array structs, plus a 16-byte allocation for the bit map itself + * if the EMA size is 64 pages or less. Note that the 8-byte emalloc header is + * included in the above numbers. So each EMA needs 128 bytes for tracking a region + * of 64 pages or less. Larger EMAs need additional memory allocated for the + * bit map only, and the smallest allocation increment allowed by emalloc is 8 + * bytes, which can be used to track up to 64 pages in the bit map. So the + * overhead of an EMA increments by 8 bytes for each 64-page increment of the + * size of the region tracked by the EMA.
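+ * + * For example, an EMA tracking a 256-page (1MB) region consumes + * 128 + floor((256 - 1) / 64) * 8 = 128 + 24 = 152 bytes of reserve.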
+ * + * Each reserve EMA is also surrounded by guard page regions above and below. + * The total meta reserve consumption for each reserve EMA is calculated by: + * 3 * 128 + floor((pages tracked in reserve EMA - 1) / 64) * 8 + * Reserve EMA size starts at 16 pages and doubles each time a new reserve is + * added, capped at 2^28 (max_emalloc_size). Using a spreadsheet, we can + * calculate that the maximum total reserve possible is 1.75GB with 16 pages of + * meta reserve for allocating EMAs tracking reserve areas. + * + * The number of regular EMAs can be calculated by: + * 1.75 * 2^30 / (128 + floor((pages tracked in EMA - 1) / 64) * 8). + * That is 14.7 million if each EMA covers 64 pages or less + * (128 bytes reserve per EMA), tracking up to 3.8 T space, or 13.8 million if + * all EMAs are of 65-128 pages (136 bytes reserve per EMA), tracking up to 7.2 + * T space, and so on. + * + */ +#define META_RESERVE_SIZE 0x10000ULL +static uint8_t meta_reserve[META_RESERVE_SIZE]; +static size_t meta_used; +/** + * initial reserve size + */ +#define initial_reserve_size 0x10000ULL + +// this is enough for the bit map of an 8T EMA +static const size_t max_emalloc_size = 0x10000000ULL; + +/* Blocks of memory managed. + * The allocator puts these fields at the front + * of the block when a memory block is freed. + * The minimal allocation size is 8 bytes; + * the 8-byte header is overhead. + */ +typedef struct _block +{ + uint64_t header; // size | alloc_mask + union + { + char* payload[0]; + struct _block* next_prev[2]; /* used only when this block is free + * next_prev[0] points to next free + * block, next_prev[1] points to prev + * free block if this one is 16 bytes+ + */ + }; +} block_t; + +#define num_exact_list 0x100 +size_t header_size = sizeof(uint64_t); +#define exact_match_increment 0x8 +#define min_block_size 0x10 // include 8-byte header +static const size_t max_exact_size = + min_block_size + exact_match_increment * (num_exact_list - 1); +static block_t* exact_block_list[num_exact_list]; + +// the least significant bit in the block header: +// 1 == allocated/in-use, 0 == free +static const uint64_t alloc_mask = 1ULL; +// block sizes are aligned to 8 bytes +uint64_t size_mask = ~((uint64_t)(exact_match_increment - 1)); +// We don't expect many large blocks, +// so keep all the rest in one list +static block_t* large_block_list = NULL; + +block_t* payload_to_block(void* p) +{ + return (block_t*)(((size_t)p) - header_size); +} + +void* block_to_payload(block_t* b) +{ + return (void*)(b->payload); +} + +bool is_alloced(block_t* b) +{ + return alloc_mask & b->header; +} + +uint64_t block_size(block_t* b) +{ + return b->header & size_mask; +} + +size_t block_end(block_t* b) +{ + return (size_t)(b) + block_size(b); +} +#ifndef NDEBUG +size_t num_free_blocks = 0; +#endif +/* + * A reserve is a contiguous block of + * memory committed for emalloc's use.
+ */ +typedef struct _mm_reserve +{ + size_t base; + size_t size; + size_t used; + struct _mm_reserve* next; +} mm_reserve_t; + +static mm_reserve_t* reserve_list = NULL; + +static mm_reserve_t* find_used_in_reserve(size_t addr, size_t size) +{ + if (size == 0) + return NULL; + mm_reserve_t* r = reserve_list; + while (r) + { + if (addr >= r->base && addr + size <= r->base + r->used) + return r; + r = r->next; + } + return NULL; +} + +static size_t get_list_idx(size_t size) +{ + assert(size % exact_match_increment == 0); + if (size < min_block_size) + return 0; + size_t list = (size - min_block_size) / exact_match_increment; + assert(list < num_exact_list); + return list; +} + +static void remove_from_list(block_t* b, block_t** list_head) +{ + size_t bsize = block_size(b); + if (b == *list_head) + { + *list_head = b->next_prev[0]; + if ((*list_head) && bsize > min_block_size) + (*list_head)->next_prev[1] = NULL; + } + else + { + block_t* prev = NULL; + if (bsize > min_block_size) + prev = b->next_prev[1]; + block_t* next = b->next_prev[0]; + if (prev) + prev->next_prev[0] = next; + if (next) + next->next_prev[1] = prev; + } +} +static void remove_from_lists(block_t* b) +{ + size_t bsize = block_size(b); + if (bsize > max_exact_size) + remove_from_list(b, &large_block_list); + else + { + size_t l = get_list_idx(bsize); + remove_from_list(b, &exact_block_list[l]); + } +} + +static void prepend_to_list(block_t* b, block_t** head) +{ + b->next_prev[0] = *head; + if ((*head) && block_size(*head) > min_block_size) + { + (*head)->next_prev[1] = b; + } + *head = b; +} + +static void put_exact_block(block_t* b) +{ + size_t list = get_list_idx(block_size(b)); + prepend_to_list(b, &exact_block_list[list]); +#ifndef NDEBUG + num_free_blocks++; +#endif +} + +static block_t* neighbor_right(block_t* me) +{ + size_t end = block_end(me); + mm_reserve_t* r1 = find_used_in_reserve((size_t)me, end); + if (!r1) + return NULL; + if (end == r1->base + r1->used) + return NULL; + mm_reserve_t* r2 = find_used_in_reserve(end, block_size((block_t*)end)); + if (r1 != r2) + return NULL; + return (block_t*)end; +} + +// Merge with left neighbor is not implemented +// which requires scanning or footer +// Most blocks are of similar sizes, benefits of full merge implementation +// is probably not significant. +// Keep it simple for now. 
+// Merging with the left neighbor is not implemented, as that would +// require scanning or a block footer. +// Most blocks are of similar sizes, so the benefit of a full merge +// implementation is probably not significant. +// Keep it simple for now. +static block_t* possibly_merge(block_t* b) +{ + block_t* nr = neighbor_right(b); + // merge free right neighbors into b, re-probing after each merge + while (nr && !is_alloced(nr)) + { + remove_from_lists(nr); + b->header += block_size(nr); +#ifndef NDEBUG + num_free_blocks--; +#endif + nr = neighbor_right(b); + } + return b; +} + +static void put_free_block(block_t* e) +{ + if (block_size(e) <= (size_t)max_exact_size) + { + put_exact_block(e); + return; + } + prepend_to_list(e, &large_block_list); +#ifndef NDEBUG + num_free_blocks++; +#endif +} + +static block_t* split_free_block(block_t* b, size_t s) +{ + size_t remain = b->header - s; + assert(remain >= (size_t)min_block_size); + b->header = s; + block_t* new_b = (block_t*)((uint8_t*)b + s); + new_b->header = remain; + return new_b; +} + +static block_t* get_exact_match(size_t bsize) +{ + size_t list = get_list_idx(bsize); + if (exact_block_list[list] == NULL) + return NULL; + block_t* ret = exact_block_list[list]; + exact_block_list[list] = ret->next_prev[0]; + if (list > 0 && exact_block_list[list]) + exact_block_list[list]->next_prev[1] = NULL; +#ifndef NDEBUG + num_free_blocks--; +#endif + return ret; +} + +static block_t* get_free_block(size_t bsize) +{ + if (bsize <= max_exact_size) + return get_exact_match(bsize); + + if (large_block_list == NULL) + return NULL; + + block_t* tmp = large_block_list; + block_t* best = NULL; + + // EMA objects are 80 bytes. + // Bit_arrays are mostly small except for really large EMAs, + // so the number of large objects is likely small. + // Simply loop over the free list and find the smallest block + // that can meet the requested size. + while (tmp != NULL) + { + if (tmp->header >= bsize) + { + if (!best) + { + best = tmp; + } + else if (best->header > tmp->header) + { + best = tmp; + } + } + tmp = (block_t*)tmp->next_prev[0]; + } + + if (!best) + return NULL; + remove_from_list(best, &large_block_list); + + if (best->header >= (bsize + min_block_size)) + { + block_t* tail = split_free_block(best, bsize); + put_free_block(tail); + } +#ifndef NDEBUG + num_free_blocks--; +#endif + return best; +} + +static block_t* get_large_block_end_at(size_t addr) +{ + if (large_block_list == NULL) + return NULL; + block_t* tmp = large_block_list; + + while (tmp != NULL) + { + if ((((size_t)tmp) + tmp->header) == addr) + { + remove_from_list(tmp, &large_block_list); + return tmp; + } + tmp = tmp->next_prev[0]; + } + return NULL; +} + +static void merge_large_blocks_to_reserve(mm_reserve_t* r) +{ + size_t used_end = r->base + r->used; + block_t* merge = get_large_block_end_at(used_end); + while (merge != NULL) + { +#ifndef NDEBUG + num_free_blocks--; +#endif + used_end -= merge->header; + merge = get_large_block_end_at(used_end); + } + r->used = used_end - r->base; + return; +} + +static void new_reserve(void* base, size_t rsize) +{ + mm_reserve_t* reserve = (mm_reserve_t*)base; + size_t head_size = sizeof(mm_reserve_t); + reserve->base = (size_t)(base) + head_size; + reserve->used = 0; + reserve->size = rsize - head_size; + reserve->next = reserve_list; + reserve_list = reserve; +} + +static block_t* alloc_from_reserve(size_t bsize) +{ + mm_reserve_t* r = reserve_list; + size_t ret = 0; + while (r) + { + if (r->size - r->used >= bsize) + { + ret = r->base + r->used; + r->used += bsize; + break; + } + r = r->next; + } + return (block_t*)ret; +} + +static bool adding_reserve = false; +static size_t reserve_size_increment = initial_reserve_size; +static const size_t guard_size = 0x8000ULL;
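The reserve geometry implied by these constants, as a rough illustration (sizes follow the definitions above; the layout summary is a sketch, not lifted from the patch): each committed reserve sits between two 0x8000-byte guard regions, and the committed size doubles on each add_reserve call until it hits max_emalloc_size:

    // call 1: 0x10000 (64KB) committed, 0x20000 reserved incl. two guards
    // call 2: 0x20000 committed, call 3: 0x40000, ...
    // capped at max_emalloc_size = 0x10000000 (256MB) per reserve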
+static int add_reserve(size_t rsize) +{ + printf("add_reserve\n"); + void* base = NULL; + if (adding_reserve) + return 0; + reserve_size_increment = + reserve_size_increment > rsize ? reserve_size_increment : rsize; + // this will call back to emalloc and efree. + // set the flag to avoid an infinite loop + adding_reserve = true; + int ret = sgx_mm_alloc( + NULL, + reserve_size_increment + 2 * guard_size, + SGX_EMA_RESERVE, + NULL, + NULL, + &base); + if (ret) + goto out; + printf("commit on demand pages\n"); + ret = sgx_mm_alloc( + (void*)((size_t)base + guard_size), + reserve_size_increment, + SGX_EMA_COMMIT_ON_DEMAND | SGX_EMA_FIXED, + NULL, + NULL, + &base); + if (ret) + goto out; + + sgx_mm_commit(base, rsize); + new_reserve(base, reserve_size_increment); + reserve_size_increment = reserve_size_increment * 2; // double next time + if (reserve_size_increment > max_emalloc_size) + reserve_size_increment = max_emalloc_size; +out: + adding_reserve = false; + printf("adding_reserve: %d\n", adding_reserve); + return ret; +} + +static void* alloc_from_meta(size_t bsize) +{ + assert(adding_reserve); + if (meta_used + bsize > META_RESERVE_SIZE) + return NULL; + block_t* b = (block_t*)(&meta_reserve[meta_used]); + meta_used += bsize; + b->header = bsize | alloc_mask; + return block_to_payload(b); +} + +int emalloc_init() +{ + for (int i = 0; i < num_exact_list; i++) + { + exact_block_list[i] = NULL; + } + if (add_reserve(initial_reserve_size)) + return ENOMEM; + return 0; +} + +// Single thread only. +// Caller holds mm_lock +void* emalloc(size_t size) +{ + size_t bsize = ROUND_TO(size + header_size, exact_match_increment); + if (bsize < min_block_size) + bsize = min_block_size; + if (adding_reserve) // called back from add_reserve + return alloc_from_meta(bsize); + + block_t* b = get_free_block(bsize); + + if (b != NULL) + { + b->header = bsize | alloc_mask; + return block_to_payload(b); + } + + b = alloc_from_reserve(bsize); + if (!b) + { + size_t new_reserve_size = + ROUND_TO(bsize + sizeof(mm_reserve_t), initial_reserve_size); + if (add_reserve(new_reserve_size)) + return NULL; + b = alloc_from_reserve(bsize); + if (!b) // should never happen + return NULL; + } + + b->header = bsize | alloc_mask; + return block_to_payload(b); +} + +static block_t* reconfigure_block(block_t* b) +{ + b->header = b->header & size_mask; + b->next_prev[0] = NULL; + if (b->header > min_block_size) + b->next_prev[1] = NULL; + + b = possibly_merge(b); + return b; +} + +int can_erealloc(void* payload) +{ + block_t* b = payload_to_block(payload); + size_t bstart = (size_t)b; + size_t bsize = block_size(b); + if (adding_reserve) + return 1; + else if ( + bstart < (size_t)(&meta_reserve[META_RESERVE_SIZE]) && + bstart + bsize > (size_t)(&meta_reserve[0])) + return 0; + else + return 1; +}
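Putting the pieces together, the allocation path is: round the request up, try a free list, then tail space in an existing reserve, and only then grow the reserves; add_reserve re-enters emalloc through sgx_mm_alloc, and the adding_reserve guard redirects that inner bookkeeping allocation to the static meta reserve. A condensed trace for a hypothetical call:

    // emalloc(100):
    //   bsize = ROUND_TO(100 + 8, 8) = 112
    //   1. get_free_block(112)     -> exact list slot (112 - 16) / 8 = 12
    //   2. alloc_from_reserve(112) -> bump r->used in the first fitting reserve
    //   3. add_reserve(...)        -> inner emalloc calls hit alloc_from_meta()
    //      while adding_reserve is set, then retry alloc_from_reserve(112)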
/* + * This is an internal interface only used + * by the EMM; it intentionally crashes on any + * error or inconsistency. + */ +void efree(void* payload) +{ + block_t* b = payload_to_block(payload); + size_t bstart = (size_t)b; + size_t bsize = block_size(b); + if (bstart < (size_t)(&meta_reserve[META_RESERVE_SIZE]) && + bstart + bsize > (size_t)(&meta_reserve[0])) + { + if (adding_reserve) + { // we don't expect a lot of free blocks allocated + // in the meta reserve. Do nothing for now. + assert(bstart >= (size_t)(&meta_reserve[0])); + assert( + bstart + bsize <= (size_t)(&meta_reserve[META_RESERVE_SIZE])); + return; + } + else + abort(); + } + // normal blocks + mm_reserve_t* r = find_used_in_reserve((size_t)b, block_size(b)); + if (!r) + abort(); + b = reconfigure_block(b); + size_t end = block_end(b); + if ((end - r->base) == r->used) + { + r->used -= b->header; + merge_large_blocks_to_reserve(r); + return; + } + + put_free_block(b); + return; +}
diff --git a/enclave/core/sgx/edmm/emm_private.c b/enclave/core/sgx/edmm/emm_private.c new file mode 100644 index 000000000..ee98fd5cd --- /dev/null +++ b/enclave/core/sgx/edmm/emm_private.c @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * + */ + +#include "emm_private.h" + +#include +#include +#include + +#include "ema.h" +#include "sgx_mm_rt_abstraction.h" + +extern ema_root_t g_rts_ema_root; +#define LEGAL_INIT_FLAGS \ + (SGX_EMA_PAGE_TYPE_REG | SGX_EMA_PAGE_TYPE_TCS | \ + SGX_EMA_PAGE_TYPE_SS_FIRST | SGX_EMA_PAGE_TYPE_SS_REST | SGX_EMA_SYSTEM | \ + SGX_EMA_RESERVE) + +int mm_init_ema( + void* addr, + size_t size, + int flags, + int prot, + sgx_enclave_fault_handler_t handler, + void* handler_private) +{ + if (!sgx_mm_is_within_enclave(addr, size)) + return EACCES; + if (((unsigned int)flags) & (~LEGAL_INIT_FLAGS)) + return EINVAL; + if (prot & (~SGX_EMA_PROT_MASK)) + return EINVAL; + ema_t* next_ema = NULL; + + if (!find_free_region_at(&g_rts_ema_root, (size_t)addr, size, &next_ema)) + return EINVAL; + + ema_t* ema = ema_new( + (size_t)addr, + size, + flags & SGX_EMA_ALLOC_FLAGS_MASK, + (uint64_t)prot | (SGX_EMA_PAGE_TYPE_MASK & flags), + handler, + handler_private, + next_ema); + if (!ema) + return ENOMEM; + if (flags & SGX_EMA_RESERVE) + return 0; + return ema_set_eaccept_full(ema); +} + +extern int mm_alloc_internal( + void* addr, + size_t size, + uint32_t flags, + sgx_enclave_fault_handler_t handler, + void* priv, + void** out_addr, + ema_root_t* root); + +int mm_alloc( + void* addr, + size_t size, + uint32_t flags, + sgx_enclave_fault_handler_t handler, + void* priv, + void** out_addr) +{ + return mm_alloc_internal( + addr, size, flags, handler, priv, out_addr, &g_rts_ema_root); +} + +extern int mm_commit_internal(void* addr, size_t size, ema_root_t* root); + +int mm_commit(void* addr, size_t size) +{ + return mm_commit_internal(addr, size, &g_rts_ema_root); +} + +extern int mm_uncommit_internal(void* addr, size_t size, ema_root_t* root); + +int mm_uncommit(void* addr, size_t size) +{ + return mm_uncommit_internal(addr, size, &g_rts_ema_root); +} + +extern int mm_dealloc_internal(void* addr, size_t size, ema_root_t* root); + +int mm_dealloc(void* addr, size_t size) +{ + return mm_dealloc_internal(addr, size, &g_rts_ema_root); +} + +extern int mm_commit_data_internal( + void* addr, + size_t size, + uint8_t* data, + int prot, + ema_root_t* root); + +int mm_commit_data(void* addr, size_t size, uint8_t* data, int prot) +{ + return mm_commit_data_internal(addr, size, data, prot, &g_rts_ema_root); +} + +extern int mm_modify_type_internal( + void* addr, + size_t size, + int type, + ema_root_t* root); + +int mm_modify_type(void* addr, size_t size, int type) +{ + return mm_modify_type_internal(addr, size, type, &g_rts_ema_root); +} + +extern int mm_modify_permissions_internal( + void* addr, + size_t size, + int prot, + ema_root_t* root); + +int mm_modify_permissions(void* addr, size_t size, int prot) +{ + return mm_modify_permissions_internal(addr, size, prot, &g_rts_ema_root); +} diff --git a/enclave/core/sgx/edmm/sgx_mm.c b/enclave/core/sgx/edmm/sgx_mm.c new file mode 100644 index 000000000..1010b7ead --- /dev/null +++ b/enclave/core/sgx/edmm/sgx_mm.c @@ -0,0 +1,513 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "sgx_mm.h" + +#include +#include +#include + +#include "ema.h" +#include "emalloc.h" +#include "helloworld_t.h" +#include "sgx_mm_rt_abstraction.h" + +extern ema_root_t g_user_ema_root; +extern ema_root_t g_rts_ema_root; +#define LEGAL_ALLOC_PAGE_TYPE \ + (SGX_EMA_PAGE_TYPE_REG | SGX_EMA_PAGE_TYPE_SS_FIRST | \ + SGX_EMA_PAGE_TYPE_SS_REST) +sgx_mm_mutex* mm_lock = NULL; +size_t mm_user_base = 0; +size_t mm_user_end = 0; + +int mm_alloc_internal( + void* addr, + size_t size, + int flags, + sgx_enclave_fault_handler_t handler, + void* priv, + void** out_addr, + ema_root_t* root) +{ + int status = -1; + size_t tmp_addr = 0; + ema_t *node = NULL, *next_ema = NULL; + bool ret = false; + + uint32_t alloc_flags = (uint32_t)flags & SGX_EMA_ALLOC_FLAGS_MASK; + // Must have one of these: + if (!(alloc_flags & + (SGX_EMA_RESERVE | SGX_EMA_COMMIT_NOW | SGX_EMA_COMMIT_ON_DEMAND))) + return EINVAL; + + uint64_t page_type = (uint64_t)flags & SGX_EMA_PAGE_TYPE_MASK; + if ((uint64_t)(~LEGAL_ALLOC_PAGE_TYPE) & page_type) + return EINVAL; + if (page_type == 0) + page_type = SGX_EMA_PAGE_TYPE_REG; + + if (size % SGX_PAGE_SIZE) + return EINVAL; + + uint8_t align_flag = (uint8_t)( + ((uint32_t)flags & SGX_EMA_ALIGNMENT_MASK) >> SGX_EMA_ALIGNMENT_SHIFT); + if (align_flag == 0) + align_flag = 12; + if (align_flag < 12) + return EINVAL; + + uint64_t align_mask = (uint64_t)(1ULL << align_flag) - 1ULL; + + tmp_addr = (size_t)addr; + // If an address is given, user must align it + if ((tmp_addr & align_mask)) + return EINVAL; + if (addr && (!sgx_mm_is_within_enclave(addr, size))) + return EACCES; + + if (sgx_mm_mutex_lock(mm_lock)) + return EFAULT; + + uint64_t si_flags = (uint64_t)SGX_EMA_PROT_READ_WRITE | page_type; + if (alloc_flags & SGX_EMA_RESERVE) + { + // no type set for RESERVE ranges + si_flags = SGX_EMA_PROT_NONE; + } + + if (tmp_addr) + { + bool fixed_alloc = (alloc_flags & SGX_EMA_FIXED); + size_t end = tmp_addr + size; + size_t start = tmp_addr; + ema_t* first = NULL; + ema_t* last = NULL; + + bool exist_in_root = !search_ema_range(root, start, end, &first, &last); + + if (exist_in_root) + { + printf("exist in root\n"); + // Use the reserved space earlier + node = ema_realloc_from_reserve_range( + first, last, start, end, alloc_flags, si_flags, handler, priv); + if (node) + { + goto 
alloc_action; + } + // can't fit with the address but fixed alloc is asked + if (fixed_alloc) + { + status = EEXIST; + goto unlock; + } + // Not a fixed alloc, + // fall through to find a free space anywhere + assert(!ret); + } + else + { + // No existing ema overlapping with requested range + ret = find_free_region_at(root, tmp_addr, size, &next_ema); + if (!ret && fixed_alloc) + { + // specified addr is not within the range covered by this root, + // and the caller insists to use this addr + status = EPERM; + goto unlock; + } + // can't use specified addr, but can try another, fall through + } + } + + // At this point, ret == false means: + // Either no address given or the given address can't be used + if (!ret) + ret = find_free_region( + root, size, (1ULL << align_flag), &tmp_addr, &next_ema); + if (!ret) + { + printf("find_free_region fails\n"); + status = ENOMEM; + goto unlock; + } + /************************************************** + * create and operate on a new node + **************************************************/ + assert(tmp_addr); // found address + assert(next_ema); // found where to insert + // create and insert the node + node = + ema_new(tmp_addr, size, alloc_flags, si_flags, handler, priv, next_ema); + if (!node) + { + status = ENOMEM; + goto unlock; + } +alloc_action: + assert(node); + bool x = oe_is_within_enclave(0x01837408, 0x4096); + printf("alloc_action is in enclave ret: %d\n", x); + status = ema_do_alloc(node); + if (status != 0) + { + goto alloc_failed; + } + if (out_addr) + { + *out_addr = (void*)tmp_addr; + } + status = 0; + goto unlock; +alloc_failed: + ema_destroy(node); + +unlock: + sgx_mm_mutex_unlock(mm_lock); + return status; +} + +int sgx_mm_alloc( + void* addr, + size_t size, + int flags, + sgx_enclave_fault_handler_t handler, + void* priv, + void** out_addr) +{ + if (flags & SGX_EMA_SYSTEM) + return EINVAL; + return mm_alloc_internal( + addr, size, flags, handler, priv, out_addr, &g_user_ema_root); +} + +int mm_commit_internal(void* addr, size_t size, ema_root_t* root) +{ + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + ema_t *first = NULL, *last = NULL; + + if (sgx_mm_mutex_lock(mm_lock)) + return ret; + ret = search_ema_range(root, start, end, &first, &last); + if (ret < 0) + { + ret = EINVAL; + goto unlock; + } + + ret = ema_do_commit_loop(first, last, start, end); +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_commit(void* addr, size_t size) +{ + return mm_commit_internal(addr, size, &g_user_ema_root); +} + +int mm_uncommit_internal(void* addr, size_t size, ema_root_t* root) +{ + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + ema_t *first = NULL, *last = NULL; + + if (sgx_mm_mutex_lock(mm_lock)) + return ret; + ret = search_ema_range(root, start, end, &first, &last); + if (ret < 0) + { + ret = EINVAL; + goto unlock; + } + + ret = ema_do_uncommit_loop(first, last, start, end); +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_uncommit(void* addr, size_t size) +{ + return mm_uncommit_internal(addr, size, &g_user_ema_root); +} + +int mm_dealloc_internal(void* addr, size_t size, ema_root_t* root) +{ + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + ema_t *first = NULL, *last = NULL; + + if (sgx_mm_mutex_lock(mm_lock)) + return ret; + ret = search_ema_range(root, start, end, &first, &last); + if (ret < 0) + { + ret = EINVAL; + goto unlock; + } + + ret = ema_do_dealloc_loop(first, last, start, end); 
+unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_dealloc(void* addr, size_t size) +{ + return mm_dealloc_internal(addr, size, &g_user_ema_root); +} + +int mm_commit_data_internal( + void* addr, + size_t size, + uint8_t* data, + int prot, + ema_root_t* root) +{ + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + ema_t *first = NULL, *last = NULL; + + if (size == 0) + return EINVAL; + if (size % SGX_PAGE_SIZE != 0) + return EINVAL; + if (start % SGX_PAGE_SIZE != 0) + return EINVAL; + if (((size_t)data) % SGX_PAGE_SIZE != 0) + return EINVAL; + if (((uint32_t)prot) & (uint32_t)(~SGX_EMA_PROT_MASK)) + return EINVAL; + if (!sgx_mm_is_within_enclave(data, size)) + return EINVAL; + + if (sgx_mm_mutex_lock(mm_lock)) + return ret; + ret = search_ema_range(root, start, end, &first, &last); + + if (ret < 0) + { + ret = EINVAL; + goto unlock; + } + + ret = ema_do_commit_data_loop(first, last, start, end, data, prot); +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_commit_data(void* addr, size_t size, uint8_t* data, int prot) +{ + return mm_commit_data_internal(addr, size, data, prot, &g_user_ema_root); +} + +int mm_modify_type_internal(void* addr, size_t size, int type, ema_root_t* root) +{ + // for this API, TCS is the only valid page type + if (type != SGX_EMA_PAGE_TYPE_TCS) + { + return EPERM; + } + + // TCS occupies only one page + if (size != SGX_PAGE_SIZE) + { + return EINVAL; + } + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + ema_t *first = NULL, *last = NULL; + + if (start % SGX_PAGE_SIZE != 0) + return EINVAL; + + if (sgx_mm_mutex_lock(mm_lock)) + return ret; + ret = search_ema_range(root, start, end, &first, &last); + + if (ret < 0) + { + ret = EINVAL; + goto unlock; + } + + // one page only, covered by a single ema node + assert(ema_next(first) == last); + printf("[mm_modify_type_internal]\n"); + ret = ema_change_to_tcs(first, (size_t)addr); +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_modify_type(void* addr, size_t size, int type) +{ + return mm_modify_type_internal(addr, size, type, &g_user_ema_root); +} + +int mm_modify_permissions_internal( + void* addr, + size_t size, + int prot, + ema_root_t* root) +{ + int ret = EFAULT; + size_t start = (size_t)addr; + size_t end = start + size; + + if (size == 0) + return EINVAL; + if (size % SGX_PAGE_SIZE) + return EINVAL; + if (start % SGX_PAGE_SIZE) + return EINVAL; + if ((prot & SGX_EMA_PROT_EXEC) && !(prot & SGX_EMA_PROT_READ)) + return EINVAL; + + ema_t *first = NULL, *last = NULL; + + if (sgx_mm_mutex_lock(mm_lock)) + return ret; + ret = search_ema_range(root, start, end, &first, &last); + if (ret < 0) + { + ret = EINVAL; + goto unlock; + } + ret = ema_modify_permissions_loop(first, last, start, end, prot); +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_modify_permissions(void* addr, size_t size, int prot) +{ + return mm_modify_permissions_internal(addr, size, prot, &g_user_ema_root); +} + +int sgx_mm_enclave_pfhandler(const sgx_pfinfo* pfinfo) +{ + int ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; + size_t addr = TRIM_TO((pfinfo->maddr), SGX_PAGE_SIZE); + if (sgx_mm_mutex_lock(mm_lock)) + return ret; + ema_t* ema = search_ema(&g_user_ema_root, addr); + void* data = NULL; + sgx_enclave_fault_handler_t eh = NULL; + if (!ema) + { + ema = search_ema(&g_rts_ema_root, addr); + if (!ema) + goto unlock; + } + eh = ema_fault_handler(ema, &data); + if (eh) + { + // don't hold the lock as handlers 
can longjmp + sgx_mm_mutex_unlock(mm_lock); + return eh(pfinfo, data); + } + if (ema_page_committed(ema, addr)) + { + // Check for spurious #PF + if ((pfinfo->pfec.rw == 0 && + 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_READ)) || + (pfinfo->pfec.rw == 1 && + 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_WRITE))) + { + ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; + } + else + ret = SGX_MM_EXCEPTION_CONTINUE_EXECUTION; + goto unlock; + } + if (get_ema_alloc_flags(ema) & SGX_EMA_COMMIT_ON_DEMAND) + { + if ((pfinfo->pfec.rw == 0 && + 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_READ)) || + (pfinfo->pfec.rw == 1 && + 0 == (get_ema_si_flags(ema) & SGX_EMA_PROT_WRITE))) + { + ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; + goto unlock; + } + + // Currently kernel support for GROWSUP/GROWSDOWN not yet available. + // Add support for those flags later + if (ema_do_commit(ema, addr, addr + SGX_PAGE_SIZE)) + { + sgx_mm_mutex_unlock(mm_lock); + abort(); + } + + ret = SGX_MM_EXCEPTION_CONTINUE_EXECUTION; + goto unlock; + } + else + { + sgx_mm_mutex_unlock(mm_lock); + // we found the EMA and nothing should cause the PF + // Can't continue as we know something is wrong + abort(); + } + + ret = SGX_MM_EXCEPTION_CONTINUE_SEARCH; +unlock: + sgx_mm_mutex_unlock(mm_lock); + return ret; +} + +int sgx_mm_init(size_t user_base, size_t user_end) +{ + mm_lock = sgx_mm_mutex_create(); + if (!mm_lock) + return EFAULT; + + mm_user_base = user_base; + mm_user_end = user_end; + + printf("sgx_mm_init: %p\n", mm_user_base); + bool x = oe_is_within_enclave(0x01837408, 0x4096); + printf("is in enclave ret: %d\n", x); + + if (!sgx_mm_register_pfhandler(sgx_mm_enclave_pfhandler)) + return EFAULT; + return emalloc_init(); +} diff --git a/enclave/core/sgx/include/bit_array.h b/enclave/core/sgx/include/bit_array.h new file mode 100644 index 000000000..429d8000e --- /dev/null +++ b/enclave/core/sgx/include/bit_array.h @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef BIT_ARRAY_H_ +#define BIT_ARRAY_H_ + +#include +#include +#include + +typedef struct bit_array_ bit_array; + +#ifdef __cplusplus +extern "C" +{ +#endif + + // Create a new bit array to track the status of 'num_of_bits' bits. + // The contents of the data are not initialized. + bit_array* bit_array_new(size_t num_of_bits); + + // Create a new bit array to track the status of 'num_of_bits' bits. + // All the tracked bits are set (value 1). + bit_array* bit_array_new_set(size_t num_of_bits); + + // Create a new bit array to track the status of 'num_of_bits' bits. + // All the tracked bits are reset (value 0). + bit_array* bit_array_new_reset(size_t num_of_bits); + + // Delete the bit_array 'ba' and the data it owns + void bit_array_delete(bit_array* ba); + + // Return whether the bit at position 'pos' is set + bool bit_array_test(bit_array* ba, size_t pos); + + // Return whether the bits in range [pos, pos+len) are all set + bool bit_array_test_range(bit_array* ba, size_t pos, size_t len); + + // Return whether any bit in range [pos, pos+len) is set + bool bit_array_test_range_any(bit_array* ba, size_t pos, size_t len); + + // Set the bit at 'pos' + void bit_array_set(bit_array* ba, size_t pos); + + // Set the bits in range [pos, pos+len) + void bit_array_set_range(bit_array* ba, size_t pos, size_t len); + + // Set all the bits + void bit_array_set_all(bit_array* ba); + + // Clear the bits in range [pos, pos+len) + void bit_array_reset_range(bit_array* ba, size_t pos, size_t len); + + // Clear all the bits + void bit_array_reset_all(bit_array* ba); + + // Reset the bit_array 'ba' to track the new 'data', which has + // 'num_of_bits' bits + void bit_array_reattach(bit_array* ba, size_t num_of_bits, uint8_t* data); + + // Split the bit array at 'pos' + // Returns pointers to two new bit arrays + int bit_array_split(bit_array* ba, size_t pos, bit_array**, bit_array**); + +#ifdef __cplusplus +} +#endif + +#endif
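As a sketch of how this API supports the EMM's per-page EACCEPT bookkeeping (the 64-page region below is made up for illustration):

    // Track the commit status of a 64-page region, one bit per page.
    bit_array* map = bit_array_new_reset(64);
    bit_array_set_range(map, 0, 16);       // pages 0-15 EACCEPTed
    if (bit_array_test_range(map, 0, 16))  // all 16 committed?
        bit_array_reset_range(map, 8, 8);  // pages 8-15 trimmed again
    bit_array_delete(map);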
diff --git a/enclave/core/sgx/include/bit_array_imp.h b/enclave/core/sgx/include/bit_array_imp.h new file mode 100644 index 000000000..754583cea --- /dev/null +++ b/enclave/core/sgx/include/bit_array_imp.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef SGX_BIT_ARRAY_IMP_H +#define SGX_BIT_ARRAY_IMP_H + +#include + +#include "bit_array.h" + +struct bit_array_ +{ + size_t n_bytes; + size_t n_bits; + uint8_t* data; +}; + +#endif
diff --git a/enclave/core/sgx/include/ema.h b/enclave/core/sgx/include/ema.h new file mode 100644 index 000000000..49a70187b --- /dev/null +++ b/enclave/core/sgx/include/ema.h @@ -0,0 +1,169 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * + */ + +#ifndef __SGX_EMA_H__ +#define __SGX_EMA_H__ + +#include + +#include "sgx_mm.h" + +#ifndef SGX_SECINFO_ALIGN +#define SGX_SECINFO_ALIGN __attribute__((aligned(sizeof(sec_info_t)))) +#endif + +#define SGX_PAGE_SIZE 0x1000ULL +#define SGX_PAGE_SHIFT 12 + +typedef struct ema_root_ ema_root_t; +typedef struct ema_t_ ema_t; + +#ifdef __cplusplus +extern "C" +{ +#endif + +#ifndef NDEBUG + ema_t* ema_next(ema_t* node); +#endif +#ifdef TEST + void destroy_ema_root(ema_root_t*); + void dump_ema_root(ema_root_t*); + size_t ema_base(ema_t* node); + size_t ema_size(ema_t* node); + int ema_split(ema_t* ema, size_t addr, bool new_lower, ema_t** new_node); + int ema_split_ex(ema_t* ema, size_t start, size_t end, ema_t** new_node); + ema_t* ema_merge(ema_t* lo_ema, ema_t* hi_ema); +#endif + + uint32_t get_ema_alloc_flags(ema_t* node); + uint64_t get_ema_si_flags(ema_t* node); + + sgx_enclave_fault_handler_t ema_fault_handler( + ema_t* node, + void** private_data); + ema_t* ema_new( + size_t addr, + size_t size, + uint32_t alloc_flags, + uint64_t si_flags, + sgx_enclave_fault_handler_t handler, + void* private_data, + ema_t* next_ema); + void ema_destroy(ema_t* ema); + + int ema_set_eaccept_full(ema_t* node); + int ema_clear_eaccept_full(ema_t* node); + int ema_set_eaccept(ema_t* node, size_t start, size_t end); + bool ema_page_committed(ema_t* ema, size_t addr); + + ema_t* search_ema(ema_root_t* root, size_t addr); + int search_ema_range( + ema_root_t* root, + size_t start, + size_t end, + ema_t** ema_begin, + ema_t** ema_end); + + bool find_free_region( + ema_root_t* root, + size_t size, + size_t align, + size_t* addr, + ema_t** next_ema); + + bool find_free_region_at( + ema_root_t* root, + size_t addr, + size_t size, + ema_t** next_ema); + + int do_commit(size_t start, size_t size, uint64_t si_flags, bool grow_up); + int ema_do_commit(ema_t* node, size_t start, size_t end); + int ema_do_commit_loop(ema_t* first, ema_t* last, size_t start, size_t end); + + int ema_do_uncommit(ema_t* node, size_t start, size_t end); + int ema_do_uncommit_loop( + ema_t* first, + ema_t* last, + size_t start, + size_t end); + + int ema_do_dealloc(ema_t* node, size_t start, size_t end); + int ema_do_dealloc_loop( + ema_t* first, + ema_t* last, + size_t start, + size_t end); + + int ema_modify_permissions( + ema_t* node, + size_t start, + size_t end, + int new_prot); + int ema_modify_permissions_loop( + ema_t* first, + ema_t* last, + size_t start, + size_t end, + int prot); + int ema_change_to_tcs(ema_t* node, size_t addr); + + int ema_do_commit_data( + ema_t* node, + size_t start, + size_t end, + uint8_t* data, + int prot); + int ema_do_commit_data_loop( + ema_t* firsr, + ema_t* last, + size_t start, + size_t end, + uint8_t* data, + int prot); + + int ema_do_alloc(ema_t* node); + ema_t* ema_realloc_from_reserve_range( + ema_t* first, + ema_t* last, + size_t start, + size_t end, + uint32_t alloc_flags, + uint64_t si_flags, + sgx_enclave_fault_handler_t handler, + void* private_data); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/enclave/core/sgx/include/ema_imp.h b/enclave/core/sgx/include/ema_imp.h new file mode 100644 index 000000000..937e8c90f --- /dev/null +++ b/enclave/core/sgx/include/ema_imp.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef SGX_EMA_IMP_H +#define SGX_EMA_IMP_H + +#include + +#include "bit_array.h" +#include "ema.h" +#include "sgx_mm.h" + +struct ema_t_ +{ + size_t start_addr; // starting address, should be on a page boundary + size_t size; // bytes + uint32_t alloc_flags; // EMA_RESERVED, EMA_COMMIT_NOW, EMA_COMMIT_ON_DEMAND, + // OR'ed with EMA_SYSTEM, EMA_GROWSDOWN, EMA_GROWSUP + uint64_t si_flags; // one of EMA_PROT_NONE, READ, READ_WRITE, READ_EXEC, + // READ_WRITE_EXEC, OR'ed with one of EMA_PAGE_TYPE_REG, + // EMA_PAGE_TYPE_TCS, EMA_PAGE_TYPE_TRIM + bit_array* + eaccept_map; // bitmap for EACCEPT status: bit 0 of byte 0 covers the + // page at start_addr; bit i of byte j covers the page + // at start_addr + ((8 * j + i) << 12) + sgx_enclave_fault_handler_t + handler; // custom PF handler (for EACCEPTCOPY use) + void* priv; // private data for handler + ema_t* next; // next in doubly linked list + ema_t* prev; // prev in doubly linked list +}; +#endif
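In concrete terms, the eaccept_map indexing above maps page k of the region to bit k % 8 of byte k / 8. A small sketch of the math (helper names are hypothetical; assumes <stddef.h> for size_t):

    // Page index for an address in the region, using SGX_PAGE_SHIFT == 12.
    size_t page_index(size_t start_addr, size_t addr)
    {
        return (addr - start_addr) >> 12;
    }
    // Address covered by bit i of byte j in eaccept_map.
    size_t page_addr(size_t start_addr, size_t j, size_t i)
    {
        return start_addr + ((8 * j + i) << 12);
    }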
diff --git a/enclave/core/sgx/include/emalloc.h b/enclave/core/sgx/include/emalloc.h new file mode 100644 index 000000000..1f3e79f01 --- /dev/null +++ b/enclave/core/sgx/include/emalloc.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __SGX_EMALLOC_H__ +#define __SGX_EMALLOC_H__ +#include +#define ROUND_TO(x, align) \ + ((size_t)((x) + ((align)-1)) & (size_t)(~((align)-1))) +#define TRIM_TO(x, align) ((size_t)(x) & (size_t)(~((align)-1))) +#define MIN(x, y) (((x) > (y)) ? (y) : (x)) +#define MAX(x, y) (((x) > (y)) ? (x) : (y)) + +int emalloc_init(void); +void* emalloc(size_t); +void efree(void* ptr); +int can_erealloc(void* ptr); +#endif
diff --git a/enclave/core/sgx/include/emm_private.h b/enclave/core/sgx/include/emm_private.h new file mode 100644 index 000000000..927825969 --- /dev/null +++ b/enclave/core/sgx/include/emm_private.h @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef EMM_PRIVATE_H_ +#define EMM_PRIVATE_H_ + +#include +#include + +#include "sgx_mm.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + /* + * Initialize the EMM internals and reserve the whole range available for + * user allocations via the public sgx_mm_alloc API. This should be called + * before any other APIs are invoked.
The runtime must not allocate + * any subregion in [user_start, user_end) for system usage, i.e., the EMM + * will fail any allocation request with the SGX_EMA_SYSTEM flag in this + * range and return an EINVAL error. + * @param[in] user_start The start of the user address range, page aligned. + * @param[in] user_end The end (exclusive) of the user address range, page + * aligned. + * @retval 0 Initialization was successful. + * @retval ENOMEM No EPC space or RAM for internal allocations. + * @retval EFAULT Other failures in runtime abstraction layer API + * implementation, e.g., failure in sgx_mm_register_pfhandler, + * sgx_mm_mutex_create. + */ + int sgx_mm_init(size_t user_start, size_t user_end); + +#define SGX_EMA_SYSTEM \ + SGX_EMA_ALLOC_FLAGS(0x80UL) /* EMA reserved by system \ + */ + /* + * Initialize an EMA. This can be used to set up EMAs to account for + * regions that are loaded and initialized with EADD before EINIT. + * @param[in] addr Starting address of the region, page aligned. If NULL is + * provided, then the function will select the starting address. + * @param[in] size Size of the region in multiples of page size in bytes. + * @param[in] flags SGX_EMA_SYSTEM, or SGX_EMA_SYSTEM | SGX_EMA_RESERVE, + * bitwise ORed with one of the following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. This is the + * default if not specified. + * - SGX_EMA_PAGE_TYPE_TCS: TCS page. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in a shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the remaining pages in a shadow stack. + * @param[in] prot Permissions, either SGX_EMA_PROT_NONE or a bitwise OR of + * the following: + * - SGX_EMA_PROT_READ: Pages may be read. + * - SGX_EMA_PROT_WRITE: Pages may be written. + * - SGX_EMA_PROT_EXECUTE: Pages may be executed. + * @param[in] handler A custom handler for page faults in this region, NULL + * if no custom handling is needed. + * @param[in] handler_private Private data for the @handler, which will be + * passed back when the handler is called. + * @retval 0 The operation was successful. + * @retval EACCES Region is outside enclave address space. + * @retval EEXIST Any page in the requested range is in use. + * @retval EINVAL Invalid page type or flags, or addr and length are not + * page aligned. + */ + int mm_init_ema( + void* addr, + size_t size, + int flags, + int prot, + sgx_enclave_fault_handler_t handler, + void* handler_private); + // See documentation in sgx_mm.h + int mm_alloc( + void* addr, + size_t size, + uint32_t flags, + sgx_enclave_fault_handler_t handler, + void* private_data, + void** out_addr); + int mm_dealloc(void* addr, size_t size); + int mm_uncommit(void* addr, size_t size); + int mm_commit(void* addr, size_t size); + int mm_commit_data(void* addr, size_t size, uint8_t* data, int prot); + int mm_modify_type(void* addr, size_t size, int type); + int mm_modify_permissions(void* addr, size_t size, int prot); + +#ifdef __cplusplus +} +#endif + +#endif
diff --git a/enclave/core/sgx/include/sgx_mm.h b/enclave/core/sgx/include/sgx_mm.h new file mode 100644 index 000000000..5e18ac721 --- /dev/null +++ b/enclave/core/sgx/include/sgx_mm.h @@ -0,0 +1,306 @@ +/* + * Copyright (C) 2022 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef SGX_MM_H_ +#define SGX_MM_H_ + +#include +#include +#ifdef __cplusplus +extern "C" +{ +#endif + + /** + * Page fault (#PF) info reported in the SGX SSA MISC region. + */ + typedef struct _sgx_pfinfo + { + uint64_t maddr; // address for #PF. + union _pfec + { + uint32_t errcd; + struct + { // PFEC bits. + uint32_t p : 1; // P flag. + uint32_t rw : 1; // RW access flag, 0 for read, 1 for write. + uint32_t : 13; // U/S, I/O, PK and reserved bits not relevant + // for SGX PF. + uint32_t sgx : 1; // SGX bit. + uint32_t : 16; // reserved bits. + }; + } pfec; + uint32_t reserved; + } sgx_pfinfo; + + /** + * Custom page fault (#PF) handler, do usage specific processing upon #PF, + * e.g., loading data and verify its trustworthiness, then call + * sgx_mm_commit_data to explicitly EACCEPTCOPY data. This custom handler is + * passed into sgx_mm_alloc, and associated with the newly allocated region. + * The memory manager calls the handler when a #PF happens in the associated + * region. The handler may invoke abort() if it determines the exception is + * invalid based on certain internal states it maintains. + * + * @param[in] pfinfo info reported in the SSA MISC region for page fault. + * @param[in] private_data private data provided by handler in sgx_mm_alloc + * call. + * @retval SGX_MM_EXCEPTION_CONTINUE_EXECUTION Success on handling the + * exception. + * @retval SGX_MM_EXCEPTION_CONTINUE_SEARCH Exception not handled and should + * be passed to some other handler. + * + */ + typedef int (*sgx_enclave_fault_handler_t)( + const sgx_pfinfo* pfinfo, + void* private_data); + +/* bit 0 - 7 are allocation flags. */ +#define SGX_EMA_ALLOC_FLAGS_SHIFT 0 +#define SGX_EMA_ALLOC_FLAGS(n) ((n) << SGX_EMA_ALLOC_FLAGS_SHIFT) +#define SGX_EMA_ALLOC_FLAGS_MASK SGX_EMA_ALLOC_FLAGS(0xFF) + +/* Only reserve an address range, no physical memory committed.*/ +#define SGX_EMA_RESERVE SGX_EMA_ALLOC_FLAGS(0x1) + +/* Reserve an address range and commit physical memory. */ +#define SGX_EMA_COMMIT_NOW SGX_EMA_ALLOC_FLAGS(0x2) + +/* Reserve an address range and commit physical memory on demand.*/ +#define SGX_EMA_COMMIT_ON_DEMAND SGX_EMA_ALLOC_FLAGS(0x4) + +/* Always commit pages from higher to lower addresses, + * no gaps in addresses above the last committed. 
+ */ +#define SGX_EMA_GROWSDOWN SGX_EMA_ALLOC_FLAGS(0x10) + +/* Always commit pages from lower to higher addresses, + * no gaps in addresses below the last committed. + */ +#define SGX_EMA_GROWSUP SGX_EMA_ALLOC_FLAGS(0x20) + +/* Map addr must be exactly as requested. */ +#define SGX_EMA_FIXED SGX_EMA_ALLOC_FLAGS(0x40) + +/* bit 8 - 15 are page types. */ +#define SGX_EMA_PAGE_TYPE_SHIFT 8 +#define SGX_EMA_PAGE_TYPE(n) ((n) << SGX_EMA_PAGE_TYPE_SHIFT) +#define SGX_EMA_PAGE_TYPE_MASK SGX_EMA_PAGE_TYPE(0xFF) +#define SGX_EMA_PAGE_TYPE_TCS SGX_EMA_PAGE_TYPE(0x1) /* TCS page type. */ +#define SGX_EMA_PAGE_TYPE_REG \ + SGX_EMA_PAGE_TYPE(0x2) /* regular page type, default if not specified. */ +#define SGX_EMA_PAGE_TYPE_TRIM SGX_EMA_PAGE_TYPE(0x4) /* TRIM page type. */ +#define SGX_EMA_PAGE_TYPE_SS_FIRST \ + SGX_EMA_PAGE_TYPE(0x5) /* the first page in shadow stack. */ +#define SGX_EMA_PAGE_TYPE_SS_REST \ + SGX_EMA_PAGE_TYPE(0x6) /* the rest pages in shadow stack. */ + +/* Use bit 24-31 for alignment masks. */ +#define SGX_EMA_ALIGNMENT_SHIFT 24 +/* + * Alignment (expressed in log2). Must be >= log2(PAGE_SIZE) and + * < # bits in a pointer (32 or 64). + */ +#define SGX_EMA_ALIGNED(n) (((unsigned int)(n) << SGX_EMA_ALIGNMENT_SHIFT)) +#define SGX_EMA_ALIGNMENT_MASK SGX_EMA_ALIGNED(0xFFUL) +#define SGX_EMA_ALIGNMENT_64KB SGX_EMA_ALIGNED(16UL) +#define SGX_EMA_ALIGNMENT_16MB SGX_EMA_ALIGNED(24UL) +#define SGX_EMA_ALIGNMENT_4GB SGX_EMA_ALIGNED(32UL) + +/* Permissions flags */ +#define SGX_EMA_PROT_NONE 0x0 +#define SGX_EMA_PROT_READ 0x1 +#define SGX_EMA_PROT_WRITE 0x2 +#define SGX_EMA_PROT_EXEC 0x4 +#define SGX_EMA_PROT_READ_WRITE (SGX_EMA_PROT_READ | SGX_EMA_PROT_WRITE) +#define SGX_EMA_PROT_READ_EXEC (SGX_EMA_PROT_READ | SGX_EMA_PROT_EXEC) +#define SGX_EMA_PROT_MASK (SGX_EMA_PROT_READ_WRITE | SGX_EMA_PROT_EXEC) + /* + * Allocate a new memory region in enclave address space (ELRANGE). + * @param[in] addr Starting address of the region, page aligned. If NULL is + * provided, then the function will select the starting address. + * @param[in] length Size of the region in bytes of multiples of page size. + * @param[in] flags A bitwise OR of flags describing committing mode, + * committing order, address preference, and page type. Flags should include + * exactly one of following for committing mode: + * - SGX_EMA_RESERVE: just reserve an address range, no EPC + * committed. To allocate memory on a reserved range, call this function + * again with SGX_EMA_COMMIT_ON_DEMAND or SGX_EMA_COMMIT_NOW. + * - SGX_EMA_COMMIT_NOW: reserves memory range and commit EPC + * pages. EAUG and EACCEPT are done on SGX2 platforms. + * - SGX_EMA_COMMIT_ON_DEMAND: reserves memory range, EPC pages + * are committed (EACCEPT) on demand upon #PF + * on SGX2 platforms. ORed with zero or one of the committing order flags + * for SGX2 platforms: + * - SGX_EMA_GROWSDOWN: always commit pages from higher to lower + * addresses, no gaps in addresses above the last committed. + * - SGX_EMA_GROWSUP: always commit pages from lower to higher + * addresses, no gaps in addresses below the last committed. Optionally ORed + * with + * - SGX_EMA_FIXED: allocate at fixed address, will return error + * if the requested address is in use. + * - SGX_EMA_ALIGNED(n): Align the region on a requested + * boundary. Fail if a suitable region cannot be found, The argument n + * specifies the binary logarithm of the desired alignment and must be at + * least 12. Optionally ORed with one of following page types: + * - SGX_EMA_PAGE_TYPE_REG: regular page type. 
This is the + * default if not specified. + * - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in a shadow stack. + * - SGX_EMA_PAGE_TYPE_SS_REST: the remaining pages in a shadow stack. + * + * @param[in] handler A custom handler for page faults in this region, NULL + * if no custom handling is needed. + * @param[in] handler_private Private data for the @handler, which will be + * passed back when the handler is called. + * @param[out] out_addr Pointer to store the start address of the allocated + * range. Set to a valid address by the function on success, NULL otherwise. + * @retval 0 The operation was successful. + * @retval EACCES Region is outside enclave address space. + * @retval EEXIST Any page in the range requested is in use and + * SGX_EMA_FIXED is set. + * @retval EINVAL Invalid alignment boundary, i.e., n < 12 in + * SGX_EMA_ALIGNED(n). + * @retval ENOMEM Out of memory, or no free space to satisfy the alignment + * boundary. + * @retval EFAULT All other errors. + */ + int sgx_mm_alloc( + void* addr, + size_t length, + int flags, + sgx_enclave_fault_handler_t handler, + void* handler_private, + void** out_addr); + + /* + * Uncommit (trim) physical EPC pages in a previously committed range. + * The pages in the allocation are freed, but the address range is still + * reserved. + * @param[in] addr Page aligned start address of the region to be trimmed. + * @param[in] length Size in bytes of multiples of page size. + * @retval 0 The operation was successful. + * @retval EINVAL The address range is not allocated or outside enclave. + * @retval EFAULT All other errors. + */ + int sgx_mm_uncommit(void* addr, size_t length); + + /* + * Deallocate the address range. + * The pages in the allocation are freed and the address range is released + * for future allocation. + * @param[in] addr Page aligned start address of the region to be freed and + * released. + * @param[in] length Size in bytes of multiples of page size. + * @retval 0 The operation was successful. + * @retval EINVAL The address range is not allocated or outside enclave. + */ + int sgx_mm_dealloc(void* addr, size_t length); + + /* + * Change permissions of an allocated region. + * @param[in] addr Start address of the region, must be page aligned. + * @param[in] length Size in bytes of multiples of page size. + * @param[in] prot Permissions, a bitwise OR of the following: + * - SGX_EMA_PROT_READ: Pages may be read. + * - SGX_EMA_PROT_WRITE: Pages may be written. + * - SGX_EMA_PROT_EXEC: Pages may be executed. + * @retval 0 The operation was successful. + * @retval EACCES The page type does not allow the change. + * @retval EINVAL The memory region was not allocated or outside enclave + * or other invalid parameters that are not supported. + * @retval EFAULT All other errors. + */ + int sgx_mm_modify_permissions(void* addr, size_t length, int prot); + + /* + * Change the page type of an allocated region. + * @param[in] addr Start address of the region, must be page aligned. + * @param[in] length Size in bytes of multiples of page size. + * @param[in] type Page type, only SGX_EMA_PAGE_TYPE_TCS is supported. + * + * @retval 0 The operation was successful. + * @retval EACCES Original page type/permissions do not allow the change. + * @retval EINVAL The memory region was not allocated or outside enclave + * or other invalid parameters that are not supported. + * @retval EPERM Target page type is not allowed by this API, e.g., + * PT_TRIM, PT_SS_FIRST, PT_SS_REST. + * @retval EFAULT All other errors.
+     */
+    int sgx_mm_modify_type(void* addr, size_t length, int type);
+
+    /*
+     * Commit a partial or full range of memory allocated previously with
+     * SGX_EMA_COMMIT_ON_DEMAND. The API returns 0 if all pages in the
+     * requested range are successfully committed. Calling this API on pages
+     * already committed has no effect.
+     * @param[in] addr Page-aligned starting address.
+     * @param[in] length Length of the region in bytes; must be a multiple
+     * of the page size.
+     * @retval 0 The operation was successful.
+     * @retval EINVAL Any requested page is not in a previously allocated
+     * region, or is outside the enclave address range.
+     * @retval EFAULT All other errors.
+     */
+    int sgx_mm_commit(void* addr, size_t length);
+
+    /*
+     * Load data into target pages within a region previously allocated by
+     * sgx_mm_alloc. This can be called to load data and set the target
+     * permissions at the same time, e.g., for dynamic code loading. The
+     * caller must have verified that the data is trusted and is expected to
+     * be loaded to the target address range. Calling this API on pages
+     * already committed will fail.
+     *
+     * @param[in] addr Page-aligned target starting address.
+     * @param[in] length Length of the data in bytes; must be a multiple of
+     * the page size.
+     * @param[in] data Data of @length bytes.
+     * @param[in] prot Target permissions.
+     * @retval 0 The operation was successful.
+     * @retval EINVAL Any page in the requested address range was not
+     * previously allocated, or is outside the enclave address range.
+     * @retval EACCES Any page in the requested range was previously
+     * committed.
+     * @retval EFAULT All other errors.
+     */
+    int sgx_mm_commit_data(void* addr, size_t length, uint8_t* data, int prot);
+
+/* Return value used by the EMM #PF handler to indicate
+ * to the dispatcher that it should continue searching for the next handler.
+ */
+#define SGX_MM_EXCEPTION_CONTINUE_SEARCH 0
+
+/* Return value used by the EMM #PF handler to indicate
+ * to the dispatcher that it should stop searching and continue execution.
+ */
+#define SGX_MM_EXCEPTION_CONTINUE_EXECUTION -1
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/enclave/core/sgx/include/sgx_mm_primitives.h b/enclave/core/sgx/include/sgx_mm_primitives.h
new file mode 100644
index 000000000..90d55f7bb
--- /dev/null
+++ b/enclave/core/sgx/include/sgx_mm_primitives.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2022 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef SGX_MM_PRIMITIVES_H_
+#define SGX_MM_PRIMITIVES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+    // SGX primitives
+    typedef struct _sec_info_t
+    {
+        uint64_t flags;
+        uint64_t reserved[7];
+    } sec_info_t;
+
+    // EACCEPT
+    int do_eaccept(const sec_info_t* si, size_t addr);
+
+    // EMODPE
+    int do_emodpe(const sec_info_t* si, size_t addr);
+
+    // EACCEPTCOPY
+    int do_eacceptcopy(const sec_info_t* si, size_t dest, size_t src);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/enclave/core/sgx/include/sgx_mm_rt_abstraction.h b/enclave/core/sgx/include/sgx_mm_rt_abstraction.h
new file mode 100644
index 000000000..0151aacd5
--- /dev/null
+++ b/enclave/core/sgx/include/sgx_mm_rt_abstraction.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2022 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef SGX_MM_RT_ABSTRACTION_H_
+#define SGX_MM_RT_ABSTRACTION_H_
+
+#include "sgx_mm.h"
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <stdbool.h>
+
+    /*
+     * The EMM page fault (#PF) handler.
+     *
+     * @param[in] pfinfo Info reported in the SSA MISC region for the page
+     * fault.
+     * @retval SGX_MM_EXCEPTION_CONTINUE_EXECUTION Success handling the
+     * exception.
+     * @retval SGX_MM_EXCEPTION_CONTINUE_SEARCH The EMM does not handle the
+     * exception.
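+     *
+     * A minimal conforming handler might look like the following sketch
+     * (illustrative only; `emm_owns_address` is a hypothetical predicate
+     * for EMM-managed addresses):
+     *
+     *     int emm_pf_handler(const sgx_pfinfo* pfinfo)
+     *     {
+     *         if (!emm_owns_address(pfinfo))
+     *             return SGX_MM_EXCEPTION_CONTINUE_SEARCH;
+     *         // Commit (EACCEPT) the faulting page here, then resume.
+     *         return SGX_MM_EXCEPTION_CONTINUE_EXECUTION;
+     *     }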
+     */
+    typedef int (*sgx_mm_pfhandler_t)(const sgx_pfinfo* pfinfo);
+
+    /*
+     * Register the EMM handler with the global exception handler registry.
+     * The runtime should ensure this handler is called first for a #PF,
+     * before all other handlers.
+     *
+     * @param[in] pfhandler The EMM page fault handler.
+     * @retval true Success.
+     * @retval false Failure.
+     */
+    bool sgx_mm_register_pfhandler(sgx_mm_pfhandler_t pfhandler);
+
+    /*
+     * Unregister the EMM handler from the global exception handler
+     * registry.
+     * @param[in] pfhandler The EMM page fault handler.
+     * @retval true Success.
+     * @retval false Failure.
+     */
+    bool sgx_mm_unregister_pfhandler(sgx_mm_pfhandler_t pfhandler);
+
+    /*
+     * Call the OS to reserve a region for EAUG, immediately or on demand.
+     *
+     * @param[in] addr Desired page-aligned start address.
+     * @param[in] length Size of the region in bytes; must be a multiple of
+     * the page size.
+     * @param[in] page_type One of the following page types:
+     *     - SGX_EMA_PAGE_TYPE_REG: regular page type; the default if not
+     * specified.
+     *     - SGX_EMA_PAGE_TYPE_SS_FIRST: the first page in a shadow stack.
+     *     - SGX_EMA_PAGE_TYPE_SS_REST: the remaining pages in a shadow
+     * stack.
+     * @param[in] alloc_flags A bitwise OR of flags describing committing
+     * mode, committing order, address preference, and page type. The
+     * untrusted-side implementation should translate the following bits
+     * into the proper parameters when invoking the syscall (mmap on Linux)
+     * provided by the kernel. The flags parameter of this interface should
+     * include exactly one of the following committing modes:
+     *     - SGX_EMA_COMMIT_NOW: reserves the memory range with
+     * SGX_EMA_PROT_READ|SGX_EMA_PROT_WRITE; if supported, the kernel is
+     * given a hint to EAUG EPC pages for the area as soon as possible.
+     *     - SGX_EMA_COMMIT_ON_DEMAND: reserves the memory range; EPC pages
+     * can be EAUGed upon #PF.
+     * ORed with zero or one of the committing-order flags:
+     *     - SGX_EMA_GROWSDOWN: if supported, a hint for the kernel to EAUG
+     * pages from higher to lower addresses, no gaps in addresses above the
+     * last committed.
+     *     - SGX_EMA_GROWSUP: if supported, a hint for the kernel to EAUG
+     * pages from lower to higher addresses, no gaps in addresses below the
+     * last committed.
+     * @retval 0 The operation was successful.
+     * @retval EFAULT For all failures.
+     */
+    int sgx_mm_alloc_ocall(
+        uint64_t addr,
+        size_t length,
+        int page_type,
+        int alloc_flags);
+
+    /*
+     * Call the OS to change permissions or type, or to notify that EACCEPT
+     * is done after TRIM.
+     *
+     * @param[in] addr Start address of the memory whose protections are to
+     * be changed.
+     * @param[in] length Length of the area. This must be a multiple of the
+     * page size.
+     * @param[in] page_properties_from The original EPCM flags of the EPC
+     * pages to be modified. Must be a bitwise OR of the following:
+     *     - SGX_EMA_PROT_READ
+     *     - SGX_EMA_PROT_WRITE
+     *     - SGX_EMA_PROT_EXEC
+     *     - SGX_EMA_PAGE_TYPE_REG: regular page, changeable to TRIM or TCS.
+     *     - SGX_EMA_PAGE_TYPE_TRIM: signal to the kernel that EACCEPT is
+     * done for TRIM pages.
+     * @param[in] page_properties_to The target EPCM flags. Must be a
+     * bitwise OR of the following:
+     *     - SGX_EMA_PROT_READ
+     *     - SGX_EMA_PROT_WRITE
+     *     - SGX_EMA_PROT_EXEC
+     *     - SGX_EMA_PAGE_TYPE_TRIM: change the page type to PT_TRIM. Note
+     * that the address range for trimmed pages may still be reserved by the
+     * enclave with proper permissions.
+     *     - SGX_EMA_PAGE_TYPE_TCS: change the page type to PT_TCS.
+     * @retval 0 The operation was successful.
+     * @retval EFAULT For all failures.
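+     *
+     * One plausible combination, inferred from the descriptions above (a
+     * sketch, not normative): converting a committed read/write regular
+     * page into a TCS page would pass
+     *     page_properties_from = SGX_EMA_PAGE_TYPE_REG |
+     *                            SGX_EMA_PROT_READ_WRITE
+     *     page_properties_to   = SGX_EMA_PAGE_TYPE_TCS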
+ */ + + int sgx_mm_modify_ocall( + uint64_t addr, + size_t length, + int page_properties_from, + int page_properties_to); + + /* + * Define a mutex and init/lock/unlock/destroy functions. + */ + typedef struct _sgx_mm_mutex sgx_mm_mutex; + sgx_mm_mutex* sgx_mm_mutex_create(void); + int sgx_mm_mutex_lock(sgx_mm_mutex* mutex); + int sgx_mm_mutex_unlock(sgx_mm_mutex* mutex); + int sgx_mm_mutex_destroy(sgx_mm_mutex* mutex); + + /* + * Check whether the given buffer is strictly within the enclave. + * + * Check whether the buffer given by the **ptr** and **size** parameters is + * strictly within the enclave's memory. If so, return true. If any + * portion of the buffer lies outside the enclave's memory, return false. + * + * @param[in] ptr The pointer to the buffer. + * @param[in] size The size of the buffer. + * + * @retval true The buffer is strictly within the enclave. + * @retval false At least some part of the buffer is outside the enclave, or + * the arguments are invalid. For example, if **ptr** is null or **size** + * causes arithmetic operations to wrap. + * + */ + bool sgx_mm_is_within_enclave(const void* ptr, size_t size); + +#define SGX_EMA_SYSTEM \ + SGX_EMA_ALLOC_FLAGS(0x80UL) /* EMA reserved by system \ + */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/host/CMakeLists.txt b/host/CMakeLists.txt index 6a5731132..767f99022 100644 --- a/host/CMakeLists.txt +++ b/host/CMakeLists.txt @@ -490,10 +490,10 @@ macro (add_library_oehost_wrapper) ${PARA_LIB_NAME} sgx/linux/enter.c PROPERTIES COMPILE_FLAGS "-O2 -fno-omit-frame-pointer") else () + message("should not be here") set_source_files_properties( ${PARA_LIB_NAME} sgx/linux/enter.c - PROPERTIES COMPILE_FLAGS - "-O2 -fno-omit-frame-pointer -Wno-frame-address") + PROPERTIES COMPILE_FLAGS "-O2 -fno-omit-frame-pointer") endif () endif () diff --git a/host/sgx/create.c b/host/sgx/create.c index ba8c398be..1c9a4a330 100644 --- a/host/sgx/create.c +++ b/host/sgx/create.c @@ -65,6 +65,8 @@ static char* get_fullpath(const char* path) #include "vdso.h" #include "xstate.h" +#define OE_MMAN_PAGE_NUMBER 0x4000 + static volatile oe_load_extra_enclave_data_hook_t _oe_load_extra_enclave_data_hook; @@ -395,6 +397,7 @@ static oe_result_t _calculate_enclave_size( size_t stack_size; size_t tls_size; size_t control_size; + size_t mman_size; const oe_enclave_size_settings_t* size_settings; size_settings = &props->header.size_settings; @@ -440,10 +443,13 @@ static oe_result_t _calculate_enclave_size( control_size = (OE_SGX_TCS_CONTROL_PAGES + OE_SGX_TCS_THREAD_DATA_PAGES) * OE_PAGE_SIZE; + mman_size = OE_MMAN_PAGE_NUMBER * OE_PAGE_SIZE; + /* Compute end of the enclave */ *loaded_enclave_pages_size = image_size + heap_size + - (size_settings->num_tcs * (stack_size + tls_size + control_size)); + (size_settings->num_tcs * (stack_size + tls_size + control_size)) + + mman_size; if (extra_data_size) *loaded_enclave_pages_size += *extra_data_size; @@ -480,24 +486,32 @@ static oe_result_t _add_data_pages( size_t i; /* Add the heap pages */ + // printf("[add heap pages]\n"); + // uint64_t heap = *vaddr; OE_CHECK(_add_heap_pages( context, enclave, vaddr, size_settings->num_heap_pages)); for (i = 0; i < size_settings->num_tcs; i++) { + uint64_t start; /* Add guard page */ *vaddr += OE_PAGE_SIZE; - + start = *vaddr; /* Add the stack for this thread control structure */ OE_CHECK(_add_stack_pages( context, enclave, vaddr, size_settings->num_stack_pages)); /* Add guard page */ *vaddr += OE_PAGE_SIZE; + start = *vaddr; /* Add the "control" pages */ OE_CHECK( 
            _add_control_pages(context, entry, tls_page_count, vaddr, enclave));
+
+        // printf("[tcs_page_control] %p - %p\n", (void *)
+        // (enclave->start_address + start), (void *) (enclave->start_address +
+        // *vaddr));
     }
 
     result = OE_OK;
diff --git a/host/sgx/exception.c b/host/sgx/exception.c
index ff11d460e..3c0e8ddfc 100644
--- a/host/sgx/exception.c
+++ b/host/sgx/exception.c
@@ -57,6 +57,7 @@ uint64_t oe_host_handle_exception(oe_host_exception_context_t* context)
     oe_thread_binding_t* thread_data = oe_get_thread_binding();
     if (thread_data->flags & _OE_THREAD_HANDLING_EXCEPTION)
     {
+        printf("Not handled 1\n");
         // Return directly.
         return OE_SGX_EXCEPTION_ENCLAVE_NOT_HANDLED;
     }
@@ -109,6 +110,13 @@ uint64_t oe_host_handle_exception(oe_host_exception_context_t* context)
 
         // We continue the exception handler search as if it were a
         // non-enclave exception.
+        printf(
+            "Not handled 2: exit_address: %p, exit_code: %lu arg_out: %lu, "
+            "result: %d\n",
+            (void*)exit_address,
+            exit_code,
+            arg_out,
+            result);
         return OE_SGX_EXCEPTION_ENCLAVE_NOT_HANDLED;
     }
 }
diff --git a/host/sgx/linux/vdso.c b/host/sgx/linux/vdso.c
index 5cb196916..4c8ee8839 100644
--- a/host/sgx/linux/vdso.c
+++ b/host/sgx/linux/vdso.c
@@ -162,11 +162,19 @@ static int oe_vdso_user_handler(
     {
         /* Should always be this case */
-        if (action == OE_SGX_EXCEPTION_ENCLAVE_NOT_HANDLED)
-            OE_TRACE_ERROR(
-                "Unhandled in-enclave exception. To get more "
-                "information, configure the enclave with "
-                "CapturePFGPExceptions=1 and enable the in-enclave "
-                "logging.");
+        if (action == OE_SGX_EXCEPTION_ENCLAVE_NOT_HANDLED)
+        {
+            printf(
+                "run->exception_vector: %hu, run->exception_addr: %p, "
+                "run->user_handler: %p, run->exception_error_code: "
+                "%x\n",
+                run->exception_vector,
+                (void*)run->exception_addr,
+                (void*)run->user_handler,
+                run->exception_error_code);
+            OE_TRACE_ERROR(
+                "vdso: unhandled in-enclave exception. 
To get more "
+                "information, configure the enclave with "
+                "CapturePFGPExceptions=1 and enable the in-enclave "
+                "logging.");
+        }
             result = -1;
         }
diff --git a/host/sgx/ocalls/ocalls.c b/host/sgx/ocalls/ocalls.c
index 190fc5c29..ac86455a5 100644
--- a/host/sgx/ocalls/ocalls.c
+++ b/host/sgx/ocalls/ocalls.c
@@ -31,6 +31,7 @@
 #include "../../tdx/quote.h"
 #include "../enclave.h"
 #include "../quote.h"
+#include "../sgx_enclave_common_wrapper.h"
 #include "../sgxquote.h"
 #include "../sgxquoteprovider.h"
 #include "ocalls.h"
@@ -479,3 +480,17 @@ oe_result_t oe_sgx_get_additional_host_entropy_ocall(uint8_t* data, size_t size)
 done:
     return result;
 }
+
+int oe_sgx_mm_alloc_ocall(uint64_t addr, size_t length, int flags)
+{
+    return oe_sgx_enclave_alloc(addr, length, flags);
+}
+
+int oe_sgx_mm_modify_ocall(
+    uint64_t addr,
+    size_t length,
+    int flags_from,
+    int flags_to)
+{
+    return oe_sgx_enclave_modify(addr, length, flags_from, flags_to);
+}
diff --git a/host/sgx/sgx_enclave_common_wrapper.c b/host/sgx/sgx_enclave_common_wrapper.c
index 19a586601..ed2e80d2e 100644
--- a/host/sgx/sgx_enclave_common_wrapper.c
+++ b/host/sgx/sgx_enclave_common_wrapper.c
@@ -54,6 +54,14 @@ static bool (*_enclave_set_information)(
     size_t input_info_size,
     uint32_t* enclave_error);
 
+int (*_enclave_alloc)(uint64_t addr, size_t length, int flags);
+
+int (*_enclave_modify)(
+    uint64_t addr,
+    size_t length,
+    int flags_from,
+    int flags_to);
+
 /****** Dynamic loading of libsgx_enclave_common.so/.dll **************/
 
 #ifdef _WIN32
@@ -143,6 +151,8 @@ static void _load_sgx_enclave_common_impl(void)
     OE_CHECK(_lookup_function("enclave_delete", (void**)&_enclave_delete));
     OE_CHECK(_lookup_function(
         "enclave_set_information", (void**)&_enclave_set_information));
+    OE_CHECK(_lookup_function("enclave_alloc", (void**)&_enclave_alloc));
+    OE_CHECK(_lookup_function("enclave_modify", (void**)&_enclave_modify));
 
     atexit(_unload_sgx_enclave_common);
     result = OE_OK;
@@ -277,3 +287,17 @@ bool oe_sgx_enclave_set_information(
     return _enclave_set_information(
         base_address, info_type, input_info, input_info_size, enclave_error);
 }
+
+int oe_sgx_enclave_alloc(uint64_t addr, size_t length, int flags)
+{
+    return _enclave_alloc(addr, length, flags);
+}
+
+int oe_sgx_enclave_modify(
+    uint64_t addr,
+    size_t length,
+    int flags_from,
+    int flags_to)
+{
+    return _enclave_modify(addr, length, flags_from, flags_to);
+}
\ No newline at end of file
diff --git a/host/sgx/sgx_enclave_common_wrapper.h b/host/sgx/sgx_enclave_common_wrapper.h
index 238cb3703..fe23d472e 100644
--- a/host/sgx/sgx_enclave_common_wrapper.h
+++ b/host/sgx/sgx_enclave_common_wrapper.h
@@ -76,6 +76,14 @@ bool oe_sgx_enclave_set_information(
     size_t input_info_size,
     uint32_t* enclave_error);
 
+int oe_sgx_enclave_alloc(uint64_t addr, size_t length, int flags);
+
+int oe_sgx_enclave_modify(
+    uint64_t addr,
+    size_t length,
+    int flags_from,
+    int flags_to);
+
 oe_result_t oe_sgx_load_sgx_enclave_common(void);
 
 #endif // _OE_HOST_SGX_ENCLAVE_COMMON_WRAPPER_H
diff --git a/include/openenclave/edl/edmm.edl b/include/openenclave/edl/edmm.edl
new file mode 100644
index 000000000..964759816
--- /dev/null
+++ b/include/openenclave/edl/edmm.edl
@@ -0,0 +1,22 @@
+// Copyright (c) Open Enclave SDK contributors.
+// Licensed under the MIT License.
+
+/*
+**==============================================================================
+**
+** edmm.edl:
+**
+** Internal OCALLs required by the Enclave Memory Manager (emm) library for
+** SGX2 Enclave Dynamic Memory Management (EDMM) support.
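+**
+** These OCALLs correspond to the EMM runtime abstractions declared in
+** enclave/core/sgx/include/sgx_mm_rt_abstraction.h; the host-side
+** implementations live in host/sgx/ocalls/ocalls.c.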
+** +**============================================================================== +*/ + +enclave +{ + untrusted { + int oe_sgx_mm_alloc_ocall(uint64_t addr, size_t length, int page_type, int flags); + int oe_sgx_mm_modify_ocall( + uint64_t addr, size_t length, int flags_from, int flags_to); + }; +} \ No newline at end of file diff --git a/include/openenclave/edl/sgx/edmm.edl b/include/openenclave/edl/sgx/edmm.edl new file mode 100644 index 000000000..c49022d05 --- /dev/null +++ b/include/openenclave/edl/sgx/edmm.edl @@ -0,0 +1,22 @@ +// Copyright (c) Open Enclave SDK contributors. +// Licensed under the MIT License. + +/* +**============================================================================== +** +** sgx/edmm.edl: +** +** Internal OCALLs required by the Enclave Memory Manager (emm) library for +** SGX2 Enclave Dynamic Memory Management (EDMM) support. +** +**============================================================================== +*/ + +enclave +{ + untrusted { + int oe_sgx_mm_alloc_ocall(uint64_t addr, size_t length, int flags); + int oe_sgx_mm_modify_ocall( + uint64_t addr, size_t length, int flags_from, int flags_to); + }; +} \ No newline at end of file diff --git a/include/openenclave/edl/sgx/platform.edl b/include/openenclave/edl/sgx/platform.edl index f3ab06b63..36247a537 100644 --- a/include/openenclave/edl/sgx/platform.edl +++ b/include/openenclave/edl/sgx/platform.edl @@ -23,4 +23,5 @@ enclave #ifdef OE_SGX_ENTROPY from "openenclave/edl/sgx/entropy.edl" import *; #endif + from "openenclave/edl/sgx/edmm.edl" import *; }; diff --git a/libc/mman.c b/libc/mman.c index 6d97e025d..1b9a7695e 100644 --- a/libc/mman.c +++ b/libc/mman.c @@ -11,6 +11,7 @@ #include "mman.h" #include "openenclave/bits/defs.h" #include "openenclave/bits/result.h" +#include "stdio.h" #include "syscall.h" static oe_mapping_t* _mappings; @@ -180,19 +181,30 @@ void* oe_mmap( size_t vector_length = 0; int ret = 0; + printf("[oe_mmap] in oe_enclave\n"); OE_CHECK(_validate_mmap_parameters(addr, length, prot, flags, fd, offset)); + printf("[oe_mmap] after _validate_mmap_parameters\n"); _register_atexit_callback(); // length is rounded up to nearest page size. + printf("[oe_mmap] before check \n"); OE_CHECK(oe_safe_round_up_u64(length, OE_PAGE_SIZE, &length)); OE_CHECK(oe_safe_round_up_u64(length / 8, 8, &vector_length)); + printf("[oe_mmap] after check \n"); // Allocate objects. 
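     // 'm' is the record describing this mapping and 'vector' is a
     // zero-initialized byte array of vector_length bytes (length / 8,
     // rounded up to a multiple of 8 above); if either allocation fails,
     // the function raises OE_OUT_OF_MEMORY below.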
vector = (uint8_t*)calloc(vector_length, 1); m = (oe_mapping_t*)malloc(sizeof(*m)); + printf( + "[oe_mmap] after malloc m: %p, %x vector: %p, %x \n", + m, + sizeof(*m), + vector, + vector_length); if (!vector || !m) { + printf("[oe_mmap] out of memory \n"); oe_errno = OE_ENOMEM; OE_RAISE(OE_OUT_OF_MEMORY); } diff --git a/libc/stdlib.c b/libc/stdlib.c index 1c30d97db..5ae974c0c 100644 --- a/libc/stdlib.c +++ b/libc/stdlib.c @@ -21,3 +21,366 @@ unsigned long long strtoull_l( OE_UNUSED(loc); return strtoull(nptr, endptr, base); } + +int fchmod(int fd, int perms) +{ + // HANDLE h = reinterpret_cast(_get_osfhandle(fd)); + return 0; +} + +int execv(const char* path, char* const argv[]) +{ + return 0; +} + +int fork() +{ + return 0; +} + +int wait() +{ + return 0; +} + +int sigprocmask() +{ + return 0; +} + +int sigfillset() +{ + return 0; +} + +int fstatfs() +{ + return 0; +} + +int chmod() +{ + return 0; +} + +int isatty() +{ + return 0; +} + +int symlink() +{ + return 0; +} + +int futimens() +{ + return 0; +} + +int posix_spawn_file_actions_init() +{ + return 0; +} + +int posix_spawn_file_actions_destroy() +{ + return 0; +} + +int sigemptyset() +{ + return 0; +} + +int pthread_attr_init() +{ + return 0; +} + +int pthread_getattr_np() +{ + return 0; +} + +int pthread_attr_setstacksize() +{ + return 0; +} + +int pthread_condattr_setclock() +{ + return 0; +} + +int pthread_condattr_init() +{ + return 0; +} + +int pthread_setname_np() +{ + return 0; +} +int pthread_attr_getstack() +{ + return 0; +} + +int pthread_attr_destroy() +{ + return 0; +} + +int pthread_attr_getguardsize() +{ + return 0; +} + +int pthread_attr_setdetachstate() +{ + return 0; +} + +int sigaddset() +{ + return 0; +} + +int getpwuid_r() +{ + return 0; +} + +int fchown() +{ + return 0; +} + +int getrusage() +{ + return 0; +} + +int sigaltstack() +{ + return 0; +} + +int umask() +{ + return 0; +} + +char* realpath(const char* __restrict x, char* __restrict y) +{ + return "path"; +} + +int __sched_cpucount() +{ + return 0; +} + +int utimes() +{ + return 0; +} +int statvfs64() +{ + return 0; +} +int pathconf() +{ + return 0; +} + +int sched_getaffinity() +{ + return 0; +} + +int alarm() +{ + return 0; +} + +int wait4() +{ + return 0; +} + +int posix_spawn_file_actions_adddup2() +{ + return 0; +} + +int readlink() +{ + return 0; +} + +int execve() +{ + return 0; +} + +int setrlimit() +{ + return 0; +} + +int waitpid() +{ + return 0; +} + +int posix_madvise() +{ + return 0; +} + +int getpwnam_r() +{ + return 0; +} + +int getgrnam_r() +{ + return 0; +} + +int posix_spawn() +{ + return 0; +} + +int statfs() +{ + return 0; +} + +int dlopen() +{ + return 0; +} + +int posix_spawn_file_actions_addopen() +{ + return 0; +} + +int pthread_condattr_destroy() +{ + return 0; +} + +int dlclose() +{ + return 0; +} + +int mprotect() +{ + return 0; +} + +int telldir() +{ + return 0; +} + +int seekdir() +{ + return 0; +} + +int clock_getres() +{ + return 0; +} + +int dlsym() +{ + return 0; +} + +int getClockResAsI128() +{ + return 0; +} + +int raise() +{ + return 0; +} + +int lstat() +{ + return 0; +} + +int fdopendir() +{ + return 0; +} + +int pthread_sigmask() +{ + return 0; +} + +int siglongjmp() +{ + return 0; +} + +int dlerror() +{ + return 0; +} + +int sigsetjmp() +{ + return 0; +} + +int pthread_mutex_consistent() +{ + return 0; +} + +int pthread_mutexattr_setpshared() +{ + return 0; +} + +int pthread_mutexattr_setrobust() +{ + return 0; +} + +int sigwait() +{ + return 0; +} + +int madvise() +{ + return 0; +} + +int msync() +{ + return 0; +} 
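+
+/* Like the stubs above, the following exist only to satisfy link-time
+ * references; the corresponding OS facilities are unavailable inside an
+ * enclave, so they unconditionally report success. */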
+ +int unlinkat() +{ + return 0; +} + +int utimensat() +{ + return 0; +} \ No newline at end of file diff --git a/syscall/syscall.c b/syscall/syscall.c index 59d896bfa..46174c947 100644 --- a/syscall/syscall.c +++ b/syscall/syscall.c @@ -1160,6 +1160,11 @@ OE_WEAK OE_DEFINE_SYSCALL2(SYS_umount2) return oe_umount(target); } +OE_WEAK OE_DEFINE_SYSCALL2(SYS_fchmod) +{ + return arg1 + arg2; +} + static long _syscall( long number, long arg1,