diff --git a/SConstruct b/SConstruct index d7c9052..80acf71 100644 --- a/SConstruct +++ b/SConstruct @@ -12,6 +12,9 @@ env_vars = { 'PATH': os.environ['PATH'], } +if 'QCOM_LLVM' in os.environ: + env_vars['QCOM_LLVM'] = os.environ['QCOM_LLVM'] + if 'LLVM' in os.environ: env_vars['LLVM'] = os.environ['LLVM'] diff --git a/config/arch/gic-500.conf b/config/arch/gic-500.conf index 338c577..84b087b 100644 --- a/config/arch/gic-500.conf +++ b/config/arch/gic-500.conf @@ -13,3 +13,4 @@ configs GICV3_HAS_GICD_ICLAR=0 configs GICV3_HAS_SECURITY_DISABLED=0 configs PLATFORM_GICD_BASE=PLATFORM_GIC_BASE configs PLATFORM_GICR_SIZE=(0x20000*PLATFORM_MAX_CORES) +configs PLATFORM_IDLE_WAKEUP_TIMEOUT_NS=5000 diff --git a/config/arch/gic-600.conf b/config/arch/gic-600.conf index 7a9a45d..e48dbba 100644 --- a/config/arch/gic-600.conf +++ b/config/arch/gic-600.conf @@ -18,3 +18,4 @@ configs PLATFORM_GITS_SIZE=(0x20000U*PLATFORM_GITS_COUNT) configs PLATFORM_GICR_BASE=(PLATFORM_GITS_BASE+PLATFORM_GITS_SIZE) configs PLATFORM_GICR_SIZE=(0x20000U*PLATFORM_GICR_COUNT) configs PLATFORM_GIC_SIZE=(0x50000U+PLATFORM_GITS_SIZE+PLATFORM_GICR_SIZE) +configs PLATFORM_IDLE_WAKEUP_TIMEOUT_NS=5000 diff --git a/config/arch/gic-700-vlpi.conf b/config/arch/gic-700-vlpi.conf index 97609d0..d214fde 100644 --- a/config/arch/gic-700-vlpi.conf +++ b/config/arch/gic-700-vlpi.conf @@ -16,3 +16,4 @@ configs PLATFORM_GITS_SIZE=(0x40000U*PLATFORM_GITS_COUNT) configs PLATFORM_GICR_BASE=(PLATFORM_GITS_BASE+PLATFORM_GITS_SIZE) configs PLATFORM_GICR_SIZE=(0x40000U*PLATFORM_GICR_COUNT) configs PLATFORM_GIC_SIZE=(0x50000U+PLATFORM_GITS_SIZE+PLATFORM_GICR_SIZE) +configs PLATFORM_IDLE_WAKEUP_TIMEOUT_NS=5000 diff --git a/config/arch/gic-qemu.conf b/config/arch/gic-qemu.conf index d0105bf..a3cad1e 100644 --- a/config/arch/gic-qemu.conf +++ b/config/arch/gic-qemu.conf @@ -17,3 +17,4 @@ configs PLATFORM_GITS_SIZE=(0x20000U*PLATFORM_GITS_COUNT) configs PLATFORM_GICR_BASE=(PLATFORM_GITS_BASE+PLATFORM_GITS_SIZE) configs PLATFORM_GICR_SIZE=(0x20000U*PLATFORM_MAX_CORES) configs PLATFORM_GIC_SIZE=(0x50000U+PLATFORM_GITS_SIZE+PLATFORM_GICR_SIZE) +configs PLATFORM_IDLE_WAKEUP_TIMEOUT_NS=0 diff --git a/config/featureset/gunyah-rm-qemu.conf b/config/featureset/gunyah-rm-qemu.conf index e6060a6..08cf131 100644 --- a/config/featureset/gunyah-rm-qemu.conf +++ b/config/featureset/gunyah-rm-qemu.conf @@ -58,6 +58,9 @@ module vm/slat module vm/vcpu module vm/vcpu_power module vm/vcpu_run +module vm/virtio_mmio +module vm/virtio_input +module vm/vrtc_pl031 arch_module armv8 vm/smccc arch_module armv8 vm/psci_pc arch_module armv8 vm/vdebug diff --git a/config/featureset/unittests-qemu.conf b/config/featureset/unittests-qemu.conf index a167f8d..7675d9f 100644 --- a/config/featureset/unittests-qemu.conf +++ b/config/featureset/unittests-qemu.conf @@ -30,6 +30,7 @@ module core/irq module core/virq_null module core/timer module core/power +module core/globals module debug/object_lists module debug/symbol_version module mem/allocator_list diff --git a/config/platform/qemu.conf b/config/platform/qemu.conf index bc612f2..852efb2 100644 --- a/config/platform/qemu.conf +++ b/config/platform/qemu.conf @@ -60,3 +60,5 @@ configs PSCI_AFFINITY_LEVELS_NOT_SUPPORTED=1 configs PLATFORM_HAS_NO_DBGCLAIM_EL1=1 # QEMU supports version 0.2, which does not have set_suspend_mode call configs PSCI_SET_SUSPEND_MODE_NOT_SUPPORTED=1 + +configs QCBOR_ENV_CONFIG_SIZE=0x4000 diff --git a/config/quality/debug.conf b/config/quality/debug.conf index 8ffdf8f..37d2c9d 100644 --- 
a/config/quality/debug.conf +++ b/config/quality/debug.conf @@ -8,7 +8,7 @@ configs RESET_ON_ABORT=0 configs QUALITY=debug flags -O1 -g -mstrict-align -#include include/debug_no_kaslr +include include/debug_no_kaslr include include/debug_no_cspace_rand include include/debug_no_rootvm_aslr diff --git a/docs/api/Makefile b/docs/api/Makefile index 15efcd1..cba6be7 100644 --- a/docs/api/Makefile +++ b/docs/api/Makefile @@ -1,9 +1,15 @@ %.pdf: %.md Makefile pandoc -s --toc --pdf-engine=xelatex -N --top-level-division=part \ --metadata=title:'Gunyah Hypercall API' \ + --metadata=date:"Generated: `date \"+%a %d %B %Y\"`" \ --variable=class:book \ --variable=mainfont:LiberationSans \ --variable=monofont:LiberationMono \ + --variable=papersize:a4 \ + --variable=margin-left:2.5cm \ + --variable=margin-right:2.5cm \ + --variable=margin-top:2.5cm \ + --variable=margin-bottom:2.5cm \ $< -o $@ -all: gunyah_api.pdf +all: gunyah_api.pdf gunyah_api_qcom.pdf diff --git a/docs/api/gunyah_api.md b/docs/api/gunyah_api.md index 321c430..2bdf7b1 100644 --- a/docs/api/gunyah_api.md +++ b/docs/api/gunyah_api.md @@ -2649,27 +2649,45 @@ Also see: [Capability Errors](#capability-errors) ## Virtual IO MMIO Management -### Configure a Virtual IO MMIO +### Configure a Virtual IO Interface Object -Configure a Virtual IO MMIO whose state is OBJECT_STATE_INIT. The Virtual IO MMIO device needs to get a reference to memextent that covers its range. +Configure a Virtual IO Interface Object whose state is OBJECT_STATE_INIT. + +Every Virtual IO device must be attached to a Memory Extent Object that contains its common registers and assumed to be mapped with write permissions into the backend VM's address space. The caller must also bind the backend IRQs to the backend VM's Virtual Interrupt Controller. + +The number of queues presented by the device must be set at configuration time, so the hypervisor can allocate memory for tracking the queue states. + +The Memory Extent must be 4KiB in size. Its layout matches the register layout specified for MMIO devices in section 4.2.2 of the Virtual I/O Device (VIRTIO) 1.1 specification, followed by optional device-specific configuration starting at offset 0x100. The caller must map it with read-only permissions into the frontend VM's address space, and bind the device's frontend IRQs to the frontend VM's Virtual Interrupt Controller. + +If the device type valid flag is set, then the specified device type must be one that is known to the hypervisor, and any appropriate type-specific hypercalls must be made before the device is permitted to exit its reset state. Otherwise, the device type argument is ignored. | **Hypercall**: | `virtio_mmio_configure` | |-------------------------|--------------------------------------| | Call number: | `hvc 0x6049` | | Inputs: | X0: VirtioMMIO CapID | | | X1: Memextent CapID | -| | X3: VQsNum | -| | X3: Reserved — Must be Zero | +| | X2: VQsNum Integer | +| | X3: VirtioOptionFlags | +| | X4: DeviceType Integer | +| | X5: DeviceConfigSize Integer | | Outputs: | X0: Error Result | +**Types:** + +*VirtioOptionFlags:* + +| Bit Numbers | Mask | Description | +|-------------|-----------------------|-----------------------------------------| +| 6 | `0x40` | Device type argument is valid | +| 63:7,5:0 | `0xFFFFFFFF.FFFFFFBF` | Reserved — Must be Zero | **Errors:** -OK – the operation was successful, and the result is valid. +OK – the operation was successful. ERROR_OBJECT_STATE – if the Virtual IO MMIO object is not in OBJECT_STATE_INIT state. 
-ERROR_ARGUMENT_INVALID – a value passed in an argument was invalid. This could be due to a VQsNum out of range or if the specified memextent is not contiguous. +ERROR_ARGUMENT_INVALID – a value passed in an argument was invalid. This could be due to VQsNum being larger than the maximum, or the specified Memory Extent object being of an unsupported type. Also see: [Capability Errors](#capability-errors) @@ -2945,6 +2963,84 @@ ERROR_ARGUMENT_INVALID – A value passed in an argument was invalid. Also see: [Capability Errors](#capability-errors) +## Virtio Input Config Hypercalls + +### Virtio Input Configure + +Allocate storage for the large data items and set the values of the small data items (`dev_ids` and `propbits`, which each fit in a single machine register). For the two types that support `subsel`, this call will specify the number of distinct valid `subsel` values (which may be sparse). + +The `NumEVTypes` value must be between 0 and 32 inclusive. The `NumAbsAxes` value must be between 0 and 64 inclusive. If these limits are exceeded, the call will return `ERROR_ARGUMENT_SIZE`. + + + +| **Hypercall**: | `virtio_input_configure` | +|-------------------------|--------------------------------------| +| Call number: | `hvc 0x605e` | +| Inputs: | X0: Virtio CapID | +| | X1: DevIDs | +| | X2: PropBits | +| | X3: NumEVTypes | +| | X4: NumAbsAxes | +| | X5: Reserved — Must be Zero | +| Outputs: | X0: Error Result | + +**Types**: + +_DevIDs_: + +| Bits | Mask | Description | +|-|---|-----| +| 15:0 | `0xFFFF` | BusType | +| 31:16 | `0xFFFF0000` | Vendor | +| 47:32 | `0xFFFF.00000000` | Product | +| 63:48 | `0xFFFF0000.00000000` | Version | + +**Errors:** + +OK – The operation was successful, and the result is valid. + +ERROR_ARGUMENT_INVALID – A value passed in an argument was invalid. + +Also see: [Capability Errors](#capability-errors) + +### Virtio Input Set Data + +Copy data into the hypervisor's storage for one of the large data items, given its `sel` and `subsel` values, size, and the virtual address of a buffer in the caller's stage 1 address space. The data must not already have been configured for the given `sel` and `subsel` values. + +| **Hypercall**: | `virtio_input_set_data` | +|-------------------------|--------------------------------------| +| Call number: | `hvc 0x605f` | +| Inputs: | X0: Virtio CapID | +| | X1: Sel | +| | X2: SubSel | +| | X3: Size | +| | X4: Data VMAddr | +| | X5: Reserved — Must be Zero | +| Outputs: | X0: Error Result | + +The specified `VMAddr` must point to a buffer of the specified size that is mapped in the caller's stage 1 and stage 2 address spaces. + +The `Sel`, `SubSel` and `Size` arguments must fall within one of the following ranges: + +| Sel | SubSel | Size | +|------|--------|-------| +| 1 | 0 | 0–128 | +| 2 | 0 | 0–128 | +| 0x11 | 0–31 | 0–128 | +| 0x12 | 0–63 | 20 | + +All other combinations are invalid. The call will return `ERROR_ARGUMENT_INVALID` if `Sel` or `SubSel` is invalid or out of range, and `ERROR_ARGUMENT_SIZE` if `Size` is out of range for the specified combination of `Sel` and `SubSel`. + +Also, the call must not be repeated with `Sel` set to 0x11 or 0x12 and `Size` set to a nonzero value for more distinct values of `SubSel` than were specified with the `NumEVTypes` and `NumAbsAxes` arguments, respectively, of the most recent `virtio_input_configure` call. The call will return `ERROR_NORESOURCES` if these limits are exceeded. + +**Errors:** + +OK – The operation was successful, and the result is valid. 
+ +ERROR_ARGUMENT_INVALID – A value passed in an argument was invalid. + +Also see: [Capability Errors](#capability-errors) + ## PRNG Management ### PRNG Get Entropy diff --git a/hyp/arch/aarch64/registers.reg b/hyp/arch/aarch64/registers.reg index 87fe130..b499e42 100644 --- a/hyp/arch/aarch64/registers.reg +++ b/hyp/arch/aarch64/registers.reg @@ -613,7 +613,7 @@ TRCCLAIMCLR Orw TRCCLAIMSET Orw #endif -#if defined(MODULE_PLATFORM_TBRE) +#if defined(MODULE_PLATFORM_TRBE) TRBLIMITR_EL1 Orw TRBPTR_EL1 Orw TRBBASER_EL1 Orw diff --git a/hyp/arch/armv8/include/asm/cache.h b/hyp/arch/armv8/include/asm/cache.h index 0744532..661ba44 100644 --- a/hyp/arch/armv8/include/asm/cache.h +++ b/hyp/arch/armv8/include/asm/cache.h @@ -56,8 +56,36 @@ CACHE_OP_OBJECT(*x_, op); \ } while (0) +#define CACHE_DEFINE_ARRAY_OP(type, elements, name, op) \ + static inline void cache_##name(type(*x)[elements]) \ + { \ + CACHE_OP_OBJECT(*x, op); \ + } + +#define CACHE_DEFINE_CLEAN_ARRAY(type, elements, name) \ + CACHE_DEFINE_ARRAY_OP(type, elements, clean_##name, CVAC) +#define CACHE_DEFINE_INVALIDATE_ARRAY(type, elements, name) \ + CACHE_DEFINE_ARRAY_OP(type, elements, invalidate_##name, IVAC) +#define CACHE_DEFINE_CLEAN_INVALIDATE_ARRAY(type, elements, name) \ + CACHE_DEFINE_ARRAY_OP(type, elements, clean_invalidate_##name, CIVAC) + +#define CACHE_DEFINE_OP(type, name, op) \ + static inline void cache_##name(type *x) \ + { \ + CACHE_OP_OBJECT(*x, op); \ + } + +#define CACHE_DEFINE_CLEAN(type, name) CACHE_DEFINE_OP(type, clean_##name, CVAC) +#define CACHE_DEFINE_INVALIDATE(type, name) \ + CACHE_DEFINE_OP(type, invalidate_##name, IVAC) +#define CACHE_DEFINE_CLEAN_INVALIDATE(type, name) \ + CACHE_DEFINE_OP(type, clean_invalidate_##name, CIVAC) + #define CACHE_CLEAN_FIXED_RANGE(x, size) CACHE_OP_FIXED_RANGE(x, size, CVAC) #define CACHE_INVALIDATE_FIXED_RANGE(x, size) \ CACHE_OP_FIXED_RANGE(x, size, IVAC) #define CACHE_CLEAN_INVALIDATE_FIXED_RANGE(x, size) \ CACHE_OP_FIXED_RANGE(x, size, CIVAC) + +void +cache_clean_range(const void *data, size_t size); diff --git a/hyp/core/api/aarch64/templates/c_wrapper.c.tmpl b/hyp/core/api/aarch64/templates/c_wrapper.c.tmpl index 1b420f4..1d12510 100644 --- a/hyp/core/api/aarch64/templates/c_wrapper.c.tmpl +++ b/hyp/core/api/aarch64/templates/c_wrapper.c.tmpl @@ -55,7 +55,7 @@ #for hypcall_num in sorted($hypcall_dict.keys()) #set $hypcall = $hypcall_dict[$hypcall_num] #if len($hypcall.outputs) > 1 - static_assert(sizeof(${return_type($hypcall)}) <= 8 * sizeof(register_t), + static_assert(sizeof(${return_type($hypcall)}) <= 8U * sizeof(register_t), "Return structure must fit in 8 machine registers"); #end if diff --git a/hyp/core/base/aarch64/src/cache.c b/hyp/core/base/aarch64/src/cache.c new file mode 100644 index 0000000..cbb6fc9 --- /dev/null +++ b/hyp/core/base/aarch64/src/cache.c @@ -0,0 +1,17 @@ +// © 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+// +// SPDX-License-Identifier: BSD-3-Clause + +#include +#include + +#include + +#include +#include + +void +cache_clean_range(const void *data, size_t size) +{ + CACHE_CLEAN_RANGE(data, size); +} diff --git a/hyp/core/base/aarch64/sysregs.tc b/hyp/core/base/aarch64/sysregs.tc index bcc86d0..8955cd7 100644 --- a/hyp/core/base/aarch64/sysregs.tc +++ b/hyp/core/base/aarch64/sysregs.tc @@ -1020,7 +1020,7 @@ extend MDCR_EL2 bitfield { }; #endif -#if defined(ARCH_ARM_FEAT_SPEv1p1) +#if defined(ARCH_ARM_FEAT_SPE) extend MDCR_EL2 bitfield { 13:12 E2PB uint8; 14 TPMS bool; @@ -1039,7 +1039,7 @@ extend MDCR_EL2 bitfield { }; #endif -#if defined(MODULE_PLATFORM_TBRE) +#if defined(MODULE_PLATFORM_TRBE) extend ID_AA64DFR0_EL1 bitfield { 47:44 TraceBuffer uint8; }; diff --git a/hyp/core/base/build.conf b/hyp/core/base/build.conf index 2189a03..ef2def4 100644 --- a/hyp/core/base/build.conf +++ b/hyp/core/base/build.conf @@ -5,7 +5,7 @@ interface base types types.tc source base.c -arch_source aarch64 core_id.c +arch_source aarch64 core_id.c cache.c arch_types armv8 enums.tc arch_types aarch64 types.tc enums.tc sysregs.tc arch_types cortex-a-v8_2 sysregs_cpu.tc diff --git a/hyp/core/boot/src/boot.c b/hyp/core/boot/src/boot.c index 737bab9..e025613 100644 --- a/hyp/core/boot/src/boot.c +++ b/hyp/core/boot/src/boot.c @@ -145,7 +145,7 @@ boot_do_memdb_walk(paddr_t base, size_t size, void *arg) { qcbor_enc_ctxt_t *qcbor_enc_ctxt = (qcbor_enc_ctxt_t *)arg; - if ((size == 0U) && (util_add_overflows(base, size - 1))) { + if ((size == 0U) && (util_add_overflows(base, size - 1U))) { return ERROR_ARGUMENT_SIZE; } diff --git a/hyp/core/boot/src/rel_init.c b/hyp/core/boot/src/rel_init.c index ce7fe9d..e33b9c0 100644 --- a/hyp/core/boot/src/rel_init.c +++ b/hyp/core/boot/src/rel_init.c @@ -2,6 +2,7 @@ // // SPDX-License-Identifier: BSD-3-Clause +#include #include #include #define USE_ELF64 @@ -17,22 +18,27 @@ __attribute__((no_stack_protector)) void boot_rel_fixup(Elf_Dyn *dyni, Elf_Addr addr_offset, Elf_Addr rel_offset) { Elf_Xword dyn[DT_CNT]; - Elf_Rel *rel = NULL; - Elf_Rela *rela = NULL; - Elf_Xword sz = 0; + Elf_Rel *rel = NULL; + Elf_Rel *rel_end = NULL; + Elf_Rela *rela = NULL; + Elf_Rela *rela_end = NULL; - for (int i = 0; i < DT_CNT; ++i) { + // We avoid zeroing the dyn array with an initialiser list as the + // compiler may optimise it to a memset, which may perform cache zeroing + // operations that are not supported when the MMU is disabled. 
+ for (index_t i = 0; i < DT_CNT; i++) { dyn[i] = 0; } + for (; dyni->d_tag != DT_NULL; dyni += 1) { if (dyni->d_tag < DT_CNT) { dyn[dyni->d_tag] = (Elf_Xword)dyni->d_un.d_ptr; } } - rel = (Elf_Rel *)(dyn[DT_REL] + addr_offset); - sz = dyn[DT_RELSZ]; - for (; sz > 0u; sz -= sizeof(*rel), ++rel) { + rel = (Elf_Rel *)(dyn[DT_REL] + addr_offset); + rel_end = (Elf_Rel *)(dyn[DT_REL] + addr_offset + dyn[DT_RELSZ]); + for (; rel < rel_end; rel++) { if (!ARCH_CAN_PATCH(rel->r_info)) { continue; } @@ -40,9 +46,9 @@ boot_rel_fixup(Elf_Dyn *dyni, Elf_Addr addr_offset, Elf_Addr rel_offset) *r += rel_offset; } - rela = (Elf_Rela *)(dyn[DT_RELA] + addr_offset); - sz = dyn[DT_RELASZ]; - for (; sz > 0u; sz -= sizeof(*rela), ++rela) { + rela = (Elf_Rela *)(dyn[DT_RELA] + addr_offset); + rela_end = (Elf_Rela *)(dyn[DT_RELA] + addr_offset + dyn[DT_RELASZ]); + for (; rela < rela_end; rela++) { if (!ARCH_CAN_PATCH(rela->r_info)) { continue; } diff --git a/hyp/core/idle/aarch64/src/idle.c b/hyp/core/idle/aarch64/src/idle.c index 1aea8ed..f464be0 100644 --- a/hyp/core/idle/aarch64/src/idle.c +++ b/hyp/core/idle/aarch64/src/idle.c @@ -26,3 +26,27 @@ idle_arch_wait(void) return must_reschedule; } + +bool +idle_arch_wait_timeout(ticks_t timeout) +{ + bool must_reschedule = false; + +#if defined(ARCH_ARM_FEAT_WFxT) && ARCH_ARM_FEAT_WFxT + // Note: WFIT timeouts are based on CNTVCT_EL0, so this assumes that we + // always set CNTVOFF_EL2 to 0! + __asm__ volatile("dsb ish; wfit %1; isb" + : "+m"(asm_ordering) + : "r"(timeout)); +#else + (void)timeout; + asm_context_sync_ordered(&asm_ordering); +#endif + + ISR_EL1_t isr = register_ISR_EL1_read_volatile_ordered(&asm_ordering); + if (ISR_EL1_get_I(&isr)) { + must_reschedule = irq_interrupt_dispatch(); + } + + return must_reschedule; +} diff --git a/hyp/core/idle/include/idle_arch.h b/hyp/core/idle/include/idle_arch.h index 4255a96..0de4f00 100644 --- a/hyp/core/idle/include/idle_arch.h +++ b/hyp/core/idle/include/idle_arch.h @@ -21,3 +21,18 @@ // implementation that enables interrupts must always return true. bool idle_arch_wait(void) REQUIRE_PREEMPT_DISABLED; + +// Execute a wait-for-interrupt with a timeout. +// +// This is the same as idle_arch_wait(), except that a timeout can be specified +// (as an absolute ticks value) as the time at which the CPU will stop waiting. +// If possible, the implementation should execute a wait for interrupt +// instruction, and arrange to be woken at expiry of the timeout if no other +// event has occurred. +// +// The wakeup mechanism should not rely on interrupt delivery, and should not +// execute any non-trivial code; it is assumed that an architectural wakeup +// mechanism will be used (e.g. AArch64 FEAT_WFxT). If no such mechanism is +// available, the implementation should check for interrupts without waiting. +bool +idle_arch_wait_timeout(ticks_t timeout) REQUIRE_PREEMPT_DISABLED; diff --git a/hyp/core/idle/src/idle.c b/hyp/core/idle/src/idle.c index d5c4a6b..79538ab 100644 --- a/hyp/core/idle/src/idle.c +++ b/hyp/core/idle/src/idle.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include #include #include @@ -18,6 +20,7 @@ #include #include #include +#include #include #include @@ -261,3 +264,46 @@ idle_yield(void) return must_schedule; } + +idle_state_t +idle_wakeup(void) +{ + idle_state_t ret; + + // Check for an immediately pending wakeup interrupt that triggers a + // local reschedule. Note that misrouted or spurious IRQs that don't + // cause local reschedules won't be counted here; we'll keep waiting.
+ if (irq_interrupt_dispatch()) { + ret = IDLE_STATE_RESCHEDULE; + goto out; + } + + // Check for a pending reschedule not directly caused by an interrupt. + if (ipi_handle_relaxed()) { + ret = IDLE_STATE_RESCHEDULE; + goto out; + } + +#if (!defined(PLATFORM_IDLE_WAKEUP_NOWAIT) || !PLATFORM_IDLE_WAKEUP_NOWAIT) && \ + (PLATFORM_IDLE_WAKEUP_TIMEOUT_NS > 0) + // Wait a while for a reschedule event to be triggered. As above, + // misrouted or spurious IRQs don't count. + ticks_t start_ticks = timer_get_current_timer_ticks(); + ticks_t wait_ticks = timer_convert_ns_to_ticks( + (nanoseconds_t)PLATFORM_IDLE_WAKEUP_TIMEOUT_NS); + ticks_t end_ticks = start_ticks + wait_ticks; + do { + if (idle_arch_wait_timeout(end_ticks)) { + ret = IDLE_STATE_RESCHEDULE; + goto out; + } + } while (timer_get_current_timer_ticks() <= end_ticks); +#endif // !PLATFORM_IDLE_WAKEUP_NOWAIT + + // Still no reschedule. Give up and restart the idle handlers. + TRACE(INFO, INFO, "spurious wakeup, entering idle"); + ret = IDLE_STATE_WAKEUP; + +out: + return ret; +} diff --git a/hyp/core/irq/src/irq.c b/hyp/core/irq/src/irq.c index 9932eed..4fa4ec8 100644 --- a/hyp/core/irq/src/irq.c +++ b/hyp/core/irq/src/irq.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/hyp/core/partition_standard/src/init.c b/hyp/core/partition_standard/src/init.c index bd366fe..34047be 100644 --- a/hyp/core/partition_standard/src/init.c +++ b/hyp/core/partition_standard/src/init.c @@ -45,9 +45,9 @@ static const paddr_t phys_last = (paddr_t)&image_phys_last; #if defined(ARCH_ARM) && ARCH_IS_64BIT // Ensure hypervisor is 2MiB page size aligned to use AArch64 2M block mappings -static_assert((PLATFORM_RW_DATA_SIZE & 0x1fffffU) == 0U, +static_assert(((size_t)PLATFORM_RW_DATA_SIZE & 0x1fffffU) == 0U, "PLATFORM_RW_DATA_SIZE must be 2MB aligned"); -static_assert((PLATFORM_HEAP_PRIVATE_SIZE & 0xfffU) == 0U, +static_assert(((size_t)PLATFORM_HEAP_PRIVATE_SIZE & 0xfffU) == 0U, "PLATFORM_HEAP_PRIVATE_SIZE must be 4KB aligned"); #endif diff --git a/hyp/core/scheduler_fprr/src/scheduler_fprr.c b/hyp/core/scheduler_fprr/src/scheduler_fprr.c index caa26a0..ddc0ae2 100644 --- a/hyp/core/scheduler_fprr/src/scheduler_fprr.c +++ b/hyp/core/scheduler_fprr/src/scheduler_fprr.c @@ -48,7 +48,7 @@ static_assert((SCHEDULER_DEFAULT_PRIORITY >= SCHEDULER_MIN_PRIORITY) && static_assert((SCHEDULER_DEFAULT_TIMESLICE <= SCHEDULER_MAX_TIMESLICE) && (SCHEDULER_DEFAULT_TIMESLICE >= SCHEDULER_MIN_TIMESLICE), "Default timeslice is invalid."); -static_assert(SCHEDULER_BLOCK__MAX < BITMAP_WORD_BITS, +static_assert((index_t)SCHEDULER_BLOCK__MAX < BITMAP_WORD_BITS, "Scheduler block flags must fit in a register"); static ticks_t @@ -1059,6 +1059,12 @@ scheduler_is_runnable(const thread_t *thread) return can_be_scheduled(thread); } +bool +scheduler_is_running(const thread_t *thread) +{ + return sched_state_get_running(&thread->scheduler_state); +} + thread_t * scheduler_get_primary_vcpu(cpu_index_t cpu) { diff --git a/hyp/core/scheduler_trivial/src/scheduler_trivial.c b/hyp/core/scheduler_trivial/src/scheduler_trivial.c index 557275f..adba81b 100644 --- a/hyp/core/scheduler_trivial/src/scheduler_trivial.c +++ b/hyp/core/scheduler_trivial/src/scheduler_trivial.c @@ -273,6 +273,32 @@ scheduler_is_runnable(const thread_t *thread) SCHEDULER_NUM_BLOCK_BITS); } +bool +scheduler_is_running(const thread_t *thread) +{ + bool ret; + cpu_index_t cpu = thread->scheduler_affinity; + + if (!cpulocal_index_valid(cpu)) { + ret = false; + goto out; + } + + thread_t 
*active_thread = + atomic_load_consume(&CPULOCAL_BY_INDEX(active_thread, cpu)); + bool active_runnable = scheduler_is_runnable(active_thread); + + // It's either the active_thread or idle thread. + if (thread == active_thread) { + ret = active_runnable; + } else { + ret = !active_runnable; + assert(thread == idle_thread_for(cpu)); + } +out: + return ret; +} + thread_t * scheduler_get_primary_vcpu(cpu_index_t cpu) { diff --git a/hyp/interfaces/allocator/include/allocator.h b/hyp/interfaces/allocator/include/allocator.h index 85e9b0c..08e2b8b 100644 --- a/hyp/interfaces/allocator/include/allocator.h +++ b/hyp/interfaces/allocator/include/allocator.h @@ -14,7 +14,7 @@ allocator_init(allocator_t *allocator); void_ptr_result_t allocator_allocate_object(allocator_t *allocator, size_t size, - size_t alignment); + size_t min_alignment); error_t allocator_deallocate_object(allocator_t *allocator, void *object, size_t size); diff --git a/hyp/interfaces/api/api.tc b/hyp/interfaces/api/api.tc index 57fdc98..3bde626 100644 --- a/hyp/interfaces/api/api.tc +++ b/hyp/interfaces/api/api.tc @@ -32,9 +32,10 @@ define hyp_api_flags0 public bitfield<64>(const) { 8 watchdog bool = 0; 9 virtio_mmio bool = 0; 10 prng bool = 0; + 11 vcpu_run bool = 0; 16 reserved_16 bool = 0; 31:28 scheduler enumeration scheduler_variant = SCHEDULER_VARIANT; - 63:32,27:17,15:11 res0_0 uint64 = 0; + 63:32,27:17,15:12 res0_0 uint64 = 0; }; define hyp_api_flags1 public bitfield<64>(const) { diff --git a/hyp/interfaces/idle/include/idle.h b/hyp/interfaces/idle/include/idle.h index 54828a9..fc90a16 100644 --- a/hyp/interfaces/idle/include/idle.h +++ b/hyp/interfaces/idle/include/idle.h @@ -19,3 +19,7 @@ idle_is_current(void) REQUIRE_PREEMPT_DISABLED; bool idle_yield(void) REQUIRE_PREEMPT_DISABLED; + +// Handle a wakeup event received during idle. +idle_state_t +idle_wakeup(void) REQUIRE_PREEMPT_DISABLED; diff --git a/hyp/interfaces/memextent/include/memextent.h b/hyp/interfaces/memextent/include/memextent.h index ba2bee3..31519ed 100644 --- a/hyp/interfaces/memextent/include/memextent.h +++ b/hyp/interfaces/memextent/include/memextent.h @@ -150,7 +150,8 @@ memextent_check_memtype(memextent_memtype_t extent_type, // This function does not create a capability to the new memextent. memextent_ptr_result_t memextent_derive(memextent_t *parent, paddr_t offset, size_t size, - memextent_memtype_t memtype, pgtable_access_t access); + memextent_memtype_t memtype, pgtable_access_t access, + memextent_type_t type); // Temporarily retain all of the memextent's mappings. // diff --git a/hyp/interfaces/psci/psci.tc b/hyp/interfaces/psci/psci.tc index 18ea4e5..4f61a3e 100644 --- a/hyp/interfaces/psci/psci.tc +++ b/hyp/interfaces/psci/psci.tc @@ -90,4 +90,16 @@ extend vcpu_run_state enumeration { // VCPU will return it to a regular power-off state. psci_system_reset = 0x100; }; + +extend vcpu_run_wakeup_from_state enumeration { + // VCPU made a PSCI_CPU_SUSPEND call. The first state data word is the + // PSCI suspend state argument. + psci_cpu_suspend = 2; + + // VCPU made a PSCI_SYSTEM_SUSPEND call. The first state data word is + // the deepest possible CPU suspend state (which may not be the same as + // the system suspend state), for backwards compatibility with host + // kernels that do not check this value.
+ psci_system_suspend = 3; +}; #endif diff --git a/hyp/interfaces/scheduler/include/scheduler.h b/hyp/interfaces/scheduler/include/scheduler.h index 677cb6f..1d1b11e 100644 --- a/hyp/interfaces/scheduler/include/scheduler.h +++ b/hyp/interfaces/scheduler/include/scheduler.h @@ -150,6 +150,14 @@ scheduler_is_blocked(const thread_t *thread, scheduler_block_t block); bool scheduler_is_runnable(const thread_t *thread) REQUIRE_SCHEDULER_LOCK(thread); +// Return true if a thread is currently scheduled and running. +// +// The caller must either be the specified thread, or hold a reference to the +// specified thread, or be in an RCU read-side critical section. The caller must +// also hold the scheduling lock for the thread (see scheduler_lock()). +bool +scheduler_is_running(const thread_t *thread) REQUIRE_SCHEDULER_LOCK(thread); + // Wait until a specified thread is not running. // // The caller must not be holding any spinlocks and must not be an RCU diff --git a/hyp/interfaces/smccc/smccc.ev b/hyp/interfaces/smccc/smccc.ev index 7962dd3..5fc2ddc 100644 --- a/hyp/interfaces/smccc/smccc.ev +++ b/hyp/interfaces/smccc/smccc.ev @@ -6,7 +6,7 @@ interface smccc #define SMCCC_DISPATCH(type, size) \ selector_event smccc_dispatch_ ## type ## _ ## size; \ - selector interface_id: smccc_interface_id_t; \ + selector owner_id: smccc_owner_id_t; \ param function: smccc_function_t; \ param is_hvc: bool; \ param arg1: uint ## size ## _t; \ @@ -26,8 +26,8 @@ SMCCC_DISPATCH(fast, 64) SMCCC_DISPATCH(yielding, 32) SMCCC_DISPATCH(yielding, 64) -#define _SMCCC_CALL_INTERFACE(type, size, iface) \ -selector_event smccc_call_ ## type ## _ ## size ## _ ## iface; \ +#define _SMCCC_CALL_OWNER(type, size, owner) \ +selector_event smccc_call_ ## type ## _ ## size ## _ ## owner; \ selector function: smccc_function_t; \ param is_hvc: bool; \ param arg1: uint ## size ## _t; \ @@ -42,19 +42,19 @@ selector_event smccc_call_ ## type ## _ ## size ## _ ## iface; \ param ret2: uint ## size ## _t *; \ param ret3: uint ## size ## _t *. -#define SMCCC_CALL_INTERFACE(iface) \ - _SMCCC_CALL_INTERFACE(fast, 32, iface) \ - _SMCCC_CALL_INTERFACE(fast, 64, iface) \ - _SMCCC_CALL_INTERFACE(yielding, 32, iface) \ - _SMCCC_CALL_INTERFACE(yielding, 64, iface) +#define SMCCC_CALL_OWNER(owner) \ + _SMCCC_CALL_OWNER(fast, 32, owner) \ + _SMCCC_CALL_OWNER(fast, 64, owner) \ + _SMCCC_CALL_OWNER(yielding, 32, owner) \ + _SMCCC_CALL_OWNER(yielding, 64, owner) -SMCCC_CALL_INTERFACE(arch) -SMCCC_CALL_INTERFACE(cpu) -SMCCC_CALL_INTERFACE(sip) -SMCCC_CALL_INTERFACE(oem) -SMCCC_CALL_INTERFACE(standard) -SMCCC_CALL_INTERFACE(standard_hyp) -SMCCC_CALL_INTERFACE(vendor_hyp) +SMCCC_CALL_OWNER(arch) +SMCCC_CALL_OWNER(cpu) +SMCCC_CALL_OWNER(sip) +SMCCC_CALL_OWNER(oem) +SMCCC_CALL_OWNER(standard) +SMCCC_CALL_OWNER(standard_hyp) +SMCCC_CALL_OWNER(vendor_hyp) selector_event smccc_arch_features_fast32 selector function: smccc_arch_function_t diff --git a/hyp/interfaces/smccc/smccc.tc b/hyp/interfaces/smccc/smccc.tc index 6e7f2ad..8c58d95 100644 --- a/hyp/interfaces/smccc/smccc.tc +++ b/hyp/interfaces/smccc/smccc.tc @@ -12,7 +12,7 @@ // the sve_not_live hint bit in the function ID). 
define SMCCC_VERSION public constant uint32 = 0x10003; -define smccc_interface_id public enumeration(explicit) { +define smccc_owner_id public enumeration(explicit) { ARCH = 0; CPU = 1; SIP = 2; @@ -28,7 +28,7 @@ define smccc_function_id public bitfield<32> { 15:0 function type smccc_function_t; 16 sve_live_state_hint bool; // from SMCCC v1.3+ 23:17 res0 uint32(const); - 29:24 interface_id enumeration smccc_interface_id; + 29:24 owner_id enumeration smccc_owner_id; 30 is_smc64 bool; 31 is_fast bool; }; diff --git a/hyp/interfaces/vcpu/vcpu.tc b/hyp/interfaces/vcpu/vcpu.tc index 8aa2e3c..91161b0 100644 --- a/hyp/interfaces/vcpu/vcpu.tc +++ b/hyp/interfaces/vcpu/vcpu.tc @@ -45,6 +45,10 @@ define vcpu_option_flags public bitfield<64> { }; // Private vcpu flags for modules to consolidate runtime boolean flags. +// +// For VCPUs that have not yet been activated, this bitfield is protected by +// the VCPU's object state lock. Otherwise, it must only be accessed from the +// VCPU's own context. define vcpu_runtime_flags bitfield<32> { others unknown = 0; }; diff --git a/hyp/interfaces/vcpu_run/vcpu_run.ev b/hyp/interfaces/vcpu_run/vcpu_run.ev index 09b43bd..c45334c 100644 --- a/hyp/interfaces/vcpu_run/vcpu_run.ev +++ b/hyp/interfaces/vcpu_run/vcpu_run.ev @@ -4,12 +4,9 @@ interface vcpu_run -// Triggered after vcpu_run_is_enabled() becomes true for a VCPU. The VCPU's -// scheduler lock is held by the caller. +// Triggered after vcpu_run_is_enabled() becomes true for a VCPU after vcpu +// activation. This dynamic vcpu activation is deprecated. +// +// The VCPU's scheduler lock is held by the caller. event vcpu_run_enabled param vcpu: thread_t * - -// Triggered before vcpu_run_is_enabled() becomes false for a VCPU. The VCPU's -// scheduler lock is held by the caller. -event vcpu_run_disabled - param vcpu: thread_t * diff --git a/hyp/interfaces/vcpu_run/vcpu_run.tc b/hyp/interfaces/vcpu_run/vcpu_run.tc index 638d0b3..4008e05 100644 --- a/hyp/interfaces/vcpu_run/vcpu_run.tc +++ b/hyp/interfaces/vcpu_run/vcpu_run.tc @@ -2,6 +2,12 @@ // // SPDX-License-Identifier: BSD-3-Clause +#if defined(HYPERCALLS) +extend hyp_api_flags0 bitfield { + delete vcpu_run; + 11 vcpu_run bool = 1; +}; +#endif define vcpu_run_state public enumeration(explicit) { // VCPU is ready to run on the next vcpu_run hypercall. ready = 0; @@ -9,10 +15,13 @@ define vcpu_run_state public enumeration(explicit) { // VCPU is sleeping until an interrupt arrives. The wakeup IRQ will be // asserted when that occurs. // - // If the platform implements PSCI and the VCPU has called - // PSCI_CPU_SUSPEND, the first state data word will be the requested - // suspend state. Otherwise, it will be 0 (e.g. if the VCPU is - // executing a WFI instruction). + // The first state data word contains a platform-specific description + // of the sleep state. For example, for AArch64 VMs with PSCI enabled, + // this contains the PSCI suspend state. + // + // The second state data word contains a vcpu_run_wakeup_from_state + // enumeration, which is a platform-specific description of the reason + // the VCPU is expecting a wakeup. expects_wakeup = 1; // VCPU is powered off and cannot execute until another VCPU triggers @@ -34,3 +43,9 @@ define vcpu_run_poweroff_flags public bitfield<32> { 0 exited bool; others unknown = 0; }; + +define vcpu_run_wakeup_from_state public enumeration(explicit) { + // For backwards compatibility with hypervisors that didn't specify + // the cause of the sleep. Should not be used. 
+ unspecified = 0; +}; diff --git a/hyp/interfaces/virtio_mmio/virtio_mmio.hvc b/hyp/interfaces/virtio_mmio/virtio_mmio.hvc index e3c1e7a..564117e 100644 --- a/hyp/interfaces/virtio_mmio/virtio_mmio.hvc +++ b/hyp/interfaces/virtio_mmio/virtio_mmio.hvc @@ -7,7 +7,8 @@ define virtio_mmio_configure hypercall { virtio_mmio input type cap_id_t; memextent input type cap_id_t; vqs_num input type count_t; - res0 input uregister; + flags input bitfield virtio_option_flags; + device_type input enumeration virtio_device_type; error output enumeration error; }; diff --git a/hyp/interfaces/virtio_mmio/virtio_mmio.tc b/hyp/interfaces/virtio_mmio/virtio_mmio.tc index e5f323b..425df38 100644 --- a/hyp/interfaces/virtio_mmio/virtio_mmio.tc +++ b/hyp/interfaces/virtio_mmio/virtio_mmio.tc @@ -17,3 +17,17 @@ define virtio_mmio_notify_reason public bitfield<64> { 4 failed bool = 0; others unknown = 0; }; + +define virtio_option_flags public bitfield<64> { + 5:0 unknown = 0; + 6 valid_device_type bool = 0; + 63:7 res0 uint64 = 0; +}; + +define virtio_config_space union { + raw array(VIRTIO_MMIO_REG_CONFIG_BYTES) uint8(atomic); +}; + +define virtio_device_type public enumeration(explicit) { + INVALID = 0; +}; diff --git a/hyp/mem/allocator_boot/src/bootmem.c b/hyp/mem/allocator_boot/src/bootmem.c index 9711532..ae6e878 100644 --- a/hyp/mem/allocator_boot/src/bootmem.c +++ b/hyp/mem/allocator_boot/src/bootmem.c @@ -67,10 +67,10 @@ bootmem_allocate(size_t size, size_t align) loc = util_balign_up(loc, align); } - size_t free = bootmem_allocator.pool_size - - (loc - (uintptr_t)bootmem_allocator.pool_base); + size_t free_boot = bootmem_allocator.pool_size - + (loc - (uintptr_t)bootmem_allocator.pool_base); - if (size > free) { + if (size > free_boot) { return void_ptr_result_error(ERROR_NOMEM); } @@ -86,12 +86,12 @@ bootmem_allocate_remaining(size_t *size) assert(size != NULL); assert(bootmem_allocator.alloc_offset <= bootmem_allocator.pool_size); - size_t free = + size_t free_boot = bootmem_allocator.pool_size - bootmem_allocator.alloc_offset; - if (free == 0U) { + if (free_boot == 0U) { return void_ptr_result_error(ERROR_NOMEM); } - *size = free; + *size = free_boot; uintptr_t loc = (uintptr_t)bootmem_allocator.pool_base; assert(!util_add_overflows(loc, bootmem_allocator.alloc_offset)); diff --git a/hyp/mem/allocator_list/src/freelist.c b/hyp/mem/allocator_list/src/freelist.c index 67ae553..a24861e 100644 --- a/hyp/mem/allocator_list/src/freelist.c +++ b/hyp/mem/allocator_list/src/freelist.c @@ -443,10 +443,13 @@ allocate_block(allocator_node_t **head, size_t size, size_t alignment) } void_ptr_result_t -allocator_allocate_object(allocator_t *allocator, size_t size, size_t alignment) +allocator_allocate_object(allocator_t *allocator, size_t size, + size_t min_alignment) { void_ptr_result_t ret; + size_t alignment = util_max(min_alignment, alignof(size_t)); + spinlock_acquire(&allocator->lock); if (allocator->heap == NULL) { diff --git a/hyp/mem/memextent/src/memextent.c b/hyp/mem/memextent/src/memextent.c index 9c63b89..0915ccb 100644 --- a/hyp/mem/memextent/src/memextent.c +++ b/hyp/mem/memextent/src/memextent.c @@ -53,6 +53,10 @@ memextent_validate_attrs(memextent_type_t type, memextent_memtype_t memtype, break; default: ret = false; + break; + } + + if (!ret) { goto out; } @@ -70,6 +74,10 @@ memextent_validate_attrs(memextent_type_t type, memextent_memtype_t memtype, #endif default: ret = false; + break; + } + + if (!ret) { goto out; } @@ -789,7 +797,8 @@ memextent_check_memtype(memextent_memtype_t extent_type, 
memextent_ptr_result_t memextent_derive(memextent_t *parent, paddr_t offset, size_t size, - memextent_memtype_t memtype, pgtable_access_t access) + memextent_memtype_t memtype, pgtable_access_t access, + memextent_type_t type) { memextent_create_t params_me = { .memextent = NULL, .memextent_device_mem = false }; @@ -804,6 +813,7 @@ memextent_derive(memextent_t *parent, paddr_t offset, size_t size, memextent_attrs_t attrs = memextent_attrs_default(); memextent_attrs_set_access(&attrs, access); memextent_attrs_set_memtype(&attrs, memtype); + memextent_attrs_set_type(&attrs, type); spinlock_acquire(&me->header.lock); @@ -857,7 +867,7 @@ memextent_attach(partition_t *owner, memextent_t *me, uintptr_t hyp_va, assert(owner != NULL); assert(me != NULL); - error_t ret; + error_t ret = OK; if (owner != me->header.partition) { ret = ERROR_DENIED; @@ -888,6 +898,9 @@ memextent_attach(partition_t *owner, memextent_t *me, uintptr_t hyp_va, break; default: ret = ERROR_ARGUMENT_INVALID; + break; + } + if (ret == ERROR_ARGUMENT_INVALID) { goto out; } diff --git a/hyp/mem/memextent/src/memextent_tests.c b/hyp/mem/memextent/src/memextent_tests.c index c74a4c7..646451e 100644 --- a/hyp/mem/memextent/src/memextent_tests.c +++ b/hyp/mem/memextent/src/memextent_tests.c @@ -257,7 +257,8 @@ tests_memextent_test1(paddr_t phys_base) memextent_ptr_result_t me_ret; - me_ret = memextent_derive(me2, offset, size3, memtype, access); + me_ret = memextent_derive(me2, offset, size3, memtype, access, + MEMEXTENT_TYPE_BASIC); if (me_ret.e != OK) { panic("Failed creation of derived mem extent"); } @@ -268,7 +269,8 @@ tests_memextent_test1(paddr_t phys_base) size_t size4 = 4096 * 2; paddr_t vm_base3 = vm_base2 + offset2; - me_ret = memextent_derive(me2, offset2, size4, memtype, access); + me_ret = memextent_derive(me2, offset2, size4, memtype, access, + MEMEXTENT_TYPE_BASIC); if (me_ret.e != OK) { panic("Failed creation of derived mem extent"); } @@ -299,7 +301,8 @@ tests_memextent_test1(paddr_t phys_base) #endif // Derive memory extent from the entire derived extent. - me_ret = memextent_derive(me_d, offset, size3, memtype, access); + me_ret = memextent_derive(me_d, offset, size3, memtype, access, + MEMEXTENT_TYPE_BASIC); if (me_ret.e != OK) { panic("Failed creation of derived mem extent"); } @@ -307,7 +310,8 @@ tests_memextent_test1(paddr_t phys_base) memextent_t *me_dd = me_ret.r; // Derive memory extent from first page of derived extent. 
- me_ret = memextent_derive(me_d2, offset, size3, memtype, access); + me_ret = memextent_derive(me_d2, offset, size3, memtype, access, + MEMEXTENT_TYPE_BASIC); if (me_ret.e != OK) { panic("Failed creation of derived mem extent"); } @@ -470,7 +474,8 @@ tests_memextent_test2(paddr_t phys_base) memextent_ptr_result_t me_ret; - me_ret = memextent_derive(me, offset, size3, memtype, access); + me_ret = memextent_derive(me, offset, size3, memtype, access, + MEMEXTENT_TYPE_BASIC); if (me_ret.e != OK) { panic("Failed creation of derived mem extent"); } diff --git a/hyp/mem/pgtable/armv8/src/pgtable.c b/hyp/mem/pgtable/armv8/src/pgtable.c index 60d5afa..4417686 100644 --- a/hyp/mem/pgtable/armv8/src/pgtable.c +++ b/hyp/mem/pgtable/armv8/src/pgtable.c @@ -558,6 +558,23 @@ hyp_tlbi_ipa_range(vmaddr_t ipa_start, size_t size, count_t granule_shift, } #endif +static void +dsb_st(bool outer_shareable) +{ +#ifndef HOST_TEST +#if defined(ARCH_ARM_FEAT_TLBIOS) + if (outer_shareable) { + __asm__ volatile("dsb oshst" ::: "memory"); + } else { + __asm__ volatile("dsb ishst" ::: "memory"); + } +#else + (void)outer_shareable; + __asm__ volatile("dsb ishst" ::: "memory"); +#endif +#endif +} + static void dsb(bool outer_shareable) { @@ -677,10 +694,6 @@ map_stg2_access_to_attrs(pgtable_access_t kernel_access, static void set_invalid_entry(vmsa_level_table_t *table, index_t idx); -static void -set_table_entry(vmsa_level_table_t *table, index_t idx, paddr_t addr, - count_t count); - static void set_page_entry(vmsa_level_table_t *table, index_t idx, paddr_t addr, vmsa_upper_attrs_t upper_attrs, vmsa_lower_attrs_t lower_attrs, @@ -711,7 +724,8 @@ alloc_level_table(partition_t *partition, size_t size, size_t alignment, static void set_pgtables(vmaddr_t virtual_address, stack_elem_t stack[PGTABLE_LEVEL_NUM], index_t first_new_table_level, index_t cur_level, - count_t initial_refcount, index_t start_level); + count_t initial_refcount, index_t start_level, + bool outer_shareable); static pgtable_modifier_ret_t map_modifier(pgtable_t *pgt, vmaddr_t virtual_address, size_t size, @@ -724,13 +738,6 @@ static pgtable_modifier_ret_t lookup_modifier(pgtable_t *pgt, vmsa_entry_t cur_entry, index_t level, pgtable_entry_types_t type, void *data); -static void -check_refcount(pgtable_t *pgt, partition_t *partition, vmaddr_t virtual_address, - size_t size, index_t upper_level, - stack_elem_t stack[PGTABLE_LEVEL_NUM], bool need_dec, - size_t preserved_size, index_t *next_level, - vmaddr_t *next_virtual_address, size_t *next_size); - #if 0 static bool map_should_set_cont(vmaddr_t virtual_address, size_t size, @@ -767,7 +774,9 @@ static inline index_t get_index(vmaddr_t addr, const pgtable_level_info_t *info, bool is_first_level) { index_t index; - if (is_first_level) { + if (compiler_unexpected(is_first_level && + !is_high_virtual_address(addr))) { + // Handle contiguous tables index = (index_t)(addr >> info->lsb); } else { index = (index_t)((addr & segment_mask(info->msb, info->lsb)) >> @@ -892,7 +901,7 @@ static bool addr_check(vmaddr_t virtual_address, size_t bit_count, bool is_high) { #if ARCH_IS_64BIT - static_assert(sizeof(vmaddr_t) == 8, "vmaddr_t expected to be 64bits"); + static_assert(sizeof(vmaddr_t) == 8U, "vmaddr_t expected to be 64bits"); uint64_t v = is_high ? ~virtual_address : virtual_address; size_t count = (v == 0U) ? 
0U : 64U - (compiler_clz(v) + 1); @@ -1084,7 +1093,7 @@ map_stg2_attr_to_access(vmsa_upper_attrs_t upper_attrs, #if defined(ARCH_ARM_FEAT_XNX) bool uxn = vmsa_stg2_upper_attrs_get_UXN(&u); bool pxn_xor_uxn = vmsa_stg2_upper_attrs_get_PXNxorUXN(&u); - bool pxn = (bool)(pxn_xor_uxn ^ uxn); + bool pxn = pxn_xor_uxn != uxn; *user_access = uxn ? rw : pgtable_access_combine(rw, PGTABLE_ACCESS_X); *kernel_access = pxn ? rw : pgtable_access_combine(rw, PGTABLE_ACCESS_X); @@ -1210,7 +1219,7 @@ map_stg2_access_to_attrs(pgtable_access_t kernel_access, #if defined(ARCH_ARM_FEAT_XNX) vmsa_stg2_upper_attrs_set_UXN(upper_attrs, !user_exec); vmsa_stg2_upper_attrs_set_PXNxorUXN(upper_attrs, - (bool)(!kernel_exec ^ !user_exec)); + kernel_exec != user_exec); #else vmsa_stg2_upper_attrs_set_XN(upper_attrs, !kernel_exec || !user_exec); #endif @@ -1257,16 +1266,19 @@ set_invalid_entry(vmsa_level_table_t *table, index_t idx) static void set_table_entry(vmsa_level_table_t *table, index_t idx, paddr_t addr, - count_t count) + count_t count, bool outer_shareable) { vmsa_table_entry_t entry = vmsa_table_entry_default(); vmsa_table_entry_set_NextLevelTableAddress(&entry, addr); vmsa_table_entry_set_refcount(&entry, count); + // Ensure prior writes are observable to the TLB walker + dsb_st(outer_shareable); + partition_phys_access_enable(&table[idx]); vmsa_entry_t g = { .table = entry }; - atomic_store_explicit(&table[idx], g.base, memory_order_release); + atomic_store_explicit(&table[idx], g.base, memory_order_relaxed); partition_phys_access_disable(&table[idx]); } @@ -1373,7 +1385,8 @@ alloc_level_table(partition_t *partition, size_t size, size_t alignment, static void set_pgtables(vmaddr_t virtual_address, stack_elem_t stack[PGTABLE_LEVEL_NUM], index_t first_new_table_level, index_t cur_level, - count_t initial_refcount, index_t start_level) + count_t initial_refcount, index_t start_level, + bool outer_shareable) { paddr_t lower; vmsa_level_table_t *table; @@ -1406,7 +1419,8 @@ set_pgtables(vmaddr_t virtual_address, stack_elem_t stack[PGTABLE_LEVEL_NUM], set_table_refcount(table, idx, refcount); } else { // Write the table entry. - set_table_entry(table, idx, lower, refcount); + set_table_entry(table, idx, lower, refcount, + outer_shareable); // The refcount for the remaining levels should be 1. refcount = 1; @@ -1547,16 +1561,16 @@ pgtable_maybe_update_access(pgtable_t *pgt, #if defined(ARCH_ARM_FEAT_TLBIRANGE) if (margs->stage == PGTABLE_HYP_STAGE_1) { - dsb(false); + dsb_st(false); hyp_tlbi_va_range(start_virtual_address, updated_size, pgt->granule_shift); } else { - dsb(margs->outer_shareable); + dsb_st(margs->outer_shareable); hyp_tlbi_ipa_range(start_virtual_address, updated_size, pgt->granule_shift, margs->outer_shareable); } #else - dsb(margs->outer_shareable); + dsb_st(margs->outer_shareable); for (size_t offset = 0U; offset < updated_size; offset += addr_size) { if (margs->stage == PGTABLE_HYP_STAGE_1) { @@ -1697,10 +1711,10 @@ pgtable_split_block(pgtable_t *pgt, vmaddr_t virtual_address, size_t size, // Flush the TLB entry if (margs->stage == PGTABLE_HYP_STAGE_1) { - dsb(false); + dsb_st(false); hyp_tlbi_va(entry_virtual_address); } else { - dsb(margs->outer_shareable); + dsb_st(margs->outer_shareable); vm_tlbi_ipa(entry_virtual_address, margs->outer_shareable); // The full stage-1 flushing below is really sub-optimal. 
// FIXME: @@ -1788,19 +1802,22 @@ pgtable_split_block(pgtable_t *pgt, vmaddr_t virtual_address, size_t size, } #if (CPU_PGTABLE_BBM_LEVEL < 2U) && !defined(PLATFORM_PGTABLE_AVOID_BBM) + // There is a dsb_st in set_pgtables() which is sufficient for FEAT_ETS2 +#if !defined(ARCH_ARM_FEAT_ETS2) || !ARCH_ARM_FEAT_ETS2 // Wait for the TLB flush before inserting the new table entry dsb(margs->outer_shareable); +#endif #endif set_pgtables(entry_virtual_address, stack, new_page_start_level, level, - new_pages, pgt->start_level); + new_pages, pgt->start_level, margs->outer_shareable); #if (CPU_PGTABLE_BBM_LEVEL >= 2U) || defined(PLATFORM_PGTABLE_AVOID_BBM) // Flush the old entry from the TLB now, to avoid TLB conflicts later. if (margs->stage == PGTABLE_HYP_STAGE_1) { - dsb(false); + dsb_st(false); hyp_tlbi_va(entry_virtual_address); } else { - dsb(margs->outer_shareable); + dsb_st(margs->outer_shareable); vm_tlbi_ipa(entry_virtual_address, margs->outer_shareable); } #endif @@ -2023,17 +2040,17 @@ pgtable_maybe_merge_block(pgtable_t *pgt, vmaddr_t virtual_address, size_t size, vmaddr_t next_level_addr = entry_virtual_address; #ifdef ARCH_ARM_FEAT_TLBIRANGE if (margs->stage == PGTABLE_HYP_STAGE_1) { - dsb(false); + dsb_st(false); hyp_tlbi_va_range(entry_virtual_address, cur_level_info->addr_size, pgt->granule_shift); } else { - dsb(margs->outer_shareable); + dsb_st(margs->outer_shareable); hyp_tlbi_ipa_range(next_level_addr, cur_level_info->addr_size, pgt->granule_shift, margs->outer_shareable); } #else - dsb((margs->stage != PGTABLE_HYP_STAGE_1) && margs->outer_shareable); + dsb_st((margs->stage != PGTABLE_HYP_STAGE_1) && margs->outer_shareable); for (index_t i = 0; i < next_level_info->entry_cnt; i++) { if (margs->stage == PGTABLE_HYP_STAGE_1) { hyp_tlbi_va(next_level_addr); @@ -2057,6 +2074,9 @@ pgtable_maybe_merge_block(pgtable_t *pgt, vmaddr_t virtual_address, size_t size, set_block_entry(cur_table, idx, entry_phys, margs->upper_attrs, margs->lower_attrs, false, false, false); +#else + // Wait for the TLB flush before reusing the freed page table memory + dsb((margs->stage != PGTABLE_HYP_STAGE_1) && margs->outer_shareable); #endif // Release the page table memory @@ -2175,7 +2195,7 @@ map_modifier_insert_new_leaf(const pgtable_t *pgt, vmaddr_t virtual_address, // check if need to set all page table levels set_pgtables(virtual_address, stack, new_page_start_level, level, 1U, - pgt->start_level); + pgt->start_level, margs->outer_shareable); // update the physical address for next mapping margs->phys += addr_size; @@ -2360,7 +2380,7 @@ static void check_refcount(pgtable_t *pgt, partition_t *partition, vmaddr_t virtual_address, size_t size, index_t upper_level, stack_elem_t stack[PGTABLE_LEVEL_NUM], bool need_dec, - size_t preserved_size, index_t *next_level, + const pgtable_unmap_modifier_args_t *margs, index_t *next_level, vmaddr_t *next_virtual_address, size_t *next_size) { const pgtable_level_info_t *cur_level_info = NULL; @@ -2391,8 +2411,8 @@ check_refcount(pgtable_t *pgt, partition_t *partition, vmaddr_t virtual_address, } if (refcount == 0U) { - is_preserved = is_preserved_table_entry(preserved_size, - cur_level_info); + is_preserved = is_preserved_table_entry( + margs->preserved_size, cur_level_info); if (is_preserved) { break; @@ -2458,12 +2478,26 @@ check_refcount(pgtable_t *pgt, partition_t *partition, vmaddr_t virtual_address, cur_table = NULL; } - // free the page table levels at one time, the free will do the fence + if (free_idx > 0U) { + // We need to ensure that the removed levels 
are no longer + // reachable and all of their walk cache entries are removed + // before we reuse the memory for any other purpose. + if (margs->stage == PGTABLE_HYP_STAGE_1) { + dsb_st(false); + hyp_tlbi_va(virtual_address); + dsb(false); + } else { + dsb_st(margs->outer_shareable); + vm_tlbi_ipa(virtual_address, margs->outer_shareable); + dsb(margs->outer_shareable); + } + } + while (free_idx > 0U) { free_idx--; if (free_list[free_idx]->need_unmap) { - // Only used by unmap, should always need unamp + // Only used by unmap, should always need unmap partition_phys_unmap(free_list[free_idx]->table, free_list[free_idx]->paddr, util_bit(pgt->granule_shift)); @@ -2551,18 +2585,18 @@ unmap_clear_cont_bit(vmsa_level_table_t *table, vmaddr_t virtual_address, ~((util_bit(info->lsb) * info->contiguous_entry_cnt) - 1U); #ifdef ARCH_ARM_FEAT_TLBIRANGE if (margs->stage == PGTABLE_HYP_STAGE_1) { - dsb(false); + dsb_st(false); hyp_tlbi_va_range(vaddr, info->contiguous_entry_cnt * info->addr_size, granule_shift); } else { - dsb(margs->outer_shareable); + dsb_st(margs->outer_shareable); hyp_tlbi_ipa_range(vaddr, info->contiguous_entry_cnt * info->addr_size, granule_shift, margs->outer_shareable); } #else - dsb(margs->outer_shareable); + dsb_st(margs->outer_shareable); for (index_t i = 0; i < info->contiguous_entry_cnt; i++) { if (margs->stage == PGTABLE_HYP_STAGE_1) { hyp_tlbi_va(vaddr); @@ -2740,10 +2774,10 @@ unmap_modifier(pgtable_t *pgt, vmaddr_t virtual_address, size_t size, need_dec = true; if (margs->stage == PGTABLE_HYP_STAGE_1) { - dsb(false); + dsb_st(false); hyp_tlbi_va(virtual_address); } else { - dsb(margs->outer_shareable); + dsb_st(margs->outer_shareable); vm_tlbi_ipa(virtual_address, margs->outer_shareable); } @@ -2754,8 +2788,7 @@ unmap_modifier(pgtable_t *pgt, vmaddr_t virtual_address, size_t size, if (level != pgt->start_level) { check_refcount(pgt, margs->partition, virtual_address, size, - level - 1U, stack, need_dec, - margs->preserved_size, next_level, + level - 1U, stack, need_dec, margs, next_level, next_virtual_address, next_size); } @@ -2808,7 +2841,7 @@ prealloc_modifier(pgtable_t *pgt, vmaddr_t virtual_address, size_t size, if (margs->new_page_start_level != PGTABLE_INVALID_LEVEL) { set_pgtables(virtual_address, stack, margs->new_page_start_level, level, 0U, - pgt->start_level); + pgt->start_level, false); margs->new_page_start_level = PGTABLE_INVALID_LEVEL; } @@ -3462,9 +3495,10 @@ pgtable_handle_boot_runtime_warm_init(void) TCR_EL2_E2H1_set_SH0(&tcr_val, TCR_SH_INNER_SHAREABLE); TCR_EL2_E2H1_set_TG0(&tcr_val, TCR_TG0_GRANULE_SIZE_4KB); + dsb(false); register_TTBR0_EL2_write_barrier(ttbr0_val); + asm_context_sync_fence(); register_TCR_EL2_E2H1_write_barrier(tcr_val); - asm_context_sync_fence(); #endif } @@ -3536,10 +3570,6 @@ pgtable_hyp_lookup(uintptr_t virtual_address, paddr_t *mapped_base, goto out; } - if (is_high) { - virtual_address &= util_mask(pgt->address_bits); - } - pgtable_entry_types_set_block(&entry_types, true); pgtable_entry_types_set_page(&entry_types, true); // just try to lookup a page, but if it's a block, the modifier will @@ -3605,10 +3635,6 @@ pgtable_hyp_preallocate(partition_t *partition, uintptr_t virtual_address, addr_check(virtual_address + size - 1, pgt->address_bits, is_high)); - if (is_high) { - virtual_address &= util_mask(pgt->address_bits); - } - margs.partition = partition; margs.new_page_start_level = PGTABLE_INVALID_LEVEL; margs.error = OK; @@ -3687,10 +3713,6 @@ pgtable_do_hyp_map(partition_t *partition, uintptr_t virtual_address, goto 
out; } - if (is_high) { - virtual_address &= util_mask(pgt->address_bits); - } - margs.orig_virtual_address = virtual_address; margs.orig_size = size; margs.phys = phys; @@ -3792,10 +3814,6 @@ pgtable_hyp_unmap(partition_t *partition, uintptr_t virtual_address, assert(util_is_p2aligned(virtual_address, pgt->granule_shift)); assert(util_is_p2aligned(size, pgt->granule_shift)); - if (is_high) { - virtual_address &= util_mask(pgt->address_bits); - } - margs.partition = partition; margs.preserved_size = preserved_prealloc; margs.stage = PGTABLE_HYP_STAGE_1; @@ -3832,6 +3850,12 @@ void pgtable_hyp_commit(void) LOCK_IMPL { dsb(false); + // An ISB is needed if the CPU does not implement FEAT_ETS2. If ETS2 + // is available, we can skip the ISB, because we never dynamically + // create executable mappings in EL2 address space. +#if !defined(ARCH_ARM_FEAT_ETS2) || !ARCH_ARM_FEAT_ETS2 + asm_context_sync_fence(); +#endif #if !defined(NDEBUG) assert(pgtable_op); pgtable_op = false; @@ -3906,10 +3930,6 @@ pgtable_hyp_ext(vmaddr_t virtual_address, size_t size, goto out; } - if (is_high) { - virtual_address &= util_mask(pgt->address_bits); - } - (void)translation_table_walk( pgt, virtual_address, size, PGTABLE_TRANSLATION_TABLE_WALK_EVENT_EXTERNAL, entry_types, @@ -4147,8 +4167,8 @@ pgtable_vm_init(partition_t *partition, pgtable_vm_t *pgtable, vmid_t vmid) #error untested granule size #endif pgtable->control.address_bits = PLATFORM_VM_ADDRESS_SPACE_BITS; - msb = PLATFORM_VM_ADDRESS_SPACE_BITS - 1; - pgtable->control.vmid = vmid; + msb = (index_t)PLATFORM_VM_ADDRESS_SPACE_BITS - 1U; + pgtable->control.vmid = vmid; get_start_level_info_ret_t info = get_start_level_info(level_conf, msb, true); @@ -4168,6 +4188,7 @@ pgtable_vm_init(partition_t *partition, pgtable_vm_t *pgtable, vmid_t vmid) #if !defined(HOST_TEST) pgtable_vm_init_regs(pgtable); #endif + dsb(false); out: return ret; diff --git a/hyp/misc/elf/src/elf_loader.c b/hyp/misc/elf/src/elf_loader.c index 5fd997e..9e6d012 100644 --- a/hyp/misc/elf/src/elf_loader.c +++ b/hyp/misc/elf/src/elf_loader.c @@ -19,7 +19,7 @@ #include #include -static const unsigned char *elf_ident = (unsigned char *)EI_MAG_STR; +static const unsigned char *elf_ident = (const unsigned char *)EI_MAG_STR; // Simple unoptimized non-terminated string comparison static bool diff --git a/hyp/misc/log_standard/src/log.c b/hyp/misc/log_standard/src/log.c index d87cec6..38b8211 100644 --- a/hyp/misc/log_standard/src/log.c +++ b/hyp/misc/log_standard/src/log.c @@ -34,7 +34,7 @@ extern char hyp_log_buffer[]; char hyp_log_buffer[LOG_BUFFER_SIZE]; -static_assert(LOG_BUFFER_SIZE > LOG_ENTRY_BUFFER_SIZE, +static_assert((size_t)LOG_BUFFER_SIZE > LOG_ENTRY_BUFFER_SIZE, "LOG_BUFFER_SIZE too small"); // Global visibility - for debug @@ -154,20 +154,20 @@ log_standard_handle_trace_log(trace_id_t id, trace_action_t action, if (compiler_expected(buf_remaining >= entry_size)) { (void)memcpy(&hyp_log.log_buffer[prev_idx], entry_buf, entry_size); - CACHE_CLEAN_RANGE(&hyp_log.log_buffer[prev_idx], entry_size); + cache_clean_range(&hyp_log.log_buffer[prev_idx], entry_size); } else { // Otherwise copy the first bit of entry to the tail of the // buffer and wrap to the start for the remainder. 
size_t first_part = buf_remaining; (void)memcpy(&hyp_log.log_buffer[prev_idx], entry_buf, first_part); - CACHE_CLEAN_RANGE(&hyp_log.log_buffer[prev_idx], first_part); + cache_clean_range(&hyp_log.log_buffer[prev_idx], first_part); size_t second_part = entry_size - first_part; (void)memcpy(&hyp_log.log_buffer[0], entry_buf + first_part, second_part); - CACHE_CLEAN_RANGE(&hyp_log.log_buffer[0], second_part); + cache_clean_range(&hyp_log.log_buffer[0], second_part); } out: // Nothing to do diff --git a/hyp/misc/log_standard/src/string_util.c b/hyp/misc/log_standard/src/string_util.c index 0e608d7..045134a 100644 --- a/hyp/misc/log_standard/src/string_util.c +++ b/hyp/misc/log_standard/src/string_util.c @@ -36,7 +36,7 @@ typedef struct token { count_t stage; } token_t; -typedef enum align { +typedef enum align_e { // The same as left, use white space ALIGN_DEFAULT = 0, ALIGN_LEFT, @@ -562,7 +562,7 @@ check_align(const char *fmt, fmt_info_t *info) } // Check for a padding character using look-ahead - if (next && (strnidx(stopper, len, *next) < len)) { + if ((next != NULL) && (strnidx(stopper, len, *next) < len)) { if (info->fill_char == '\0') { info->fill_char = *fmt; ret = RET_TOKEN_NEXT_CHAR; diff --git a/hyp/platform/arm_arch_timer_lp/src/platform_timer_lp.c b/hyp/platform/arm_arch_timer_lp/src/platform_timer_lp.c index dcb2599..05bc2b6 100644 --- a/hyp/platform/arm_arch_timer_lp/src/platform_timer_lp.c +++ b/hyp/platform/arm_arch_timer_lp/src/platform_timer_lp.c @@ -205,9 +205,10 @@ platform_timer_lp_handle_rootvm_init(cspace_t *cspace, hyp_env_data_t *hyp_env) memextent_t *parent = m.r; - memextent_ptr_result_t me_ret = memextent_derive( - parent, PLATFORM_HYP_ARCH_TIMER_LP_BASE, (size_t)1U << 12, - MEMEXTENT_MEMTYPE_DEVICE, PGTABLE_ACCESS_RW); + memextent_ptr_result_t me_ret = + memextent_derive(parent, PLATFORM_HYP_ARCH_TIMER_LP_BASE, + (size_t)1U << 12, MEMEXTENT_MEMTYPE_DEVICE, + PGTABLE_ACCESS_RW, MEMEXTENT_TYPE_BASIC); if (me_ret.e != OK) { panic("Failed creation of low power timer memextent"); } diff --git a/hyp/platform/arm_trng_fi/src/arm_trng.c b/hyp/platform/arm_trng_fi/src/arm_trng.c index 2e07902..0a068a8 100644 --- a/hyp/platform/arm_trng_fi/src/arm_trng.c +++ b/hyp/platform/arm_trng_fi/src/arm_trng.c @@ -112,12 +112,12 @@ arm_trng_fi_handle_call(void) thread_t *current = thread_get_self(); smccc_function_id_t function_id = smccc_function_id_cast((uint32_t)current->vcpu_regs_gpr.x[0]); - smccc_interface_id_t interface = - smccc_function_id_get_interface_id(&function_id); + smccc_owner_id_t owner_id = + smccc_function_id_get_owner_id(&function_id); smccc_function_t function = smccc_function_id_get_function(&function_id); - if (compiler_expected((interface != SMCCC_INTERFACE_ID_STANDARD) || + if (compiler_expected((owner_id != SMCCC_OWNER_ID_STANDARD) || (!smccc_function_id_get_is_fast(&function_id)))) { goto out; } @@ -186,8 +186,8 @@ arm_trng_fi_handle_call(void) smccc_function_id_t fid = smccc_function_id_cast( (uint32_t)current->vcpu_regs_gpr.x[1]); - if ((smccc_function_id_get_interface_id(&fid) != - SMCCC_INTERFACE_ID_STANDARD) || + if ((smccc_function_id_get_owner_id(&fid) != + SMCCC_OWNER_ID_STANDARD) || !smccc_function_id_get_is_fast(&fid) || (smccc_function_id_get_res0(&fid) != 0U)) { break; diff --git a/hyp/platform/gicv3/src/gicv3.c b/hyp/platform/gicv3/src/gicv3.c index 56ea3fb..d2877f8 100644 --- a/hyp/platform/gicv3/src/gicv3.c +++ b/hyp/platform/gicv3/src/gicv3.c @@ -1213,19 +1213,24 @@ gicv3_irq_enable_shared(irq_t irq) } } + { #if GICV3_EXT_IRQS - if (irq_type 
== GICV3_IRQ_TYPE_SPI) { + if (irq_type == GICV3_IRQ_TYPE_SPI) { + atomic_store_release( + &gicd->isenabler[GICD_ENABLE_GET_N(irq)], + GIC_ENABLE_BIT(irq)); + } else { + atomic_store_release( + &gicd->isenabler_e[GICD_ENABLE_GET_N( + irq - GIC_SPI_EXT_BASE)], + GIC_ENABLE_BIT(irq - GIC_SPI_EXT_BASE)); + } +#else atomic_store_release(&gicd->isenabler[GICD_ENABLE_GET_N(irq)], GIC_ENABLE_BIT(irq)); - } else { - atomic_store_release(&gicd->isenabler_e[GICD_ENABLE_GET_N( - irq - GIC_SPI_EXT_BASE)], - GIC_ENABLE_BIT(irq - GIC_SPI_EXT_BASE)); - } -#else - atomic_store_release(&gicd->isenabler[GICD_ENABLE_GET_N(irq)], - GIC_ENABLE_BIT(irq)); #endif + } + spinlock_release(&spi_route_lock); } @@ -1240,17 +1245,22 @@ gicv3_irq_enable_percpu(irq_t irq, cpu_index_t cpu) switch (irq_type) { case GICV3_IRQ_TYPE_SGI: case GICV3_IRQ_TYPE_PPI: { - atomic_store_release(&gicr->sgi.isenabler0, - GIC_ENABLE_BIT(irq)); + { + atomic_store_release(&gicr->sgi.isenabler0, + GIC_ENABLE_BIT(irq)); + } break; } #if GICV3_EXT_IRQS case GICV3_IRQ_TYPE_PPI_EXT: { // Extended PPI - atomic_store_release(&gicr->sgi.isenabler_e[GICD_ENABLE_GET_N( - irq - GIC_PPI_EXT_BASE)], - GIC_ENABLE_BIT(irq - GIC_PPI_EXT_BASE)); + { + atomic_store_release( + &gicr->sgi.isenabler_e[GICD_ENABLE_GET_N( + irq - GIC_PPI_EXT_BASE)], + GIC_ENABLE_BIT(irq - GIC_PPI_EXT_BASE)); + } break; } #endif @@ -1288,17 +1298,23 @@ gicv3_irq_disable_shared(irq_t irq) switch (irq_type) { case GICV3_IRQ_TYPE_SPI: - atomic_store_relaxed(&gicd->icenabler[GICD_ENABLE_GET_N(irq)], - GIC_ENABLE_BIT(irq)); + { + atomic_store_relaxed( + &gicd->icenabler[GICD_ENABLE_GET_N(irq)], + GIC_ENABLE_BIT(irq)); + } (void)gicd_wait_for_write(); break; #if GICV3_EXT_IRQS case GICV3_IRQ_TYPE_SPI_EXT: { // Extended SPI - atomic_store_relaxed(&gicd->icenabler_e[GICD_ENABLE_GET_N( - irq - GIC_SPI_EXT_BASE)], - GIC_ENABLE_BIT(irq - GIC_SPI_EXT_BASE)); + { + atomic_store_relaxed( + &gicd->icenabler_e[GICD_ENABLE_GET_N( + irq - GIC_SPI_EXT_BASE)], + GIC_ENABLE_BIT(irq - GIC_SPI_EXT_BASE)); + } (void)gicd_wait_for_write(); break; } @@ -1370,16 +1386,21 @@ gicv3_irq_disable_percpu_nowait(irq_t irq, cpu_index_t cpu) switch (irq_type) { case GICV3_IRQ_TYPE_SGI: case GICV3_IRQ_TYPE_PPI: { - atomic_store_relaxed(&gicr->sgi.icenabler0, - GIC_ENABLE_BIT(irq)); + { + atomic_store_relaxed(&gicr->sgi.icenabler0, + GIC_ENABLE_BIT(irq)); + } break; } #if GICV3_EXT_IRQS case GICV3_IRQ_TYPE_PPI_EXT: { // Extended PPI - atomic_store_relaxed(&gicr->sgi.icenabler_e[GICD_ENABLE_GET_N( - irq - GIC_PPI_EXT_BASE)], - GIC_ENABLE_BIT(irq - GIC_PPI_EXT_BASE)); + { + atomic_store_relaxed( + &gicr->sgi.icenabler_e[GICD_ENABLE_GET_N( + irq - GIC_PPI_EXT_BASE)], + GIC_ENABLE_BIT(irq - GIC_PPI_EXT_BASE)); + } break; } #endif @@ -1666,31 +1687,35 @@ gicv3_spi_set_route_internal(irq_t irq, GICD_IROUTER_t route) assert_preempt_disabled(); - switch (gicv3_get_irq_type(irq)) { - case GICV3_IRQ_TYPE_SPI: - atomic_store_relaxed(&gicd->irouter[irq - GIC_SPI_BASE], route); - ret = OK; - break; + { + switch (gicv3_get_irq_type(irq)) { + case GICV3_IRQ_TYPE_SPI: + atomic_store_relaxed(&gicd->irouter[irq - GIC_SPI_BASE], + route); + ret = OK; + break; #if GICV3_EXT_IRQS - case GICV3_IRQ_TYPE_SPI_EXT: - atomic_store_relaxed(&gicd->irouter_e[irq - GIC_SPI_EXT_BASE], - route); - ret = OK; - break; + case GICV3_IRQ_TYPE_SPI_EXT: + atomic_store_relaxed( + &gicd->irouter_e[irq - GIC_SPI_EXT_BASE], + route); + ret = OK; + break; #endif - case GICV3_IRQ_TYPE_SGI: - case GICV3_IRQ_TYPE_PPI: + case GICV3_IRQ_TYPE_SGI: + case 
GICV3_IRQ_TYPE_PPI: #if GICV3_EXT_IRQS - case GICV3_IRQ_TYPE_PPI_EXT: + case GICV3_IRQ_TYPE_PPI_EXT: #endif #if GICV3_HAS_LPI - case GICV3_IRQ_TYPE_LPI: + case GICV3_IRQ_TYPE_LPI: #endif - case GICV3_IRQ_TYPE_SPECIAL: - case GICV3_IRQ_TYPE_RESERVED: - default: - ret = ERROR_ARGUMENT_INVALID; - break; + case GICV3_IRQ_TYPE_SPECIAL: + case GICV3_IRQ_TYPE_RESERVED: + default: + ret = ERROR_ARGUMENT_INVALID; + break; + } } return ret; diff --git a/hyp/platform/psci_smc/aarch64/include/psci_smc_arch.h b/hyp/platform/psci_smc/aarch64/include/psci_smc_arch.h index 66e0dc7..13434c3 100644 --- a/hyp/platform/psci_smc/aarch64/include/psci_smc_arch.h +++ b/hyp/platform/psci_smc/aarch64/include/psci_smc_arch.h @@ -9,7 +9,7 @@ psci_smc_fn_call(psci_function_t fn, register_t arg_0, register_t arg_1, smccc_function_id_t fn_id = smccc_function_id_default(); smccc_function_id_set_is_fast(&fn_id, true); smccc_function_id_set_is_smc64(&fn_id, true); - smccc_function_id_set_interface_id(&fn_id, SMCCC_INTERFACE_ID_STANDARD); + smccc_function_id_set_owner_id(&fn_id, SMCCC_OWNER_ID_STANDARD); smccc_function_id_set_function(&fn_id, (smccc_function_t)fn); uint64_t hyp_args[6] = { arg_0, arg_1, arg_2, 0, 0, 0 }; @@ -27,7 +27,7 @@ psci_smc_fn_call32(psci_function_t fn, uint32_t arg_0, uint32_t arg_1, smccc_function_id_t fn_id = smccc_function_id_default(); smccc_function_id_set_is_fast(&fn_id, true); smccc_function_id_set_is_smc64(&fn_id, false); - smccc_function_id_set_interface_id(&fn_id, SMCCC_INTERFACE_ID_STANDARD); + smccc_function_id_set_owner_id(&fn_id, SMCCC_OWNER_ID_STANDARD); smccc_function_id_set_function(&fn_id, (smccc_function_t)fn); uint64_t hyp_args[6] = { arg_0, arg_1, arg_2, 0, 0, 0 }; @@ -45,7 +45,7 @@ psci_smc_fn_call_reg(psci_function_t fn, register_t arg_0, register_t arg_1, smccc_function_id_t fn_id = smccc_function_id_default(); smccc_function_id_set_is_fast(&fn_id, true); smccc_function_id_set_is_smc64(&fn_id, true); - smccc_function_id_set_interface_id(&fn_id, SMCCC_INTERFACE_ID_STANDARD); + smccc_function_id_set_owner_id(&fn_id, SMCCC_OWNER_ID_STANDARD); smccc_function_id_set_function(&fn_id, (smccc_function_t)fn); uint64_t hyp_args[6] = { arg_0, arg_1, arg_2, 0, 0, 0 }; diff --git a/hyp/platform/psci_smc/src/psci_smc.c b/hyp/platform/psci_smc/src/psci_smc.c index 9cb4825..426f82a 100644 --- a/hyp/platform/psci_smc/src/psci_smc.c +++ b/hyp/platform/psci_smc/src/psci_smc.c @@ -177,7 +177,7 @@ psci_smc_psci_features(psci_function_t fn, bool smc64) smccc_function_id_t fn_id = smccc_function_id_default(); smccc_function_id_set_is_fast(&fn_id, true); smccc_function_id_set_is_smc64(&fn_id, smc64); - smccc_function_id_set_interface_id(&fn_id, SMCCC_INTERFACE_ID_STANDARD); + smccc_function_id_set_owner_id(&fn_id, SMCCC_OWNER_ID_STANDARD); smccc_function_id_set_function(&fn_id, (smccc_function_t)fn); sint32_result_t ret; diff --git a/hyp/platform/soc_qemu/src/abort.c b/hyp/platform/soc_qemu/src/abort.c index 70903ba..5fefac5 100644 --- a/hyp/platform/soc_qemu/src/abort.c +++ b/hyp/platform/soc_qemu/src/abort.c @@ -16,7 +16,7 @@ soc_qemu_handle_power_system_off(void) smccc_function_id_t fn_id = smccc_function_id_default(); - smccc_function_id_set_interface_id(&fn_id, SMCCC_INTERFACE_ID_STANDARD); + smccc_function_id_set_owner_id(&fn_id, SMCCC_OWNER_ID_STANDARD); smccc_function_id_set_function(&fn_id, PSCI_FUNCTION_SYSTEM_OFF); smccc_function_id_set_is_smc64(&fn_id, false); smccc_function_id_set_is_fast(&fn_id, true); diff --git a/hyp/platform/soc_qemu/src/boot.c b/hyp/platform/soc_qemu/src/boot.c 
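Background for the interface_id to owner_id rename in the SMCCC-related hunks above and below: the Arm SMC Calling Convention encodes the owning service of a call in bits [29:24] of the 32-bit function ID, with bit 31 selecting a fast call and bit 30 selecting the SMC64/HVC64 convention; "owner ID" is the specification's term for that field. A minimal standalone sketch of the decoding, using hypothetical helper names rather than the hypervisor's generated smccc_function_id_* accessors:

#include <stdbool.h>
#include <stdint.h>

// Function-ID layout per the SMC Calling Convention.
#define SMCCC_FNID_FAST_CALL	(1U << 31)
#define SMCCC_FNID_SMC64	(1U << 30)
#define SMCCC_FNID_OWNER_SHIFT	24U
#define SMCCC_FNID_OWNER_MASK	0x3fU
#define SMCCC_FNID_FUNC_MASK	0xffffU

enum smccc_owner {
	SMCCC_OWNER_ARCH	 = 0,
	SMCCC_OWNER_CPU		 = 1,
	SMCCC_OWNER_SIP		 = 2,
	SMCCC_OWNER_OEM		 = 3,
	SMCCC_OWNER_STANDARD	 = 4,
	SMCCC_OWNER_STANDARD_HYP = 5,
	SMCCC_OWNER_VENDOR_HYP	 = 6,
};

static inline enum smccc_owner
smccc_fnid_owner(uint32_t fnid)
{
	return (enum smccc_owner)((fnid >> SMCCC_FNID_OWNER_SHIFT) &
				  SMCCC_FNID_OWNER_MASK);
}

static inline bool
smccc_fnid_is_fast(uint32_t fnid)
{
	return (fnid & SMCCC_FNID_FAST_CALL) != 0U;
}

// Example: PSCI_VERSION (function ID 0x84000000) decodes as a fast SMC32
// call with owner 4 (Standard Secure service), which is why the PSCI code
// in these hunks sets SMCCC_OWNER_ID_STANDARD on its outgoing calls.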
index 002955e..cd04951 100644 --- a/hyp/platform/soc_qemu/src/boot.c +++ b/hyp/platform/soc_qemu/src/boot.c @@ -195,20 +195,23 @@ soc_qemu_handle_rootvm_init(partition_t *root_partition, cspace_t *root_cspace, memextent_ptr_result_t me_ret; me_ret = memextent_derive(me, PLATFORM_GICD_BASE, 0x10000U, - MEMEXTENT_MEMTYPE_DEVICE, PGTABLE_ACCESS_RW); + MEMEXTENT_MEMTYPE_DEVICE, PGTABLE_ACCESS_RW, + MEMEXTENT_TYPE_BASIC); if (me_ret.e != OK) { panic("Failed creation of gicd memextent"); } me_ret = memextent_derive(me, PLATFORM_GICR_BASE, (PLATFORM_MAX_CORES << GICR_STRIDE_SHIFT), - MEMEXTENT_MEMTYPE_DEVICE, PGTABLE_ACCESS_RW); + MEMEXTENT_MEMTYPE_DEVICE, PGTABLE_ACCESS_RW, + MEMEXTENT_TYPE_BASIC); if (me_ret.e != OK) { panic("Failed creation of gicr memextent"); } // Derive extent for UART and share it with RM me_ret = memextent_derive(me, PLATFORM_UART_BASE, PLATFORM_UART_SIZE, - MEMEXTENT_MEMTYPE_DEVICE, PGTABLE_ACCESS_RW); + MEMEXTENT_MEMTYPE_DEVICE, PGTABLE_ACCESS_RW, + MEMEXTENT_TYPE_BASIC); if (me_ret.e != OK) { panic("Failed creation of uart memextent"); } diff --git a/hyp/platform/tbre/aarch64/tbre.tc b/hyp/platform/trbe/aarch64/trbe.tc similarity index 93% rename from hyp/platform/tbre/aarch64/tbre.tc rename to hyp/platform/trbe/aarch64/trbe.tc index bdc89ef..2ba67d2 100644 --- a/hyp/platform/tbre/aarch64/tbre.tc +++ b/hyp/platform/trbe/aarch64/trbe.tc @@ -15,7 +15,7 @@ define TRBLIMITR_EL1 bitfield<64> { others unknown = 0; }; -define tbre_context structure { +define trbe_context structure { TRBLIMITR_EL1 bitfield TRBLIMITR_EL1; TRBPTR_EL1 uint64; TRBBASER_EL1 uint64; diff --git a/hyp/platform/tbre/build.conf b/hyp/platform/trbe/build.conf similarity index 74% rename from hyp/platform/tbre/build.conf rename to hyp/platform/trbe/build.conf index e37a8b7..161fe61 100644 --- a/hyp/platform/tbre/build.conf +++ b/hyp/platform/trbe/build.conf @@ -2,6 +2,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -arch_types aarch64 tbre.tc +arch_types aarch64 trbe.tc local_include -source tbre.c +source trbe.c diff --git a/hyp/platform/tbre/include/tbre.h b/hyp/platform/trbe/include/trbe.h similarity index 57% rename from hyp/platform/tbre/include/tbre.h rename to hyp/platform/trbe/include/trbe.h index 8241880..8cba61c 100644 --- a/hyp/platform/tbre/include/tbre.h +++ b/hyp/platform/trbe/include/trbe.h @@ -3,7 +3,7 @@ // SPDX-License-Identifier: BSD-3-Clause void -tbre_save_context_percpu(cpu_index_t cpu); +trbe_save_context_percpu(cpu_index_t cpu); void -tbre_restore_context_percpu(cpu_index_t cpu); +trbe_restore_context_percpu(cpu_index_t cpu); diff --git a/hyp/platform/tbre/src/tbre.c b/hyp/platform/trbe/src/trbe.c similarity index 53% rename from hyp/platform/tbre/src/tbre.c rename to hyp/platform/trbe/src/trbe.c index 9473f18..68b5783 100644 --- a/hyp/platform/tbre/src/tbre.c +++ b/hyp/platform/trbe/src/trbe.c @@ -14,55 +14,55 @@ #include -#include "tbre.h" +#include "trbe.h" -CPULOCAL_DECLARE_STATIC(tbre_context_t, tbre_contexts); +CPULOCAL_DECLARE_STATIC(trbe_context_t, trbe_contexts); void -tbre_save_context_percpu(cpu_index_t cpu) +trbe_save_context_percpu(cpu_index_t cpu) { - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBLIMITR_EL1 = + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBLIMITR_EL1 = register_TRBLIMITR_EL1_read_ordered(&vet_ordering); - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBPTR_EL1 = + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBPTR_EL1 = register_TRBPTR_EL1_read_ordered(&vet_ordering); - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBBASER_EL1 = + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBBASER_EL1 
= register_TRBBASER_EL1_read_ordered(&vet_ordering); - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBSR_EL1 = + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBSR_EL1 = register_TRBSR_EL1_read_ordered(&vet_ordering); - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBMAR_EL1 = + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBMAR_EL1 = register_TRBMAR_EL1_read_ordered(&vet_ordering); - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBTRG_EL1 = + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBTRG_EL1 = register_TRBTRG_EL1_read_ordered(&vet_ordering); } void -tbre_restore_context_percpu(cpu_index_t cpu) +trbe_restore_context_percpu(cpu_index_t cpu) { register_TRBLIMITR_EL1_write_ordered( - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBLIMITR_EL1, + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBLIMITR_EL1, &vet_ordering); register_TRBPTR_EL1_write_ordered( - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBPTR_EL1, + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBPTR_EL1, &vet_ordering); register_TRBBASER_EL1_write_ordered( - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBBASER_EL1, + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBBASER_EL1, &vet_ordering); register_TRBSR_EL1_write_ordered( - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBSR_EL1, &vet_ordering); + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBSR_EL1, &vet_ordering); register_TRBMAR_EL1_write_ordered( - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBMAR_EL1, + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBMAR_EL1, &vet_ordering); register_TRBTRG_EL1_write_ordered( - CPULOCAL_BY_INDEX(tbre_contexts, cpu).TRBTRG_EL1, + CPULOCAL_BY_INDEX(trbe_contexts, cpu).TRBTRG_EL1, &vet_ordering); } diff --git a/hyp/vm/arm_pv_time/src/arm_pv_time.c b/hyp/vm/arm_pv_time/src/arm_pv_time.c index 151c7a0..4e64318 100644 --- a/hyp/vm/arm_pv_time/src/arm_pv_time.c +++ b/hyp/vm/arm_pv_time/src/arm_pv_time.c @@ -22,16 +22,15 @@ bool smccc_pv_time_features(uint64_t arg1, uint64_t *ret0) { - smccc_function_id_t fn_id = smccc_function_id_cast((uint32_t)arg1); - bool is_smc64 = smccc_function_id_get_is_smc64(&fn_id); - bool is_fast = smccc_function_id_get_is_fast(&fn_id); - bool res0 = smccc_function_id_get_res0(&fn_id); - smccc_function_t fn = smccc_function_id_get_function(&fn_id); - smccc_interface_id_t interface_id = - smccc_function_id_get_interface_id(&fn_id); - uint64_t ret = SMCCC_UNKNOWN_FUNCTION64; - - if ((interface_id == SMCCC_INTERFACE_ID_STANDARD_HYP) && (res0 == 0U) && + smccc_function_id_t fn_id = smccc_function_id_cast((uint32_t)arg1); + bool is_smc64 = smccc_function_id_get_is_smc64(&fn_id); + bool is_fast = smccc_function_id_get_is_fast(&fn_id); + uint32_t res0 = smccc_function_id_get_res0(&fn_id); + smccc_function_t fn = smccc_function_id_get_function(&fn_id); + smccc_owner_id_t owner_id = smccc_function_id_get_owner_id(&fn_id); + uint64_t ret = SMCCC_UNKNOWN_FUNCTION64; + + if ((owner_id == SMCCC_OWNER_ID_STANDARD_HYP) && (res0 == 0U) && is_fast && is_smc64) { switch ((smccc_standard_hyp_function_t)fn) { case SMCCC_STANDARD_HYP_FUNCTION_PV_TIME_FEATURES: diff --git a/hyp/vm/psci/psci.tc b/hyp/vm/psci/psci.tc index aadc3ce..2d915fd 100644 --- a/hyp/vm/psci/psci.tc +++ b/hyp/vm/psci/psci.tc @@ -16,16 +16,13 @@ define PSCI_VCPUS_STATE_PER_VCPU_MASK constant uint8 = 0x7; define PSCI_PER_CLUSTER_STATE_BITS_MASK constant uint8 = 0x7; define PSCI_VCPUS_STATE_BITS constant type count_t = (PLATFORM_MAX_CORES * PSCI_VCPUS_STATE_PER_VCPU_BITS); define PSCI_CLUSTER_STATE_BITS constant type count_t = (PLATFORM_MAX_CLUSTERS * PSCI_PER_CLUSTER_STATE_BITS); -define PSCI_SYSTEM_STATE_BITS constant type count_t = 8; -define 
PSCI_CLUSTER_STATE_SUSPEND_BITS constant type count_t = PLATFORM_MAX_CLUSTERS; define PSCI_VCPUS_STATE_MAX_VCPUS constant type count_t = PSCI_VCPUS_STATE_BITS/PSCI_VCPUS_STATE_PER_VCPU_BITS; define PSCI_VCPUS_STATE_MAX_INDEX constant type count_t = PLATFORM_MAX_CORES*PSCI_VCPUS_STATE_PER_VCPU_BITS; define vpm_group_suspend_state bitfield<128> { auto vcpus_state uint64; auto cluster_state uint16; - auto system_state uint8; - auto is_cluster_suspended uint16; + auto system_suspend bool; others unknown=0; }; diff --git a/hyp/vm/psci/src/psci_common.c b/hyp/vm/psci/src/psci_common.c index f80c517..f8b06df 100644 --- a/hyp/vm/psci/src/psci_common.c +++ b/hyp/vm/psci/src/psci_common.c @@ -797,8 +797,8 @@ psci_features(uint32_t arg1, uint32_t *ret0) smccc_function_t fn = smccc_function_id_get_function(&fn_id); if (has_psci && - (smccc_function_id_get_interface_id(&fn_id) == - SMCCC_INTERFACE_ID_STANDARD) && + (smccc_function_id_get_owner_id(&fn_id) == + SMCCC_OWNER_ID_STANDARD) && smccc_function_id_get_is_fast(&fn_id) && (smccc_function_id_get_res0(&fn_id) == 0U)) { ret = smccc_function_id_get_is_smc64(&fn_id) @@ -806,8 +806,8 @@ psci_features(uint32_t arg1, uint32_t *ret0) (psci_function_t)fn) : trigger_psci_features32_event( (psci_function_t)fn); - } else if ((smccc_function_id_get_interface_id(&fn_id) == - SMCCC_INTERFACE_ID_ARCH) && + } else if ((smccc_function_id_get_owner_id(&fn_id) == + SMCCC_OWNER_ID_ARCH) && smccc_function_id_get_is_fast(&fn_id) && !smccc_function_id_get_is_smc64(&fn_id) && (smccc_function_id_get_res0(&fn_id) == 0U) && @@ -1163,8 +1163,9 @@ psci_handle_vcpu_wakeup(thread_t *vcpu) void psci_handle_vcpu_wakeup_self(void) { - assert(!scheduler_is_blocked(thread_get_self(), - SCHEDULER_BLOCK_VCPU_SUSPEND)); + thread_t *current = thread_get_self(); + assert(!scheduler_is_blocked(current, SCHEDULER_BLOCK_VCPU_SUSPEND) || + thread_is_dying(current)); } bool @@ -1188,6 +1189,23 @@ psci_handle_vcpu_run_check(const thread_t *thread, register_t *state_data_0, ret = VCPU_RUN_STATE_EXPECTS_WAKEUP; *state_data_0 = psci_suspend_powerstate_raw(thread->psci_suspend_state); + vpm_group_t *vpm_group = thread->psci_group; + bool system_suspend; + if (vpm_group != NULL) { + vpm_group_suspend_state_t vm_state = + atomic_load_acquire( + &vpm_group->psci_vm_suspend_state); + system_suspend = + vpm_group_suspend_state_get_system_suspend( + &vm_state); + } else { + system_suspend = false; + } + vcpu_run_wakeup_from_state_t from_state = + system_suspend + ? VCPU_RUN_WAKEUP_FROM_STATE_PSCI_SYSTEM_SUSPEND + : VCPU_RUN_WAKEUP_FROM_STATE_PSCI_CPU_SUSPEND; + *state_data_1 = (register_t)from_state; } else { ret = VCPU_RUN_STATE_BLOCKED; } @@ -1296,7 +1314,7 @@ psci_handle_vcpu_stopped(void) void psci_handle_power_cpu_online(void) { - psci_set_vpm_active_pcpus_bit(cpulocal_get_index()); + (void)psci_set_vpm_active_pcpus_bit(cpulocal_get_index()); } void diff --git a/hyp/vm/psci_pc/src/psci_pc.c b/hyp/vm/psci_pc/src/psci_pc.c index de59f61..489d5f1 100644 --- a/hyp/vm/psci_pc/src/psci_pc.c +++ b/hyp/vm/psci_pc/src/psci_pc.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -357,13 +356,10 @@ psci_pc_handle_idle_yield(bool in_idle_thread) (void)psci_set_vpm_active_pcpus_bit(cpu); } - if (suspend_result == OK) { - // Return from successful suspend. We were presumably woken by - // an interrupt; handle it now and reschedule if required. - idle_state = irq_interrupt_dispatch() ? 
IDLE_STATE_RESCHEDULE - : IDLE_STATE_WAKEUP; - } else if (suspend_result == ERROR_BUSY) { - // An interrupt will arrive soon, continue with idle. + if ((suspend_result == OK) || (suspend_result == ERROR_BUSY)) { + // Return from successful suspend, or suspend failure due to a + // pending wakeup. Poll for wakeup events. + idle_state = idle_wakeup(); } else if (suspend_result != ERROR_DENIED) { TRACE_AND_LOG(ERROR, WARN, "ERROR: psci suspend error {:d}", (register_t)suspend_result); diff --git a/hyp/vm/rootvm/src/rootvm_init.c b/hyp/vm/rootvm/src/rootvm_init.c index dca805c..6528e7a 100644 --- a/hyp/vm/rootvm/src/rootvm_init.c +++ b/hyp/vm/rootvm/src/rootvm_init.c @@ -115,10 +115,11 @@ rootvm_init_env_data(partition_t *root_partition, uint32_t env_data_size) } qcbor_enc_ctxt = (qcbor_enc_ctxt_t *)alloc_ret.r; - memset_s(qcbor_enc_ctxt, sizeof(*qcbor_enc_ctxt), 0, - sizeof(*qcbor_enc_ctxt)); - memset_s(&hyp_env, sizeof(hyp_env), 0, sizeof(hyp_env)); + (void)memset_s(qcbor_enc_ctxt, sizeof(*qcbor_enc_ctxt), 0, + sizeof(*qcbor_enc_ctxt)); + + (void)memset_s(&hyp_env, sizeof(hyp_env), 0, sizeof(hyp_env)); hyp_env.env_data_size = env_data_size; remaining_size = env_data_size; @@ -234,7 +235,7 @@ rootvm_init(void) goto cspace_fail; } - uint32_t env_data_size = 0x4000; + uint32_t env_data_size = QCBOR_ENV_CONFIG_SIZE; rootvm_init_env_info info = rootvm_init_env_data(root_partition, env_data_size); diff --git a/hyp/vm/rootvm_package/src/package.c b/hyp/vm/rootvm_package/src/package.c index b273435..d4b8dd2 100644 --- a/hyp/vm/rootvm_package/src/package.c +++ b/hyp/vm/rootvm_package/src/package.c @@ -137,7 +137,8 @@ rootvm_package_load_elf(void *elf, size_t elf_max_size, addrspace_t *addrspace, util_balign_up(phdr->p_memsz, PGTABLE_VM_PAGE_SIZE); memextent_ptr_result_t me_ret = memextent_derive( - me_rm, offset, size, MEMEXTENT_MEMTYPE_ANY, access); + me_rm, offset, size, MEMEXTENT_MEMTYPE_ANY, access, + MEMEXTENT_TYPE_BASIC); if (me_ret.e != OK) { panic("Failed creation of derived mem extent"); } @@ -241,13 +242,13 @@ rootvm_package_handle_rootvm_init(partition_t *root_partition, paddr_t load_next = load_base; // Create memory extent for the RM with randomized base - uint64_t rand; + uint64_t random; #if !defined(DISABLE_ROOTVM_ASLR) uint64_result_t res = prng_get64(); assert(res.e == OK); - rand = res.r; + random = res.r; #else - rand = 0x10000000U; + random = 0x10000000U; #endif #if 0 @@ -261,7 +262,7 @@ rootvm_package_handle_rootvm_init(partition_t *root_partition, ipa += PGTABLE_VM_PAGE_SIZE; // avoid use of the zero page ipa = util_balign_down(ipa, PGTABLE_VM_PAGE_SIZE); #else - (void)rand; + (void)random; vmaddr_t ipa = PLATFORM_ROOTVM_LMA_BASE; #endif diff --git a/hyp/vm/smccc/aarch64/src/smccc_64.c b/hyp/vm/smccc/aarch64/src/smccc_64.c index 7984f36..1d395a7 100644 --- a/hyp/vm/smccc/aarch64/src/smccc_64.c +++ b/hyp/vm/smccc/aarch64/src/smccc_64.c @@ -48,8 +48,7 @@ smccc_handle_call(bool is_hvc) EXCLUDE_PREEMPT_DISABLED if (smccc_function_id_get_is_fast(&function_id)) { handled = trigger_smccc_dispatch_fast_64_event( - smccc_function_id_get_interface_id( - &function_id), + smccc_function_id_get_owner_id(&function_id), smccc_function_id_get_function(&function_id), is_hvc, (uint64_t)current->vcpu_regs_gpr.x[1], (uint64_t)current->vcpu_regs_gpr.x[2], @@ -62,8 +61,7 @@ smccc_handle_call(bool is_hvc) EXCLUDE_PREEMPT_DISABLED &ret0, &ret1, &ret2, &ret3); } else { handled = trigger_smccc_dispatch_yielding_64_event( - smccc_function_id_get_interface_id( - &function_id), + 
smccc_function_id_get_owner_id(&function_id), smccc_function_id_get_function(&function_id), is_hvc, (uint64_t)current->vcpu_regs_gpr.x[1], (uint64_t)current->vcpu_regs_gpr.x[2], @@ -90,8 +88,7 @@ smccc_handle_call(bool is_hvc) EXCLUDE_PREEMPT_DISABLED if (smccc_function_id_get_is_fast(&function_id)) { handled = trigger_smccc_dispatch_fast_32_event( - smccc_function_id_get_interface_id( - &function_id), + smccc_function_id_get_owner_id(&function_id), smccc_function_id_get_function(&function_id), is_hvc, (uint32_t)current->vcpu_regs_gpr.x[1], (uint32_t)current->vcpu_regs_gpr.x[2], @@ -104,8 +101,7 @@ smccc_handle_call(bool is_hvc) EXCLUDE_PREEMPT_DISABLED &ret0, &ret1, &ret2, &ret3); } else { handled = trigger_smccc_dispatch_yielding_32_event( - smccc_function_id_get_interface_id( - &function_id), + smccc_function_id_get_owner_id(&function_id), smccc_function_id_get_function(&function_id), is_hvc, (uint32_t)current->vcpu_regs_gpr.x[1], (uint32_t)current->vcpu_regs_gpr.x[2], diff --git a/hyp/vm/smccc/smccc.ev b/hyp/vm/smccc/smccc.ev index cb7bb3b..48c9b7c 100644 --- a/hyp/vm/smccc/smccc.ev +++ b/hyp/vm/smccc/smccc.ev @@ -4,26 +4,26 @@ module smccc -#define _SMCCC_DISPATCH_INTERFACE(type_size, iface, iface_id) \ -subscribe smccc_dispatch_ ## type_size[SMCCC_INTERFACE_ID_ ## iface_id]; \ - handler trigger_smccc_call_ ## type_size ## _ ## iface ## _event( \ +#define _SMCCC_DISPATCH_OWNER(type_size, owner, owner_id) \ +subscribe smccc_dispatch_ ## type_size[SMCCC_OWNER_ID_ ## owner_id]; \ + handler trigger_smccc_call_ ## type_size ## _ ## owner ## _event( \ function, is_hvc, arg1, arg2, arg3, arg4, arg5, arg6, \ client_id, ret0, ret1, ret2, ret3); \ exclude_preempt_disabled. -#define SMCCC_DISPATCH_INTERFACE(iface, iface_id) \ - _SMCCC_DISPATCH_INTERFACE(fast_32, iface, iface_id) \ - _SMCCC_DISPATCH_INTERFACE(fast_64, iface, iface_id) \ - _SMCCC_DISPATCH_INTERFACE(yielding_32, iface, iface_id) \ - _SMCCC_DISPATCH_INTERFACE(yielding_64, iface, iface_id) +#define SMCCC_DISPATCH_OWNER(owner, owner_id) \ + _SMCCC_DISPATCH_OWNER(fast_32, owner, owner_id) \ + _SMCCC_DISPATCH_OWNER(fast_64, owner, owner_id) \ + _SMCCC_DISPATCH_OWNER(yielding_32, owner, owner_id) \ + _SMCCC_DISPATCH_OWNER(yielding_64, owner, owner_id) -SMCCC_DISPATCH_INTERFACE(arch, ARCH) -SMCCC_DISPATCH_INTERFACE(cpu, CPU) -SMCCC_DISPATCH_INTERFACE(sip, SIP) -SMCCC_DISPATCH_INTERFACE(oem, OEM) -SMCCC_DISPATCH_INTERFACE(standard, STANDARD) -SMCCC_DISPATCH_INTERFACE(standard_hyp, STANDARD_HYP) -SMCCC_DISPATCH_INTERFACE(vendor_hyp, VENDOR_HYP) +SMCCC_DISPATCH_OWNER(arch, ARCH) +SMCCC_DISPATCH_OWNER(cpu, CPU) +SMCCC_DISPATCH_OWNER(sip, SIP) +SMCCC_DISPATCH_OWNER(oem, OEM) +SMCCC_DISPATCH_OWNER(standard, STANDARD) +SMCCC_DISPATCH_OWNER(standard_hyp, STANDARD_HYP) +SMCCC_DISPATCH_OWNER(vendor_hyp, VENDOR_HYP) #include diff --git a/hyp/vm/smccc/src/smccc.c b/hyp/vm/smccc/src/smccc.c index d9e6e35..5f2388e 100644 --- a/hyp/vm/smccc/src/smccc.c +++ b/hyp/vm/smccc/src/smccc.c @@ -23,8 +23,7 @@ smccc_arch_features(uint32_t arg1, uint32_t *ret0) smccc_function_t fn = smccc_function_id_get_function(&fn_id); uint32_t ret; - if ((smccc_function_id_get_interface_id(&fn_id) == - SMCCC_INTERFACE_ID_ARCH) && + if ((smccc_function_id_get_owner_id(&fn_id) == SMCCC_OWNER_ID_ARCH) && smccc_function_id_get_is_fast(&fn_id) && (smccc_function_id_get_res0(&fn_id) == 0U)) { if (is_smc64) { @@ -34,8 +33,8 @@ smccc_arch_features(uint32_t arg1, uint32_t *ret0) ret = trigger_smccc_arch_features_fast32_event( (smccc_arch_function_t)fn); } - } else if 
((smccc_function_id_get_interface_id(&fn_id) == - SMCCC_INTERFACE_ID_STANDARD_HYP) && + } else if ((smccc_function_id_get_owner_id(&fn_id) == + SMCCC_OWNER_ID_STANDARD_HYP) && smccc_function_id_get_is_fast(&fn_id) && (smccc_function_id_get_res0(&fn_id) == 0U)) { if (is_smc64) { diff --git a/hyp/vm/smccc/src/smccc_hypercalls.c b/hyp/vm/smccc/src/smccc_hypercalls.c index ea3ea82..cf9a327 100644 --- a/hyp/vm/smccc/src/smccc_hypercalls.c +++ b/hyp/vm/smccc/src/smccc_hypercalls.c @@ -16,11 +16,10 @@ smccc_handle_hypercall_wrapper(smccc_function_id_t smc_id, bool is_hvc) { bool handled; - smccc_function_t smc_func = smccc_function_id_get_function(&smc_id); - smccc_interface_id_t smc_iface = - smccc_function_id_get_interface_id(&smc_id); + smccc_function_t smc_func = smccc_function_id_get_function(&smc_id); + smccc_owner_id_t smc_owner = smccc_function_id_get_owner_id(&smc_id); - if (smc_iface != SMCCC_INTERFACE_ID_VENDOR_HYP) { + if (smc_owner != SMCCC_OWNER_ID_VENDOR_HYP) { handled = false; goto out; } diff --git a/hyp/vm/vcpu/aarch64/src/aarch64_init.c b/hyp/vm/vcpu/aarch64/src/aarch64_init.c index c0f0fed..938ccdf 100644 --- a/hyp/vm/vcpu/aarch64/src/aarch64_init.c +++ b/hyp/vm/vcpu/aarch64/src/aarch64_init.c @@ -208,7 +208,7 @@ arch_vcpu_el2_registers_init(vcpu_el2_registers_t *el2_regs) MDCR_EL2_set_TPM(&el2_regs->mdcr_el2, true); MDCR_EL2_set_TPMCR(&el2_regs->mdcr_el2, true); #endif -#if defined(ARCH_ARM_FEAT_SPEv1p1) +#if defined(ARCH_ARM_FEAT_SPE) // Enable SPE traps by default MDCR_EL2_set_TPMS(&el2_regs->mdcr_el2, true); #endif @@ -366,7 +366,14 @@ vcpu_poweron(thread_t *vcpu, vmaddr_result_t entry_point, assert(vcpu->kind == THREAD_KIND_VCPU); assert(scheduler_is_blocked(vcpu, SCHEDULER_BLOCK_VCPU_OFF)); - err = trigger_vcpu_poweron_event(vcpu); + if (thread_is_dying(vcpu) || thread_has_exited(vcpu)) { + err = ERROR_FAILURE; + } + + if (err == OK) { + err = trigger_vcpu_poweron_event(vcpu); + } + if (err == OK) { vcpu_reset_execution_context(vcpu); if (entry_point.e == OK) { diff --git a/hyp/vm/vcpu/aarch64/src/sysreg_traps.c b/hyp/vm/vcpu/aarch64/src/sysreg_traps.c index e33231a..181e9ff 100644 --- a/hyp/vm/vcpu/aarch64/src/sysreg_traps.c +++ b/hyp/vm/vcpu/aarch64/src/sysreg_traps.c @@ -727,124 +727,523 @@ read_virtual_id_register(ESR_EL2_ISS_MSR_MRS_t iss, uint8_t reg_num) } #endif -// For the guests with no AMU access we should trap the AMU registers by setting -// CPTR_EL2.TAM and clearing ACTLR_EL2.AMEN. However the trapped registers -// should be handled in the AMU module, and not here. - -vcpu_trap_result_t -sysreg_read(ESR_EL2_ISS_MSR_MRS_t iss) +static vcpu_trap_result_t +default_sys_read(const ESR_EL2_ISS_MSR_MRS_t *iss, register_t *reg_val_ptr) { - register_t reg_val = 0ULL; // Default action is RAZ vcpu_trap_result_t ret = VCPU_TRAP_RESULT_EMULATED; - thread_t *thread = thread_get_self(); + register_t reg_val = 0ULL; + + uint8_t opc0, opc1, crn, crm; + + opc0 = ESR_EL2_ISS_MSR_MRS_get_Op0(iss); + opc1 = ESR_EL2_ISS_MSR_MRS_get_Op1(iss); + crn = ESR_EL2_ISS_MSR_MRS_get_CRn(iss); + crm = ESR_EL2_ISS_MSR_MRS_get_CRm(iss); + + if ((opc0 == 3U) && (opc1 == 0U) && (crn == 0U) && (crm >= 1U) && + (crm <= 7U)) { + // It is IMPLEMENTATION DEFINED whether HCR_EL2.TID3 + // traps MRS accesses to the registers in this range + // (that have not been handled above). If we ever get + // here print a debug message so we can investigate. 
+ TRACE_AND_LOG(DEBUG, DEBUG, + "Emulated RAZ for ID register: ISS {:#x}", + ESR_EL2_ISS_MSR_MRS_raw(*iss)); + reg_val = 0U; + } else { + ret = VCPU_TRAP_RESULT_UNHANDLED; + } - // Assert this is a read - assert(ESR_EL2_ISS_MSR_MRS_get_Direction(&iss)); + *reg_val_ptr = reg_val; + return ret; +} - uint8_t reg_num = ESR_EL2_ISS_MSR_MRS_get_Rt(&iss); +static register_t +sys_aa64mmfr3_read(void) +{ + register_t reg_val = 0ULL; - // Remove the fields that are not used in the comparison - ESR_EL2_ISS_MSR_MRS_t temp_iss = iss; - ESR_EL2_ISS_MSR_MRS_set_Rt(&temp_iss, 0U); - ESR_EL2_ISS_MSR_MRS_set_Direction(&temp_iss, false); + ID_AA64MMFR3_EL1_t mmfr3 = ID_AA64MMFR3_EL1_default(); + ID_AA64MMFR3_EL1_t hw_mmfr3 = register_ID_AA64MMFR3_EL1_read(); + ID_AA64MMFR3_EL1_copy_Spec_FPACC(&mmfr3, &hw_mmfr3); + reg_val = ID_AA64MMFR3_EL1_raw(mmfr3); -#if SCHEDULER_CAN_MIGRATE - // If not pinned, use virtual ID register values. - if (!vcpu_option_flags_get_pinned(&thread->vcpu_options) && - read_virtual_id_register(temp_iss, reg_num)) { - goto out; + return reg_val; +} + +static register_t +sys_aa64mmfr2_read(void) +{ + register_t reg_val = 0ULL; + + ID_AA64MMFR2_EL1_t mmfr2 = register_ID_AA64MMFR2_EL1_read(); + + mmfr2 = ID_AA64MMFR2_EL1_clean(mmfr2); + + reg_val = ID_AA64MMFR2_EL1_raw(mmfr2); + + return reg_val; +} + +static register_t +sys_aa64mmfr1_read(void) +{ + register_t reg_val = 0ULL; + + ID_AA64MMFR1_EL1_t mmfr1 = register_ID_AA64MMFR1_EL1_read(); + + mmfr1 = ID_AA64MMFR1_EL1_clean(mmfr1); + +#if defined(ARCH_ARM_FEAT_PAN3) + assert(ID_AA64MMFR1_EL1_get_PAN(&mmfr1) >= 3U); + ID_AA64MMFR1_EL1_set_PAN(&mmfr1, 3U); +#elif defined(ARCH_ARM_FEAT_PAN2) // now known as FEAT_PAN2 + assert(ID_AA64MMFR1_EL1_get_PAN(&mmfr1) >= 2U); + ID_AA64MMFR1_EL1_set_PAN(&mmfr1, 2U); +#elif defined(ARCH_ARM_FEAT_PAN) + assert(ID_AA64MMFR1_EL1_get_PAN(&mmfr1) >= 1U); + ID_AA64MMFR1_EL1_set_PAN(&mmfr1, 1U); +#else + ID_AA64MMFR1_EL1_set_PAN(&mmfr1, 0U); +#endif + reg_val = ID_AA64MMFR1_EL1_raw(mmfr1); + + return reg_val; +} + +static register_t +sys_aa64mmfr0_read(void) +{ + register_t reg_val = 0ULL; + + ID_AA64MMFR0_EL1_t mmfr0 = register_ID_AA64MMFR0_EL1_read(); + + mmfr0 = ID_AA64MMFR0_EL1_clean(mmfr0); + + reg_val = ID_AA64MMFR0_EL1_raw(mmfr0); + + return reg_val; +} + +static register_t +sys_aa64isar2_read(void) +{ + register_t reg_val = 0ULL; + + ID_AA64ISAR2_EL1_t isar2 = register_ID_AA64ISAR2_EL1_read(); + + isar2 = ID_AA64ISAR2_EL1_clean(isar2); + +#if !defined(ARCH_ARM_FEAT_PAuth) + // When PAUTH using QARMA3 is disabled, hide it from the VM + ID_AA64ISAR2_EL1_set_APA3(&isar2, 0U); + ID_AA64ISAR2_EL1_set_GPA3(&isar2, 0U); + ID_AA64ISAR2_EL1_set_PAC_frac(&isar2, 0U); +#endif +#if defined(ARCH_ARM_FEAT_WFxT) + // Remove once FEAT_WFxT is implemented + // FIXME: + ID_AA64ISAR2_EL1_set_WFxT(&isar2, 0U); +#endif + reg_val = ID_AA64ISAR2_EL1_raw(isar2); + + return reg_val; +} + +static register_t +sys_aa64isar1_read(void) +{ + register_t reg_val = 0ULL; + + ID_AA64ISAR1_EL1_t isar1 = register_ID_AA64ISAR1_EL1_read(); + + isar1 = ID_AA64ISAR1_EL1_clean(isar1); +#if !defined(ARCH_ARM_FEAT_BF16) + ID_AA64ISAR1_EL1_set_BF16(&isar1, 0U); +#endif +#if !defined(ARCH_ARM_FEAT_PAuth) + // When no PAUTH is enabled, hide it from the VM + ID_AA64ISAR1_EL1_set_APA(&isar1, 0U); + ID_AA64ISAR1_EL1_set_API(&isar1, 0U); + ID_AA64ISAR1_EL1_set_GPA(&isar1, 0U); + ID_AA64ISAR1_EL1_set_GPI(&isar1, 0U); +#endif + reg_val = ID_AA64ISAR1_EL1_raw(isar1); + + return reg_val; +} + +static register_t +sys_aa64isar0_read(void) +{ + register_t 
reg_val = 0ULL; + + ID_AA64ISAR0_EL1_t isar0 = register_ID_AA64ISAR0_EL1_read(); + + isar0 = ID_AA64ISAR0_EL1_clean(isar0); + + reg_val = ID_AA64ISAR0_EL1_raw(isar0); + + return reg_val; +} + +static register_t +sys_aa64dfr0_read(const thread_t *thread) +{ + register_t reg_val = 0ULL; + + ID_AA64DFR0_EL1_t dfr0 = ID_AA64DFR0_EL1_default(); + ID_AA64DFR0_EL1_t hw_dfr0 = register_ID_AA64DFR0_EL1_read(); + + // The debug, trace, PMU and SPE modules must correctly support + // the values reported by the hardware. All we do here is to + // zero out fields for missing modules. + +#if defined(MODULE_VM_VDEBUG) + // Note that ARMv8-A does not allow 0 (not implemented) in this + // field. So without this module is not really supported. + ID_AA64DFR0_EL1_copy_DebugVer(&dfr0, &hw_dfr0); + + ID_AA64DFR0_EL1_copy_BRPs(&dfr0, &hw_dfr0); + ID_AA64DFR0_EL1_copy_WRPs(&dfr0, &hw_dfr0); + ID_AA64DFR0_EL1_copy_CTX_CMPs(&dfr0, &hw_dfr0); + ID_AA64DFR0_EL1_copy_DoubleLock(&dfr0, &hw_dfr0); +#endif +#if defined(MODULE_VM_ARM_VM_PMU) + ID_AA64DFR0_EL1_copy_PMUVer(&dfr0, &hw_dfr0); +#endif +#if defined(INTERFACE_VET) + // Set IDs for VMs allowed to trace + if (vcpu_option_flags_get_trace_allowed(&thread->vcpu_options)) { +#if defined(MODULE_VM_VETE) + ID_AA64DFR0_EL1_copy_TraceVer(&dfr0, &hw_dfr0); + ID_AA64DFR0_EL1_copy_TraceFilt(&dfr0, &hw_dfr0); +#endif +#if defined(MODULE_VM_VTRBE) + ID_AA64DFR0_EL1_copy_TraceBuffer(&dfr0, &hw_dfr0); +#endif } +#else + (void)thread; #endif - switch (ESR_EL2_ISS_MSR_MRS_raw(temp_iss)) { - // The registers trapped with HCR_EL2.TID3 - case ISS_MRS_MSR_ID_PFR0_EL1: { - ID_PFR0_EL1_t pfr0 = register_ID_PFR0_EL1_read(); +#if defined(MODULE_SPE) + ID_AA64DFR0_EL1_copy_PMSVer(&dfr0, &hw_dfr0); +#endif + + reg_val = ID_AA64DFR0_EL1_raw(dfr0); + return reg_val; +} + +static register_t +sys_aa64pfr1_read(const thread_t *thread) +{ + register_t reg_val = 0ULL; + + ID_AA64PFR1_EL1_t pfr1 = register_ID_AA64PFR1_EL1_read(); + + pfr1 = ID_AA64PFR1_EL1_clean(pfr1); +#if defined(ARCH_ARM_FEAT_MTE) + if (!arm_mte_is_allowed()) { + ID_AA64PFR1_EL1_set_MTE(&pfr1, 0); + } +#else + ID_AA64PFR1_EL1_set_MTE(&pfr1, 0); +#endif #if defined(ARCH_ARM_FEAT_RAS) || defined(ARCH_ARM_FEAT_RASv1p1) - // Tell non-RAS handler guests there is no RAS. 
- if (!vcpu_option_flags_get_ras_error_handler( - &thread->vcpu_options)) { - ID_PFR0_EL1_set_RAS(&pfr0, 0); - } + if (!vcpu_option_flags_get_ras_error_handler(&thread->vcpu_options)) { + ID_AA64PFR1_EL1_set_RAS_frac(&pfr1, 0); + } +#else + (void)thread; #endif -#if defined(ARCH_ARM_FEAT_AMUv1) || defined(ARCH_ARM_FEAT_AMUv1p1) - // Tell non-HLOS guests that there is no AMU - if (!vcpu_option_flags_get_hlos_vm(&thread->vcpu_options)) { - ID_PFR0_EL1_set_AMU(&pfr0, 0); - } +#if defined(ARCH_ARM_HAVE_SCXT) && defined(ARCH_ARM_FEAT_CSV2_1p2) + if (!vcpu_option_flags_get_scxt_allowed(&thread->vcpu_options)) { + ID_AA64PFR1_EL1_set_CSV2_frac(&pfr1, 1U); + } +#elif defined(ARCH_ARM_FEAT_CSV2_1p1) + ID_AA64PFR1_EL1_set_CSV2_frac(&pfr1, 1U); +#else + ID_AA64PFR1_EL1_set_CSV2_frac(&pfr1, 0U); + (void)thread; #endif + +#if defined(ARCH_ARM_FEAT_MPAM) + if (!arm_mpam_is_allowed() || + !vcpu_option_flags_get_mpam_allowed(&thread->vcpu_options)) { + // No MPAM + ID_AA64PFR1_EL1_set_MPAM_frac(&pfr1, 0); + } +#else + // No MPAM + ID_AA64PFR1_EL1_set_MPAM_frac(&pfr1, 0); + (void)thread; +#endif + // No SME / NMI + ID_AA64PFR1_EL1_set_SME(&pfr1, 0); + ID_AA64PFR1_EL1_set_NMI(&pfr1, 0); + + reg_val = ID_AA64PFR1_EL1_raw(pfr1); + + return reg_val; +} + +static register_t +sys_aa64pfr0_read(const thread_t *thread) +{ + register_t reg_val = 0ULL; + + ID_AA64PFR0_EL1_t pfr0 = register_ID_AA64PFR0_EL1_read(); + + pfr0 = ID_AA64PFR0_EL1_clean(pfr0); +#if !ARCH_AARCH64_32BIT_EL0 + // Require EL0 to be 64-bit only, even if core supports 32-bit + ID_AA64PFR0_EL1_set_EL0(&pfr0, 1U); +#endif +#if !ARCH_AARCH64_32BIT_EL1 + // Require EL1 to be 64-bit only, even if core supports 32-bit + ID_AA64PFR0_EL1_set_EL1(&pfr0, 1U); +#endif + ID_AA64PFR0_EL1_set_EL2(&pfr0, 1U); + ID_AA64PFR0_EL1_set_EL3(&pfr0, 1U); #if defined(ARCH_ARM_HAVE_SCXT) - if (!vcpu_runtime_flags_get_scxt_allowed(&thread->vcpu_flags)) { - ID_PFR0_EL1_set_CSV2(&pfr0, 1U); - } + if (!vcpu_runtime_flags_get_scxt_allowed(&thread->vcpu_flags)) { + ID_AA64PFR0_EL1_set_CSV2(&pfr0, 1U); + } #elif defined(ARCH_ARM_FEAT_CSV2) - ID_PFR0_EL1_set_CSV2(&pfr0, 1U); + ID_AA64PFR0_EL1_set_CSV2(&pfr0, 1U); + (void)thread; +#else + (void)thread; #endif - reg_val = ID_PFR0_EL1_raw(pfr0); - break; +#if defined(ARCH_ARM_FEAT_MPAM) + if (!arm_mpam_is_allowed() || + !vcpu_option_flags_get_mpam_allowed(&thread->vcpu_options)) { + // No MPAM + ID_AA64PFR0_EL1_set_MPAM(&pfr0, 0); } - case ISS_MRS_MSR_ID_PFR1_EL1: { - ID_PFR1_EL1_t pfr1 = register_ID_PFR1_EL1_read(); +#else + // No MPAM + ID_AA64PFR0_EL1_set_MPAM(&pfr0, 0); + (void)thread; +#endif - reg_val = ID_PFR1_EL1_raw(pfr1); - break; +#if defined(ARCH_ARM_FEAT_SVE) + // Tell non-SVE allowed guests that there is no SVE + if (!vcpu_option_flags_get_sve_allowed(&thread->vcpu_options)) { + ID_AA64PFR0_EL1_set_SVE(&pfr0, 0); } - case ISS_MRS_MSR_ID_PFR2_EL1: { - ID_PFR2_EL1_t pfr2 = ID_PFR2_EL1_default(); -#if defined(ARCH_ARM_FEAT_CSV3) - ID_PFR2_EL1_set_CSV3(&pfr2, 1U); +#else + // No SVE + ID_AA64PFR0_EL1_set_SVE(&pfr0, 0); + (void)thread; #endif -#if defined(ARCH_ARM_FEAT_SSBS) - ID_PFR2_EL1_set_SSBS(&pfr2, 1U); + +#if defined(ARCH_ARM_FEAT_RAS) || defined(ARCH_ARM_FEAT_RASv1p1) + // Tell non-RAS handler guests there is no RAS + if (!vcpu_option_flags_get_ras_error_handler(&thread->vcpu_options)) { + ID_AA64PFR0_EL1_set_RAS(&pfr0, 0); + } #endif - reg_val = ID_PFR2_EL1_raw(pfr2); - break; +#if defined(ARCH_ARM_FEAT_AMUv1) || defined(ARCH_ARM_FEAT_AMUv1p1) + // Tell non-HLOS guests that there is no AMU + if 
(!vcpu_option_flags_get_hlos_vm(&thread->vcpu_options)) { + ID_AA64PFR0_EL1_set_AMU(&pfr0, 0); } - case ISS_MRS_MSR_ID_DFR0_EL1: { - ID_DFR0_EL1_t dfr0 = register_ID_DFR0_EL1_read(); +#else + (void)thread; +#endif +#if !defined(ARCH_ARM_FEAT_SEL2) + ID_AA64PFR0_EL1_set_SEL2(&pfr0, 0U); +#endif + ID_AA64PFR0_EL1_set_RME(&pfr0, 0U); - // The debug, trace, PMU and SPE modules must correctly support - // the values reported by the hardware. All we do here is to - // zero out fields for features we don't support. + reg_val = ID_AA64PFR0_EL1_raw(pfr0); + + return reg_val; +} + +static register_t +sys_mmfr3_read(void) +{ + register_t reg_val = 0ULL; + sysreg64_read(ID_MMFR3_EL1, reg_val); + ID_MMFR3_EL1_t mmfr1 = ID_MMFR3_EL1_cast(reg_val); +#if defined(ARCH_ARM_FEAT_PAN3) + assert(ID_MMFR3_EL1_get_PAN(&mmfr1) >= 3U); + ID_MMFR3_EL1_set_PAN(&mmfr1, 3U); +#elif defined(ARCH_ARM_FEAT_PAN2) // now known as FEAT_PAN2 + assert(ID_MMFR3_EL1_get_PAN(&mmfr1) >= 2U); + ID_MMFR3_EL1_set_PAN(&mmfr1, 2U); +#elif defined(ARCH_ARM_FEAT_PAN) + assert(ID_MMFR3_EL1_get_PAN(&mmfr1) >= 1U); + ID_MMFR3_EL1_set_PAN(&mmfr1, 1U); +#else + ID_MMFR3_EL1_set_PAN(&mmfr1, 0U); +#endif + reg_val = ID_MMFR3_EL1_raw(mmfr1); + + return reg_val; +} + +static register_t +sys_dfr0_read(const thread_t *thread) +{ + register_t reg_val = 0ULL; + ID_DFR0_EL1_t dfr0 = register_ID_DFR0_EL1_read(); + + // The debug, trace, PMU and SPE modules must correctly support + // the values reported by the hardware. All we do here is to + // zero out fields for features we don't support. #if !defined(MODULE_VM_VDEBUG) - // Note that ARMv8-A does not allow 0 (not implemented) in the - // CopDbg field. So this configuration is not really supported. - ID_DFR0_EL1_set_CopDbg(&dfr0, 0U); - ID_DFR0_EL1_set_CopSDbg(&dfr0, 0U); - ID_DFR0_EL1_set_MMapDbg(&dfr0, 0U); - ID_DFR0_EL1_set_MProfDbg(&dfr0, 0U); + // Note that ARMv8-A does not allow 0 (not implemented) in the + // CopDbg field. So this configuration is not really supported. 
+ ID_DFR0_EL1_set_CopDbg(&dfr0, 0U); + ID_DFR0_EL1_set_CopSDbg(&dfr0, 0U); + ID_DFR0_EL1_set_MMapDbg(&dfr0, 0U); + ID_DFR0_EL1_set_MProfDbg(&dfr0, 0U); #endif #if defined(MODULE_VM_VETE) - // Only the HLOS VM is allowed to trace - if (!vcpu_option_flags_get_trace_allowed( - &thread->vcpu_options)) { - ID_DFR0_EL1_set_CopTrc(&dfr0, 0U); - ID_DFR0_EL1_set_TraceFilt(&dfr0, 0U); - } -#else + // Only the HLOS VM is allowed to trace + if (!vcpu_option_flags_get_trace_allowed(&thread->vcpu_options)) { ID_DFR0_EL1_set_CopTrc(&dfr0, 0U); ID_DFR0_EL1_set_TraceFilt(&dfr0, 0U); + } +#else + ID_DFR0_EL1_set_CopTrc(&dfr0, 0U); + ID_DFR0_EL1_set_TraceFilt(&dfr0, 0U); + (void)thread; #endif #if defined(MODULE_VM_VETM) - // Only the HLOS VM is allowed to trace - if (!vcpu_option_flags_get_trace_allowed( - &thread->vcpu_options)) { - ID_DFR0_EL1_set_MMapTrc(&dfr0, 0U); - } -#else + // Only the HLOS VM is allowed to trace + if (!vcpu_option_flags_get_trace_allowed(&thread->vcpu_options)) { ID_DFR0_EL1_set_MMapTrc(&dfr0, 0U); + } +#else + ID_DFR0_EL1_set_MMapTrc(&dfr0, 0U); + (void)thread; #endif #if !defined(MODULE_PLATFORM_ARM_PMU) - ID_DFR0_EL1_set_PerfMon(&dfr0, 0U); + ID_DFR0_EL1_set_PerfMon(&dfr0, 0U); #endif - reg_val = ID_DFR0_EL1_raw(dfr0); - break; + reg_val = ID_DFR0_EL1_raw(dfr0); + + return reg_val; +} + +static register_t +sys_pfr2_read(void) +{ + register_t reg_val = 0ULL; + ID_PFR2_EL1_t pfr2 = ID_PFR2_EL1_default(); +#if defined(ARCH_ARM_FEAT_CSV3) + ID_PFR2_EL1_set_CSV3(&pfr2, 1U); +#endif +#if defined(ARCH_ARM_FEAT_SSBS) + ID_PFR2_EL1_set_SSBS(&pfr2, 1U); +#endif + reg_val = ID_PFR2_EL1_raw(pfr2); + + return reg_val; +} + +static register_t +sys_pfr1_read(void) +{ + register_t reg_val = 0ULL; + ID_PFR1_EL1_t pfr1 = register_ID_PFR1_EL1_read(); + + reg_val = ID_PFR1_EL1_raw(pfr1); + return reg_val; +} + +static register_t +sys_pfr0_read(const thread_t *thread) +{ + register_t reg_val = 0ULL; + + ID_PFR0_EL1_t pfr0 = register_ID_PFR0_EL1_read(); + +#if defined(ARCH_ARM_FEAT_RAS) || defined(ARCH_ARM_FEAT_RASv1p1) + // Tell non-RAS handler guests there is no RAS. + if (!vcpu_option_flags_get_ras_error_handler(&thread->vcpu_options)) { + ID_PFR0_EL1_set_RAS(&pfr0, 0); + } +#else + (void)thread; +#endif +#if defined(ARCH_ARM_FEAT_AMUv1) || defined(ARCH_ARM_FEAT_AMUv1p1) + // Tell non-HLOS guests that there is no AMU + if (!vcpu_option_flags_get_hlos_vm(&thread->vcpu_options)) { + ID_PFR0_EL1_set_AMU(&pfr0, 0); + } +#else + (void)thread; +#endif +#if defined(ARCH_ARM_HAVE_SCXT) + if (!vcpu_runtime_flags_get_scxt_allowed(&thread->vcpu_flags)) { + ID_PFR0_EL1_set_CSV2(&pfr0, 1U); } +#elif defined(ARCH_ARM_FEAT_CSV2) + ID_PFR0_EL1_set_CSV2(&pfr0, 1U); + (void)thread; +#else + (void)thread; +#endif + + reg_val = ID_PFR0_EL1_raw(pfr0); + + return reg_val; +} + +// For the guests with no AMU access we should trap the AMU registers by setting +// CPTR_EL2.TAM and clearing ACTLR_EL2.AMEN. However the trapped registers +// should be handled in the AMU module, and not here. 
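The sysreg_read() refactor in this file splits each trapped ID register into its own sys_*_read() helper; each helper reads the hardware register and then zeroes or caps fields for features the VM is not allowed to see (SVE, AMU, MPAM, the reported PAN level, and so on) before the value is returned to the guest. The hypervisor does this through generated ID_*_EL1 get/set/copy accessors; the following standalone sketch, with hypothetical helper names, only illustrates the underlying 4-bit field masking:

#include <stdint.h>

// ID register feature fields are 4 bits wide. Hypothetical helpers showing
// the masking performed by the generated accessors.
static inline uint64_t
id_field_get(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & 0xfU;
}

static inline uint64_t
id_field_clear(uint64_t reg, unsigned int shift)
{
	return reg & ~(0xfULL << shift);
}

static inline uint64_t
id_field_cap(uint64_t reg, unsigned int shift, uint64_t max)
{
	// Report at most "max" (max <= 0xf), similar in spirit to the PAN
	// handling above, which reports the level the hypervisor supports.
	return (id_field_get(reg, shift) > max)
		       ? (id_field_clear(reg, shift) | (max << shift))
		       : reg;
}

// Example: hide SVE from a guest by clearing ID_AA64PFR0_EL1.SVE
// (bits [35:32]) in the value returned from the trap handler.
static inline uint64_t
hide_sve(uint64_t aa64pfr0)
{
	return id_field_clear(aa64pfr0, 32U);
}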
+ +vcpu_trap_result_t +sysreg_read(ESR_EL2_ISS_MSR_MRS_t iss) +{ + register_t reg_val = 0ULL; // Default action is RAZ + vcpu_trap_result_t ret = VCPU_TRAP_RESULT_EMULATED; + thread_t *thread = thread_get_self(); + + // Assert this is a read + assert(ESR_EL2_ISS_MSR_MRS_get_Direction(&iss)); + + uint8_t reg_num = ESR_EL2_ISS_MSR_MRS_get_Rt(&iss); + + // Remove the fields that are not used in the comparison + ESR_EL2_ISS_MSR_MRS_t temp_iss = iss; + ESR_EL2_ISS_MSR_MRS_set_Rt(&temp_iss, 0U); + ESR_EL2_ISS_MSR_MRS_set_Direction(&temp_iss, false); + +#if SCHEDULER_CAN_MIGRATE + // If not pinned, use virtual ID register values. + if (!vcpu_option_flags_get_pinned(&thread->vcpu_options) && + read_virtual_id_register(temp_iss, reg_num)) { + goto out; + } +#endif + + switch (ESR_EL2_ISS_MSR_MRS_raw(temp_iss)) { + // The registers trapped with HCR_EL2.TID3 + case ISS_MRS_MSR_ID_PFR0_EL1: + reg_val = sys_pfr0_read(thread); + break; + case ISS_MRS_MSR_ID_PFR1_EL1: + reg_val = sys_pfr1_read(); + break; + case ISS_MRS_MSR_ID_PFR2_EL1: + reg_val = sys_pfr2_read(); + break; + case ISS_MRS_MSR_ID_DFR0_EL1: + reg_val = sys_dfr0_read(thread); + break; case ISS_MRS_MSR_ID_AFR0_EL1: // RES0 - We don't know any AFR0 bits break; @@ -857,24 +1256,9 @@ sysreg_read(ESR_EL2_ISS_MSR_MRS_t iss) case ISS_MRS_MSR_ID_MMFR2_EL1: sysreg64_read(ID_MMFR2_EL1, reg_val); break; - case ISS_MRS_MSR_ID_MMFR3_EL1: { - sysreg64_read(ID_MMFR3_EL1, reg_val); - ID_MMFR3_EL1_t mmfr1 = ID_MMFR3_EL1_cast(reg_val); -#if defined(ARCH_ARM_FEAT_PAN3) - assert(ID_MMFR3_EL1_get_PAN(&mmfr1) >= 3U); - ID_MMFR3_EL1_set_PAN(&mmfr1, 3U); -#elif defined(ARCH_ARM_FEAT_PAN2) // now known as FEAT_PAN2 - assert(ID_MMFR3_EL1_get_PAN(&mmfr1) >= 2U); - ID_MMFR3_EL1_set_PAN(&mmfr1, 2U); -#elif defined(ARCH_ARM_FEAT_PAN) - assert(ID_MMFR3_EL1_get_PAN(&mmfr1) >= 1U); - ID_MMFR3_EL1_set_PAN(&mmfr1, 1U); -#else - ID_MMFR3_EL1_set_PAN(&mmfr1, 0U); -#endif - reg_val = ID_MMFR3_EL1_raw(mmfr1); + case ISS_MRS_MSR_ID_MMFR3_EL1: + reg_val = sys_mmfr3_read(); break; - } case ISS_MRS_MSR_ID_MMFR4_EL1: sysreg64_read(ID_MMFR4_EL1, reg_val); break; @@ -908,117 +1292,12 @@ sysreg_read(ESR_EL2_ISS_MSR_MRS_t iss) case ISS_MRS_MSR_MVFR2_EL1: sysreg64_read(MVFR2_EL1, reg_val); break; - case ISS_MRS_MSR_ID_AA64PFR0_EL1: { - ID_AA64PFR0_EL1_t pfr0 = register_ID_AA64PFR0_EL1_read(); - - pfr0 = ID_AA64PFR0_EL1_clean(pfr0); -#if !ARCH_AARCH64_32BIT_EL0 - // Require EL0 to be 64-bit only, even if core supports 32-bit - ID_AA64PFR0_EL1_set_EL0(&pfr0, 1U); -#endif -#if !ARCH_AARCH64_32BIT_EL1 - // Require EL1 to be 64-bit only, even if core supports 32-bit - ID_AA64PFR0_EL1_set_EL1(&pfr0, 1U); -#endif - ID_AA64PFR0_EL1_set_EL2(&pfr0, 1U); - ID_AA64PFR0_EL1_set_EL3(&pfr0, 1U); -#if defined(ARCH_ARM_HAVE_SCXT) - if (!vcpu_runtime_flags_get_scxt_allowed(&thread->vcpu_flags)) { - ID_AA64PFR0_EL1_set_CSV2(&pfr0, 1U); - } -#elif defined(ARCH_ARM_FEAT_CSV2) - ID_AA64PFR0_EL1_set_CSV2(&pfr0, 1U); -#endif - -#if defined(ARCH_ARM_FEAT_MPAM) - if (!arm_mpam_is_allowed() || - !vcpu_option_flags_get_mpam_allowed( - &thread->vcpu_options)) { - // No MPAM - ID_AA64PFR0_EL1_set_MPAM(&pfr0, 0); - } -#else - // No MPAM - ID_AA64PFR0_EL1_set_MPAM(&pfr0, 0); -#endif - -#if defined(ARCH_ARM_FEAT_SVE) - // Tell non-SVE allowed guests that there is no SVE - if (!vcpu_option_flags_get_sve_allowed(&thread->vcpu_options)) { - ID_AA64PFR0_EL1_set_SVE(&pfr0, 0); - } -#else - // No SVE - ID_AA64PFR0_EL1_set_SVE(&pfr0, 0); -#endif - -#if defined(ARCH_ARM_FEAT_RAS) || defined(ARCH_ARM_FEAT_RASv1p1) - // Tell 
non-RAS handler guests there is no RAS - if (!vcpu_option_flags_get_ras_error_handler( - &thread->vcpu_options)) { - ID_AA64PFR0_EL1_set_RAS(&pfr0, 0); - } -#endif -#if defined(ARCH_ARM_FEAT_AMUv1) || defined(ARCH_ARM_FEAT_AMUv1p1) - // Tell non-HLOS guests that there is no AMU - if (!vcpu_option_flags_get_hlos_vm(&thread->vcpu_options)) { - ID_AA64PFR0_EL1_set_AMU(&pfr0, 0); - } -#endif -#if !defined(ARCH_ARM_FEAT_SEL2) - ID_AA64PFR0_EL1_set_SEL2(&pfr0, 0U); -#endif - ID_AA64PFR0_EL1_set_RME(&pfr0, 0U); - - reg_val = ID_AA64PFR0_EL1_raw(pfr0); + case ISS_MRS_MSR_ID_AA64PFR0_EL1: + reg_val = sys_aa64pfr0_read(thread); break; - } - case ISS_MRS_MSR_ID_AA64PFR1_EL1: { - ID_AA64PFR1_EL1_t pfr1 = register_ID_AA64PFR1_EL1_read(); - - pfr1 = ID_AA64PFR1_EL1_clean(pfr1); -#if defined(ARCH_ARM_FEAT_MTE) - if (!arm_mte_is_allowed()) { - ID_AA64PFR1_EL1_set_MTE(&pfr1, 0); - } -#else - ID_AA64PFR1_EL1_set_MTE(&pfr1, 0); -#endif -#if defined(ARCH_ARM_FEAT_RAS) || defined(ARCH_ARM_FEAT_RASv1p1) - if (!vcpu_option_flags_get_ras_error_handler( - &thread->vcpu_options)) { - ID_AA64PFR1_EL1_set_RAS_frac(&pfr1, 0); - } -#endif -#if defined(ARCH_ARM_HAVE_SCXT) && defined(ARCH_ARM_FEAT_CSV2_1p2) - if (!vcpu_option_flags_get_scxt_allowed( - &thread->vcpu_options)) { - ID_AA64PFR1_EL1_set_CSV2_frac(&pfr1, 1U); - } -#elif defined(ARCH_ARM_FEAT_CSV2_1p1) - ID_AA64PFR1_EL1_set_CSV2_frac(&pfr1, 1U); -#else - ID_AA64PFR1_EL1_set_CSV2_frac(&pfr1, 0U); -#endif - -#if defined(ARCH_ARM_FEAT_MPAM) - if (!arm_mpam_is_allowed() || - !vcpu_option_flags_get_mpam_allowed( - &thread->vcpu_options)) { - // No MPAM - ID_AA64PFR1_EL1_set_MPAM_frac(&pfr1, 0); - } -#else - // No MPAM - ID_AA64PFR1_EL1_set_MPAM_frac(&pfr1, 0); -#endif - // No SME / NMI - ID_AA64PFR1_EL1_set_SME(&pfr1, 0); - ID_AA64PFR1_EL1_set_NMI(&pfr1, 0); - - reg_val = ID_AA64PFR1_EL1_raw(pfr1); + case ISS_MRS_MSR_ID_AA64PFR1_EL1: + reg_val = sys_aa64pfr1_read(thread); break; - } case ISS_MRS_MSR_ID_AA64ZFR0_EL1: #if defined(ARCH_ARM_FEAT_SVE) // The SVE module will handle this register @@ -1030,48 +1309,9 @@ sysreg_read(ESR_EL2_ISS_MSR_MRS_t iss) case ISS_MRS_MSR_ID_AA64SMFR0_EL1: // No Scalable Matrix Extension support for now break; - case ISS_MRS_MSR_ID_AA64DFR0_EL1: { - ID_AA64DFR0_EL1_t dfr0 = ID_AA64DFR0_EL1_default(); - ID_AA64DFR0_EL1_t hw_dfr0 = register_ID_AA64DFR0_EL1_read(); - - // The debug, trace, PMU and SPE modules must correctly support - // the values reported by the hardware. All we do here is to - // zero out fields for missing modules. - -#if defined(MODULE_VM_VDEBUG) - // Note that ARMv8-A does not allow 0 (not implemented) in this - // field. So without this module is not really supported. 
- ID_AA64DFR0_EL1_copy_DebugVer(&dfr0, &hw_dfr0); - - ID_AA64DFR0_EL1_copy_BRPs(&dfr0, &hw_dfr0); - ID_AA64DFR0_EL1_copy_WRPs(&dfr0, &hw_dfr0); - ID_AA64DFR0_EL1_copy_CTX_CMPs(&dfr0, &hw_dfr0); - ID_AA64DFR0_EL1_copy_DoubleLock(&dfr0, &hw_dfr0); -#endif -#if defined(MODULE_VM_ARM_VM_PMU) - ID_AA64DFR0_EL1_copy_PMUVer(&dfr0, &hw_dfr0); -#endif -#if defined(INTERFACE_VET) - // Set IDs for VMs allowed to trace - if (vcpu_option_flags_get_trace_allowed( - &thread->vcpu_options)) { -#if defined(MODULE_VM_VETE) - ID_AA64DFR0_EL1_copy_TraceVer(&dfr0, &hw_dfr0); - ID_AA64DFR0_EL1_copy_TraceFilt(&dfr0, &hw_dfr0); -#endif -#if defined(MODULE_VM_VTBRE) - ID_AA64DFR0_EL1_copy_TraceBuffer(&dfr0, &hw_dfr0); -#endif - } -#endif - -#if defined(MODULE_SPE) - ID_AA64DFR0_EL1_copy_PMSVer(&dfr0, &hw_dfr0); -#endif - - reg_val = ID_AA64DFR0_EL1_raw(dfr0); + case ISS_MRS_MSR_ID_AA64DFR0_EL1: + reg_val = sys_aa64dfr0_read(thread); break; - } case ISS_MRS_MSR_ID_AA64DFR1_EL1: // RES0 - We don't know any AA64DFR1 bits break; @@ -1081,126 +1321,39 @@ sysreg_read(ESR_EL2_ISS_MSR_MRS_t iss) case ISS_MRS_MSR_ID_AA64AFR1_EL1: // RES0 - We don't know any AA64AFR1 bits break; - case ISS_MRS_MSR_ID_AA64ISAR0_EL1: { - ID_AA64ISAR0_EL1_t isar0 = register_ID_AA64ISAR0_EL1_read(); - - isar0 = ID_AA64ISAR0_EL1_clean(isar0); - - reg_val = ID_AA64ISAR0_EL1_raw(isar0); + case ISS_MRS_MSR_ID_AA64ISAR0_EL1: + reg_val = sys_aa64isar0_read(); break; - } - case ISS_MRS_MSR_ID_AA64ISAR1_EL1: { - ID_AA64ISAR1_EL1_t isar1 = register_ID_AA64ISAR1_EL1_read(); - - isar1 = ID_AA64ISAR1_EL1_clean(isar1); -#if !defined(ARCH_ARM_FEAT_BF16) - ID_AA64ISAR1_EL1_set_BF16(&isar1, 0U); -#endif -#if !defined(ARCH_ARM_FEAT_PAuth) - // When no PAUTH is enabled, hide it from the VM - ID_AA64ISAR1_EL1_set_APA(&isar1, 0U); - ID_AA64ISAR1_EL1_set_API(&isar1, 0U); - ID_AA64ISAR1_EL1_set_GPA(&isar1, 0U); - ID_AA64ISAR1_EL1_set_GPI(&isar1, 0U); -#endif - reg_val = ID_AA64ISAR1_EL1_raw(isar1); + case ISS_MRS_MSR_ID_AA64ISAR1_EL1: + reg_val = sys_aa64isar1_read(); break; - } - case ISS_MRS_MSR_ID_AA64ISAR2_EL1: { - ID_AA64ISAR2_EL1_t isar2 = register_ID_AA64ISAR2_EL1_read(); - - isar2 = ID_AA64ISAR2_EL1_clean(isar2); - -#if !defined(ARCH_ARM_FEAT_PAuth) - // When PAUTH using QARMA3 is disabled, hide it from the VM - ID_AA64ISAR2_EL1_set_APA3(&isar2, 0U); - ID_AA64ISAR2_EL1_set_GPA3(&isar2, 0U); - ID_AA64ISAR2_EL1_set_PAC_frac(&isar2, 0U); -#endif -#if defined(ARCH_ARM_FEAT_WFxT) - // Remove once FEAT_WFxT is implemented - // FIXME: - ID_AA64ISAR2_EL1_set_WFxT(&isar2, 0U); -#endif - reg_val = ID_AA64ISAR2_EL1_raw(isar2); + case ISS_MRS_MSR_ID_AA64ISAR2_EL1: + reg_val = sys_aa64isar2_read(); break; - } - case ISS_MRS_MSR_ID_AA64MMFR0_EL1: { - ID_AA64MMFR0_EL1_t mmfr0 = register_ID_AA64MMFR0_EL1_read(); - - mmfr0 = ID_AA64MMFR0_EL1_clean(mmfr0); - - reg_val = ID_AA64MMFR0_EL1_raw(mmfr0); + case ISS_MRS_MSR_ID_AA64MMFR0_EL1: + reg_val = sys_aa64mmfr0_read(); break; - } - case ISS_MRS_MSR_ID_AA64MMFR1_EL1: { - ID_AA64MMFR1_EL1_t mmfr1 = register_ID_AA64MMFR1_EL1_read(); - - mmfr1 = ID_AA64MMFR1_EL1_clean(mmfr1); - -#if defined(ARCH_ARM_FEAT_PAN3) - assert(ID_AA64MMFR1_EL1_get_PAN(&mmfr1) >= 3U); - ID_AA64MMFR1_EL1_set_PAN(&mmfr1, 3U); -#elif defined(ARCH_ARM_FEAT_PAN2) // now known as FEAT_PAN2 - assert(ID_AA64MMFR1_EL1_get_PAN(&mmfr1) >= 2U); - ID_AA64MMFR1_EL1_set_PAN(&mmfr1, 2U); -#elif defined(ARCH_ARM_FEAT_PAN) - assert(ID_AA64MMFR1_EL1_get_PAN(&mmfr1) >= 1U); - ID_AA64MMFR1_EL1_set_PAN(&mmfr1, 1U); -#else - ID_AA64MMFR1_EL1_set_PAN(&mmfr1, 0U); -#endif - 
reg_val = ID_AA64MMFR1_EL1_raw(mmfr1); + case ISS_MRS_MSR_ID_AA64MMFR1_EL1: + reg_val = sys_aa64mmfr1_read(); break; - } - case ISS_MRS_MSR_ID_AA64MMFR2_EL1: { - ID_AA64MMFR2_EL1_t mmfr2 = register_ID_AA64MMFR2_EL1_read(); - - mmfr2 = ID_AA64MMFR2_EL1_clean(mmfr2); - - reg_val = ID_AA64MMFR2_EL1_raw(mmfr2); + case ISS_MRS_MSR_ID_AA64MMFR2_EL1: + reg_val = sys_aa64mmfr2_read(); break; - } - case ISS_MRS_MSR_ID_AA64MMFR3_EL1: { - ID_AA64MMFR3_EL1_t mmfr3 = ID_AA64MMFR3_EL1_default(); - ID_AA64MMFR3_EL1_t hw_mmfr3 = register_ID_AA64MMFR3_EL1_read(); - ID_AA64MMFR3_EL1_copy_Spec_FPACC(&mmfr3, &hw_mmfr3); - reg_val = ID_AA64MMFR3_EL1_raw(mmfr3); + case ISS_MRS_MSR_ID_AA64MMFR3_EL1: + reg_val = sys_aa64mmfr3_read(); break; - } case ISS_MRS_MSR_ID_AA64MMFR4_EL1: reg_val = 0; break; // The trapped ACTLR_EL1 by default returns 0 for reads. // The particular access should be handled in sysreg_read_cpu. - case ISS_MRS_MSR_ACTLR_EL1: { + case ISS_MRS_MSR_ACTLR_EL1: reg_val = 0U; break; - } - default: { - uint8_t opc0, opc1, crn, crm; - - opc0 = ESR_EL2_ISS_MSR_MRS_get_Op0(&iss); - opc1 = ESR_EL2_ISS_MSR_MRS_get_Op1(&iss); - crn = ESR_EL2_ISS_MSR_MRS_get_CRn(&iss); - crm = ESR_EL2_ISS_MSR_MRS_get_CRm(&iss); - - if ((opc0 == 3U) && (opc1 == 0U) && (crn == 0U) && - (crm >= 1U) && (crm <= 7U)) { - // It is IMPLEMENTATION DEFINED whether HCR_EL2.TID3 - // traps MRS accesses to the registers in this range - // (that have not been handled above). If we ever get - // here print a debug message so we can investigate. - TRACE_AND_LOG(DEBUG, DEBUG, - "Emulated RAZ for ID register: ISS {:#x}", - ESR_EL2_ISS_MSR_MRS_raw(iss)); - reg_val = 0U; - } else { - ret = VCPU_TRAP_RESULT_UNHANDLED; - } + default: + ret = default_sys_read(&iss, ®_val); break; } - } // Update the thread's register if (ret == VCPU_TRAP_RESULT_EMULATED) { diff --git a/hyp/vm/vcpu/aarch64/src/wfi.c b/hyp/vm/vcpu/aarch64/src/wfi.c index d8358c2..cb4df30 100644 --- a/hyp/vm/vcpu/aarch64/src/wfi.c +++ b/hyp/vm/vcpu/aarch64/src/wfi.c @@ -59,37 +59,31 @@ vcpu_handle_vcpu_trap_wfi(ESR_EL2_ISS_WFI_WFE_t iss) #endif #if !defined(PREEMPT_NULL) + bool vcpu_interrupted = + vcpu_runtime_flags_get_vcpu_interrupted(¤t->vcpu_flags); #if !defined(VCPU_IDLE_IN_EL1) || !VCPU_IDLE_IN_EL1 if (vcpu_runtime_flags_get_vcpu_can_idle(¤t->vcpu_flags) && - !vcpu_runtime_flags_get_vcpu_interrupted(¤t->vcpu_flags)) { + !vcpu_interrupted) { if (vcpu_block_start()) { goto out; } bool need_schedule; - bool vcpu_can_idle; - bool vcpu_interrupted; do { need_schedule = idle_yield(); - - vcpu_can_idle = vcpu_runtime_flags_get_vcpu_can_idle( - ¤t->vcpu_flags); + // We may have received a wakeup while idle, so recheck + // the interrupted flag. vcpu_interrupted = vcpu_runtime_flags_get_vcpu_interrupted( ¤t->vcpu_flags); - } while (!need_schedule && vcpu_can_idle && !vcpu_interrupted); + } while (!need_schedule && !vcpu_interrupted); vcpu_block_finish(); - vcpu_can_idle = vcpu_runtime_flags_get_vcpu_can_idle( - ¤t->vcpu_flags); - vcpu_interrupted = vcpu_runtime_flags_get_vcpu_interrupted( - ¤t->vcpu_flags); - // If this thread and another thread are woken concurrently, we - // need to reschedule with no yield hint before we return. If - // we haven't been woken, the need_reschedule is handled by the - // yield below after setting the WFI block flag. - if ((need_schedule || !vcpu_can_idle) && vcpu_interrupted) { + // We only need to reschedule here if the VCPU was interrupted; + // otherwise the reschedule is handled by the yield below after + // setting the WFI block flag. 
+ if (need_schedule && vcpu_interrupted) { scheduler_schedule(); } } @@ -98,7 +92,7 @@ vcpu_handle_vcpu_trap_wfi(ESR_EL2_ISS_WFI_WFE_t iss) // taken. This could have been done either by a preemption before the // preempt_disable() above, or by an IPI during the idle_yield() in the // WFI fastpath (if it is enabled). - if (vcpu_runtime_flags_get_vcpu_interrupted(¤t->vcpu_flags)) { + if (vcpu_interrupted) { goto out; } #endif // !PREEMPT_NULL @@ -146,8 +140,11 @@ vcpu_wakeup(thread_t *vcpu) assert(vcpu->kind == THREAD_KIND_VCPU); #if !defined(PREEMPT_NULL) - // Inhibit sleep in preempted WFI handlers (see above) - vcpu_runtime_flags_set_vcpu_interrupted(&vcpu->vcpu_flags, true); + if (vcpu == thread_get_self()) { + // Inhibit sleep in preempted WFI handlers (see above) + vcpu_runtime_flags_set_vcpu_interrupted(&vcpu->vcpu_flags, + true); + } #endif trigger_vcpu_wakeup_event(vcpu); @@ -182,11 +179,17 @@ vcpu_expects_wakeup(const thread_t *thread) #if defined(MODULE_VM_VCPU_RUN) vcpu_run_state_t -vcpu_arch_handle_vcpu_run_check(const thread_t *thread) +vcpu_arch_handle_vcpu_run_check(const thread_t *thread, + register_t *state_data_0, + register_t *state_data_1) { - return scheduler_is_blocked(thread, SCHEDULER_BLOCK_VCPU_WFI) - ? VCPU_RUN_STATE_EXPECTS_WAKEUP - : VCPU_RUN_STATE_BLOCKED; + vcpu_run_state_t state = VCPU_RUN_STATE_BLOCKED; + if (scheduler_is_blocked(thread, SCHEDULER_BLOCK_VCPU_WFI)) { + state = VCPU_RUN_STATE_EXPECTS_WAKEUP; + *state_data_0 = 0U; + *state_data_1 = (register_t)VCPU_RUN_WAKEUP_FROM_STATE_WFI; + } + return state; } #endif diff --git a/hyp/vm/vcpu/aarch64/vcpu_aarch64.ev b/hyp/vm/vcpu/aarch64/vcpu_aarch64.ev index f75cf7e..7ea31bd 100644 --- a/hyp/vm/vcpu/aarch64/vcpu_aarch64.ev +++ b/hyp/vm/vcpu/aarch64/vcpu_aarch64.ev @@ -97,9 +97,9 @@ subscribe scheduler_quiescent() subscribe thread_context_switch_post() #endif -#if defined(MODULE_VM_VCPU_RUN) +#if defined(INTERFACE_VCPU_RUN) subscribe vcpu_run_check - handler vcpu_arch_handle_vcpu_run_check(vcpu) + handler vcpu_arch_handle_vcpu_run_check(vcpu, state_data_0, state_data_1) #endif // VCPU lifecycle and power management diff --git a/hyp/vm/vcpu/aarch64/vcpu_aarch64.tc b/hyp/vm/vcpu/aarch64/vcpu_aarch64.tc index 4b34bc4..58be96f 100644 --- a/hyp/vm/vcpu/aarch64/vcpu_aarch64.tc +++ b/hyp/vm/vcpu/aarch64/vcpu_aarch64.tc @@ -86,3 +86,10 @@ extend vcpu_runtime_flags bitfield { auto vcpu_can_idle bool = 0; }; #endif + +#if defined(INTERFACE_VCPU_RUN) +extend vcpu_run_wakeup_from_state enumeration { + // VCPU is halted by a trapped WFI instruction. 
+ wfi = 1; +}; +#endif diff --git a/hyp/vm/vcpu_power/src/vcpu_power.c b/hyp/vm/vcpu_power/src/vcpu_power.c index d62c5d8..099bcbe 100644 --- a/hyp/vm/vcpu_power/src/vcpu_power.c +++ b/hyp/vm/vcpu_power/src/vcpu_power.c @@ -20,17 +20,20 @@ error_t vcpu_power_handle_vcpu_poweron(thread_t *vcpu) { - cpu_index_t cpu = scheduler_get_affinity(vcpu); - bool should_vote = cpulocal_index_valid(cpu); + assert((vcpu != NULL) && !vcpu->vcpu_power_should_vote); + vcpu->vcpu_power_should_vote = true; + + cpu_index_t cpu = scheduler_get_affinity(vcpu); + bool can_vote = cpulocal_index_valid(cpu); #if defined(INTERFACE_VCPU_RUN) if (vcpu_run_is_enabled(vcpu)) { - should_vote = false; + can_vote = false; } #endif error_t ret; - if (should_vote) { + if (can_vote) { ret = power_vote_cpu_on(cpu); } else { ret = OK; @@ -42,16 +45,18 @@ vcpu_power_handle_vcpu_poweron(thread_t *vcpu) error_t vcpu_power_handle_vcpu_poweroff(thread_t *vcpu) { - cpu_index_t cpu = scheduler_get_affinity(vcpu); - bool should_vote = cpulocal_index_valid(cpu); + assert((vcpu != NULL) && vcpu->vcpu_power_should_vote); + vcpu->vcpu_power_should_vote = false; + cpu_index_t cpu = scheduler_get_affinity(vcpu); + bool can_vote = cpulocal_index_valid(cpu); #if defined(INTERFACE_VCPU_RUN) if (vcpu_run_is_enabled(vcpu)) { - should_vote = false; + can_vote = false; } #endif - if (should_vote) { + if (can_vote) { power_vote_cpu_off(cpu); } @@ -66,71 +71,33 @@ vcpu_power_handle_vcpu_stopped(void) scheduler_lock_nopreempt(vcpu); - cpu_index_t cpu = scheduler_get_affinity(vcpu); - bool should_vote = cpulocal_index_valid(cpu); + if (vcpu->vcpu_power_should_vote) { + vcpu->vcpu_power_should_vote = false; + cpu_index_t cpu = scheduler_get_affinity(vcpu); + bool can_vote = cpulocal_index_valid(cpu); #if defined(INTERFACE_VCPU_RUN) - if (vcpu_run_is_enabled(vcpu)) { - should_vote = false; - } + if (vcpu_run_is_enabled(vcpu)) { + can_vote = false; + } #endif - if (scheduler_is_blocked(vcpu, SCHEDULER_BLOCK_VCPU_OFF)) { - // If the VCPU is already powered off, it does not hold a vote. - should_vote = false; - } - - if (should_vote) { - power_vote_cpu_off(cpu); + if (can_vote) { + power_vote_cpu_off(cpu); + } } scheduler_unlock_nopreempt(vcpu); } #if defined(INTERFACE_VCPU_RUN) -void -vcpu_power_handle_vcpu_run_disabled(thread_t *vcpu) -{ - cpu_index_t cpu = scheduler_get_affinity(vcpu); - bool should_vote = cpulocal_index_valid(cpu); - - if (scheduler_is_blocked(vcpu, SCHEDULER_BLOCK_VCPU_OFF)) { - should_vote = false; - } - - error_t err; - if (should_vote) { - err = power_vote_cpu_on(cpu); - } else { - err = OK; - } - - if (err != OK) { - // Note: vcpu_run is still enabled when this event is triggered, - // so the affinity change handler won't cast a duplicate vote. - err = scheduler_set_affinity(vcpu, CPU_INDEX_INVALID); - - // If there's already an affinity change in progress for the - // VCPU it is not possible to retry at this point. 
- if (err == ERROR_RETRY) { - // scheduler_lock(vcpu) already held here - scheduler_block(vcpu, SCHEDULER_BLOCK_VCPU_FAULT); - vcpu_halted(); - } - } -} - void vcpu_power_handle_vcpu_run_enabled(thread_t *vcpu) { - cpu_index_t cpu = scheduler_get_affinity(vcpu); - bool should_vote = cpulocal_index_valid(cpu); - - if (scheduler_is_blocked(vcpu, SCHEDULER_BLOCK_VCPU_OFF)) { - should_vote = false; - } + cpu_index_t cpu = scheduler_get_affinity(vcpu); + bool can_vote = cpulocal_index_valid(cpu); - if (should_vote) { + if (can_vote && vcpu->vcpu_power_should_vote) { power_vote_cpu_off(cpu); } } @@ -154,7 +121,7 @@ vcpu_power_handle_scheduler_set_affinity_prepare(thread_t *vcpu, } #endif - if (!scheduler_is_blocked(vcpu, SCHEDULER_BLOCK_VCPU_OFF)) { + if (vcpu->vcpu_power_should_vote) { if (cpulocal_index_valid(next_cpu)) { ret = power_vote_cpu_on(next_cpu); } diff --git a/hyp/vm/vcpu_power/vcpu_power.ev b/hyp/vm/vcpu_power/vcpu_power.ev index aa8e720..026264d 100644 --- a/hyp/vm/vcpu_power/vcpu_power.ev +++ b/hyp/vm/vcpu_power/vcpu_power.ev @@ -19,9 +19,6 @@ subscribe vcpu_stopped() subscribe vcpu_run_enabled require_scheduler_lock(vcpu) -subscribe vcpu_run_disabled - require_scheduler_lock(vcpu) - #endif subscribe scheduler_set_affinity_prepare(thread, prev_cpu, next_cpu) diff --git a/hyp/vm/vcpu_power/vcpu_power.tc b/hyp/vm/vcpu_power/vcpu_power.tc index f4680e8..e15df64 100644 --- a/hyp/vm/vcpu_power/vcpu_power.tc +++ b/hyp/vm/vcpu_power/vcpu_power.tc @@ -1,3 +1,10 @@ // © 2022 Qualcomm Innovation Center, Inc. All rights reserved. // // SPDX-License-Identifier: BSD-3-Clause + +extend thread object module vcpu_power { + // True if this VCPU is currently powered on and not stopped; i.e. it + // should be voting for power on of any CPU it has static affinity to. + // This is protected by the VCPU's scheduler lock. 
+ should_vote bool; +}; diff --git a/hyp/vm/vcpu_run/src/vcpu_run.c b/hyp/vm/vcpu_run/src/vcpu_run.c index 48bbc6b..6364139 100644 --- a/hyp/vm/vcpu_run/src/vcpu_run.c +++ b/hyp/vm/vcpu_run/src/vcpu_run.c @@ -28,8 +28,9 @@ #include "event_handlers.h" -error_t -vcpu_run_handle_object_activate_thread(thread_t *thread) +bool +vcpu_run_handle_vcpu_activate_thread(thread_t *thread, + vcpu_option_flags_t options) { assert(thread != NULL); @@ -40,7 +41,17 @@ vcpu_run_handle_object_activate_thread(thread_t *thread) thread->vcpu_run_last_state = VCPU_RUN_STATE_READY; } - return OK; + if (vcpu_option_flags_get_vcpu_run_scheduled(&options)) { + vcpu_option_flags_set_vcpu_run_scheduled(&thread->vcpu_options, + true); + scheduler_lock(thread); + scheduler_block(thread, SCHEDULER_BLOCK_VCPU_RUN); + scheduler_unlock(thread); + + thread->vcpu_run_enabled = true; + } + + return true; } bool @@ -107,6 +118,7 @@ hypercall_vcpu_run(cap_id_t vcpu_cap_id, register_t resume_data_0, ret.error = ERROR_BUSY; goto unlock; } + assert(vcpu_run_is_enabled(vcpu)); ret.error = trigger_vcpu_run_resume_event(vcpu->vcpu_run_last_state, vcpu, resume_data_0, @@ -197,16 +209,25 @@ hypercall_vcpu_run_check(cap_id_t vcpu_cap_id) error_t vcpu_run_handle_vcpu_bind_virq(thread_t *vcpu, vic_t *vic, virq_t virq) { + error_t err; + scheduler_lock(vcpu); - error_t err = vic_bind_shared(&vcpu->vcpu_run_wakeup_virq, vic, virq, - VIRQ_TRIGGER_VCPU_RUN_WAKEUP); - if (err == OK) { + if (scheduler_is_running(vcpu)) { + err = ERROR_BUSY; + goto out_unlock; + } + + err = vic_bind_shared(&vcpu->vcpu_run_wakeup_virq, vic, virq, + VIRQ_TRIGGER_VCPU_RUN_WAKEUP); + + if ((err == OK) && !vcpu_run_is_enabled(vcpu)) { scheduler_block(vcpu, SCHEDULER_BLOCK_VCPU_RUN); vcpu->vcpu_run_enabled = true; trigger_vcpu_run_enabled_event(vcpu); } +out_unlock: scheduler_unlock(vcpu); return err; @@ -215,16 +236,6 @@ vcpu_run_handle_vcpu_bind_virq(thread_t *vcpu, vic_t *vic, virq_t virq) error_t vcpu_run_handle_vcpu_unbind_virq(thread_t *vcpu) { - scheduler_lock(vcpu); - if (vcpu->vcpu_run_enabled) { - trigger_vcpu_run_disabled_event(vcpu); - vcpu->vcpu_run_enabled = false; - if (scheduler_unblock(vcpu, SCHEDULER_BLOCK_VCPU_RUN)) { - scheduler_trigger(); - } - } - scheduler_unlock(vcpu); - vic_unbind_sync(&vcpu->vcpu_run_wakeup_virq); return OK; diff --git a/hyp/vm/vcpu_run/vcpu_run.ev b/hyp/vm/vcpu_run/vcpu_run.ev index e1d39e2..d778580 100644 --- a/hyp/vm/vcpu_run/vcpu_run.ev +++ b/hyp/vm/vcpu_run/vcpu_run.ev @@ -46,12 +46,12 @@ handled_event vcpu_run_check module vcpu_run -subscribe object_activate_thread(thread) - subscribe object_deactivate_thread(thread) subscribe task_queue_execute[TASK_QUEUE_CLASS_VCPU_RUN_WAKEUP_VIRQ](entry) +subscribe vcpu_activate_thread + subscribe vcpu_wakeup handler vcpu_run_trigger_virq diff --git a/hyp/vm/vcpu_run/vcpu_run.tc b/hyp/vm/vcpu_run/vcpu_run.tc index 1df0c01..cbdfa47 100644 --- a/hyp/vm/vcpu_run/vcpu_run.tc +++ b/hyp/vm/vcpu_run/vcpu_run.tc @@ -13,8 +13,13 @@ extend thread object module vcpu_run { // protected the same way. last_state enumeration vcpu_run_state; - // Protected by the scheduler lock. - enabled bool; + // Deprecated, will be moved to vcpu options when dynamic enablement of + // vcpu_run is removed. + enabled bool; // Protected by the scheduler lock. 
+}; + +extend vcpu_option_flags bitfield { + 9 vcpu_run_scheduled bool = 0; }; extend virq_trigger enumeration { diff --git a/hyp/vm/vete/src/vete.c b/hyp/vm/vete/src/vete.c index 93e261c..2c6de94 100644 --- a/hyp/vm/vete/src/vete.c +++ b/hyp/vm/vete/src/vete.c @@ -144,7 +144,7 @@ vete_handle_vcpu_trap_sysreg(ESR_EL2_ISS_MSR_MRS_t iss) if (((ESR_EL2_ISS_MSR_MRS_get_Op0(&iss) != 2U) || (ESR_EL2_ISS_MSR_MRS_get_Op1(&iss) != 1U)) && (ESR_EL2_ISS_MSR_MRS_raw(iss) != ISS_TRFCR_EL1)) { - // Not a TBRE register access. + // Not a TRBE register access. ret = VCPU_TRAP_RESULT_UNHANDLED; } else if (!vcpu_option_flags_get_trace_allowed( &current->vcpu_options)) { diff --git a/hyp/vm/vgic/include/internal.h b/hyp/vm/vgic/include/internal.h index 251ebd3..c7a25f2 100644 --- a/hyp/vm/vgic/include/internal.h +++ b/hyp/vm/vgic/include/internal.h @@ -178,6 +178,9 @@ vgic_gicr_rd_get_control(vic_t *vic, thread_t *gicr_vcpu); void vgic_gicr_rd_set_statusr(thread_t *gicr_vcpu, GICR_STATUSR_t statusr, bool set); +void +vgic_gicr_rd_set_sleep(vic_t *vic, thread_t *gicr_vcpu, bool sleep); + bool vgic_gicr_rd_check_sleep(thread_t *gicr_vcpu); diff --git a/hyp/vm/vgic/src/deliver.c b/hyp/vm/vgic/src/deliver.c index ee2d648..ece24ec 100644 --- a/hyp/vm/vgic/src/deliver.c +++ b/hyp/vm/vgic/src/deliver.c @@ -209,24 +209,27 @@ vgic_route_and_flag(vic_t *vic, virq_t virq, vgic_delivery_state_t new_dstate, #if VGIC_HAS_1N static void -vgic_spi_set_route_1n(irq_t irq, vgic_delivery_state_t dstate) +vgic_spi_reset_route_1n(virq_source_t *source, vgic_delivery_state_t dstate) { - assert(vgic_irq_is_spi(irq)); + if ((source != NULL) && + (source->trigger == VIRQ_TRIGGER_VGIC_FORWARDED_SPI)) { + // Restore the 1-of-N route + hwirq_t *hwirq = hwirq_from_virq_source(source); - // Restore the 1-of-N route - GICD_IROUTER_t route_1n = GICD_IROUTER_default(); - GICD_IROUTER_set_IRM(&route_1n, true); - (void)gicv3_spi_set_route(irq, route_1n); + GICD_IROUTER_t route_1n = GICD_IROUTER_default(); + GICD_IROUTER_set_IRM(&route_1n, true); + (void)gicv3_spi_set_route(hwirq->irq, route_1n); #if GICV3_HAS_GICD_ICLAR - // Set the HW IRQ's 1-of-N routing classes. Note that these are reset - // in the hardware whenever the IRM bit is cleared. - (void)gicv3_spi_set_classes(irq, - !vgic_delivery_state_get_nclass0(&dstate), - vgic_delivery_state_get_class1(&dstate)); + // Set the HW IRQ's 1-of-N routing classes. Note that these are + // reset in the hardware whenever the IRM bit is cleared. + (void)gicv3_spi_set_classes( + hwirq->irq, !vgic_delivery_state_get_nclass0(&dstate), + vgic_delivery_state_get_class1(&dstate)); #else - (void)dstate; + (void)dstate; #endif + } } #endif @@ -443,14 +446,11 @@ vgic_sync_lr_update_lr(vic_t *vic, thread_t *vcpu, vgic_lr_status_t *status, (register_t)virq_pending); #if VGIC_HAS_1N - if (lr_hw && vgic_delivery_state_get_route_1n(&new_dstate)) { - assert(ICH_LR_EL2_base_get_HW(&status->lr.base)); - vgic_spi_set_route_1n( - ICH_LR_EL2_HW1_get_pINTID(&status->lr.hw), - new_dstate); + if (vgic_delivery_state_get_route_1n(&new_dstate)) { + virq_source_t *source = + vgic_find_source(vic, vcpu, virq); + vgic_spi_reset_route_1n(source, new_dstate); } -#else - (void)lr_hw; #endif status->dstate = NULL; status->lr.base = ICH_LR_EL2_base_default(); @@ -492,7 +492,7 @@ vgic_sync_lr_update_lr(vic_t *vic, thread_t *vcpu, vgic_lr_status_t *status, lr_active ?
ICH_LR_EL2_STATE_PENDING_ACTIVE : ICH_LR_EL2_STATE_PENDING); - if (compiler_unexpected(!lr_pending && !lr_active)) { + if (!lr_pending && !lr_active) { // This is a new delivery; make sure the VCPU is awake. if (vcpu == thread_get_self()) { vcpu_wakeup_self(); @@ -517,6 +517,20 @@ vgic_sync_lr_update_lr(vic_t *vic, thread_t *vcpu, vgic_lr_status_t *status, &new_dstate) || vgic_delivery_state_get_hw_active( &new_dstate)); + } else if (vgic_delivery_state_get_hw_active(&new_dstate)) { + // If the dstate update left hw_active set, we need to + // force HW=0 and trap EOI to deactivate the HW IRQ. + ICH_LR_EL2_base_set_HW(&status->lr.base, false); + ICH_LR_EL2_HW0_set_EOI(&status->lr.sw, true); + } else if (!ICH_LR_EL2_base_get_HW(&status->lr.base)) { + // We also need to trap EOI for SW asserted level + // triggered IRQs. + ICH_LR_EL2_HW0_set_EOI( + &status->lr.sw, + !vgic_delivery_state_get_cfg_is_edge( + &new_dstate)); + } else { + // Existing HW delivery; EOI handled by physical GIC } } else { // The IRQ is remaining listed, is allowed to remain pending, @@ -631,12 +645,6 @@ vgic_sync_lr(vic_t *vic, thread_t *vcpu, vgic_lr_status_t *status, (source->trigger == VIRQ_TRIGGER_VGIC_FORWARDED_SPI)); hwirq_t *hwirq = hwirq_from_virq_source(source); -#if VGIC_HAS_1N - if (vgic_delivery_state_get_route_1n(&new_dstate)) { - vgic_spi_set_route_1n(hwirq->irq, new_dstate); - } -#endif - VGIC_TRACE(HWSTATE_CHANGED, vic, vcpu, "sync_lr {:d}: deactivate HW IRQ {:d} (EOI)", virq, hwirq->irq); @@ -783,13 +791,6 @@ vgic_undeliver(vic_t *vic, thread_t *vcpu, vgic_find_source(vic, vcpu, virq); hwirq_t *hwirq = hwirq_from_virq_source(source); - -#if VGIC_HAS_1N - if (vgic_delivery_state_get_route_1n(&new_dstate)) { - vgic_spi_set_route_1n(hwirq->irq, new_dstate); - } -#endif - VGIC_TRACE(HWSTATE_CHANGED, vic, vcpu, "undeliver {:d}: deactivate HW IRQ {:d}", virq, hwirq->irq); @@ -878,7 +879,7 @@ vgic_redeliver_lr_update_state(const vic_t *vic, const thread_t *vcpu, // bother to recheck level triggering yet; that will be // done when this interrupt ends. ICH_LR_EL2_base_set_HW(&new_lr.base, is_hw); - if (ICH_LR_EL2_base_get_HW(&new_lr.base)) { + if (is_hw) { vgic_delivery_state_set_hw_active(&new_dstate, false); ICH_LR_EL2_HW1_set_pINTID( &new_lr.hw, @@ -1406,8 +1407,8 @@ vgic_route_1n_preference(vic_t *vic, thread_t *vcpu, #endif } else if (cpulocal_get_index() != scheduler_get_active_affinity(vcpu)) { - ret = vcpu_expects_wakeup(vcpu) ? VGIC_ROUTE_REMOTE_BUSY - : VGIC_ROUTE_REMOTE; + ret = vcpu_expects_wakeup(vcpu) ? VGIC_ROUTE_REMOTE + : VGIC_ROUTE_REMOTE_BUSY; } else if (vcpu_expects_wakeup(vcpu) && scheduler_will_preempt_current(vcpu)) { ret = VGIC_ROUTE_IMMEDIATE; @@ -1418,25 +1419,33 @@ vgic_route_1n_preference(vic_t *vic, thread_t *vcpu, return ret; } +static bool +vgic_retry_unrouted_virq(vic_t *vic, virq_t virq); + // Attempt to wake a VCPU to handle a 1-of-N SPI. // // This should be called after flagging a 1-of-N SPI as unrouted. static void -vgic_wakeup_1n(vic_t *vic, bool class0, bool class1) +vgic_wakeup_1n(vic_t *vic, virq_t virq, bool class0, bool class1) { // Check whether 1-of-N wakeups are permitted by the VM. GICD_CTLR_DS_t gicd_ctlr = atomic_load_relaxed(&vic->gicd_ctlr); if (!GICD_CTLR_DS_get_E1NWF(&gicd_ctlr)) { - VGIC_DEBUG_TRACE(ROUTE, vic, NULL, "wakeup-1n: disabled", - /* unused */ 0U); + VGIC_DEBUG_TRACE(ROUTE, vic, NULL, "wakeup-1n {:d}: disabled", + virq); goto out; } + // Ensure that the sleep state checks are ordered after the IRQs are + // flagged as unrouted. 
There is a matching fence between entering sleep + // state and checking for unrouted VIRQs in vgic_gicr_rd_set_sleep(). + atomic_thread_fence(memory_order_seq_cst); + // Find a VCPU that has its GICR in sleep state. // - // Per section 10.1 of the spec, we are allowed to wake any arbitrary - // VCPU and assume that it will eventually handle the interrupt; we - // don't need to monitor whether that has happened. + // Per section 11.1 of the GICv3 spec, we are allowed to wake any + // arbitrary VCPU and assume that it will eventually handle the + // interrupt. We don't need to monitor whether that has happened. // // We always start this search from the VCPU corresponding to the // current physical CPU, to reduce the chances of waking a second @@ -1453,25 +1462,59 @@ vgic_wakeup_1n(vic_t *vic, bool class0, bool class1) // VCPU's class continue; } - if (atomic_load_relaxed(&candidate->vgic_sleep)) { - VGIC_DEBUG_TRACE(ROUTE, vic, candidate, - "wakeup-1n: {:d}", - candidate->vgic_gicr_index); - scheduler_lock(candidate); - candidate->vgic_wakeup_1n = true; - vcpu_wakeup(candidate); - scheduler_unlock(candidate); + vgic_sleep_state_t sleep_state = + atomic_load_relaxed(&candidate->vgic_sleep); + while (sleep_state == VGIC_SLEEP_STATE_ASLEEP) { + if (atomic_compare_exchange_weak_explicit( + &candidate->vgic_sleep, &sleep_state, + VGIC_SLEEP_STATE_WAKEUP_1N, + memory_order_acquire, + memory_order_acquire)) { + VGIC_DEBUG_TRACE( + ROUTE, vic, candidate, + "wakeup-1n {:d}: waking GICR {:d}", + virq, candidate->vgic_gicr_index); + scheduler_lock(candidate); + vcpu_wakeup(candidate); + scheduler_unlock(candidate); + goto out; + } + } + if (sleep_state == VGIC_SLEEP_STATE_WAKEUP_1N) { + VGIC_TRACE(ROUTE, vic, NULL, + "wakeup-1n {:d}: GICR {:d} already waking", + virq, candidate->vgic_gicr_index); goto out; } } - VGIC_TRACE(ROUTE, vic, NULL, "wakeup-1n: failed", /* unused */ 0U); + // If the VIRQ's classes have no sleeping VCPUs but also no VCPUs that + // are currently valid targets, we must consider two possibilities: + // at least one VCPU is concurrently in its resume path, or all VCPUs + // are concurrently in their suspend paths or hotplugged. + // + // The first case, which is much more likely, has a race in which the + // following sequence might occur: + // + // 1. Core A tries to route VIRQ, fails due to disabled group + // 2. Core B enables group + // 3. Core B checks for unrouted IRQs, finds none + // 4. Core A marks VIRQ as unrouted, then calls this function + // + // To avoid leaving the VIRQ unrouted in this case, we retry routing. + if (!vgic_retry_unrouted_virq(vic, virq)) { + VGIC_TRACE(ROUTE, vic, NULL, "wakeup-1n {:d}: already woken", + virq); + goto out; + } + + // If the retry didn't work, then either there is a VCPU in its wakeup + // path that has not enabled its IRQ groups yet, or else all VCPUs are + // in their suspend paths and have not enabled sleep yet. We retry all + // unrouted IRQs when enabling either IRQ groups or sleep, so there's + // nothing more to do here. + VGIC_TRACE(ROUTE, vic, NULL, "wakeup-1n {:d}: failed", virq); - // If there are no sleeping VCPUs, there is a possibility that the - // unrouted VIRQ we just flagged has lost a race with all of the - // currently sleeping VCPUs to be flagged as unrouted before the VCPUs - // woke up. To recover from this, we must re-check the unrouted VIRQs. 
- vgic_retry_unrouted(vic); out: (void)0; } @@ -1637,7 +1680,7 @@ vgic_route_and_flag(vic_t *vic, virq_t virq, vgic_delivery_state_t new_dstate, if (!vgic_try_route_and_flag(vic, virq, new_dstate, use_local_vcpu)) { vgic_flag_unrouted(vic, virq); #if VGIC_HAS_1N - vgic_wakeup_1n(vic, + vgic_wakeup_1n(vic, virq, vgic_get_delivery_state_is_class0(&new_dstate), vgic_get_delivery_state_is_class1(&new_dstate)); #endif @@ -1801,9 +1844,8 @@ vgic_reclaim_lr(vic_t *vic, thread_t *vcpu, index_t lr, bool reroute) vic, vcpu, virq, &old_dstate, lr_active, lr_hw, status, source); #if VGIC_HAS_1N - if (lr_hw && vgic_delivery_state_get_route_1n(&new_dstate)) { - vgic_spi_set_route_1n(ICH_LR_EL2_HW1_get_pINTID(&status->lr.hw), - new_dstate); + if (vgic_delivery_state_get_route_1n(&new_dstate)) { + vgic_spi_reset_route_1n(source, new_dstate); } #endif @@ -1848,6 +1890,15 @@ vgic_list_irq(vgic_delivery_state_t new_dstate, index_t lr, bool is_hw, assert(status->dstate == NULL); } +#if VGIC_HAS_1N + if (vgic_delivery_state_get_route_1n(&new_dstate) && (source != NULL) && + (source->trigger == VIRQ_TRIGGER_VGIC_FORWARDED_SPI)) { + // Set the HW IRQ's route to the VCPU's current physical core + hwirq_t *hwirq = hwirq_from_virq_source(source); + (void)gicv3_spi_set_route(hwirq->irq, vcpu->vgic_irouter); + } +#endif + status->dstate = dstate; ICH_LR_EL2_base_set_HW(&status->lr.base, is_hw); if (is_hw) { @@ -2063,40 +2114,39 @@ vgic_deliver_update_state(virq_t virq, vgic_delivery_state_t prev_dstate, } static void -vgic_deliver_update_spi_route(vgic_delivery_state_t assert_dstate, - vgic_delivery_state_t old_dstate, +vgic_deliver_update_spi_route(vgic_delivery_state_t old_dstate, const vic_t *vic, const thread_t *vcpu, cpu_index_t remote_cpu, virq_source_t *source) { - if (vgic_delivery_state_get_hw_active(&assert_dstate)) { +#if !VGIC_HAS_1N + (void)old_dstate; +#endif + + if ((source == NULL) || + (source->trigger != VIRQ_TRIGGER_VGIC_FORWARDED_SPI)) { + // Not a HW IRQ; don't try to update the route. + } #if VGIC_HAS_1N - if (vgic_delivery_state_get_route_1n(&old_dstate) && - (vcpu != NULL)) { - // Make the IRQ stick to the physical CPU that is - // handling it, until it is delisted. This is because a - // delivery on another CPU while it is still listed - // will require a sync IPI to delist it first. - hwirq_t *hwirq = hwirq_from_virq_source(source); - (void)gicv3_spi_set_route(hwirq->irq, - vcpu->vgic_irouter); - } else -#else - (void)old_dstate; + else if (vgic_delivery_state_get_route_1n(&old_dstate)) { + // IRQ doesn't have a fixed route, so there is no need to update + // it here. Note that we may update it later when it is listed. + } #endif - if (cpulocal_index_valid(remote_cpu)) { - assert(vcpu != NULL); - // IRQ was HW-delivered to the wrong CPU, probably - // because the VCPU was migrated. Update the route. - hwirq_t *hwirq = hwirq_from_virq_source(source); - (void)gicv3_spi_set_route(hwirq->irq, - vcpu->vgic_irouter); + else if (cpulocal_index_valid(remote_cpu)) { + assert(vcpu != NULL); + // HW IRQ was delivered on the wrong CPU, probably because the + // VCPU was migrated. Update the route. Note that we don't need + // to disable / enable the IRQ or execute any waits or barriers + // here because we are tolerant of further misrouting. 
+ hwirq_t *hwirq = hwirq_from_virq_source(source); + (void)gicv3_spi_set_route(hwirq->irq, vcpu->vgic_irouter); - VGIC_TRACE(HWSTATE_CHANGED, vic, vcpu, - "lazy reroute {:d}: to cpu {:d}", hwirq->irq, - remote_cpu); - } else { - // Directly routed to the correct CPU; do nothing - } + VGIC_TRACE(HWSTATE_CHANGED, vic, vcpu, + "lazy reroute {:d}: to cpu {:d}", hwirq->irq, + remote_cpu); + } else { + // Directly routed to the correct CPU or not routed to any CPU + // yet; nothing to do. } } @@ -2170,8 +2220,8 @@ vgic_deliver(virq_t virq, vic_t *vic, thread_t *vcpu, virq_source_t *source, // If this is a physical SPI assertion, we may need to update the route // of the physical SPI. - vgic_deliver_update_spi_route(assert_dstate, old_dstate, vic, vcpu, - remote_cpu, source); + vgic_deliver_update_spi_route(old_dstate, vic, vcpu, remote_cpu, + source); // Update the dstate and deliver the interrupt vgic_deliver_info_t vgic_deliver_info = vgic_deliver_update_state( @@ -2196,7 +2246,7 @@ vgic_deliver(virq_t virq, vic_t *vic, thread_t *vcpu, virq_source_t *source, } else { #if VGIC_HAS_1N vgic_wakeup_1n( - vic, + vic, virq, vgic_get_delivery_state_is_class0(&new_dstate), vgic_get_delivery_state_is_class1(&new_dstate)); #else @@ -2690,39 +2740,14 @@ vgic_list_if_pending(vic_t *vic, thread_t *vcpu, virq_t virq, vgic_delivery_state_raw(old_dstate), vgic_delivery_state_raw(new_dstate)); - vgic_lr_status_t *status = &vcpu->vgic_lrs[lr]; - if (status->dstate != NULL) { - vgic_reclaim_lr(vic, vcpu, lr, false); - assert(status->dstate == NULL); - } + bool to_self = (vcpu == thread_get_self()); + bool is_hw = vgic_delivery_state_get_hw_active(&old_dstate); + virq_source_t *source = vgic_find_source(vic, vcpu, virq); - bool is_hw = vgic_delivery_state_get_hw_active(&old_dstate); - if (is_hw) { - virq_source_t *source = vgic_find_source(vic, vcpu, virq); - assert((source != NULL) && - (source->trigger == VIRQ_TRIGGER_VGIC_FORWARDED_SPI)); - hwirq_t *hwirq = hwirq_from_virq_source(source); - ICH_LR_EL2_HW1_set_pINTID(&status->lr.hw, hwirq->irq); - } else { - ICH_LR_EL2_HW0_set_EOI( - &status->lr.sw, - !vgic_delivery_state_get_cfg_is_edge(&new_dstate) && - vgic_delivery_state_is_level_asserted( - &new_dstate)); - } + vgic_list_irq(new_dstate, lr, is_hw, priority, dstate, virq, vic, vcpu, + source, to_self); - status->dstate = dstate; - ICH_LR_EL2_base_set_HW(&status->lr.base, is_hw); - ICH_LR_EL2_base_set_vINTID(&status->lr.base, virq); - ICH_LR_EL2_base_set_Priority(&status->lr.base, priority); - ICH_LR_EL2_base_set_Group(&status->lr.base, - vgic_delivery_state_get_group1(&new_dstate)); - ICH_LR_EL2_base_set_State(&status->lr.base, ICH_LR_EL2_STATE_PENDING); - if (vcpu == thread_get_self()) { - vgic_write_lr(lr); - } err = OK; - out: return err; } @@ -3064,10 +3089,20 @@ vgic_do_delivery_check(vic_t *vic, thread_t *vcpu) { bool wakeup = false; - if (atomic_load_relaxed(&vcpu->vgic_sleep)) { - // The GICR is asleep. We can't deliver anything, but if any - // VIRQs are flagged we should wake the VCPU immediately. + vgic_sleep_state_t sleep_state = atomic_load_relaxed(&vcpu->vgic_sleep); + if (sleep_state != VGIC_SLEEP_STATE_AWAKE) { + // The GICR is asleep. We can't deliver anything. ICH_HCR_EL2_set_NPIE(&vcpu->vgic_ich_hcr, false); + +#if VGIC_HAS_1N + if (sleep_state == VGIC_SLEEP_STATE_WAKEUP_1N) { + // The GICR has been chosen for 1-of-N wakeup. + wakeup = true; + goto out; + } +#endif + + // If anything is flagged for delivery, wake up immediately. 
wakeup = !bitmap_atomic_empty(vcpu->vgic_search_prios, VGIC_PRIORITIES); goto out; @@ -3139,12 +3174,14 @@ vgic_do_delivery_check(vic_t *vic, thread_t *vcpu) } static bool -vgic_retry_unrouted_virq(vic_t *vic, virq_t virq) REQUIRE_PREEMPT_DISABLED +vgic_retry_unrouted_virq(vic_t *vic, virq_t virq) { assert(vic != NULL); // Only SPIs can be unrouted assert(vgic_irq_is_spi(virq)); + preempt_disable(); + _Atomic vgic_delivery_state_t *dstate = vgic_find_dstate(vic, NULL, virq); assert(dstate != NULL); @@ -3161,6 +3198,8 @@ vgic_retry_unrouted_virq(vic_t *vic, virq_t virq) REQUIRE_PREEMPT_DISABLED } } + preempt_enable(); + return unclaimed; } @@ -3201,6 +3240,53 @@ vgic_retry_unrouted(vic_t *vic) spinlock_release(&vic->search_lock); } +#if VGIC_HAS_1N +static bool +vgic_check_unrouted_virq(vic_t *vic, thread_t *vcpu, virq_t virq) +{ + assert(vic != NULL); + // Only SPIs can be unrouted + assert(vgic_irq_is_spi(virq)); + + _Atomic vgic_delivery_state_t *dstate = + vgic_find_dstate(vic, NULL, virq); + assert(dstate != NULL); + vgic_delivery_state_t current_dstate = atomic_load_relaxed(dstate); + + return vgic_delivery_state_get_enabled(¤t_dstate) && + !vgic_delivery_state_get_listed(¤t_dstate) && + vgic_delivery_state_is_pending(¤t_dstate) && + ((platform_irq_cpu_class((cpu_index_t)vcpu->vgic_gicr_index) == + 0U) + ? vgic_get_delivery_state_is_class0(¤t_dstate) + : vgic_get_delivery_state_is_class1(¤t_dstate)); +} + +static bool +vgic_check_unrouted(vic_t *vic, thread_t *vcpu) +{ + bool wakeup_found = false; + + BITMAP_ATOMIC_FOREACH_SET_BEGIN(range, vic->search_ranges_low, + VGIC_LOW_RANGES) + VGIC_DEBUG_TRACE(ROUTE, vic, NULL, "unrouted: check range {:d}", + range); + + for (index_t i = 0; i < vgic_low_range_size(range); i++) { + virq_t virq = + (virq_t)((range * VGIC_LOW_RANGE_SIZE) + i); + if (vgic_irq_is_spi(virq) && + vgic_check_unrouted_virq(vic, vcpu, virq)) { + wakeup_found = true; + break; + } + } + BITMAP_ATOMIC_FOREACH_SET_END + + return wakeup_found; +} +#endif + // This function is called when permanently tearing down a VCPU. // // It clears out the list registers, disregarding the priority order of active @@ -3580,12 +3666,56 @@ vgic_handle_thread_load_state(void) LOCK_IMPL } } +void +vgic_gicr_rd_set_sleep(vic_t *vic, thread_t *gicr_vcpu, bool sleep) +{ +#if VGIC_HAS_1N + if (sleep) { + // Update the sleep state, but only if we were awake; don't wipe + // out a wakeup if this is a redundant write of the sleep bit. + vgic_sleep_state_t old_sleep_state = VGIC_SLEEP_STATE_AWAKE; + if (atomic_compare_exchange_strong_explicit( + &gicr_vcpu->vgic_sleep, &old_sleep_state, + VGIC_SLEEP_STATE_ASLEEP, memory_order_relaxed, + memory_order_relaxed)) { + // We successfully entered sleep and there was no + // existing wakeup. We now need to check whether any + // IRQs had been marked unrouted prior to us entering + // sleep. We need a seq_cst fence to order the check + // after entering sleep, matching the seq_cst fence in + // vgic_wakeup_1n(). + atomic_thread_fence(memory_order_seq_cst); + if (vgic_check_unrouted(vic, gicr_vcpu)) { + old_sleep_state = VGIC_SLEEP_STATE_ASLEEP; + (void)atomic_compare_exchange_strong_explicit( + &gicr_vcpu->vgic_sleep, + &old_sleep_state, + VGIC_SLEEP_STATE_WAKEUP_1N, + memory_order_relaxed, + memory_order_relaxed); + } + } + } else { + // We're waking up; if there's a wakeup it can be + // discarded. + atomic_store_relaxed(&gicr_vcpu->vgic_sleep, + VGIC_SLEEP_STATE_AWAKE); + } +#else + (void)vic; + atomic_store_relaxed(&gicr_vcpu->vgic_sleep, + sleep ? 
VGIC_SLEEP_STATE_ASLEEP + : VGIC_SLEEP_STATE_AWAKE); +#endif +} + bool vgic_gicr_rd_check_sleep(thread_t *gicr_vcpu) { bool is_asleep; - if (atomic_load_relaxed(&gicr_vcpu->vgic_sleep)) { + if (atomic_load_relaxed(&gicr_vcpu->vgic_sleep) != + VGIC_SLEEP_STATE_AWAKE) { if (!vgic_fgt_allowed()) { cpu_index_t lr_owner = vgic_lr_owner_lock(gicr_vcpu); // We might not have received the maintenance interrupt @@ -3631,7 +3761,10 @@ vgic_handle_vcpu_pending_wakeup(void) !bitmap_atomic_empty(vcpu->vgic_search_prios, VGIC_PRIORITIES); #if VGIC_HAS_1N - pending = pending || vcpu->vgic_wakeup_1n; + if (!pending && (atomic_load_relaxed(&vcpu->vgic_sleep) == + VGIC_SLEEP_STATE_WAKEUP_1N)) { + pending = true; + } #endif if (!pending && @@ -3874,15 +4007,6 @@ vgic_icc_irq_deactivate(vic_t *vic, irq_t irq_num) // manually deactivated). bool hw_active = ICH_LR_EL2_base_get_HW(&status->lr.base); -#if VGIC_HAS_1N - if (hw_active && - vgic_delivery_state_get_route_1n(&old_dstate)) { - vgic_spi_set_route_1n( - ICH_LR_EL2_HW1_get_pINTID(&status->lr.hw), - old_dstate); - } -#endif - // Kick the interrupt out of the LR. We could potentially keep // it listed if it is still pending, but that complicates the // code too much and we don't care about EOImode=1 VMs anyway. @@ -3890,6 +4014,14 @@ vgic_icc_irq_deactivate(vic_t *vic, irq_t irq_num) status->dstate = NULL; vgic_write_lr(lr); +#if VGIC_HAS_1N + if (vgic_delivery_state_get_route_1n(&old_dstate)) { + virq_source_t *source = + vgic_find_source(vic, vcpu, irq_num); + vgic_spi_reset_route_1n(source, old_dstate); + } +#endif + vgic_deactivate(vic, thread_get_self(), irq_num, dstate, old_dstate, set_edge, hw_active); diff --git a/hyp/vm/vgic/src/distrib.c b/hyp/vm/vgic/src/distrib.c index f4e4847..3bf0f12 100644 --- a/hyp/vm/vgic/src/distrib.c +++ b/hyp/vm/vgic/src/distrib.c @@ -294,6 +294,9 @@ vgic_handle_addrspace_attach_vdevice(addrspace_t *addrspace, return err; } +static bool +vic_do_unbind(virq_source_t *source, bool during_deactivate); + void vgic_handle_object_deactivate_vic(vic_t *vic) { @@ -311,7 +314,16 @@ vgic_handle_object_deactivate_vic(vic_t *vic) continue; } - vic_unbind(virq_source); + if (vic_do_unbind(virq_source, true)) { + // During deactivate we know that the VCPUs have all + // exited so there can't be any IRQs left listed (as + // asserted above). It therefore is not necessary to + // wait until the end of an RCU grace period to clear + // vgic_is_bound, as we normally would; we can go ahead + // and clear it here. + atomic_store_release(&virq_source->vgic_is_bound, + false); + } } rcu_read_finish(); @@ -403,7 +415,7 @@ vgic_handle_object_create_thread(thread_create_t thread_create) // guests with GICR_WAKER awareness (like Linux), but allows // interrupt delivery to work correctly for guests that assume // they have a non-secure view of the GIC (like UEFI). 
- atomic_init(&vcpu->vgic_sleep, false); + atomic_init(&vcpu->vgic_sleep, VGIC_SLEEP_STATE_AWAKE); vcpu->vgic_ich_hcr = ICH_HCR_EL2_default(); @@ -821,6 +833,12 @@ vgic_handle_rootvm_create_hwirq(partition_t *root_partition, } #endif } + + // Check if the insertion failed because of buffer overflow, on error + // the config QCBOR_ENV_CONFIG_SIZE needs to be increased + if (QCBOREncode_GetErrorState(qcbor_enc_ctxt) != QCBOR_SUCCESS) { + panic("QCBOR data buffer too small"); + } QCBOREncode_CloseArray(qcbor_enc_ctxt); } @@ -2460,7 +2478,7 @@ vic_bind_private(virq_source_t *source, vic_t *vic, thread_t *vcpu, virq_t virq, if (atomic_fetch_or_explicit(&source->vgic_is_bound, true, memory_order_acquire)) { ret = ERROR_VIRQ_BOUND; - goto out_release; + goto out; } assert(atomic_load_relaxed(&source->vic) == NULL); @@ -2489,7 +2507,6 @@ vic_bind_private(virq_source_t *source, vic_t *vic, thread_t *vcpu, virq_t virq, out_locked: spinlock_release(&vic->gicd_lock); -out_release: if (ret != OK) { atomic_store_release(&source->vgic_is_bound, false); } @@ -2547,8 +2564,7 @@ vic_bind_private_index(virq_source_t *source, vic_t *vic, index_t index, error_t vic_bind_private_forward_private(virq_source_t *source, vic_t *vic, - thread_t *vcpu, virq_t virq, irq_t pirq, - cpu_index_t pcpu) + thread_t *vcpu, virq_t virq) { error_t ret; @@ -2563,10 +2579,15 @@ vic_bind_private_forward_private(virq_source_t *source, vic_t *vic, ret = vic_bind_private_vcpu(source, vcpu, virq, VIRQ_TRIGGER_VIC_BASE_FORWARD_PRIVATE); - if (ret != OK) { - goto out; - } +out: + return ret; +} +void +vic_sync_private_forward_private(virq_source_t *source, vic_t *vic, + thread_t *vcpu, virq_t virq, irq_t pirq, + cpu_index_t pcpu) +{ // Take the GICD lock to ensure that the vGIC's IRQ config does // not change while we are copying it to the hardware GIC spinlock_acquire(&vic->gicd_lock); @@ -2604,15 +2625,12 @@ vic_bind_private_forward_private(virq_source_t *source, vic_t *vic, } spinlock_release(&vic->gicd_lock); - -out: - return ret; } -static error_t -vic_do_unbind(virq_source_t *source) +static bool +vic_do_unbind(virq_source_t *source, bool during_deactivate) { - error_t err = ERROR_VIRQ_NOT_BOUND; + bool complete = false; rcu_read_start(); @@ -2642,10 +2660,16 @@ vic_do_unbind(virq_source_t *source) vgic_find_dstate(vic, vcpu, source->virq); if (!vgic_undeliver(vic, vcpu, dstate, source->virq, clear_dstate, false)) { - // The VIRQ is still listed somewhere. For HW sources this can - // delay both re-registration of the VIRQ and delivery of the - // HW IRQ (after it is re-registered elsewhere), so start a - // sync to ensure that delisting happens soon. + // The VIRQ is still listed somewhere. + // + // This should never happen during a deactivate, because there + // are no VCPUs left to list it in. + assert(!during_deactivate); + + // For HW sources this can delay both re-registration of the + // VIRQ and delivery of the HW IRQ (after it is re-registered + // elsewhere), so start a sync to ensure that delisting happens + // soon. 
vgic_sync_all(vic, false); } @@ -2665,22 +2689,22 @@ vic_do_unbind(virq_source_t *source) goto out; } - err = OK; + complete = true; out: rcu_read_finish(); - return err; + return complete; } void vic_unbind(virq_source_t *source) { - (void)vic_do_unbind(source); + (void)vic_do_unbind(source, false); } void vic_unbind_sync(virq_source_t *source) { - if (vic_do_unbind(source) == OK) { + if (vic_do_unbind(source, false)) { // Ensure that any remote operations affecting the source object // and the unbound VIRQ have completed. rcu_sync(); diff --git a/hyp/vm/vgic/src/vdevice.c b/hyp/vm/vgic/src/vdevice.c index f4198e0..f7faf9a 100644 --- a/hyp/vm/vgic/src/vdevice.c +++ b/hyp/vm/vgic/src/vdevice.c @@ -37,96 +37,10 @@ #define IIDR_VARIANT 0U #define IIDR_REVISION 0U -static register_t -vgic_read_irqbits(vic_t *vic, thread_t *vcpu, size_t base_offset, size_t offset) +static void +vgic_update_irqbits_flag(vic_t *vic, const thread_t *vcpu, size_t base_offset, + count_t range_base, count_t range_size, uint32_t *bits) { - assert(vic != NULL); - assert(vcpu != NULL); - assert(offset >= base_offset); - assert(offset <= base_offset + (31 * sizeof(uint32_t))); - - register_t bits = 0U; - count_t range_base = - (count_t)((offset - base_offset) / sizeof(uint32_t)) * 32U; - count_t range_size = - util_min(32U, GIC_SPECIAL_INTIDS_BASE - range_base); - - _Atomic vgic_delivery_state_t *dstates = - vgic_find_dstate(vic, vcpu, range_base); - if (dstates == NULL) { - goto out; - } - assert(compiler_sizeof_object(dstates) >= - range_size * sizeof(*dstates)); - - bool listed = false; - - for (count_t i = 0; i < range_size; i++) { - vgic_delivery_state_t this_dstate = - atomic_load_relaxed(&dstates[i]); - bool bit; - - // Note: the GICR base offsets are the same as the GICD offsets, - // so we don't need to duplicate them here. - switch (base_offset) { - case OFS_GICD_IGROUPR(0U): - bit = vgic_delivery_state_get_group1(&this_dstate); - break; - case OFS_GICD_ISENABLER(0U): - case OFS_GICD_ICENABLER(0U): - bit = vgic_delivery_state_get_enabled(&this_dstate); - break; - case OFS_GICD_ISPENDR(0U): - case OFS_GICD_ICPENDR(0U): - bit = vgic_delivery_state_is_pending(&this_dstate); - if (vgic_delivery_state_get_listed(&this_dstate)) { - listed = true; - } - break; - case OFS_GICD_ISACTIVER(0U): - case OFS_GICD_ICACTIVER(0U): - bit = vgic_delivery_state_get_active(&this_dstate); - if (vgic_delivery_state_get_listed(&this_dstate)) { - listed = true; - } - break; - default: - panic("vgic_read_irqbits: Bad base_offset"); - } - - if (bit) { - bits |= util_bit(i); - } - } - -#if GICV3_HAS_VLPI_V4_1 && defined(GICV3_ENABLE_VPE) && GICV3_ENABLE_VPE - if ((range_base == GIC_SGI_BASE) && - ((base_offset == offsetof(gicd_t, ispendr)) || - (base_offset == offsetof(gicd_t, icpendr)))) { - // Query the hardware for the vSGI pending state - uint32_result_t bits_r = gicv3_vpe_vsgi_query(vcpu); - if (bits_r.e == OK) { - bits |= bits_r.r; - } - } -#endif // GICV3_HAS_VLPI_V4_1 && GICV3_ENABLE_VPE - - if (compiler_expected(!listed)) { - // We didn't try to read the pending or active state of a VIRQ - // that is in list register, so the value we've read is - // accurate. - goto out; - } - - // Read back from the current VCPU's physical LRs. - preempt_disable(); - for (count_t lr = 0U; lr < CPU_GICH_LR_COUNT; lr++) { - vgic_read_lr_state(lr); - } - preempt_enable(); - - // Try to update the flags for listed vIRQs, based on the state of - // every VCPU's list registers. 
for (index_t i = 0; i < vic->gicr_count; i++) { rcu_read_start(); thread_t *check_vcpu = atomic_load_consume(&vic->gicr_vcpus[i]); @@ -173,9 +87,9 @@ vgic_read_irqbits(vic_t *vic, thread_t *vcpu, size_t base_offset, size_t offset) if ((state == ICH_LR_EL2_STATE_PENDING) || (state == ICH_LR_EL2_STATE_PENDING_ACTIVE)) { - bits |= bit; + (*bits) |= bit; } else { - bits &= ~bit; + (*bits) &= ~bit; } break; case OFS_GICD_ISACTIVER(0U): @@ -183,9 +97,9 @@ vgic_read_irqbits(vic_t *vic, thread_t *vcpu, size_t base_offset, size_t offset) if ((state == ICH_LR_EL2_STATE_ACTIVE) || (state == ICH_LR_EL2_STATE_PENDING_ACTIVE)) { - bits |= bit; + (*bits) |= bit; } else { - bits &= ~bit; + (*bits) &= ~bit; } break; default: @@ -198,7 +112,112 @@ vgic_read_irqbits(vic_t *vic, thread_t *vcpu, size_t base_offset, size_t offset) next_vcpu: rcu_read_finish(); } +} + +static uint32_t +vgic_read_gicd_irqbits(count_t range_size, + _Atomic vgic_delivery_state_t *dstates, + size_t base_offset, bool *listed) +{ + uint32_t bits = 0U; + for (count_t i = 0; i < range_size; i++) { + vgic_delivery_state_t this_dstate = + atomic_load_relaxed(&dstates[i]); + bool bit; + + // Note: the GICR base offsets are the same as the GICD offsets, + // so we don't need to duplicate them here. + switch (base_offset) { + case OFS_GICD_IGROUPR(0U): + bit = vgic_delivery_state_get_group1(&this_dstate); + break; + case OFS_GICD_ISENABLER(0U): + case OFS_GICD_ICENABLER(0U): + bit = vgic_delivery_state_get_enabled(&this_dstate); + break; + case OFS_GICD_ISPENDR(0U): + case OFS_GICD_ICPENDR(0U): + bit = vgic_delivery_state_is_pending(&this_dstate); + if (vgic_delivery_state_get_listed(&this_dstate)) { + *listed = true; + } + break; + case OFS_GICD_ISACTIVER(0U): + case OFS_GICD_ICACTIVER(0U): + bit = vgic_delivery_state_get_active(&this_dstate); + if (vgic_delivery_state_get_listed(&this_dstate)) { + *listed = true; + } + break; + default: + panic("vgic_read_irqbits: Bad base_offset"); + } + + if (bit) { + bits |= (uint32_t)util_bit(i); + } + } + + return bits; +} + +static uint32_t +vgic_read_irqbits(vic_t *vic, thread_t *vcpu, size_t base_offset, size_t offset) +{ + assert(vic != NULL); + assert(vcpu != NULL); + assert(offset >= base_offset); + assert(offset <= base_offset + (31 * sizeof(uint32_t))); + + uint32_t bits = 0U; + count_t range_base = + (count_t)((offset - base_offset) / sizeof(uint32_t)) * 32U; + count_t range_size = + util_min(32U, GIC_SPECIAL_INTIDS_BASE - range_base); + + _Atomic vgic_delivery_state_t *dstates = + vgic_find_dstate(vic, vcpu, range_base); + if (dstates == NULL) { + goto out; + } + assert(compiler_sizeof_object(dstates) >= + range_size * sizeof(*dstates)); + bool listed = false; + + bits = vgic_read_gicd_irqbits(range_size, dstates, base_offset, + &listed); + +#if GICV3_HAS_VLPI_V4_1 && defined(GICV3_ENABLE_VPE) && GICV3_ENABLE_VPE + if ((range_base == GIC_SGI_BASE) && + ((base_offset == offsetof(gicd_t, ispendr)) || + (base_offset == offsetof(gicd_t, icpendr)))) { + // Query the hardware for the vSGI pending state + uint32_result_t bits_r = gicv3_vpe_vsgi_query(vcpu); + if (bits_r.e == OK) { + bits |= bits_r.r; + } + } +#endif // GICV3_HAS_VLPI_V4_1 && GICV3_ENABLE_VPE + + if (compiler_expected(!listed)) { + // We didn't try to read the pending or active state of a VIRQ + // that is in list register, so the value we've read is + // accurate. + goto out; + } + + // Read back from the current VCPU's physical LRs. 
+ preempt_disable(); + for (count_t lr = 0U; lr < CPU_GICH_LR_COUNT; lr++) { + vgic_read_lr_state(lr); + } + preempt_enable(); + + // Try to update the flags for listed vIRQs, based on the state of + // every VCPU's list registers. + vgic_update_irqbits_flag(vic, vcpu, base_offset, range_base, range_size, + &bits); out: return bits; } @@ -270,6 +289,8 @@ gicd_vdevice_read(vic_t *vic, size_t offset, register_t *val, bool ret = true; thread_t *thread = thread_get_self(); + uint32_t read_val = 0U; + assert(vic != NULL); if ((offset == offsetof(gicd_t, setspi_nsr)) || @@ -282,13 +303,14 @@ gicd_vdevice_read(vic_t *vic, size_t offset, register_t *val, GICD_STATUSR_init(&statusr); GICD_STATUSR_set_RWOD(&statusr, true); vgic_gicd_set_statusr(vic, statusr, true); - *val = 0U; + read_val = 0U; } else if (offset == offsetof(gicd_t, ctlr)) { - *val = GICD_CTLR_DS_raw(atomic_load_relaxed(&vic->gicd_ctlr)); + read_val = + GICD_CTLR_DS_raw(atomic_load_relaxed(&vic->gicd_ctlr)); } else if (offset == offsetof(gicd_t, statusr)) { - *val = GICD_STATUSR_raw(vic->gicd_statusr); + read_val = GICD_STATUSR_raw(vic->gicd_statusr); } else if (offset == offsetof(gicd_t, typer)) { GICD_TYPER_t typer = GICD_TYPER_default(); @@ -310,7 +332,7 @@ gicd_vdevice_read(vic_t *vic, size_t offset, register_t *val, #endif GICD_TYPER_set_A3V(&typer, true); GICD_TYPER_set_No1N(&typer, VGIC_HAS_1N == 0); - *val = GICD_TYPER_raw(typer); + read_val = GICD_TYPER_raw(typer); } else if (offset == offsetof(gicd_t, iidr)) { GICD_IIDR_t iidr = GICD_IIDR_default(); @@ -318,67 +340,67 @@ gicd_vdevice_read(vic_t *vic, size_t offset, register_t *val, GICD_IIDR_set_ProductID(&iidr, IIDR_PRODUCTID); GICD_IIDR_set_Variant(&iidr, IIDR_VARIANT); GICD_IIDR_set_Revision(&iidr, IIDR_REVISION); - *val = GICD_IIDR_raw(iidr); + read_val = GICD_IIDR_raw(iidr); } else if (offset == offsetof(gicd_t, typer2)) { GICD_TYPER2_t typer2 = GICD_TYPER2_default(); #if GICV3_HAS_VLPI_V4_1 GICD_TYPER2_set_nASSGIcap(&typer2, vgic_has_lpis(vic)); #endif - *val = GICD_TYPER2_raw(typer2); + read_val = GICD_TYPER2_raw(typer2); } else if (offset == (size_t)OFS_GICD_PIDR2) { - *val = VGIC_PIDR2; + read_val = VGIC_PIDR2; } else if ((offset >= OFS_GICD_IGROUPR(0U)) && (offset <= OFS_GICD_IGROUPR(31U))) { - *val = vgic_read_irqbits(vic, thread, OFS_GICD_IGROUPR(0), - offset); + read_val = vgic_read_irqbits(vic, thread, OFS_GICD_IGROUPR(0), + offset); } else if ((offset >= OFS_GICD_ISENABLER(0U)) && (offset <= OFS_GICD_ISENABLER(31U))) { - *val = vgic_read_irqbits(vic, thread, OFS_GICD_ISENABLER(0U), - offset); + read_val = vgic_read_irqbits(vic, thread, + OFS_GICD_ISENABLER(0U), offset); } else if ((offset >= OFS_GICD_ICENABLER(0U)) && (offset <= OFS_GICD_ICENABLER(31U))) { - *val = vgic_read_irqbits(vic, thread, OFS_GICD_ICENABLER(0U), - offset); + read_val = vgic_read_irqbits(vic, thread, + OFS_GICD_ICENABLER(0U), offset); } else if ((offset >= OFS_GICD_ISPENDR(0U)) && (offset <= OFS_GICD_ISPENDR(31U))) { - *val = vgic_read_irqbits(vic, thread, OFS_GICD_ISPENDR(0U), - offset); + read_val = vgic_read_irqbits(vic, thread, OFS_GICD_ISPENDR(0U), + offset); } else if ((offset >= OFS_GICD_ICPENDR(0U)) && (offset <= OFS_GICD_ICPENDR(31U))) { - *val = vgic_read_irqbits(vic, thread, OFS_GICD_ICPENDR(0U), - offset); + read_val = vgic_read_irqbits(vic, thread, OFS_GICD_ICPENDR(0U), + offset); } else if ((offset >= OFS_GICD_ISACTIVER(0U)) && (offset <= OFS_GICD_ISACTIVER(31U))) { - *val = vgic_read_irqbits(vic, thread, OFS_GICD_ISACTIVER(0U), - offset); + read_val = 
vgic_read_irqbits(vic, thread, + OFS_GICD_ISACTIVER(0U), offset); } else if ((offset >= OFS_GICD_ICACTIVER(0U)) && (offset <= OFS_GICD_ICACTIVER(31U))) { - *val = vgic_read_irqbits(vic, thread, OFS_GICD_ICACTIVER(0U), - offset); + read_val = vgic_read_irqbits(vic, thread, + OFS_GICD_ICACTIVER(0U), offset); } else if (util_offset_in_range(offset, gicd_t, ipriorityr)) { - *val = vgic_read_priority(vic, thread, - offset - offsetof(gicd_t, ipriorityr), - access_size); + read_val = (uint32_t)vgic_read_priority( + vic, thread, offset - offsetof(gicd_t, ipriorityr), + access_size); } else if (util_offset_in_range(offset, gicd_t, icfgr)) { - *val = vgic_read_config(vic, thread, - offset - offsetof(gicd_t, icfgr)); + read_val = (uint32_t)vgic_read_config( + vic, thread, offset - offsetof(gicd_t, icfgr)); } else if (util_offset_in_range(offset, gicd_t, itargetsr) || util_offset_in_range(offset, gicd_t, igrpmodr) || util_offset_in_range(offset, gicd_t, nsacr)) { // RAZ ranges - *val = 0U; + read_val = 0U; } else { // Unknown register @@ -386,9 +408,11 @@ gicd_vdevice_read(vic_t *vic, size_t offset, register_t *val, GICD_STATUSR_init(&statusr); GICD_STATUSR_set_RRD(&statusr, true); vgic_gicd_set_statusr(vic, statusr, true); - *val = 0U; + read_val = 0U; } + *val = read_val; + return ret; } @@ -805,7 +829,8 @@ gicr_vdevice_read(vic_t *vic, thread_t *gicr_vcpu, index_t gicr_num, GICR_WAKER_t gicr_waker = GICR_WAKER_default(); GICR_WAKER_set_ProcessorSleep( &gicr_waker, - atomic_load_relaxed(&gicr_vcpu->vgic_sleep)); + atomic_load_relaxed(&gicr_vcpu->vgic_sleep) != + VGIC_SLEEP_STATE_AWAKE); GICR_WAKER_set_ChildrenAsleep( &gicr_waker, vgic_gicr_rd_check_sleep(gicr_vcpu)); @@ -849,9 +874,9 @@ gicr_vdevice_read(vic_t *vic, thread_t *gicr_vcpu, index_t gicr_num, (offset == offsetof(gicr_t, sgi.icpendr0)) || (offset == offsetof(gicr_t, sgi.isactiver0)) || (offset == offsetof(gicr_t, sgi.icactiver0))) { - *val = vgic_read_irqbits(vic, gicr_vcpu, - offset - offsetof(gicr_t, sgi), - offset - offsetof(gicr_t, sgi)); + *val = (uint32_t)vgic_read_irqbits( + vic, gicr_vcpu, offset - offsetof(gicr_t, sgi), + offset - offsetof(gicr_t, sgi)); } else if ((offset == offsetof(gicr_t, sgi.igrpmodr0)) || (offset == offsetof(gicr_t, sgi.nsacr))) { @@ -879,6 +904,113 @@ gicr_vdevice_read(vic_t *vic, thread_t *gicr_vcpu, index_t gicr_num, return ret; } +static void +gicr_vdevice_icfgr_write(vic_t *vic, thread_t *gicr_vcpu, register_t val) +{ + // 32-bit register, 32-bit access only + for (index_t i = 0U; i < GIC_PPI_NUM; i++) { + vgic_gicr_sgi_set_ppi_config(vic, gicr_vcpu, i + GIC_PPI_BASE, + (val & util_bit((i * 2U) + 1U)) != + 0U); + } +} + +static void +gicr_vdevice_ipriorityr_write(vic_t *vic, thread_t *gicr_vcpu, size_t offset, + register_t val, size_t access_size) +{ + // 32-bit registers, byte or 32-bit accessible + index_t n = (index_t)(offset - OFS_GICR_SGI_IPRIORITYR(0U)); + // Loop through every byte + uint32_t shifted_val = (uint32_t)val; + for (index_t i = 0U; i < access_size; i++) { + vgic_gicr_sgi_set_sgi_ppi_priority(vic, gicr_vcpu, n + i, + (uint8_t)shifted_val); + shifted_val >>= 8U; + } +} + +static void +gicr_vdevice_activer0_write(vic_t *vic, thread_t *gicr_vcpu, size_t offset, + register_t val) +{ + // 32-bit registers, 32-bit access only + uint32_t bits = (uint32_t)val; + while (bits != 0U) { + index_t i = compiler_ctz(bits); + bits &= ~((index_t)util_bit(i)); + + vgic_gicr_sgi_change_sgi_ppi_active( + vic, gicr_vcpu, i, + (offset == offsetof(gicr_t, sgi.isactiver0))); + } +} + +static void 
+gicr_vdevice_pendr0_write(vic_t *vic, thread_t *gicr_vcpu, size_t offset, + register_t val) +{ + // 32-bit registers, 32-bit access only + uint32_t bits = (uint32_t)val; + while (bits != 0U) { + index_t i = compiler_ctz(bits); + bits &= ~((index_t)util_bit(i)); + + vgic_gicr_sgi_change_sgi_ppi_pending( + vic, gicr_vcpu, i, + (offset == offsetof(gicr_t, sgi.ispendr0))); + } +} + +static void +gicr_vdevice_enabler0_write(vic_t *vic, thread_t *gicr_vcpu, size_t offset, + register_t val) +{ + // 32-bit registers, 32-bit access only + uint32_t bits = (uint32_t)val; + while (bits != 0U) { + index_t i = compiler_ctz(bits); + bits &= ~((index_t)util_bit(i)); + + vgic_gicr_sgi_change_sgi_ppi_enable( + vic, gicr_vcpu, i, + (offset == offsetof(gicr_t, sgi.isenabler0))); + } +} + +static void +gicr_vdevice_igroupr0_write(vic_t *vic, thread_t *gicr_vcpu, register_t val) +{ + // 32-bit register, 32-bit access only + for (index_t i = 0U; i < 32U; i++) { + vgic_gicr_sgi_set_sgi_ppi_group(vic, gicr_vcpu, i, + (val & util_bit(i)) != 0U); + } +} + +#if VGIC_HAS_LPI +static void +gicr_vdevice_invallr_write(vic_t *vic, thread_t *gicr_vcpu, register_t val) +{ + GICR_INVALLR_t invallr = GICR_INVALLR_cast(val); + // WI if the virtual bit is set + if (!GICR_INVALLR_get_V(&invallr)) { + vgic_gicr_rd_invall(vic, gicr_vcpu); + } +} + +static void +gicr_vdevice_invlpir_write(vic_t *vic, thread_t *gicr_vcpu, register_t val) +{ + GICR_INVLPIR_t invlpir = GICR_INVLPIR_cast(val); + // WI if the virtual bit is set + if (!GICR_INVLPIR_get_V(&invlpir)) { + vgic_gicr_rd_invlpi(vic, gicr_vcpu, + GICR_INVLPIR_get_pINTID(&invlpir)); + } +} +#endif + static bool gicr_vdevice_write(vic_t *vic, thread_t *gicr_vcpu, size_t offset, register_t val, size_t access_size) @@ -907,21 +1039,10 @@ gicr_vdevice_write(vic_t *vic, thread_t *gicr_vcpu, size_t offset, vgic_gicr_rd_set_statusr(gicr_vcpu, statusr, false); } else if (offset == offsetof(gicr_t, rd.waker)) { - bool new_sleep = GICR_WAKER_get_ProcessorSleep( - &GICR_WAKER_cast((uint32_t)val)); -#if VGIC_HAS_1N - bool old_sleep = atomic_exchange_explicit( - &gicr_vcpu->vgic_sleep, new_sleep, - memory_order_relaxed); - if (old_sleep && !new_sleep) { - // Leaving sleep, so clear any pending 1-of-N wakeup. 
- scheduler_lock(gicr_vcpu); - gicr_vcpu->vgic_wakeup_1n = false; - scheduler_unlock(gicr_vcpu); - } -#else - atomic_store_relaxed(&gicr_vcpu->vgic_sleep, new_sleep); -#endif + vgic_gicr_rd_set_sleep( + vic, gicr_vcpu, + GICR_WAKER_get_ProcessorSleep( + &GICR_WAKER_cast((uint32_t)val))); } else if ((offset == offsetof(gicr_t, rd.setlpir)) || (offset == offsetof(gicr_t, rd.clrlpir))) { @@ -940,91 +1061,40 @@ gicr_vdevice_write(vic_t *vic, thread_t *gicr_vcpu, size_t offset, GICR_PENDBASER_cast(val)); } else if (offset == offsetof(gicr_t, rd.invlpir)) { - GICR_INVLPIR_t invlpir = GICR_INVLPIR_cast(val); - // WI if the virtual bit is set - if (!GICR_INVLPIR_get_V(&invlpir)) { - vgic_gicr_rd_invlpi(vic, gicr_vcpu, - GICR_INVLPIR_get_pINTID(&invlpir)); - } + gicr_vdevice_invlpir_write(vic, gicr_vcpu, val); } else if (offset == offsetof(gicr_t, rd.invallr)) { - GICR_INVALLR_t invallr = GICR_INVALLR_cast(val); - // WI if the virtual bit is set - if (!GICR_INVALLR_get_V(&invallr)) { - vgic_gicr_rd_invall(vic, gicr_vcpu); - } + gicr_vdevice_invallr_write(vic, gicr_vcpu, val); + #endif // VGIC_HAS_LPI } else if (offset == offsetof(gicr_t, sgi.igroupr0)) { - // 32-bit register, 32-bit access only - for (index_t i = 0U; i < 32U; i++) { - vgic_gicr_sgi_set_sgi_ppi_group( - vic, gicr_vcpu, i, (val & util_bit(i)) != 0U); - } + gicr_vdevice_igroupr0_write(vic, gicr_vcpu, val); } else if ((offset == offsetof(gicr_t, sgi.isenabler0)) || (offset == offsetof(gicr_t, sgi.icenabler0))) { - // 32-bit registers, 32-bit access only - uint32_t bits = (uint32_t)val; - while (bits != 0U) { - index_t i = compiler_ctz(bits); - bits &= ~((index_t)util_bit(i)); - - vgic_gicr_sgi_change_sgi_ppi_enable( - vic, gicr_vcpu, i, - (offset == offsetof(gicr_t, sgi.isenabler0))); - } + gicr_vdevice_enabler0_write(vic, gicr_vcpu, offset, val); } else if ((offset == offsetof(gicr_t, sgi.ispendr0)) || (offset == offsetof(gicr_t, sgi.icpendr0))) { - // 32-bit registers, 32-bit access only - uint32_t bits = (uint32_t)val; - while (bits != 0U) { - index_t i = compiler_ctz(bits); - bits &= ~((index_t)util_bit(i)); - - vgic_gicr_sgi_change_sgi_ppi_pending( - vic, gicr_vcpu, i, - (offset == offsetof(gicr_t, sgi.ispendr0))); - } + gicr_vdevice_pendr0_write(vic, gicr_vcpu, offset, val); } else if ((offset == offsetof(gicr_t, sgi.isactiver0)) || (offset == offsetof(gicr_t, sgi.icactiver0))) { - // 32-bit registers, 32-bit access only - uint32_t bits = (uint32_t)val; - while (bits != 0U) { - index_t i = compiler_ctz(bits); - bits &= ~((index_t)util_bit(i)); - - vgic_gicr_sgi_change_sgi_ppi_active( - vic, gicr_vcpu, i, - (offset == offsetof(gicr_t, sgi.isactiver0))); - } + gicr_vdevice_activer0_write(vic, gicr_vcpu, offset, val); } else if ((offset >= OFS_GICR_SGI_IPRIORITYR(0U)) && (offset <= OFS_GICR_SGI_IPRIORITYR(GIC_PPI_BASE + GIC_PPI_NUM - 1))) { - // 32-bit registers, byte or 32-bit accessible - index_t n = (index_t)(offset - OFS_GICR_SGI_IPRIORITYR(0U)); - // Loop through every byte - uint32_t shifted_val = (uint32_t)val; - for (index_t i = 0U; i < access_size; i++) { - vgic_gicr_sgi_set_sgi_ppi_priority( - vic, gicr_vcpu, n + i, (uint8_t)shifted_val); - shifted_val >>= 8U; - } + gicr_vdevice_ipriorityr_write(vic, gicr_vcpu, offset, val, + access_size); } else if (offset == OFS_GICR_SGI_ICFGR(0U)) { // All interrupts in this register are SGIs, which are always // edge-triggered, so it is entirely WI } else if (offset == OFS_GICR_SGI_ICFGR(1U)) { - // 32-bit register, 32-bit access only - for (index_t i = 0U; i < GIC_PPI_NUM; i++) { - 
vgic_gicr_sgi_set_ppi_config( - vic, gicr_vcpu, i + GIC_PPI_BASE, - (val & util_bit((i * 2U) + 1U)) != 0U); - } + gicr_vdevice_icfgr_write(vic, gicr_vcpu, val); } else if (offset == offsetof(gicr_t, sgi.igrpmodr0)) { // WI diff --git a/hyp/vm/vgic/vgic.tc b/hyp/vm/vgic/vgic.tc index a65b309..fa0e7a0 100644 --- a/hyp/vm/vgic/vgic.tc +++ b/hyp/vm/vgic/vgic.tc @@ -381,6 +381,14 @@ define vgic_lr_owner_lock structure(lockable) { lock structure spinlock; }; +define vgic_sleep_state enumeration { + awake = 0; + asleep; +#if VGIC_HAS_1N + wakeup_1n; +#endif +}; + // Virtual GICR state is embedded in the associated thread context. extend thread object module vgic { // Reference-counted pointer to the vic that owns this GICR. This @@ -497,7 +505,7 @@ extend thread object module vgic { // Current GICR_WAKER.ProcessorSleep state. // // The effects of setting this flag are: - // - selecting the VCPU for a 1-of-N wakeup + // - enabling selection of the VCPU for a 1-of-N wakeup // - short-circuiting delivery checks to wake up without listing // - polling group enables on GICR_WAKER.ChildrenAsleep reads // - not polling VPE scheduling on GICR_WAKER.ChildrenAsleep reads @@ -511,23 +519,11 @@ extend thread object module vgic { // Therefore it is within spec for this flag to have no direct effect // on interrupt delivery. The advantage of doing this is that interrupt // delivery will work in VMs that assume they don't have control of - // GICR_WAKER, like UEFI. + // GICR_WAKER, like UEFI, as long as they don't rely on 1-of-N wakeup. // - // This flag does not need any lock protection, but it does need to be - // atomic as it is read remotely by the 1-of-N wakeup algorithm. - sleep bool(atomic); - -#if VGIC_HAS_1N - // Current 1-of-N wakeup request state. - // - // This flag is set to true if the VCPU is chosen by the 1-of-N wakeup - // algorithm while it is not blocked in the EL2 scheduler, and is - // cleared when GICR_WAKER.ProcessorSleep is changed from 1 to 0. If - // it is set, the VCPU cannot enter a low-power state. - // - // This flag is protected by the scheduler lock. - wakeup_1n bool; -#endif + // When 1-of-N support is enabled, this flag has a third state that + // indicates that it is asleep, but has been chosen for 1-of-N wakeup. + sleep enumeration vgic_sleep_state(atomic); // Cache of VIRQ numbers that may need to be checked when searching // for a pending IRQ to deliver. diff --git a/hyp/vm/vic_base/include/vic_base.h b/hyp/vm/vic_base/include/vic_base.h index 9e2b710..c51d590 100644 --- a/hyp/vm/vic_base/include/vic_base.h +++ b/hyp/vm/vic_base/include/vic_base.h @@ -17,6 +17,10 @@ vic_attach_vcpu(vic_t *vic, thread_t *vcpu, index_t index); // bind PPI error_t vic_bind_private_forward_private(virq_source_t *source, vic_t *vic, + thread_t *vcpu, virq_t virq); + +void +vic_sync_private_forward_private(virq_source_t *source, vic_t *vic, thread_t *vcpu, virq_t virq, irq_t pirq, cpu_index_t pcpu); diff --git a/hyp/vm/vic_base/src/forward_private.c b/hyp/vm/vic_base/src/forward_private.c index 2b6b1a1..7af6223 100644 --- a/hyp/vm/vic_base/src/forward_private.c +++ b/hyp/vm/vic_base/src/forward_private.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -30,7 +31,6 @@ #include #include "event_handlers.h" -#include "gicv3.h" #include "panic.h" #include "vic_base.h" @@ -43,13 +43,14 @@ private_irq_info_from_virq_source(virq_source_t *source) return vic_private_irq_info_container_of_source(source); } +// Called with the forward-private lock held. 
static error_t vic_bind_private_hwirq_helper(vic_forward_private_t *fp, thread_t *vcpu) { error_t err; cpu_index_t cpu; - assert(vcpu->forward_private_active); + assert(vcpu->vic_base_forward_private_active); if (!vcpu_option_flags_get_pinned(&vcpu->vcpu_options)) { err = ERROR_DENIED; @@ -65,12 +66,19 @@ vic_bind_private_hwirq_helper(vic_forward_private_t *fp, thread_t *vcpu) vic_private_irq_info_t *irq_info = &fp->irq_info[cpu]; err = vic_bind_private_forward_private(&irq_info->source, fp->vic, vcpu, - fp->virq, irq_info->irq, cpu); + fp->virq); + + if ((err == OK) && vcpu->vic_base_forward_private_in_sync) { + vic_sync_private_forward_private(&irq_info->source, fp->vic, + vcpu, fp->virq, irq_info->irq, + cpu); + } out: return err; } +// Called with the forward-private lock held. static void vic_unbind_private_hwirq_helper(hwirq_t *hwirq) { @@ -98,6 +106,43 @@ vic_unbind_private_hwirq_helper(hwirq_t *hwirq) } } +// Called with the forward-private lock held. +static void +vic_sync_private_hwirq_helper(vic_forward_private_t *fp, thread_t *vcpu) +{ + assert(vcpu->vic_base_forward_private_active); + assert(vcpu_option_flags_get_pinned(&vcpu->vcpu_options)); + + scheduler_lock(vcpu); + cpu_index_t cpu = vcpu->scheduler_affinity; + scheduler_unlock(vcpu); + + assert(cpulocal_index_valid(cpu)); + + vic_private_irq_info_t *irq_info = &fp->irq_info[cpu]; + + vic_sync_private_forward_private(&irq_info->source, fp->vic, vcpu, + fp->virq, irq_info->irq, cpu); +} + +// Called with the forward-private lock held. +static void +vic_disable_private_hwirq_helper(vic_forward_private_t *fp, thread_t *vcpu) +{ + assert(vcpu->vic_base_forward_private_active); + assert(vcpu_option_flags_get_pinned(&vcpu->vcpu_options)); + + scheduler_lock(vcpu); + cpu_index_t cpu = vcpu->scheduler_affinity; + scheduler_unlock(vcpu); + + assert(cpulocal_index_valid(cpu)); + + vic_private_irq_info_t *irq_info = &fp->irq_info[cpu]; + + platform_irq_disable_percpu(irq_info->irq, cpu); +} + error_t vic_bind_hwirq_forward_private(vic_t *vic, hwirq_t *hwirq, virq_t virq) { @@ -152,7 +197,7 @@ vic_bind_hwirq_forward_private(vic_t *vic, hwirq_t *hwirq, virq_t virq) rcu_read_start(); thread_t *vcpu = atomic_load_consume(&vic->gicr_vcpus[i]); - if ((vcpu != NULL) && vcpu->forward_private_active) { + if ((vcpu != NULL) && vcpu->vic_base_forward_private_active) { err = vic_bind_private_hwirq_helper(fp, vcpu); if (err != OK) { rcu_read_finish(); @@ -185,12 +230,13 @@ bool vic_handle_vcpu_activate_thread_forward_private(thread_t *thread) { bool ret = true; - vic_t *vic = thread->vgic_vic; + vic_t *vic = vic_get_vic(thread); if (vic != NULL) { spinlock_acquire(&vic->forward_private_lock); - thread->forward_private_active = true; + thread->vic_base_forward_private_active = true; + thread->vic_base_forward_private_in_sync = false; vic_forward_private_t *fp; @@ -294,6 +340,7 @@ vic_handle_virq_check_pending_forward_private(virq_source_t *source, vic_private_irq_info_t *irq_info = private_irq_info_from_virq_source(source); + // FIXME: if (!reasserted && atomic_fetch_and_explicit(&irq_info->hw_active, false, memory_order_relaxed)) { @@ -317,6 +364,12 @@ vic_handle_virq_set_enabled_forward_private(virq_source_t *source, bool enabled) assert(source->is_private); assert(platform_irq_is_percpu(irq_info->irq)); + // Note that we don't check the forward-private flag here, because we + // can't safely take the lock; the vgic module calls this handler with + // the GICD lock held, and the sync handler above calls a vgic function + // that acquires the 
GICD lock with the forward-private lock held. + // The same applies to the other VIRQ configuration handlers. + // FIXME: if (enabled) { platform_irq_enable_percpu(irq_info->irq, irq_info->cpu); } else { @@ -336,7 +389,8 @@ vic_handle_virq_set_mode_forward_private(virq_source_t *source, assert(source->is_private); assert(platform_irq_is_percpu(irq_info->irq)); - return gicv3_irq_set_trigger_percpu(irq_info->irq, mode, irq_info->cpu); + // FIXME: + return platform_irq_set_mode_percpu(irq_info->irq, mode, irq_info->cpu); } rcu_update_status_t @@ -358,4 +412,60 @@ vic_handle_free_forward_private(rcu_entry_t *entry) return ret; } + +void +vic_base_handle_vcpu_started(bool warm_reset) +{ + thread_t *vcpu = thread_get_self(); + vic_t *vic = vic_get_vic(vcpu); + + if (warm_reset || (vic == NULL) || + !vcpu_option_flags_get_pinned(&vcpu->vcpu_options)) { + // Nothing to do + goto out; + } + + spinlock_acquire(&vic->forward_private_lock); + + assert(!vcpu->vic_base_forward_private_in_sync); + + vic_forward_private_t *fp; + list_foreach_container (fp, &vic->forward_private_list, + vic_forward_private, list_node) { + vic_sync_private_hwirq_helper(fp, vcpu); + } + vcpu->vic_base_forward_private_in_sync = true; + + spinlock_release(&vic->forward_private_lock); + +out: + return; +} + +void +vic_base_handle_vcpu_stopped(void) +{ + thread_t *vcpu = thread_get_self(); + vic_t *vic = vic_get_vic(vcpu); + + if ((vic == NULL) || + !vcpu_option_flags_get_pinned(&vcpu->vcpu_options)) { + // Nothing to do + goto out; + } + + spinlock_acquire(&vic->forward_private_lock); + if (vcpu->vic_base_forward_private_in_sync) { + vic_forward_private_t *fp; + list_foreach_container (fp, &vic->forward_private_list, + vic_forward_private, list_node) { + vic_disable_private_hwirq_helper(fp, vcpu); + } + vcpu->vic_base_forward_private_in_sync = false; + } + spinlock_release(&vic->forward_private_lock); + +out: + return; +} #endif diff --git a/hyp/vm/vic_base/vic_base.ev b/hyp/vm/vic_base/vic_base.ev index 09946dd..e262ba0 100644 --- a/hyp/vm/vic_base/vic_base.ev +++ b/hyp/vm/vic_base/vic_base.ev @@ -39,4 +39,8 @@ subscribe virq_set_mode[VIRQ_TRIGGER_VIC_BASE_FORWARD_PRIVATE] subscribe rcu_update[RCU_UPDATE_CLASS_VIC_BASE_FREE_FORWARD_PRIVATE] handler vic_handle_free_forward_private(entry) + +subscribe vcpu_started(warm_reset) + +subscribe vcpu_stopped() #endif diff --git a/hyp/vm/vic_base/vic_base.tc b/hyp/vm/vic_base/vic_base.tc index 5dff8ca..8a44776 100644 --- a/hyp/vm/vic_base/vic_base.tc +++ b/hyp/vm/vic_base/vic_base.tc @@ -23,8 +23,9 @@ extend vic object { forward_private_list structure list; }; -extend thread object { +extend thread object module vic_base { forward_private_active bool; + forward_private_in_sync bool; }; extend rcu_update_class enumeration { diff --git a/hyp/vm/virtio_input/build.conf b/hyp/vm/virtio_input/build.conf new file mode 100644 index 0000000..50a657e --- /dev/null +++ b/hyp/vm/virtio_input/build.conf @@ -0,0 +1,10 @@ +# © 2023 Qualcomm Innovation Center, Inc. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +local_include +types virtio_input.tc +events virtio_input.ev +hypercalls virtio_input.hvc +base_module hyp/mem/useraccess +source virtio_input.c hypercalls.c diff --git a/hyp/vm/virtio_input/include/virtio_input.h b/hyp/vm/virtio_input/include/virtio_input.h new file mode 100644 index 0000000..da011f9 --- /dev/null +++ b/hyp/vm/virtio_input/include/virtio_input.h @@ -0,0 +1,10 @@ +// © 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+// +// SPDX-License-Identifier: BSD-3-Clause + +error_t +set_data_sel_ev_bits(const virtio_mmio_t *virtio_mmio, uint32_t subsel, + uint32_t size, vmaddr_t data); +error_t +set_data_sel_abs_info(const virtio_mmio_t *virtio_mmio, uint32_t subsel, + uint32_t size, vmaddr_t data); diff --git a/hyp/vm/virtio_input/src/hypercalls.c b/hyp/vm/virtio_input/src/hypercalls.c new file mode 100644 index 0000000..c296fc6 --- /dev/null +++ b/hyp/vm/virtio_input/src/hypercalls.c @@ -0,0 +1,251 @@ +// © 2023 Qualcomm Innovation Center, Inc. All rights reserved. +// +// SPDX-License-Identifier: BSD-3-Clause + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "useraccess.h" +#include "virtio_input.h" + +error_t +hypercall_virtio_input_configure(cap_id_t virtio_mmio_cap, uint64_t devids, + uint32_t prop_bits, uint32_t num_evtypes, + uint32_t num_absaxes) +{ + error_t ret; + cspace_t *cspace = cspace_get_self(); + + virtio_mmio_ptr_result_t p = cspace_lookup_virtio_mmio( + cspace, virtio_mmio_cap, CAP_RIGHTS_VIRTIO_MMIO_CONFIG); + if (compiler_unexpected(p.e != OK)) { + ret = p.e; + goto out; + } + virtio_mmio_t *virtio_mmio = p.r; + partition_t *partition = virtio_mmio->header.partition; + + // Must be a virtio-input device + if (virtio_mmio->device_type != VIRTIO_DEVICE_TYPE_INPUT) { + ret = ERROR_OBJECT_CONFIG; + goto release_virtio_object; + } + + // save the devids and propbits + virtio_mmio->input_data->devids = devids; + virtio_mmio->input_data->prop_bits = prop_bits; + + // Validate the upper bound for evtypes and absaxes + if ((num_evtypes > VIRTIO_INPUT_MAX_EV_TYPES) || + (num_absaxes > VIRTIO_INPUT_MAX_ABS_AXES)) { + ret = ERROR_ARGUMENT_INVALID; + goto release_virtio_object; + } + + size_t alloc_size = 0U; + void_ptr_result_t alloc_ret; + // allocate mem for evtypes, if not already allocated and count is > 0 + if ((virtio_mmio->input_data->ev_bits == NULL) && (num_evtypes > 0U)) { + alloc_size = num_evtypes * sizeof(virtio_input_ev_bits_t); + alloc_ret = partition_alloc(partition, alloc_size, + alignof(virtio_input_ev_bits_t)); + if (alloc_ret.e != OK) { + ret = ERROR_NOMEM; + goto release_virtio_object; + } + (void)memset_s(alloc_ret.r, alloc_size, 0, alloc_size); + + virtio_mmio->input_data->ev_bits = + (virtio_input_ev_bits_t *)alloc_ret.r; + virtio_mmio->input_data->ev_bits_count = num_evtypes; + + // set entry of each ev as VIRTIO_INPUT_SUBSEL_INVALID + for (uint32_t entry = 0; entry < num_evtypes; entry++) { + virtio_mmio->input_data->ev_bits[entry].subsel = + (uint8_t)VIRTIO_INPUT_SUBSEL_INVALID; + } + } else { + if (num_evtypes > 0U) { + ret = ERROR_BUSY; + goto release_virtio_object; + } else { + // it means device has no evtypes to register, no + // worries + } + } + + // allocate mem for absaxes, if not already allocated and count is > 0 + if ((virtio_mmio->input_data->absinfo == NULL) && (num_absaxes > 0U)) { + alloc_size = num_absaxes * sizeof(virtio_input_absinfo_t); + alloc_ret = partition_alloc(partition, alloc_size, + alignof(virtio_input_absinfo_t)); + if (alloc_ret.e != OK) { + ret = ERROR_NOMEM; + goto release_virtio_object; + } + (void)memset_s(alloc_ret.r, alloc_size, 0, alloc_size); + + virtio_mmio->input_data->absinfo = + (virtio_input_absinfo_t *)alloc_ret.r; + virtio_mmio->input_data->absinfo_count = num_absaxes; + + // set entry of each absinfo as VIRTIO_INPUT_SUBSEL_INVALID + for (uint32_t entry = 0; entry < num_absaxes; entry++) { + 
virtio_mmio->input_data->absinfo[entry].subsel = + (uint8_t)VIRTIO_INPUT_SUBSEL_INVALID; + } + } else if (num_absaxes > 0U) { + ret = ERROR_BUSY; + goto release_virtio_object; + } else { + // device has no absaxes info to register + } + + ret = OK; +release_virtio_object: + object_put_virtio_mmio(virtio_mmio); +out: + return ret; +} + +error_t +hypercall_virtio_input_set_data(cap_id_t virtio_mmio_cap, uint32_t sel, + uint32_t subsel, uint32_t size, vmaddr_t data) +{ + error_t ret; + cspace_t *cspace = cspace_get_self(); + + virtio_mmio_ptr_result_t p = cspace_lookup_virtio_mmio( + cspace, virtio_mmio_cap, CAP_RIGHTS_VIRTIO_MMIO_CONFIG); + if (compiler_unexpected(p.e != OK)) { + ret = p.e; + goto out; + } + virtio_mmio_t *virtio_mmio = p.r; + + // Must be a virtio-input device + if (virtio_mmio->device_type != VIRTIO_DEVICE_TYPE_INPUT) { + ret = ERROR_CSPACE_WRONG_OBJECT_TYPE; + goto release_virtio_object; + } + + switch ((virtio_input_config_select_t)sel) { + case VIRTIO_INPUT_CONFIG_SELECT_CFG_ID_NAME: { + // Only subsel 0 is valid for this sel value + if (subsel == 0U) { + // copy data from guest va; size is checked by this API + ret = useraccess_copy_from_guest_va( + virtio_mmio->input_data->name, + sizeof(virtio_mmio->input_data->name), + data, size) + .e; + if (ret == OK) { + virtio_mmio->input_data->name_size = size; + } else { + virtio_mmio->input_data->name_size = 0U; + } + } else { + ret = ERROR_ARGUMENT_INVALID; + } + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_ID_SERIAL: { + // Only subsel 0 is valid for this sel value + if (subsel == 0U) { + // copy data from guest va; size is checked by this API + ret = useraccess_copy_from_guest_va( + virtio_mmio->input_data->serial, + sizeof(virtio_mmio->input_data->serial), + data, size) + .e; + if (ret == OK) { + virtio_mmio->input_data->serial_size = size; + } else { + virtio_mmio->input_data->serial_size = 0U; + } + } else { + ret = ERROR_ARGUMENT_INVALID; + } + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_ID_DEVIDS: { + // Only subsel 0 is valid for this sel value + if (subsel == 0U) { + // copy data from guest va; size is checked by this API + // TODO: should we memset here? + ret = useraccess_copy_from_guest_va( + &virtio_mmio->input_data->devids, + sizeof(virtio_mmio->input_data->devids), + data, size) + .e; + } else { + ret = ERROR_ARGUMENT_INVALID; + } + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_PROP_BITS: { + // Only subsel 0 is valid for this sel value + if (subsel == 0U) { + // copy data from guest va; size is checked by this API + // TODO: should we memset here? 
+ ret = useraccess_copy_from_guest_va( + &virtio_mmio->input_data->prop_bits, + sizeof(virtio_mmio->input_data->prop_bits), + data, size) + .e; + } else { + ret = ERROR_ARGUMENT_INVALID; + } + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_EV_BITS: { + // check if mem is allocated for ev_bits + if (virtio_mmio->input_data->ev_bits != NULL) { + ret = set_data_sel_ev_bits( + (const virtio_mmio_t *)virtio_mmio, subsel, + size, data); + } else { + // Not properly configured + ret = ERROR_ARGUMENT_INVALID; + } + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_ABS_INFO: { + // check if mem is allocated for absinfo + if (virtio_mmio->input_data->absinfo != NULL) { + ret = set_data_sel_abs_info( + (const virtio_mmio_t *)virtio_mmio, subsel, + size, data); + } else { + // Not properly configured + ret = ERROR_ARGUMENT_INVALID; + } + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_UNSET: + default: + // invalid select event + ret = ERROR_ARGUMENT_INVALID; + break; + } + +release_virtio_object: + object_put_virtio_mmio(virtio_mmio); +out: + return ret; +} diff --git a/hyp/vm/virtio_input/src/virtio_input.c b/hyp/vm/virtio_input/src/virtio_input.c new file mode 100644 index 0000000..41bde96 --- /dev/null +++ b/hyp/vm/virtio_input/src/virtio_input.c @@ -0,0 +1,455 @@ +// © 2023 Qualcomm Innovation Center, Inc. All rights reserved. +// +// SPDX-License-Identifier: BSD-3-Clause + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "event_handlers.h" +#include "useraccess.h" +#include "virtio_input.h" + +error_t +virtio_input_handle_object_activate(virtio_mmio_t *virtio_mmio) +{ + error_t ret; + + partition_t *partition = virtio_mmio->header.partition; + // Allocate memory for virtio input data struct if device type + // virtio-input + if (virtio_mmio->device_type == VIRTIO_DEVICE_TYPE_INPUT) { + size_t alloc_size = sizeof(virtio_input_data_t); + void_ptr_result_t alloc_ret = partition_alloc( + partition, alloc_size, alignof(virtio_input_data_t)); + if (alloc_ret.e != OK) { + ret = ERROR_NOMEM; + goto out; + } + + (void)memset_s(alloc_ret.r, alloc_size, 0, alloc_size); + + virtio_mmio->input_data = (virtio_input_data_t *)alloc_ret.r; + } + + ret = OK; +out: + return ret; +} + +error_t +virtio_input_handle_object_cleanup(virtio_mmio_t *virtio_mmio) +{ + if (virtio_mmio->input_data != NULL) { + partition_t *partition = virtio_mmio->header.partition; + size_t alloc_size; + void *alloc_base; + /* first free memory for absinfo and evtypes if any */ + if (virtio_mmio->input_data->absinfo_count != 0U) { + alloc_size = virtio_mmio->input_data->absinfo_count * + sizeof(virtio_input_absinfo_t); + alloc_base = (void *)virtio_mmio->input_data->absinfo; + + error_t err = partition_free(partition, alloc_base, + alloc_size); + assert(err == OK); + + virtio_mmio->input_data->absinfo = NULL; + virtio_mmio->input_data->absinfo_count = 0U; + } + + if (virtio_mmio->input_data->ev_bits_count != 0U) { + alloc_size = virtio_mmio->input_data->ev_bits_count * + sizeof(virtio_input_ev_bits_t); + alloc_base = (void *)virtio_mmio->input_data->ev_bits; + + error_t err = partition_free(partition, alloc_base, + alloc_size); + assert(err == OK); + + virtio_mmio->input_data->ev_bits = NULL; + virtio_mmio->input_data->ev_bits_count = 0U; + } + + /* now safely free the virtio_input struct */ + alloc_size = sizeof(virtio_mmio->input_data); + alloc_base = (void *)&virtio_mmio->input_data; + + error_t err = partition_free(partition, 
alloc_base, alloc_size); + assert(err == OK); + + virtio_mmio->input_data = NULL; + } else { + // ignore + } + + return OK; +} + +error_t +set_data_sel_abs_info(const virtio_mmio_t *virtio_mmio, uint32_t subsel, + uint32_t size, vmaddr_t data) +{ + error_t ret; + if (subsel < VIRTIO_INPUT_MAX_ABS_AXES) { + // find free entry + uint32_t entry; + for (entry = 0; entry < virtio_mmio->input_data->absinfo_count; + entry++) { + if (virtio_mmio->input_data->absinfo[entry].subsel == + VIRTIO_INPUT_SUBSEL_INVALID) { + // got the free entry + break; + } else { + continue; + } + } + + if (entry == virtio_mmio->input_data->absinfo_count) { + // no free entry + ret = ERROR_NORESOURCES; + } else { + // copy data from guest va; size is checked by this API + ret = useraccess_copy_from_guest_va( + &(virtio_mmio->input_data->absinfo[entry] + .data), + VIRTIO_INPUT_MAX_ABSINFO_SIZE, data, size) + .e; + if (ret == OK) { + // successful copy, update subsel + virtio_mmio->input_data->absinfo[entry].subsel = + (uint8_t)subsel; + } else { + // ignore + } + } + } else { + ret = ERROR_ARGUMENT_INVALID; + } + return ret; +} + +error_t +set_data_sel_ev_bits(const virtio_mmio_t *virtio_mmio, uint32_t subsel, + uint32_t size, vmaddr_t data) +{ + error_t ret; + if (subsel < VIRTIO_INPUT_MAX_EV_TYPES) { + // find free entry + uint32_t entry; + for (entry = 0; entry < virtio_mmio->input_data->ev_bits_count; + entry++) { + if (virtio_mmio->input_data->ev_bits[entry].subsel == + VIRTIO_INPUT_SUBSEL_INVALID) { + // got the free entry + break; + } else { + continue; + } + } + + if (entry == virtio_mmio->input_data->ev_bits_count) { + // no free entry + ret = ERROR_NORESOURCES; + } else { + // copy data from guest va; size is checked by this API + ret = useraccess_copy_from_guest_va( + &(virtio_mmio->input_data->ev_bits[entry] + .data), + VIRTIO_INPUT_MAX_BITMAP_SIZE, data, size) + .e; + if (ret == OK) { + /*successful copy, update the size info + * and subsel*/ + virtio_mmio->input_data->ev_bits[entry].size = + (uint8_t)size; + virtio_mmio->input_data->ev_bits[entry].subsel = + (uint8_t)subsel; + } else { + // ignore + } + } + } else { + ret = ERROR_ARGUMENT_INVALID; + } + return ret; +} + +static void +sel_cfg_abs_info_write(const virtio_mmio_t *virtio_mmio, uint8_t subsel) +{ + if (subsel < VIRTIO_INPUT_MAX_ABS_AXES) { + // find the ev entry where this subsel entry is stored + uint32_t entry; + for (entry = 0; entry < virtio_mmio->input_data->absinfo_count; + entry++) { + if (virtio_mmio->input_data->absinfo[entry].subsel == + subsel) { + // found the entry + break; + } else { + continue; + } + } + if (entry == virtio_mmio->input_data->absinfo_count) { + // entry not found, invalid subsel set size 0 + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + 0U); + } else { + // valid subsel + uint8_t size = (uint8_t)VIRTIO_INPUT_MAX_ABSINFO_SIZE; + + (void)memcpy( + virtio_mmio->regs->device_config.input_config.u + .abs, + virtio_mmio->input_data->absinfo[entry].data, + size); + + // update the size + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + size); + } + } else { + // invalid subsel set size 0 + atomic_store_relaxed( + &virtio_mmio->regs->device_config.input_config.size, + 0U); + } +} + +static void +sel_cfg_ev_bits_write(const virtio_mmio_t *virtio_mmio, uint8_t subsel) +{ + if (subsel < VIRTIO_INPUT_MAX_EV_TYPES) { + // find the ev entry where this subsel entry is stored + uint32_t entry; + for (entry = 0; entry < virtio_mmio->input_data->ev_bits_count; + 
entry++) { + if (virtio_mmio->input_data->ev_bits[entry].subsel == + subsel) { + // found the entry + break; + } else { + continue; + } + } + if (entry == virtio_mmio->input_data->ev_bits_count) { + // entry not found, invalid subsel set size 0 + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + 0U); + } else { + // valid subsel + uint8_t size = + virtio_mmio->input_data->ev_bits[entry].size; + + (void)memcpy( + virtio_mmio->regs->device_config.input_config.u + .bitmap, + virtio_mmio->input_data->ev_bits[entry].data, + size); + + // update the size + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + size); + } + } else { + // invalid subsel set size 0 + atomic_store_relaxed( + &virtio_mmio->regs->device_config.input_config.size, + 0U); + } +} + +static void +virtio_input_config_u_write(const virtio_mmio_t *virtio_mmio, uint8_t sel, + uint8_t subsel) +{ + switch ((virtio_input_config_select_t)sel) { + case VIRTIO_INPUT_CONFIG_SELECT_CFG_ID_NAME: { + if (subsel != 0U) { // only subsel 0 is valid + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + 0U); + } else { + size_t size = virtio_mmio->input_data->name_size; + for (index_t i = 0U; i < size; i++) { + atomic_store_relaxed( + &virtio_mmio->regs->device_config + .input_config.u.string[i], + virtio_mmio->input_data->name[i]); + } + // update the size + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + (uint8_t)size); + } + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_ID_SERIAL: { + if (subsel != 0U) { // only subsel 0 is valid + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + 0U); + } else { + size_t size = virtio_mmio->input_data->serial_size; + for (index_t i = 0U; i < size; i++) { + atomic_store_relaxed( + &virtio_mmio->regs->device_config + .input_config.u.string[i], + virtio_mmio->input_data->serial[i]); + } + // update the size + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + (uint8_t)size); + } + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_ID_DEVIDS: { + if (subsel != 0U) { // only subsel 0 is valid + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + 0U); + } else { + size_t size = sizeof(virtio_mmio->input_data->devids); + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.u.ids, + virtio_mmio->input_data->devids); + // update the size + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + (uint8_t)size); + } + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_PROP_BITS: { + if (subsel != 0U) { // only subsel 0 is valid + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + 0U); + } else { + size_t size = + sizeof(virtio_mmio->input_data->prop_bits); + uint8_t *prop_bits_addr = + (uint8_t *)&virtio_mmio->input_data->prop_bits; + for (index_t i = 0U; i < size; i++) { + atomic_store_relaxed( + &virtio_mmio->regs->device_config + .input_config.u.bitmap[i], + *prop_bits_addr); + prop_bits_addr++; + } + // update the size + atomic_store_relaxed(&virtio_mmio->regs->device_config + .input_config.size, + (uint8_t)size); + } + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_EV_BITS: { + sel_cfg_ev_bits_write(virtio_mmio, subsel); + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_ABS_INFO: { + sel_cfg_abs_info_write(virtio_mmio, subsel); + break; + } + case VIRTIO_INPUT_CONFIG_SELECT_CFG_UNSET: + default: + // No data; set size to 0 + atomic_store_relaxed( + 
&virtio_mmio->regs->device_config.input_config.size, + 0U); + break; + } +} + +vcpu_trap_result_t +virtio_input_config_write(const virtio_mmio_t *virtio_mmio, size_t write_offset, + register_t reg_val, size_t access_size) +{ + vcpu_trap_result_t ret; + register_t val = reg_val; + size_t offset; + size_t access_size_remaining = access_size; + + if (write_offset >= (size_t)OFS_VIRTIO_MMIO_REGS_DEVICE_CONFIG) { + ret = VCPU_TRAP_RESULT_FAULT; + offset = write_offset - + (size_t)OFS_VIRTIO_MMIO_REGS_DEVICE_CONFIG; + while (access_size_remaining != 0U) { + switch (offset) { + case OFS_VIRTIO_INPUT_CONFIG_SELECT: { + atomic_store_relaxed( + &virtio_mmio->regs->device_config + .input_config.select, + (uint8_t)val); + + uint8_t subsel = atomic_load_relaxed( + &virtio_mmio->regs->device_config + .input_config.subsel); + + // write the appropriate data in u regs + virtio_input_config_u_write( + virtio_mmio, (uint8_t)val, subsel); + // update remianing size + access_size_remaining = + access_size_remaining - 1U; + offset += 1U; // update offset + val >>= 8; // update the value + ret = VCPU_TRAP_RESULT_EMULATED; + break; + } + case OFS_VIRTIO_INPUT_CONFIG_SUBSEL: { + atomic_store_relaxed( + &virtio_mmio->regs->device_config + .input_config.subsel, + (uint8_t)val); + + uint8_t sel = atomic_load_relaxed( + &virtio_mmio->regs->device_config + .input_config.select); + + // write the appropriate data in u regs + virtio_input_config_u_write(virtio_mmio, sel, + (uint8_t)val); + // update remianing size + access_size_remaining = + access_size_remaining - 1U; + offset += 1U; // update offset + val >>= 8; // update the value + ret = VCPU_TRAP_RESULT_EMULATED; + break; + } + default: + (void)access_size; + // we will not handle offset after subsel + access_size_remaining = 0U; + ret = VCPU_TRAP_RESULT_FAULT; + break; + } + } + } else { + ret = VCPU_TRAP_RESULT_FAULT; + } + + return ret; +} diff --git a/hyp/vm/virtio_input/virtio_input.ev b/hyp/vm/virtio_input/virtio_input.ev new file mode 100644 index 0000000..02e027a --- /dev/null +++ b/hyp/vm/virtio_input/virtio_input.ev @@ -0,0 +1,17 @@ +// © 2023 Qualcomm Innovation Center, Inc. All rights reserved. +// +// SPDX-License-Identifier: BSD-3-Clause + +module virtio_input + +subscribe virtio_mmio_valid_device_type[VIRTIO_DEVICE_TYPE_INPUT] + constant true + +subscribe virtio_mmio_device_config_write[VIRTIO_DEVICE_TYPE_INPUT] + handler virtio_input_config_write(virtio_mmio, offset, value, access_size) + +subscribe virtio_mmio_device_config_activate[VIRTIO_DEVICE_TYPE_INPUT] + handler virtio_input_handle_object_activate(virtio_mmio) + +subscribe virtio_mmio_device_config_cleanup[VIRTIO_DEVICE_TYPE_INPUT] + handler virtio_input_handle_object_cleanup(virtio_mmio) diff --git a/hyp/vm/virtio_input/virtio_input.hvc b/hyp/vm/virtio_input/virtio_input.hvc new file mode 100644 index 0000000..e8d7e2e --- /dev/null +++ b/hyp/vm/virtio_input/virtio_input.hvc @@ -0,0 +1,25 @@ +// © 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
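Editor's note on the select/subsel emulation in virtio_input_config_write() above: each write to the select or subsel byte traps to the hypervisor, which refreshes the banked data union and the size byte (size 0 indicates an unregistered selection). A hypothetical frontend-side view of that window is sketched below; the struct and function names are illustrative only, and the layout follows virtio_input_config in virtio_input.tc (select @ 0x00, subsel @ 0x01, size @ 0x02, banked data @ 0x08).

#include <stddef.h>
#include <stdint.h>

struct virtio_input_cfg_window {
	volatile uint8_t select;
	volatile uint8_t subsel;
	volatile uint8_t size;
	uint8_t		 reserved[5];
	volatile uint8_t u[128];
};

// Read the EV_BITS bitmap for one event type; returns the byte count the
// device reported (0 if that event type was never registered).
static size_t
read_ev_bits(volatile struct virtio_input_cfg_window *cfg, uint8_t ev_type,
	     uint8_t *out, size_t out_len)
{
	cfg->select = 0x11U; // CFG_EV_BITS
	cfg->subsel = ev_type;

	size_t n = cfg->size;
	for (size_t i = 0U; (i < n) && (i < out_len); i++) {
		out[i] = cfg->u[i];
	}
	return n;
}
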
+// +// SPDX-License-Identifier: BSD-3-Clause + +define virtio_input_configure hypercall { + call_num 0x5e; + virtio_mmio_cap input type cap_id_t; + devids input uint64; + prop_bits input uint32; + num_evtypes input uint32; + num_absaxes input uint32; + res0 input uregister; + error output enumeration error; +}; + +define virtio_input_set_data hypercall { + call_num 0x5f; + virtio_mmio_cap input type cap_id_t; + sel input uint32; + subsel input uint32; + size input uint32; + data input type vmaddr_t; + res0 input uregister; + error output enumeration error; +}; diff --git a/hyp/vm/virtio_input/virtio_input.tc b/hyp/vm/virtio_input/virtio_input.tc new file mode 100644 index 0000000..05c1641 --- /dev/null +++ b/hyp/vm/virtio_input/virtio_input.tc @@ -0,0 +1,72 @@ +// © 2023 Qualcomm Innovation Center, Inc. All rights reserved. +// +// SPDX-License-Identifier: BSD-3-Clause + +define VIRTIO_MMIO_INPUT_REG_CONFIG_BYTES constant type count_t = (VIRTIO_MMIO_REG_CONFIG_BYTES - 0x88); +define VIRTIO_INPUT_MAX_ABS_AXES constant type count_t = 64; +define VIRTIO_INPUT_MAX_EV_TYPES constant type count_t = 32; +define VIRTIO_INPUT_MAX_STRING_SIZE constant type count_t = 128; +define VIRTIO_INPUT_MAX_BITMAP_SIZE constant type count_t = 128; +define VIRTIO_INPUT_MAX_ABSINFO_SIZE constant type count_t = 20; +define VIRTIO_INPUT_SUBSEL_INVALID constant type count_t = 255; + +extend virtio_config_space union { + input_config structure virtio_input_config; +}; + +extend virtio_device_type enumeration { + INPUT = 18; +}; + +extend virtio_mmio object { + input_data pointer structure virtio_input_data; +}; + +define virtio_input_absinfo structure { + subsel uint8; + data array(VIRTIO_INPUT_MAX_ABSINFO_SIZE) uint8; +}; + +define virtio_input_ev_bits structure { + subsel uint8; + size uint8; + data array(VIRTIO_INPUT_MAX_BITMAP_SIZE) uint8; +}; + +define virtio_input_data structure { + name array(VIRTIO_INPUT_MAX_STRING_SIZE) uint8; + name_size size; + serial array(VIRTIO_INPUT_MAX_STRING_SIZE) uint8; + serial_size size; + prop_bits uint32; + devids uint64; + absinfo pointer structure virtio_input_absinfo; + absinfo_count type count_t; + ev_bits pointer structure virtio_input_ev_bits; + ev_bits_count type count_t; +}; + +define virtio_input_config structure { + select @ 0x00 uint8(atomic); + subsel @ 0x01 uint8(atomic); + size @ 0x02 uint8(atomic); + u @ 0x08 union virtio_input_banked_regs; + config @ 0x88 array(VIRTIO_MMIO_INPUT_REG_CONFIG_BYTES) uint8(atomic); +}; + +define virtio_input_banked_regs union { + string array(VIRTIO_INPUT_MAX_STRING_SIZE) uint8(atomic); + bitmap array(VIRTIO_INPUT_MAX_BITMAP_SIZE) uint8(atomic); + abs array(VIRTIO_INPUT_MAX_ABSINFO_SIZE) uint8(atomic); + ids uint64(atomic); +}; + +define virtio_input_config_select enumeration(explicit) { + CFG_UNSET = 0x00; + CFG_ID_NAME = 0x01; + CFG_ID_SERIAL = 0x02; + CFG_ID_DEVIDS = 0x03; + CFG_PROP_BITS = 0x10; + CFG_EV_BITS = 0x11; + CFG_ABS_INFO = 0x12; +}; diff --git a/hyp/vm/virtio_mmio/include/virtio_mmio.h b/hyp/vm/virtio_mmio/include/virtio_mmio.h index 2fe12cc..05b3132 100644 --- a/hyp/vm/virtio_mmio/include/virtio_mmio.h +++ b/hyp/vm/virtio_mmio/include/virtio_mmio.h @@ -4,7 +4,8 @@ error_t virtio_mmio_configure(virtio_mmio_t *virtio_mmio, memextent_t *memextent, - count_t vqs_num); + count_t vqs_num, virtio_option_flags_t flags, + virtio_device_type_t device_type); error_t virtio_mmio_backend_bind_virq(virtio_mmio_t *virtio_mmio, vic_t *vic, diff --git a/hyp/vm/virtio_mmio/src/hypercalls.c b/hyp/vm/virtio_mmio/src/hypercalls.c index 
228c1a8..b14472e 100644 --- a/hyp/vm/virtio_mmio/src/hypercalls.c +++ b/hyp/vm/virtio_mmio/src/hypercalls.c @@ -3,6 +3,7 @@ // SPDX-License-Identifier: BSD-3-Clause #include +#include #include #include @@ -12,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -22,7 +24,9 @@ error_t hypercall_virtio_mmio_configure(cap_id_t virtio_mmio_cap, - cap_id_t memextent_cap, count_t vqs_num) + cap_id_t memextent_cap, count_t vqs_num, + virtio_option_flags_t flags, + virtio_device_type_t device_type) { error_t err; cspace_t *cspace = cspace_get_self(); @@ -55,7 +59,8 @@ hypercall_virtio_mmio_configure(cap_id_t virtio_mmio_cap, if (atomic_load_relaxed(&virtio_mmio->header.state) == OBJECT_STATE_INIT) { - err = virtio_mmio_configure(virtio_mmio, memextent, vqs_num); + err = virtio_mmio_configure(virtio_mmio, memextent, vqs_num, + flags, device_type); } else { err = ERROR_OBJECT_STATE; } @@ -145,10 +150,20 @@ hypercall_virtio_mmio_backend_assert_virq(cap_id_t virtio_mmio_cap, if (virtio_mmio_status_reg_get_device_needs_reset(&status)) { err = ERROR_DENIED; } else { +#if defined(PLATFORM_NO_DEVICE_ATTR_ATOMIC_UPDATE) && \ + PLATFORM_NO_DEVICE_ATTR_ATOMIC_UPDATE + spinlock_acquire(&virtio_mmio->lock); + uint32_t new_irq_status = atomic_load_relaxed( + &virtio_mmio->regs->interrupt_status); + new_irq_status |= interrupt_status; + atomic_store_relaxed(&virtio_mmio->regs->interrupt_status, + new_irq_status); + spinlock_release(&virtio_mmio->lock); +#else (void)atomic_fetch_or_explicit( &virtio_mmio->regs->interrupt_status, interrupt_status, memory_order_relaxed); - +#endif atomic_thread_fence(memory_order_release); // Assert frontend's IRQ (void)virq_assert(&virtio_mmio->backend_source, false); @@ -345,15 +360,15 @@ hypercall_virtio_mmio_backend_get_queue_info(cap_id_t virtio_mmio_cap, ret.queue_desc = queue_regs->desc_high; ret.queue_desc = ret.queue_desc << 32; - ret.queue_desc |= queue_regs->desc_low; + ret.queue_desc |= (register_t)queue_regs->desc_low; ret.queue_drv = queue_regs->drv_high; ret.queue_drv = ret.queue_drv << 32; - ret.queue_drv |= queue_regs->drv_low; + ret.queue_drv |= (register_t)queue_regs->drv_low; ret.queue_dev = queue_regs->dev_high; ret.queue_dev = ret.queue_dev << 32; - ret.queue_dev |= queue_regs->dev_low; + ret.queue_dev |= (register_t)queue_regs->dev_low; ret.error = OK; diff --git a/hyp/vm/virtio_mmio/src/vdevice.c b/hyp/vm/virtio_mmio/src/vdevice.c index 235d9be..dac0c71 100644 --- a/hyp/vm/virtio_mmio/src/vdevice.c +++ b/hyp/vm/virtio_mmio/src/vdevice.c @@ -15,6 +15,8 @@ #include #include +#include + #include #include "event_handlers.h" @@ -33,9 +35,10 @@ virtio_mmio_access_allowed(size_t size, size_t offset) ret = true; } else if (size == sizeof(uint8_t)) { // Byte accesses only allowed for config - ret = ((offset >= OFS_VIRTIO_MMIO_REGS_CONFIG(0U)) && - (offset <= OFS_VIRTIO_MMIO_REGS_CONFIG(( - VIRTIO_MMIO_REG_CONFIG_BYTES - 1U)))); + ret = ((offset >= (size_t)OFS_VIRTIO_MMIO_REGS_DEVICE_CONFIG) && + (offset <= + ((size_t)((size_t)OFS_VIRTIO_MMIO_REGS_DEVICE_CONFIG + + (VIRTIO_MMIO_REG_CONFIG_BYTES - 1U))))); } else { // Invalid access size ret = false; @@ -44,25 +47,29 @@ virtio_mmio_access_allowed(size_t size, size_t offset) return ret; } -static bool -virtio_mmio_default_write(const virtio_mmio_t *virtio_mmio, size_t offset, - size_t access_size, uint32_t val) +vcpu_trap_result_t +virtio_mmio_default_write(const virtio_mmio_t *virtio_mmio, size_t write_offset, + size_t access_size, register_t val) { - bool ret = true; - - if ((offset >= 
OFS_VIRTIO_MMIO_REGS_CONFIG(0U)) && - (offset <= OFS_VIRTIO_MMIO_REGS_CONFIG( - (VIRTIO_MMIO_REG_CONFIG_BYTES - 1U)))) { - index_t n = (index_t)(offset - OFS_VIRTIO_MMIO_REGS_CONFIG(0U)); + vcpu_trap_result_t ret = VCPU_TRAP_RESULT_FAULT; + if ((write_offset >= (size_t)OFS_VIRTIO_MMIO_REGS_DEVICE_CONFIG) && + (write_offset <= + (size_t)((size_t)OFS_VIRTIO_MMIO_REGS_DEVICE_CONFIG + + VIRTIO_MMIO_REG_CONFIG_BYTES - 1U))) { + index_t n = + (index_t)(write_offset - + (size_t)OFS_VIRTIO_MMIO_REGS_DEVICE_CONFIG); // Loop through every byte - uint32_t shifted_val = val; + register_t shifted_val = val; for (index_t i = 0U; i < access_size; i++) { - atomic_store_relaxed(&virtio_mmio->regs->config[n + i], - (uint8_t)shifted_val); + atomic_store_relaxed( + &virtio_mmio->regs->device_config.raw[n + i], + (uint8_t)shifted_val); shifted_val >>= 8U; } + ret = VCPU_TRAP_RESULT_EMULATED; } else { - ret = false; + ret = VCPU_TRAP_RESULT_FAULT; } return ret; @@ -221,8 +228,19 @@ virtio_mmio_write_queue_notify(virtio_mmio_t *virtio_mmio, uint32_t val) static void virtio_mmio_write_interrupt_ack(virtio_mmio_t *virtio_mmio, uint32_t val) { +#if defined(PLATFORM_NO_DEVICE_ATTR_ATOMIC_UPDATE) && \ + PLATFORM_NO_DEVICE_ATTR_ATOMIC_UPDATE + spinlock_acquire(&virtio_mmio->lock); + uint32_t interrupt_status = + atomic_load_relaxed(&virtio_mmio->regs->interrupt_status); + interrupt_status &= ~val; + atomic_store_relaxed(&virtio_mmio->regs->interrupt_status, + interrupt_status); + spinlock_release(&virtio_mmio->lock); +#else (void)atomic_fetch_and_explicit(&virtio_mmio->regs->interrupt_status, ~val, memory_order_relaxed); +#endif } static bool @@ -306,8 +324,10 @@ virtio_mmio_vdevice_write(virtio_mmio_t *virtio_mmio, size_t offset, break; default: - ret = virtio_mmio_default_write(virtio_mmio, offset, - access_size, val); + ret = (trigger_virtio_mmio_device_config_write_event( + virtio_mmio->device_type, + (const virtio_mmio_t *)virtio_mmio, offset, val, + access_size) == VCPU_TRAP_RESULT_EMULATED); break; } diff --git a/hyp/vm/virtio_mmio/src/virtio_mmio.c b/hyp/vm/virtio_mmio/src/virtio_mmio.c index 4292a30..d2593c8 100644 --- a/hyp/vm/virtio_mmio/src/virtio_mmio.c +++ b/hyp/vm/virtio_mmio/src/virtio_mmio.c @@ -22,6 +22,8 @@ #include #include +#include + #include #include @@ -40,7 +42,8 @@ virtio_mmio_handle_object_create_virtio_mmio(virtio_mmio_create_t create) error_t virtio_mmio_configure(virtio_mmio_t *virtio_mmio, memextent_t *memextent, - count_t vqs_num) + count_t vqs_num, virtio_option_flags_t flags, + virtio_device_type_t device_type) { error_t ret = OK; @@ -55,6 +58,17 @@ virtio_mmio_configure(virtio_mmio_t *virtio_mmio, memextent_t *memextent, goto out; } + if (virtio_option_flags_get_valid_device_type(&flags)) { + if (trigger_virtio_mmio_valid_device_type_event(device_type)) { + virtio_mmio->device_type = device_type; + } else { + ret = ERROR_ARGUMENT_INVALID; + goto out; + } + } else { + virtio_mmio->device_type = VIRTIO_DEVICE_TYPE_INVALID; + } + if (virtio_mmio->me != NULL) { object_put_memextent(virtio_mmio->me); } @@ -103,6 +117,12 @@ virtio_mmio_handle_object_activate_virtio_mmio(virtio_mmio_t *virtio_mmio) virtio_mmio->banked_queue_regs = (virtio_mmio_banked_queue_registers_t *)alloc_ret.r; + ret = trigger_virtio_mmio_device_config_activate_event( + virtio_mmio->device_type, virtio_mmio); + if (ret != OK) { + goto out; + } + // Allocate virtio config page size_t size = virtio_mmio->me->size; if (size < sizeof(*virtio_mmio->regs)) { @@ -182,6 +202,9 @@ 
virtio_mmio_handle_object_cleanup_virtio_mmio(virtio_mmio_t *virtio_mmio) virtio_mmio->vqs_num = 0U; } + (void)trigger_virtio_mmio_device_config_cleanup_event( + virtio_mmio->device_type, virtio_mmio); + if (virtio_mmio->me != NULL) { object_put_memextent(virtio_mmio->me); virtio_mmio->me = NULL; @@ -269,3 +292,17 @@ virtio_mmio_backend_handle_virq_check_pending(virq_source_t *source) return (atomic_load_relaxed(&virtio_mmio->regs->interrupt_status) != 0U); } + +error_t +virtio_default_handle_object_activate(virtio_mmio_t *virtio_mmio) +{ + (void)virtio_mmio; + return OK; +} + +error_t +virtio_default_handle_object_cleanup(virtio_mmio_t *virtio_mmio) +{ + (void)virtio_mmio; + return OK; +} diff --git a/hyp/vm/virtio_mmio/virtio_mmio.ev b/hyp/vm/virtio_mmio/virtio_mmio.ev index 0233078..02ac3ab 100644 --- a/hyp/vm/virtio_mmio/virtio_mmio.ev +++ b/hyp/vm/virtio_mmio/virtio_mmio.ev @@ -2,6 +2,30 @@ // // SPDX-License-Identifier: BSD-3-Clause +interface virtio_mmio + +selector_event virtio_mmio_valid_device_type + selector type_: virtio_device_type_t + return: bool = false + +selector_event virtio_mmio_device_config_write + selector type_: virtio_device_type_t + param virtio_mmio: const virtio_mmio_t * + param offset: size_t + param value: register_t + param access_size: size_t + return: vcpu_trap_result_t = VCPU_TRAP_RESULT_UNHANDLED + +selector_event virtio_mmio_device_config_activate + selector type_: virtio_device_type_t + param virtio_mmio: virtio_mmio_t * + return: error_t = OK + +selector_event virtio_mmio_device_config_cleanup + selector type_: virtio_device_type_t + param virtio_mmio: virtio_mmio_t * + return: error_t = OK + module virtio_mmio subscribe object_create_virtio_mmio @@ -20,3 +44,12 @@ subscribe virq_check_pending[VIRQ_TRIGGER_VIRTIO_MMIO_BACKEND] handler virtio_mmio_backend_handle_virq_check_pending(source) subscribe vdevice_access[VDEVICE_TYPE_VIRTIO_MMIO](vdevice, offset, access_size, value, is_write) + +subscribe virtio_mmio_device_config_write[VIRTIO_DEVICE_TYPE_INVALID] + handler virtio_mmio_default_write(virtio_mmio, offset, value, access_size) + +subscribe virtio_mmio_device_config_activate[VIRTIO_DEVICE_TYPE_INVALID] + handler virtio_default_handle_object_activate(virtio_mmio) + +subscribe virtio_mmio_device_config_cleanup[VIRTIO_DEVICE_TYPE_INVALID] + handler virtio_default_handle_object_cleanup(virtio_mmio) diff --git a/hyp/vm/virtio_mmio/virtio_mmio.tc b/hyp/vm/virtio_mmio/virtio_mmio.tc index 26fadfb..cf1ceca 100644 --- a/hyp/vm/virtio_mmio/virtio_mmio.tc +++ b/hyp/vm/virtio_mmio/virtio_mmio.tc @@ -30,10 +30,12 @@ define virtio_mmio_status_reg bitfield<32> { }; extend virtio_mmio object { + flags bitfield virtio_option_flags; me pointer object memextent; backend_source structure virq_source(contained); frontend_source structure virq_source(contained); frontend_device structure vdevice(contained); + device_type enumeration virtio_device_type; regs pointer structure virtio_mmio_regs; size size; vqs_num type count_t; @@ -84,7 +86,7 @@ define virtio_mmio_regs structure(aligned(PGTABLE_HYP_PAGE_SIZE)) { queue_dev_low @ 0x0a0 uint32(atomic); queue_dev_high @ 0x0a4 uint32(atomic); config_gen @ 0x0fc uint32(atomic); - config @ 0x100 array(VIRTIO_MMIO_REG_CONFIG_BYTES) uint8(atomic); + device_config @ 0x100 union virtio_config_space; }; extend virq_trigger enumeration { diff --git a/hyp/vm/vrtc_pl031/src/hypercalls.c b/hyp/vm/vrtc_pl031/src/hypercalls.c index 9a8edbb..fdabfbb 100644 --- a/hyp/vm/vrtc_pl031/src/hypercalls.c +++ b/hyp/vm/vrtc_pl031/src/hypercalls.c @@ 
-48,6 +48,7 @@ hypercall_vrtc_configure(cap_id_t vrtc_cap, vmaddr_t ipa) } spinlock_release(&vrtc->header.lock); + object_put_vrtc(vrtc); out: return err; } @@ -70,7 +71,7 @@ hypercall_vrtc_set_time_base(cap_id_t vrtc_cap, nanoseconds_t time_base, if (vrtc->time_base != 0U) { // The time base has already been set once err = ERROR_BUSY; - goto out; + goto out_put_vrtc; } preempt_disable(); @@ -92,6 +93,8 @@ hypercall_vrtc_set_time_base(cap_id_t vrtc_cap, nanoseconds_t time_base, out_preempt: preempt_enable(); +out_put_vrtc: + object_put_vrtc(vrtc); out: return err; } diff --git a/hyp/vm/vtbre/build.conf b/hyp/vm/vtrbe/build.conf similarity index 68% rename from hyp/vm/vtbre/build.conf rename to hyp/vm/vtrbe/build.conf index fd20f96..1ef8322 100644 --- a/hyp/vm/vtbre/build.conf +++ b/hyp/vm/vtrbe/build.conf @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: BSD-3-Clause -base_module hyp/platform/tbre +base_module hyp/platform/trbe base_module hyp/misc/vet -events vtbre.ev -source vtbre.c +events vtrbe.ev +source vtrbe.c diff --git a/hyp/vm/vtbre/src/vtbre.c b/hyp/vm/vtrbe/src/vtrbe.c similarity index 86% rename from hyp/vm/vtbre/src/vtbre.c rename to hyp/vm/vtrbe/src/vtrbe.c index a9b71d7..777bf36 100644 --- a/hyp/vm/vtbre/src/vtbre.c +++ b/hyp/vm/vtrbe/src/vtrbe.c @@ -17,10 +17,10 @@ #include #include "event_handlers.h" -#include "tbre.h" +#include "trbe.h" void -vtbre_handle_boot_cpu_cold_init(void) +vtrbe_handle_boot_cpu_cold_init(void) { ID_AA64DFR0_EL1_t id_aa64dfr0 = register_ID_AA64DFR0_EL1_read(); // NOTE: ID_AA64DFR0.TraceBuffer just indicates if trace buffer is @@ -29,7 +29,7 @@ vtbre_handle_boot_cpu_cold_init(void) } error_t -vtbre_handle_object_create_thread(thread_create_t thread_create) +vtrbe_handle_object_create_thread(thread_create_t thread_create) { thread_t *thread = thread_create.thread; @@ -44,7 +44,7 @@ vet_update_trace_buffer_status(thread_t *self) { assert(self != NULL); -#if !DISABLE_TBRE +#if !DISABLE_TRBE // check/set by reading TBRLIMITR.EN == 1 TRBLIMITR_EL1_t trb_limitr = register_TRBLIMITR_EL1_read_ordered(&vet_ordering); @@ -72,12 +72,12 @@ vet_disable_buffer(void) } static void -vtbre_prohibit_registers_access(thread_t *self, bool prohibit) +vtrbe_prohibit_registers_access(thread_t *self, bool prohibit) { assert(self != NULL); - // MDCR_EL2.E2TB == 0b11 to enable access to TBRE - // MDCR_EL2.E2TB == 0b10 to disable access to TBRE + // MDCR_EL2.E2TB == 0b11 to enable access to TRBE + // MDCR_EL2.E2TB == 0b10 to disable access to TRBE uint8_t expect = prohibit ? 
0x2U : 0x3U; MDCR_EL2_set_E2TB(&self->vcpu_regs_el2.mdcr_el2, expect); @@ -89,13 +89,13 @@ void vet_save_buffer_thread_context(thread_t *self) { (void)self; - vtbre_prohibit_registers_access(self, true); + vtrbe_prohibit_registers_access(self, true); } void vet_restore_buffer_thread_context(thread_t *self) { - vtbre_prohibit_registers_access(self, false); + vtrbe_prohibit_registers_access(self, false); } void @@ -119,7 +119,7 @@ vet_save_buffer_power_context(void) asm_context_sync_ordered(&vet_ordering); - tbre_save_context_percpu(cpulocal_get_index()); + trbe_save_context_percpu(cpulocal_get_index()); // Disable E2TB access MDCR_EL2_set_E2TB(&mdcr_el2, 2); @@ -138,7 +138,7 @@ vet_restore_buffer_power_context(void) asm_context_sync_ordered(&vet_ordering); - tbre_restore_context_percpu(cpulocal_get_index()); + trbe_restore_context_percpu(cpulocal_get_index()); // Disable E2TB access MDCR_EL2_set_E2TB(&mdcr_el2, 2); @@ -146,11 +146,11 @@ vet_restore_buffer_power_context(void) } vcpu_trap_result_t -vtbre_handle_vcpu_trap_sysreg(ESR_EL2_ISS_MSR_MRS_t iss) +vtrbe_handle_vcpu_trap_sysreg(ESR_EL2_ISS_MSR_MRS_t iss) { vcpu_trap_result_t ret; -#if DISABLE_TBRE +#if DISABLE_TRBE (void)iss; ret = VCPU_TRAP_RESULT_UNHANDLED; @@ -161,7 +161,7 @@ vtbre_handle_vcpu_trap_sysreg(ESR_EL2_ISS_MSR_MRS_t iss) (ESR_EL2_ISS_MSR_MRS_get_Op1(&iss) != 0U) || (ESR_EL2_ISS_MSR_MRS_get_CRn(&iss) != 9U) || (ESR_EL2_ISS_MSR_MRS_get_CRm(&iss) != 11U))) { - // Not a TBRE register access. + // Not a TRBE register access. ret = VCPU_TRAP_RESULT_UNHANDLED; } else if (!vcpu_option_flags_get_trace_allowed( &thread->vcpu_options)) { @@ -173,7 +173,7 @@ vtbre_handle_vcpu_trap_sysreg(ESR_EL2_ISS_MSR_MRS_t iss) current->vet_trace_buffer_enabled = true; // only enable the register access - vtbre_prohibit_registers_access(false); + vtrbe_prohibit_registers_access(false); ret = VCPU_TRAP_RESULT_RETRY; } else { diff --git a/hyp/vm/vtbre/vtbre.ev b/hyp/vm/vtrbe/vtrbe.ev similarity index 81% rename from hyp/vm/vtbre/vtbre.ev rename to hyp/vm/vtrbe/vtrbe.ev index 745d0f1..1c347d8 100644 --- a/hyp/vm/vtbre/vtbre.ev +++ b/hyp/vm/vtrbe/vtrbe.ev @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: BSD-3-Clause -module vtbre +module vtrbe subscribe boot_cpu_cold_init() @@ -12,7 +12,7 @@ subscribe object_create_thread priority -10 subscribe vcpu_trap_sysreg_read - handler vtbre_handle_vcpu_trap_sysreg + handler vtrbe_handle_vcpu_trap_sysreg subscribe vcpu_trap_sysreg_write - handler vtbre_handle_vcpu_trap_sysreg + handler vtrbe_handle_vcpu_trap_sysreg diff --git a/tools/build/__main__.py b/tools/build/__main__.py index f56273e..af2c359 100644 --- a/tools/build/__main__.py +++ b/tools/build/__main__.py @@ -400,9 +400,12 @@ def template_match(template_engine, d): try: llvm_root = graph.get_env('LLVM') except KeyError: - logger.error( - "Please set $LLVM to the root of the prebuilt LLVM") - sys.exit(1) + try: + llvm_root = graph.get_env('QCOM_LLVM') + except KeyError: + logger.error( + "Please set $QCOM_LLVM or $LLVM to the root of the prebuilt LLVM") + sys.exit(1) # Use a QC prebuilt LLVM graph.add_env('CLANG', os.path.join(llvm_root, 'bin', 'clang')) diff --git a/tools/cpptest/Checkers_Man_All_Req.properties b/tools/cpptest/Checkers_Man_All_Req.properties index b16bba1..212ae2e 100644 --- a/tools/cpptest/Checkers_Man_All_Req.properties +++ b/tools/cpptest/Checkers_Man_All_Req.properties @@ -50,6 +50,9 @@ MISRAC2012-DIR_4_1-g=true MISRAC2012-DIR_4_1-h=true MISRAC2012-DIR_4_1-i=true MISRAC2012-DIR_4_1-j=true +MISRAC2012-DIR_4_1-k=true 
+MISRAC2012-DIR_4_1-l=true +MISRAC2012-DIR_4_1-m=true MISRAC2012-DIR_4_2-a=true MISRAC2012-DIR_4_3-a=true MISRAC2012-DIR_4_4-a=true @@ -61,6 +64,11 @@ MISRAC2012-DIR_4_7-a=true MISRAC2012-DIR_4_7-b=true MISRAC2012-DIR_4_8-a=false MISRAC2012-DIR_4_9-a=false +MISRAC2012-DIR_5_1-a=true +MISRAC2012-DIR_5_1-b=true +MISRAC2012-DIR_5_1-c=true +MISRAC2012-DIR_5_2-a=true +MISRAC2012-DIR_5_3-a=true MISRAC2012-RULE_10_1-a=true MISRAC2012-RULE_10_1-b=true MISRAC2012-RULE_10_1-c=true @@ -68,6 +76,7 @@ MISRAC2012-RULE_10_1-d=true MISRAC2012-RULE_10_1-e=true MISRAC2012-RULE_10_1-f=true MISRAC2012-RULE_10_1-g=true +MISRAC2012-RULE_10_1-h=true MISRAC2012-RULE_10_2-a=true MISRAC2012-RULE_10_3-a=true MISRAC2012-RULE_10_3-b=true @@ -80,6 +89,7 @@ MISRAC2012-RULE_10_6-a=true MISRAC2012-RULE_10_7-a=true MISRAC2012-RULE_10_7-b=true MISRAC2012-RULE_10_8-a=true +MISRAC2012-RULE_11_10-a=true MISRAC2012-RULE_11_1-a=true MISRAC2012-RULE_11_1-b=true MISRAC2012-RULE_11_2-a=true @@ -103,6 +113,7 @@ MISRAC2012-RULE_12_3-a=false MISRAC2012-RULE_12_4-a=true MISRAC2012-RULE_12_4-b=true MISRAC2012-RULE_12_5-a=true +MISRAC2012-RULE_12_6-a=true MISRAC2012-RULE_13_1-a=true MISRAC2012-RULE_13_2-a=true MISRAC2012-RULE_13_2-b=true @@ -111,6 +122,7 @@ MISRAC2012-RULE_13_2-d=true MISRAC2012-RULE_13_2-e=true MISRAC2012-RULE_13_2-f=true MISRAC2012-RULE_13_2-g=true +MISRAC2012-RULE_13_2-h=true MISRAC2012-RULE_13_3-a=true MISRAC2012-RULE_13_4-a=true MISRAC2012-RULE_13_5-a=true @@ -201,6 +213,10 @@ MISRAC2012-RULE_16_5-a=true MISRAC2012-RULE_16_6-a=true MISRAC2012-RULE_16_7-a=true MISRAC2012-RULE_16_7-b=true +MISRAC2012-RULE_17_10-a=true +MISRAC2012-RULE_17_11-a=true +MISRAC2012-RULE_17_12-a=true +MISRAC2012-RULE_17_13-a=true MISRAC2012-RULE_17_1-a=true MISRAC2012-RULE_17_1-b=true MISRAC2012-RULE_17_2-a=true @@ -212,6 +228,7 @@ MISRAC2012-RULE_17_7-a=true MISRAC2012-RULE_17_7-b=true MISRAC2012-RULE_17_8-a=true MISRAC2012-RULE_17_9-a=true +MISRAC2012-RULE_18_10-a=true MISRAC2012-RULE_18_1-a=true MISRAC2012-RULE_18_1-b=true MISRAC2012-RULE_18_1-c=true @@ -221,6 +238,7 @@ MISRAC2012-RULE_18_4-a=false MISRAC2012-RULE_18_5-a=true MISRAC2012-RULE_18_6-a=true MISRAC2012-RULE_18_6-b=true +MISRAC2012-RULE_18_6-c=true MISRAC2012-RULE_18_7-a=true MISRAC2012-RULE_18_8-a=true MISRAC2012-RULE_18_9-a=true @@ -266,6 +284,8 @@ MISRAC2012-RULE_21_21-a=true MISRAC2012-RULE_21_22-a=true MISRAC2012-RULE_21_23-a=true MISRAC2012-RULE_21_24-a=true +MISRAC2012-RULE_21_25-a=true +MISRAC2012-RULE_21_26-a=true MISRAC2012-RULE_21_2-a=true MISRAC2012-RULE_21_2-b=false MISRAC2012-RULE_21_2-c=true @@ -288,6 +308,16 @@ MISRAC2012-RULE_2_1-g=true MISRAC2012-RULE-22_10-a-reportOnMissingErrnoCheck=false MISRAC2012-RULE_22_10-a-reportWhenErrnoIsNotZero=false MISRAC2012-RULE_22_10-a=true +MISRAC2012-RULE_22_11-a=true +MISRAC2012-RULE_22_12-a=true +MISRAC2012-RULE_22_13-a=true +MISRAC2012-RULE_22_14-a=true +MISRAC2012-RULE_22_14-b=true +MISRAC2012-RULE_22_15-a=true +MISRAC2012-RULE_22_16-a=true +MISRAC2012-RULE_22_17-a=true +MISRAC2012-RULE_22_18-a=true +MISRAC2012-RULE_22_19-a=true MISRAC2012-RULE_22_1-a-fieldsStoreResources=false MISRAC2012-RULE_22_1-a-nonMemberMethodsStoreResource=false MISRAC2012-RULE_22_1-a-patternName=^malloc|calloc|realloc|fopen$ @@ -295,6 +325,7 @@ MISRAC2012-RULE_22_1-a-patternNameMethodsStore=true MISRAC2012-RULE_22_1-a-reportUnvalidatedViolations=false MISRAC2012-RULE_22_1-a-storeByTPMethods=false MISRAC2012-RULE_22_1-a=true +MISRAC2012-RULE_22_20-a=true MISRAC2012-RULE_22_2-a=true MISRAC2012-RULE_22_2-b=true MISRAC2012-RULE_22_3-a=true @@ -309,6 
+340,16 @@ MISRAC2012-RULE_22_8-a=true MISRAC2012-RULE_22_9-a-reportOnUnnecessaryErrnoCheck=false MISRAC2012-RULE_22_9-a-reportWhenErrnoIsNotZero=false MISRAC2012-RULE_22_9-a=true +MISRAC2012-RULE_23_1-a=true +MISRAC2012-RULE_23_1-b=true +MISRAC2012-RULE_23_2-a=true +MISRAC2012-RULE_23_3-a=true +MISRAC2012-RULE_23_4-a=true +MISRAC2012-RULE_23_5-a=true +MISRAC2012-RULE_23_6-a=true +MISRAC2012-RULE_23_6-b=true +MISRAC2012-RULE_23_7-a=true +MISRAC2012-RULE_23_8-a=true MISRAC2012-RULE_2_2-a=true MISRAC2012-RULE_2_3-a=false MISRAC2012-RULE_2_3-b=false @@ -317,6 +358,9 @@ MISRAC2012-RULE_2_4-b=false MISRAC2012-RULE_2_5-a=false MISRAC2012-RULE_2_6-a=false MISRAC2012-RULE_2_7-a=false +MISRAC2012-RULE_2_8-a=true +MISRAC2012-RULE_2_8-b=true +MISRAC2012-RULE_2_8-c=true MISRAC2012-RULE_3_1-a=true MISRAC2012-RULE_3_1-b=true MISRAC2012-RULE_3_1-c=true @@ -341,14 +385,18 @@ MISRAC2012-RULE_5_6-b=true MISRAC2012-RULE_5_7-a=true MISRAC2012-RULE_5_7-b=true MISRAC2012-RULE_5_8-a=true +MISRAC2012-RULE_5_8-b=true MISRAC2012-RULE_5_9-a=true MISRAC2012-RULE_5_9-b=true MISRAC2012-RULE_6_1-a=true MISRAC2012-RULE_6_2-a=true +MISRAC2012-RULE_6_3-a=true MISRAC2012-RULE_7_1-a=true MISRAC2012-RULE_7_2-a=true MISRAC2012-RULE_7_3-a=true MISRAC2012-RULE_7_4-a=true +MISRAC2012-RULE_7_5-a=true +MISRAC2012-RULE_7_6-a=true MISRAC2012-RULE_8_10-a=true MISRAC2012-RULE_8_11-a=true MISRAC2012-RULE_8_12-a=true @@ -378,3 +426,5 @@ MISRAC2012-RULE_9_2-a=true MISRAC2012-RULE_9_3-a=true MISRAC2012-RULE_9_4-a=true MISRAC2012-RULE_9_5-a=true +MISRAC2012-RULE_9_6-a=true +MISRAC2012-RULE_9_7-a=true diff --git a/tools/cpptest/misra_xml_to_json.py b/tools/cpptest/misra_xml_to_json.py index dbbf0f0..9ab10e6 100755 --- a/tools/cpptest/misra_xml_to_json.py +++ b/tools/cpptest/misra_xml_to_json.py @@ -42,11 +42,13 @@ } deviation_map = { + # Deviation because the behaviour proscribed by the rule is exactly the + # intended behaviour of assert(): it prints the unexpanded expression. 'MISRAC2012-RULE_20_12-a': [ (None, re.compile(r"parameter of potential macro 'assert'")), ], # False positives due to __c11 builtins taking int memory order arguments - # instead of enum + # instead of enum in the Clang implementation. 'MISRAC2012-RULE_10_3-b': [ (None, re.compile(r"number '2'.*'essentially Enum'.*" r"'__c11_atomic_load'.*'essentially signed'")), @@ -99,6 +101,29 @@ 'MISRAC2012-RULE_16_3-b': [ (re.compile(r'^build/.*/objects/.*\.c$'), None), ], + # False positive due to a builtin sizeof variant that does not evaluate its + # argument, so there is no uninitialised use. + 'MISRAC2012-RULE_9_1-a': [ + (None, re.compile(r'passed to "__builtin_object_size"')), + ], + 'MISRAC2012-RULE_1_3-b': [ + (None, re.compile(r'passed to "__builtin_object_size"')), + ], + # Deviation because casting a pointer to _Atomic to a pointer that can't be + # dereferenced at all (const void *) is reasonably safe, and is needed for + # certain builtin functions where the compiler knows the real underlying + # object type anyway (e.g. __builtin_object_size) or where the object type + # does not matter (e.g. __builtin_prefetch). + 'MISRAC2012-RULE_11_8-a': [ + (None, re.compile(r"to the 'const void \*' type which removes the " + r"'_Atomic' qualifiers")), + ], + # Compliance with rule 21.25 would have a significant performance impact. + # All existing uses have been thoroughly analysed and tested, so we will + # seek a project-wide deviation for this rule. 
+ 'MISRAC2012-RULE_21_25-a': [ + (None, None), + ], } diff --git a/tools/debug/tracebuf.py b/tools/debug/tracebuf.py index b367330..dd902d9 100755 --- a/tools/debug/tracebuf.py +++ b/tools/debug/tracebuf.py @@ -49,6 +49,12 @@ 53: "PSCI_VPM_VCPU_RESUME", 54: "PSCI_SYSTEM_SUSPEND", 55: "PSCI_SYSTEM_RESUME", + 128: "WAIT_QUEUE_RESERVE", + 129: "WAIT_QUEUE_WAKE", + 130: "WAIT_QUEUE_WAKE_ACK", + 131: "WAIT_QUEUE_SLEEP", + 132: "WAIT_QUEUE_RESUME", + 133: "WAIT_QUEUE_FREE", }
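
Editor's note, tying the virtio changes together: a backend VM would first configure the Virtual IO Interface Object with its device type, then use the new virtio-input hypercalls to populate the emulated config space. The sketch below is hypothetical: the gunyah_hyp_* wrapper names and exact signatures are assumed (the real wrappers are generated from the .hvc definitions), as are the virtio_option_flags_default()/set accessors and the capability and IPA variables; object activation between the two configure steps is also elided.

// Placeholders: caps granted by the resource manager, and the IPA of a
// buffer in the backend VM holding the device name string.
extern cap_id_t virtio_cap, memextent_cap;
extern vmaddr_t name_ipa;
extern uint32_t name_len;

static error_t
virtio_input_backend_setup_example(void)
{
	// Declare the device type at configure time; one bit of the option
	// flags marks the device type argument as valid.
	virtio_option_flags_t flags = virtio_option_flags_default();
	virtio_option_flags_set_valid_device_type(&flags, true);

	error_t err = gunyah_hyp_virtio_mmio_configure(
		virtio_cap, memextent_cap, 2U /* virtqueues */, flags,
		VIRTIO_DEVICE_TYPE_INPUT);
	if (err != OK) {
		return err;
	}

	// (Object activation of the virtio cap happens here; input_data is
	// allocated by virtio_input_handle_object_activate().)

	// Reserve tracking space for up to four EV_BITS entries and no
	// absolute axes, and record device IDs and property bits.
	err = gunyah_hyp_virtio_input_configure(virtio_cap, 0U /* devids */,
						0U /* prop_bits */, 4U, 0U);
	if (err != OK) {
		return err;
	}

	// Publish the device name: sel 0x01 (CFG_ID_NAME), subsel 0.
	return gunyah_hyp_virtio_input_set_data(virtio_cap, 0x01U, 0U,
						name_len, name_ipa);
}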