diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index e139dd78..1c07a2cd 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -29,6 +29,8 @@ jobs:
     steps:
       - name: Checkout VMM repository
         uses: actions/checkout@v3
+        with:
+          submodules: 'true'
       - name: Download Microkit SDK
         run: ./ci/acquire_sdk.sh microkit-sdk.zip ${{ secrets.GITHUB_TOKEN }} linux-x86-64
         shell: bash
@@ -58,6 +60,8 @@ jobs:
     steps:
       - name: Checkout VMM repository
         uses: actions/checkout@v3
+        with:
+          submodules: 'true'
       - name: Download Microkit SDK
         run: ./ci/acquire_sdk.sh microkit-sdk.zip ${{ secrets.GITHUB_TOKEN }} macos-x86-64
         shell: bash
@@ -88,6 +92,8 @@ jobs:
     steps:
      - name: Checkout VMM repository
         uses: actions/checkout@v3
+        with:
+          submodules: 'true'
       - name: Download Microkit SDK
         run: ./ci/acquire_sdk.sh microkit-sdk.zip ${{ secrets.GITHUB_TOKEN }} linux-x86-64
         shell: bash
@@ -109,6 +115,8 @@ jobs:
     steps:
       - name: Checkout VMM repository
         uses: actions/checkout@v3
+        with:
+          submodules: 'true'
       - name: Download Microkit SDK
         run: ./ci/acquire_sdk.sh microkit-sdk.zip ${{ secrets.GITHUB_TOKEN }} macos-x86-64
         shell: bash
diff --git a/src/arch/aarch64/fault.c b/src/arch/aarch64/fault.c
index 582e4e20..f6f3763a 100644
--- a/src/arch/aarch64/fault.c
+++ b/src/arch/aarch64/fault.c
@@ -197,6 +197,14 @@ uint64_t fault_emulate(seL4_UserContext *regs, uint64_t reg, uint64_t addr, uint
     }
 }
 
+void fault_emulate_write(seL4_UserContext *regs, size_t addr, size_t fsr, size_t reg_val) {
+    // @ivanv: audit
+    /* Get register operand */
+    int rt = get_rt(fsr);
+    seL4_Word *reg_ctx = decode_rt(rt, regs);
+    *reg_ctx = fault_emulate(regs, *reg_ctx, addr, fsr, reg_val);
+}
+
 bool fault_advance(size_t vcpu_id, seL4_UserContext *regs, uint64_t addr, uint64_t fsr, uint64_t reg_val)
 {
     /* Get register operand */
@@ -294,12 +302,13 @@ struct vm_exception_handler {
     uintptr_t base;
     uintptr_t end;
     vm_exception_handler_t callback;
+    void *data;
 };
 #define MAX_VM_EXCEPTION_HANDLERS 16
 struct vm_exception_handler registered_vm_exception_handlers[MAX_VM_EXCEPTION_HANDLERS];
 size_t vm_exception_handler_index = 0;
 
-bool fault_register_vm_exception_handler(uintptr_t base, size_t size, vm_exception_handler_t callback) {
+bool fault_register_vm_exception_handler(uintptr_t base, size_t size, vm_exception_handler_t callback, void *data) {
     // @ivanv audit necessary here since this code was written very quickly. Other things to check such
     // as the region of memory is not overlapping with other regions, also should have GIC_DIST regions
     // use this API.
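
With this change the registration API carries an opaque `void *data` cookie that is handed back to the callback, and (as the next hunk shows) callbacks now receive the fault's offset within the registered region rather than the absolute address. A minimal sketch of a device model using the new API — `struct my_device`, its single register and the region constants are illustrative, not part of this patch:

    #define MY_DEV_BASE 0x200000 /* guest-physical base of the emulated device */
    #define MY_DEV_SIZE 0x1000

    struct my_device {
        uint32_t status_reg;
    };

    static struct my_device my_device;

    static bool my_device_fault_handle(size_t vcpu_id, size_t offset, size_t fsr,
                                       seL4_UserContext *regs, void *data)
    {
        struct my_device *dev = data; /* cookie given at registration time */
        if (fault_is_read(fsr)) {
            /* Complete the guest's load by writing the emulated value into the
             * destination register of the faulting instruction. */
            uint32_t mask = fault_get_data_mask(offset, fsr);
            fault_emulate_write(regs, offset, fsr, dev->status_reg & mask);
        } else {
            dev->status_reg = fault_get_data(regs, fsr) & fault_get_data_mask(offset, fsr);
        }
        return true;
    }

    bool my_device_init(void)
    {
        return fault_register_vm_exception_handler(MY_DEV_BASE, MY_DEV_SIZE,
                                                   &my_device_fault_handle, &my_device);
    }
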
@@ -316,18 +325,21 @@ bool fault_register_vm_exception_handler(uintptr_t base, size_t size, vm_excepti
         .base = base,
         .end = base + size,
         .callback = callback,
+        .data = data,
     };
     vm_exception_handler_index += 1;
 
     return true;
 }
 
-static bool fault_handle_registered_vm_exceptions(size_t vcpu_id, uintptr_t addr) {
+static bool fault_handle_registered_vm_exceptions(size_t vcpu_id, uintptr_t addr, size_t fsr, seL4_UserContext *regs) {
     for (int i = 0; i < MAX_VM_EXCEPTION_HANDLERS; i++) {
         uintptr_t base = registered_vm_exception_handlers[i].base;
         uintptr_t end = registered_vm_exception_handlers[i].end;
+        vm_exception_handler_t callback = registered_vm_exception_handlers[i].callback;
+        void *data = registered_vm_exception_handlers[i].data;
         if (addr >= base && addr < end) {
-            bool success = registered_vm_exception_handlers[i].callback(vcpu_id, addr);
+            bool success = callback(vcpu_id, addr - base, fsr, regs, data);
             if (!success) {
                 // @ivanv: improve error message
                 LOG_VMM_ERR("registered virtual memory exception handler for region [0x%lx..0x%lx) at address 0x%lx failed\n", base, end, addr);
@@ -361,8 +373,7 @@ bool fault_handle_vm_exception(size_t vcpu_id)
             return handle_vgic_redist_fault(vcpu_id, addr, fsr, &regs);
 #endif
         default: {
-            LOG_VMM("calling fault handle vm exception\n");
-            bool success = fault_handle_registered_vm_exceptions(vcpu_id ,addr);
+            bool success = fault_handle_registered_vm_exceptions(vcpu_id, addr, fsr, &regs);
             if (!success) {
                 /*
                  * We could not find a registered handler for the address, meaning that the fault
diff --git a/src/arch/aarch64/fault.h b/src/arch/aarch64/fault.h
index 0c977587..53acc377 100644
--- a/src/arch/aarch64/fault.h
+++ b/src/arch/aarch64/fault.h
@@ -8,6 +8,7 @@
 
 #include
 #include
+#include
 #include
 
 /* Fault-handling functions */
@@ -19,8 +20,8 @@ bool fault_handle_user_exception(size_t vcpu_id);
 bool fault_handle_unknown_syscall(size_t vcpu_id);
 bool fault_handle_vm_exception(size_t vcpu_id);
 
-typedef bool (*vm_exception_handler_t)(size_t vcpu_id, uintptr_t addr);
-bool fault_register_vm_exception_handler(uintptr_t base, size_t size, vm_exception_handler_t callback);
+typedef bool (*vm_exception_handler_t)(size_t vcpu_id, size_t offset, size_t fsr, seL4_UserContext *regs, void *data);
+bool fault_register_vm_exception_handler(uintptr_t base, size_t size, vm_exception_handler_t callback, void *data);
 
 /* Helpers for emulating the fault and getting fault details */
 bool fault_advance_vcpu(size_t vcpu_id, seL4_UserContext *regs);
@@ -28,6 +29,7 @@ bool fault_advance(size_t vcpu_id, seL4_UserContext *regs, uint64_t addr, uint64
 uint64_t fault_get_data_mask(uint64_t addr, uint64_t fsr);
 uint64_t fault_get_data(seL4_UserContext *regs, uint64_t fsr);
 uint64_t fault_emulate(seL4_UserContext *regs, uint64_t reg, uint64_t addr, uint64_t fsr, uint64_t reg_val);
+void fault_emulate_write(seL4_UserContext *regs, size_t addr, size_t fsr, size_t reg_val);
 
 /* Take the fault label given by the kernel and convert it to a string. */
 char *fault_to_string(seL4_Word fault_label);
diff --git a/src/virtio/config.h b/src/virtio/config.h
new file mode 100644
index 00000000..615b86c6
--- /dev/null
+++ b/src/virtio/config.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+   Copyright Linux */
+
+#pragma once
+/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
+ * anyone can use the definitions to implement compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
+
+/* Virtio devices use a standardized configuration space to define their
+ * features and pass configuration information, but each implementation can
+ * store and access that space differently. */
+
+/* Status byte for guest to report progress, and synchronize features. */
+/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
+/* the guest OS triggers a device reset */
+#define VIRTIO_CONFIG_S_RESET 0
+/* the guest OS has noticed the device. */
+#define VIRTIO_CONFIG_S_ACKNOWLEDGE 1
+/* the guest OS has found a driver for the device. */
+#define VIRTIO_CONFIG_S_DRIVER 2
+/* Driver has used its parts of the config, and is happy */
+#define VIRTIO_CONFIG_S_DRIVER_OK 4
+/* Driver has finished configuring features */
+#define VIRTIO_CONFIG_S_FEATURES_OK 8
+/* Device entered invalid state, driver must reset it */
+#define VIRTIO_CONFIG_S_NEEDS_RESET 0x40
+/* We've given up on this device. */
+#define VIRTIO_CONFIG_S_FAILED 0x80
+
+/*
+ * Virtio feature bits VIRTIO_TRANSPORT_F_START through
+ * VIRTIO_TRANSPORT_F_END are reserved for the transport
+ * being used (e.g. virtio_ring, virtio_pci etc.), the
+ * rest are per-device feature bits.
+ */
+#define VIRTIO_TRANSPORT_F_START 28
+#define VIRTIO_TRANSPORT_F_END 41
+
+#ifndef VIRTIO_CONFIG_NO_LEGACY
+/* Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them? */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT 27
+#endif /* VIRTIO_CONFIG_NO_LEGACY */
+
+/* v1.0 compliant. */
+#define VIRTIO_F_VERSION_1 32
+
+/*
+ * If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature.
+ * If set - use platform DMA tools to access the memory.
+ *
+ * Note the reverse polarity (compared to most other features),
+ * this is for compatibility with legacy systems.
+ */
+#define VIRTIO_F_ACCESS_PLATFORM 33
+/* Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) */
+#define VIRTIO_F_IOMMU_PLATFORM VIRTIO_F_ACCESS_PLATFORM
+
+/* This feature indicates support for the packed virtqueue layout. */
+#define VIRTIO_F_RING_PACKED 34
+
+/*
+ * Inorder feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER 35
+
+/*
+ * This feature indicates that memory accesses by the driver and the
+ * device are ordered in a way described by the platform.
+ */
+#define VIRTIO_F_ORDER_PLATFORM 36
+
+/*
+ * Does the device support Single Root I/O Virtualization?
+ */
+#define VIRTIO_F_SR_IOV 37
+
+/*
+ * This feature indicates that the driver can reset a queue individually.
+ */
+#define VIRTIO_F_RING_RESET 40
\ No newline at end of file
diff --git a/src/virtio/mmio.c b/src/virtio/mmio.c
new file mode 100644
index 00000000..33031f02
--- /dev/null
+++ b/src/virtio/mmio.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright 2023, UNSW (ABN 57 195 873 179)
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+#include
+#include "util/util.h"
+#include "fault.h"
+#include "virtio/config.h"
+#include "virtio/mmio.h"
+#include "virtio/virtq.h"
+
+/* Uncomment this to enable debug logging */
+// #define DEBUG_MMIO
+
+// @ivanv: use this logging instead of having commented printfs
+#if defined(DEBUG_MMIO)
+#define LOG_MMIO(...) do{ printf("%s|VIRTIO(MMIO): ", microkit_name); printf(__VA_ARGS__); }while(0)
+#else
+#define LOG_MMIO(...) do{}while(0)
+#endif
+
+// @jade: add some giant comments about this file
+// generic virtio mmio emul interface
+
+#define REG_RANGE(r0, r1) r0 ... (r1 - 1)
+
+struct virtq *get_current_virtq_by_handler(virtio_device_t *dev) {
+    assert(dev->data.QueueSel < dev->num_vqs);
+    return &dev->vqs[dev->data.QueueSel].virtq;
+}
+
+/*
+ * The protocol for device status changes can be found in section
+ * '3.1 Device Initialization' of the virtIO specification.
+ */
+// @ivanv: why is this function necessary, why not just use data.Status?
+int handle_virtio_mmio_get_status_flag(virtio_device_t *dev, uint32_t *retreg) {
+    *retreg = dev->data.Status;
+    return 1;
+}
+
+int handle_virtio_mmio_set_status_flag(virtio_device_t *dev, uint32_t reg) {
+    int success = 1;
+
+    // we only care about the new status
+    dev->data.Status &= reg;
+    reg ^= dev->data.Status;
+    // printf("VIRTIO MMIO|INFO: set status flag 0x%x.\n", reg);
+
+    switch (reg) {
+        case VIRTIO_CONFIG_S_RESET:
+            dev->data.Status = 0;
+            dev->funs->device_reset(dev);
+            break;
+
+        case VIRTIO_CONFIG_S_ACKNOWLEDGE:
+            // are we following the initialization protocol?
+            if (dev->data.Status == 0) {
+                dev->data.Status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
+                // nothing to do from our side (as the virtio device).
+            }
+            break;
+
+        case VIRTIO_CONFIG_S_DRIVER:
+            // are we following the initialization protocol?
+            if (dev->data.Status & VIRTIO_CONFIG_S_ACKNOWLEDGE) {
+                dev->data.Status |= VIRTIO_CONFIG_S_DRIVER;
+                // nothing to do from our side (as the virtio device).
+            }
+            break;
+
+        case VIRTIO_CONFIG_S_FEATURES_OK:
+            // are we following the initialization protocol?
+            if (dev->data.Status & VIRTIO_CONFIG_S_DRIVER) {
+                // are features OK?
+                dev->data.Status |= (dev->data.features_happy ? VIRTIO_CONFIG_S_FEATURES_OK : 0);
+            }
+            break;
+
+        case VIRTIO_CONFIG_S_DRIVER_OK:
+            dev->data.Status |= VIRTIO_CONFIG_S_DRIVER_OK;
+            // probably do some sanity checks here
+            break;
+
+        case VIRTIO_CONFIG_S_FAILED:
+            printf("VIRTIO MMIO|INFO: received FAILED status from driver, giving up this device.\n");
+            break;
+
+        default:
+            printf("VIRTIO MMIO|INFO: unknown device status 0x%x.\n", reg);
+            success = 0;
+    }
+    return success;
+}
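
For context, the cases above mirror the write sequence a guest driver performs during '3.1 Device Initialization'. A bare-metal sketch of the driver side of that handshake, assuming the device's registers sit at a hypothetical `base` (REG_VIRTIO_MMIO_STATUS is declared in mmio.h below):

    static inline void mmio_write32(uintptr_t base, size_t offset, uint32_t val)
    {
        *(volatile uint32_t *)(base + offset) = val;
    }

    static inline uint32_t mmio_read32(uintptr_t base, size_t offset)
    {
        return *(volatile uint32_t *)(base + offset);
    }

    void driver_handshake(uintptr_t base)
    {
        mmio_write32(base, REG_VIRTIO_MMIO_STATUS, 0); /* lands in the RESET case */
        mmio_write32(base, REG_VIRTIO_MMIO_STATUS, VIRTIO_CONFIG_S_ACKNOWLEDGE);
        mmio_write32(base, REG_VIRTIO_MMIO_STATUS,
                     VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER);
        /* ... feature negotiation via the DEVICE/DRIVER_FEATURES registers ... */
        mmio_write32(base, REG_VIRTIO_MMIO_STATUS,
                     VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER |
                     VIRTIO_CONFIG_S_FEATURES_OK);
        /* Re-read STATUS: the device only sets FEATURES_OK if it accepted the features. */
        if (!(mmio_read32(base, REG_VIRTIO_MMIO_STATUS) & VIRTIO_CONFIG_S_FEATURES_OK)) {
            return; /* device rejected the feature set */
        }
        /* ... virtqueue setup goes here ... */
        mmio_write32(base, REG_VIRTIO_MMIO_STATUS,
                     VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER |
                     VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_DRIVER_OK);
    }
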
+
+static bool handle_virtio_mmio_reg_read(virtio_device_t *dev, size_t vcpu_id, size_t offset, size_t fsr, seL4_UserContext *regs) {
+
+    uint32_t reg = 0;
+    bool success = true;
+    LOG_MMIO("read from 0x%lx\n", offset);
+
+    switch (offset) {
+        case REG_RANGE(REG_VIRTIO_MMIO_MAGIC_VALUE, REG_VIRTIO_MMIO_VERSION):
+            reg = VIRTIO_MMIO_DEV_MAGIC;
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_VERSION, REG_VIRTIO_MMIO_DEVICE_ID):
+            reg = VIRTIO_MMIO_DEV_VERSION;
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_DEVICE_ID, REG_VIRTIO_MMIO_VENDOR_ID):
+            reg = dev->data.DeviceID;
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_VENDOR_ID, REG_VIRTIO_MMIO_DEVICE_FEATURES):
+            reg = dev->data.VendorID;
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_DEVICE_FEATURES, REG_VIRTIO_MMIO_DEVICE_FEATURES_SEL):
+            success = dev->funs->get_device_features(dev, &reg);
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_NUM_MAX, REG_VIRTIO_MMIO_QUEUE_NUM):
+            reg = QUEUE_SIZE;
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_READY, REG_VIRTIO_MMIO_QUEUE_NOTIFY):
+            if (dev->data.QueueSel < dev->num_vqs) {
+                reg = dev->vqs[dev->data.QueueSel].ready;
+            } else {
+                LOG_VMM_ERR("invalid virtq index 0x%lx (number of virtqs is 0x%lx) "
+                            "given when accessing REG_VIRTIO_MMIO_QUEUE_READY\n", dev->data.QueueSel, dev->num_vqs);
+                success = false;
+            }
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_INTERRUPT_STATUS, REG_VIRTIO_MMIO_INTERRUPT_ACK):
+            reg = dev->data.InterruptStatus;
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_STATUS, REG_VIRTIO_MMIO_QUEUE_DESC_LOW):
+            success = handle_virtio_mmio_get_status_flag(dev, &reg);
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_CONFIG_GENERATION, REG_VIRTIO_MMIO_CONFIG):
+            /* ConfigGeneration will need to be updated every time the device changes any of
+             * the device config. Currently we only have the virtio net device, which doesn't
+             * update the device config, so ConfigGeneration is always 0. I left this comment
+             * here as a reminder. */
+            // reg = device->data.ConfigGeneration;
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_CONFIG, REG_VIRTIO_MMIO_CONFIG + 0x100):
+            success = dev->funs->get_device_config(dev, offset, &reg);
+            // uint32_t mask = fault_get_data_mask(fault_addr, fsr);
+            // printf("\"%s\"|VIRTIO MMIO|INFO: device config offset 0x%x, value 0x%x, mask 0x%x\n", sel4cp_name, offset, reg & mask, mask);
+            break;
+        default:
+            printf("VIRTIO MMIO|INFO: unknown register 0x%lx.\n", offset);
+            success = false;
+    }
+
+    uint32_t mask = fault_get_data_mask(offset, fsr);
+    // @ivanv: make it clearer that just passing the offset is okay,
+    // possibly just fix the API
+    fault_emulate_write(regs, offset, fsr, reg & mask);
+
+    return success;
+}
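
The first few cases above are exactly what a driver's probe path reads on startup; from the guest's side it looks something like this (sketch; `base` is a hypothetical identity-mapped device address):

    bool virtio_mmio_probe(uintptr_t base, uint32_t *device_id)
    {
        volatile uint32_t *mmio = (volatile uint32_t *)base;

        if (mmio[REG_VIRTIO_MMIO_MAGIC_VALUE / 4] != VIRTIO_MMIO_DEV_MAGIC) {
            return false; /* not a virtio-mmio device ("virt") */
        }
        if (mmio[REG_VIRTIO_MMIO_VERSION / 4] != VIRTIO_MMIO_DEV_VERSION) {
            return false; /* legacy (version 1) devices are not handled here */
        }
        /* e.g. DEVICE_ID_VIRTIO_CONSOLE (3) for the console device below */
        *device_id = mmio[REG_VIRTIO_MMIO_DEVICE_ID / 4];
        return *device_id != 0; /* device ID 0 means "no device here" */
    }
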
+
+static bool handle_virtio_mmio_reg_write(virtio_device_t *dev, size_t vcpu_id, size_t offset, size_t fsr, seL4_UserContext *regs) {
+    bool success = true;
+    uint32_t data = fault_get_data(regs, fsr);
+    uint32_t mask = fault_get_data_mask(offset, fsr);
+    /* Mask the data to write */
+    data &= mask;
+
+    // printf("\"%s\"|VIRTIO MMIO|INFO: Write to 0x%x.\n", sel4cp_name, offset);
+
+    switch (offset) {
+        case REG_RANGE(REG_VIRTIO_MMIO_DEVICE_FEATURES_SEL, REG_VIRTIO_MMIO_DRIVER_FEATURES):
+            dev->data.DeviceFeaturesSel = data;
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_DRIVER_FEATURES, REG_VIRTIO_MMIO_DRIVER_FEATURES_SEL):
+            success = dev->funs->set_driver_features(dev, data);
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_DRIVER_FEATURES_SEL, REG_VIRTIO_MMIO_QUEUE_SEL):
+            dev->data.DriverFeaturesSel = data;
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_SEL, REG_VIRTIO_MMIO_QUEUE_NUM_MAX):
+            dev->data.QueueSel = data;
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_NUM, REG_VIRTIO_MMIO_QUEUE_READY): {
+            if (dev->data.QueueSel < dev->num_vqs) {
+                struct virtq *virtq = get_current_virtq_by_handler(dev);
+                virtq->num = (unsigned int)data;
+            } else {
+                LOG_VMM_ERR("invalid virtq index 0x%lx (number of virtqs is 0x%lx) "
+                            "given when accessing REG_VIRTIO_MMIO_QUEUE_NUM\n", dev->data.QueueSel, dev->num_vqs);
+                success = false;
+            }
+            break;
+        }
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_READY, REG_VIRTIO_MMIO_QUEUE_NOTIFY):
+            if (data == 0x1) {
+                dev->vqs[dev->data.QueueSel].ready = true;
+                // the virtq is already in RAM so we don't need to do any initialisation
+            }
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_NOTIFY, REG_VIRTIO_MMIO_INTERRUPT_STATUS):
+            success = dev->funs->queue_notify(dev);
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_INTERRUPT_ACK, REG_VIRTIO_MMIO_STATUS):
+            dev->data.InterruptStatus &= ~data;
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_STATUS, REG_VIRTIO_MMIO_QUEUE_DESC_LOW):
+            success = handle_virtio_mmio_set_status_flag(dev, data);
+            break;
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_DESC_LOW, REG_VIRTIO_MMIO_QUEUE_DESC_HIGH): {
+            if (dev->data.QueueSel < dev->num_vqs) {
+                struct virtq *virtq = get_current_virtq_by_handler(dev);
+                uintptr_t ptr = (uintptr_t)virtq->desc;
+                ptr |= data;
+                virtq->desc = (struct virtq_desc *)ptr;
+            } else {
+                LOG_VMM_ERR("invalid virtq index 0x%lx (number of virtqs is 0x%lx) "
+                            "given when accessing REG_VIRTIO_MMIO_QUEUE_DESC_LOW\n", dev->data.QueueSel, dev->num_vqs);
+                success = false;
+            }
+            break;
+        }
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_DESC_HIGH, REG_VIRTIO_MMIO_QUEUE_AVAIL_LOW): {
+            if (dev->data.QueueSel < dev->num_vqs) {
+                struct virtq *virtq = get_current_virtq_by_handler(dev);
+                uintptr_t ptr = (uintptr_t)virtq->desc;
+                ptr |= (uintptr_t)data << 32;
+                virtq->desc = (struct virtq_desc *)ptr;
+            } else {
+                LOG_VMM_ERR("invalid virtq index 0x%lx (number of virtqs is 0x%lx) "
+                            "given when accessing REG_VIRTIO_MMIO_QUEUE_DESC_HIGH\n", dev->data.QueueSel, dev->num_vqs);
+                success = false;
+            }
+        }
+        break;
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_AVAIL_LOW, REG_VIRTIO_MMIO_QUEUE_AVAIL_HIGH): {
+            if (dev->data.QueueSel < dev->num_vqs) {
+                struct virtq *virtq = get_current_virtq_by_handler(dev);
+                uintptr_t ptr = (uintptr_t)virtq->avail;
+                ptr |= data;
+                virtq->avail = (struct virtq_avail *)ptr;
+            } else {
+                LOG_VMM_ERR("invalid virtq index 0x%lx (number of virtqs is 0x%lx) "
+                            "given when accessing REG_VIRTIO_MMIO_QUEUE_AVAIL_LOW\n", dev->data.QueueSel, dev->num_vqs);
+                success = false;
+            }
+            break;
+        }
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_AVAIL_HIGH, REG_VIRTIO_MMIO_QUEUE_USED_LOW): {
+            if (dev->data.QueueSel < dev->num_vqs) {
+                struct virtq *virtq = get_current_virtq_by_handler(dev);
+                uintptr_t ptr = (uintptr_t)virtq->avail;
+                ptr |= (uintptr_t)data << 32;
+                virtq->avail = (struct virtq_avail *)ptr;
+                // printf("VIRTIO MMIO|INFO: virtq avail 0x%lx\n.", ptr);
+            } else {
+                LOG_VMM_ERR("invalid virtq index 0x%lx (number of virtqs is 0x%lx) "
+                            "given when accessing REG_VIRTIO_MMIO_QUEUE_AVAIL_HIGH\n", dev->data.QueueSel, dev->num_vqs);
+                success = false;
+            }
+            break;
+        }
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_USED_LOW, REG_VIRTIO_MMIO_QUEUE_USED_HIGH): {
+            if (dev->data.QueueSel < dev->num_vqs) {
+                struct virtq *virtq = get_current_virtq_by_handler(dev);
+                uintptr_t ptr = (uintptr_t)virtq->used;
+                ptr |= data;
+                virtq->used = (struct virtq_used *)ptr;
+            } else {
+                LOG_VMM_ERR("invalid virtq index 0x%lx (number of virtqs is 0x%lx) "
+                            "given when accessing REG_VIRTIO_MMIO_QUEUE_USED_LOW\n", dev->data.QueueSel, dev->num_vqs);
+                success = false;
+            }
+            break;
+        }
+        case REG_RANGE(REG_VIRTIO_MMIO_QUEUE_USED_HIGH, REG_VIRTIO_MMIO_CONFIG_GENERATION): {
+            if (dev->data.QueueSel < dev->num_vqs) {
+                struct virtq *virtq = get_current_virtq_by_handler(dev);
+                uintptr_t ptr = (uintptr_t)virtq->used;
+                ptr |= (uintptr_t)data << 32;
+                virtq->used = (struct virtq_used *)ptr;
+                // printf("VIRTIO MMIO|INFO: virtq used 0x%lx\n.", ptr);
+            } else {
+                LOG_VMM_ERR("invalid virtq index 0x%lx (number of virtqs is 0x%lx) "
+                            "given when accessing REG_VIRTIO_MMIO_QUEUE_USED_HIGH\n", dev->data.QueueSel, dev->num_vqs);
+                success = false;
+            }
+            break;
+        }
+        case REG_RANGE(REG_VIRTIO_MMIO_CONFIG, REG_VIRTIO_MMIO_CONFIG + 0x100):
+            success = dev->funs->set_device_config(dev, offset, data);
+            break;
+        default:
+            printf("VIRTIO MMIO|INFO: unknown register 0x%lx.\n", offset);
+            success = false;
+    }
+
+    return success;
+}
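
The *_LOW/*_HIGH pairs above accumulate a 64-bit guest-physical address by ORing together two 32-bit halves, so they rely on the stored virtq pointers starting from zero (e.g. after device_reset()). The matching driver side of the split looks like this (sketch; `base` and the physical addresses are illustrative):

    void driver_setup_virtq(uintptr_t base, uint32_t queue_idx,
                            uint64_t desc_pa, uint64_t avail_pa, uint64_t used_pa)
    {
        volatile uint32_t *mmio = (volatile uint32_t *)base;

        mmio[REG_VIRTIO_MMIO_QUEUE_SEL / 4] = queue_idx;
        /* The device reports QUEUE_SIZE (128) here; the driver may pick less. */
        uint32_t num = mmio[REG_VIRTIO_MMIO_QUEUE_NUM_MAX / 4];
        mmio[REG_VIRTIO_MMIO_QUEUE_NUM / 4] = num;

        mmio[REG_VIRTIO_MMIO_QUEUE_DESC_LOW / 4]   = (uint32_t)desc_pa;
        mmio[REG_VIRTIO_MMIO_QUEUE_DESC_HIGH / 4]  = (uint32_t)(desc_pa >> 32);
        mmio[REG_VIRTIO_MMIO_QUEUE_AVAIL_LOW / 4]  = (uint32_t)avail_pa;
        mmio[REG_VIRTIO_MMIO_QUEUE_AVAIL_HIGH / 4] = (uint32_t)(avail_pa >> 32);
        mmio[REG_VIRTIO_MMIO_QUEUE_USED_LOW / 4]   = (uint32_t)used_pa;
        mmio[REG_VIRTIO_MMIO_QUEUE_USED_HIGH / 4]  = (uint32_t)(used_pa >> 32);

        mmio[REG_VIRTIO_MMIO_QUEUE_READY / 4] = 1; /* flips the 'ready' flag above */
    }
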
+
+bool virtio_mmio_fault_handle(size_t vcpu_id, size_t offset, size_t fsr, seL4_UserContext *regs, void *data) {
+    virtio_device_t *dev = (virtio_device_t *) data;
+    assert(dev);
+    if (fault_is_read(fsr)) {
+        return handle_virtio_mmio_reg_read(dev, vcpu_id, offset, fsr, regs);
+    } else {
+        return handle_virtio_mmio_reg_write(dev, vcpu_id, offset, fsr, regs);
+    }
+}
diff --git a/src/virtio/mmio.h b/src/virtio/mmio.h
new file mode 100644
index 00000000..a3231ec0
--- /dev/null
+++ b/src/virtio/mmio.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2023, UNSW (ABN 57 195 873 179)
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+#pragma once
+
+#include
+#include "util/util.h"
+#include "virtio/virtq.h"
+#include "serial/libserialsharedringbuffer/include/shared_ringbuffer.h"
+
+// table 4.1
+#define VIRTIO_MMIO_DEV_MAGIC 0x74726976 // "virt"
+#define VIRTIO_MMIO_DEV_VERSION 0x2
+#define VIRTIO_MMIO_DEV_VERSION_LEGACY 0x1
+#define VIRTIO_MMIO_DEV_VENDOR_ID 0x344c6573 // "seL4"
+
+#define REG_VIRTIO_MMIO_MAGIC_VALUE         0x000
+#define REG_VIRTIO_MMIO_VERSION             0x004
+#define REG_VIRTIO_MMIO_DEVICE_ID           0x008
+#define REG_VIRTIO_MMIO_VENDOR_ID           0x00c
+#define REG_VIRTIO_MMIO_DEVICE_FEATURES     0x010
+#define REG_VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014
+#define REG_VIRTIO_MMIO_DRIVER_FEATURES     0x020
+#define REG_VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024
+#define REG_VIRTIO_MMIO_QUEUE_SEL           0x030
+#define REG_VIRTIO_MMIO_QUEUE_NUM_MAX       0x034
+#define REG_VIRTIO_MMIO_QUEUE_NUM           0x038
+#define REG_VIRTIO_MMIO_QUEUE_READY         0x044
+#define REG_VIRTIO_MMIO_QUEUE_NOTIFY        0x050
+#define REG_VIRTIO_MMIO_INTERRUPT_STATUS    0x060
+#define REG_VIRTIO_MMIO_INTERRUPT_ACK       0x064
+#define REG_VIRTIO_MMIO_STATUS              0x070
+#define REG_VIRTIO_MMIO_QUEUE_DESC_LOW      0x080
+#define REG_VIRTIO_MMIO_QUEUE_DESC_HIGH     0x084
+#define REG_VIRTIO_MMIO_QUEUE_AVAIL_LOW     0x090
+#define REG_VIRTIO_MMIO_QUEUE_AVAIL_HIGH    0x094
+#define REG_VIRTIO_MMIO_QUEUE_USED_LOW      0x0a0
+#define REG_VIRTIO_MMIO_QUEUE_USED_HIGH     0x0a4
+#define REG_VIRTIO_MMIO_CONFIG_GENERATION   0x0fc
+#define REG_VIRTIO_MMIO_CONFIG              0x100
+
+// section 5
+// The following device IDs are used to identify different types of virtio devices,
+// only devices that sel4cp VMM currently supports are listed
+#define DEVICE_ID_VIRTIO_NET     1
+#define DEVICE_ID_VIRTIO_BLOCK   2
+#define DEVICE_ID_VIRTIO_CONSOLE 3
+#define DEVICE_ID_VIRTIO_VSOCK   19
+
+/* The maximum size (number of elements) of a virtqueue. It is set
+ * to 128 because I copied it from the camkes virtio device. If you find
+ * out that the virtqueue gets full easily, increase the number.
+ */
+#define QUEUE_SIZE 128
+
+/* handler of a virtqueue */
+// @ivanv: we can pack/bitfield this struct
+typedef struct virtio_queue_handler {
+    struct virtq virtq;
+    /* is this virtq fully initialised? */
+    bool ready;
+    /* the last index that the virtIO device processed */
+    uint16_t last_idx;
+} virtio_queue_handler_t;
+
+struct virtio_device;
+
+// functions provided by the emul (device) layer for the emul (mmio) layer
+typedef struct virtio_emul_funs {
+    void (*device_reset)(struct virtio_device *dev);
+
+    // REG_VIRTIO_MMIO_DEVICE_FEATURES related operations
+    int (*get_device_features)(struct virtio_device *dev, uint32_t *features);
+    int (*set_driver_features)(struct virtio_device *dev, uint32_t features);
+
+    // REG_VIRTIO_MMIO_CONFIG related operations
+    int (*get_device_config)(struct virtio_device *dev, uint32_t offset, uint32_t *ret_val);
+    int (*set_device_config)(struct virtio_device *dev, uint32_t offset, uint32_t val);
+    int (*queue_notify)(struct virtio_device *dev);
+} virtio_device_funs_t;
+
+// information you need for manipulating the MMIO Device Register Layout
+// @ivanv: I don't see why this extra struct is necessary, why not just make it part of the virtio device struct
+// virtio_device?
+typedef struct virtio_device_info {
+    uint32_t DeviceID;
+    uint32_t VendorID;
+
+    uint32_t DeviceFeaturesSel;
+    uint32_t DriverFeaturesSel;
+    /* True if we are happy with what the driver requires */
+    bool features_happy;
+
+    uint32_t QueueSel;
+    uint32_t QueueNotify;
+
+    uint32_t InterruptStatus;
+
+    uint32_t Status;
+
+    // uint32_t ConfigGeneration;
+} virtio_device_info_t;
+
+/* Everything needed at runtime for a virtIO device to function. */
+typedef struct virtio_device {
+    virtio_device_info_t data;
+    virtio_device_funs_t *funs;
+
+    /* List of virt queues for the device */
+    virtio_queue_handler_t *vqs;
+    /* Length of the vqs list */
+    size_t num_vqs;
+    /* Virtual IRQ associated with this virtIO device */
+    size_t virq;
+    /* Handlers for sDDF ring buffers */
+    ring_handle_t *sddf_rx_ring;
+    ring_handle_t *sddf_tx_ring;
+    /* Microkit channel to the sDDF TX multiplexor */
+    // @ivanv: this is microkit specific so maybe should be a callback instead or something.
+    // @ivanv: my worry here is that the device struct is supposed to be for all devices, but
+    // this is specific to device classes such as serial and networking
+    size_t sddf_mux_tx_ch;
+} virtio_device_t;
+
+/**
+ * Handles MMIO Device Register Layout I/O for VirtIO MMIO
+ *
+ * @param vcpu_id ID of the vcpu
+ * @param offset offset of the fault within the virtIO MMIO region
+ * @param fsr fault status register
+ * @param regs registers
+ * @param data pointer to virtIO device registered with the fault handler
+ */
+bool virtio_mmio_fault_handle(size_t vcpu_id, size_t offset, size_t fsr, seL4_UserContext *regs, void *data);
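
A device implementation plugs into this header by filling out a virtio_device_funs_t table and pointing dev->funs at it. A hypothetical skeleton — the my_console_* names are illustrative, not this patch's console device:

    static void my_console_reset(struct virtio_device *dev)
    {
        for (size_t i = 0; i < dev->num_vqs; i++) {
            dev->vqs[i].ready = false;
            dev->vqs[i].last_idx = 0;
        }
    }

    static int my_console_get_features(struct virtio_device *dev, uint32_t *features)
    {
        /* DeviceFeaturesSel picks which 32-bit half the driver is asking for; a
         * modern device must offer VIRTIO_F_VERSION_1 (bit 32, i.e. bit 0 of the
         * upper half). */
        *features = (dev->data.DeviceFeaturesSel == 1) ? (1u << (VIRTIO_F_VERSION_1 - 32)) : 0;
        return 1;
    }

    static int my_console_set_features(struct virtio_device *dev, uint32_t features)
    {
        dev->data.features_happy = true; /* accept whatever was requested, for the sketch */
        return 1;
    }

    static virtio_device_funs_t my_console_funs = {
        .device_reset = my_console_reset,
        .get_device_features = my_console_get_features,
        .set_driver_features = my_console_set_features,
        /* .get_device_config, .set_device_config, .queue_notify elided */
    };
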
diff --git a/src/virtio/virtio.c b/src/virtio/virtio.c
new file mode 100644
index 00000000..99c9d213
--- /dev/null
+++ b/src/virtio/virtio.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2023, UNSW
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+#include "fault.h"
+#include "util/util.h"
+#include "virtio/console.h"
+#include "virtio/virtio.h"
+#include "virq.h"
+
+static struct virtio_queue_handler virtio_console_queues[VIRTIO_CONSOLE_NUM_VIRTQ];
+
+void virtio_virq_default_ack(size_t vcpu_id, int irq, void *cookie) {
+    // nothing needs to be done
+}
+
+// assumes virq controller has been initialised
+bool virtio_mmio_device_init(virtio_device_t *dev,
+                             enum virtio_device_type type,
+                             uintptr_t region_base,
+                             uintptr_t region_size,
+                             size_t virq,
+                             ring_handle_t *sddf_rx_ring,
+                             ring_handle_t *sddf_tx_ring,
+                             size_t sddf_mux_tx_ch)
+{
+    bool success = true;
+    switch (type) {
+        case CONSOLE:
+            virtio_console_init(dev, virtio_console_queues, VIRTIO_CONSOLE_NUM_VIRTQ, virq, sddf_rx_ring, sddf_tx_ring, sddf_mux_tx_ch);
+            success = fault_register_vm_exception_handler(region_base,
+                                                          region_size,
+                                                          &virtio_mmio_fault_handle,
+                                                          dev);
+            if (!success) {
+                LOG_VMM_ERR("Could not register virtual memory fault handler for "
+                            "virtIO region [0x%lx..0x%lx)\n", region_base, region_base + region_size);
+                return false;
+            }
+            break;
+        default:
+            LOG_VMM_ERR("Unsupported virtIO device type given: %d\n", type);
+            return false;
+    }
+
+    /* Register the virtual IRQ that will be used to communicate from the device
+     * to the guest. This assumes that the interrupt controller is already setup. */
+    // @ivanv: we should check that (on AArch64) the virq is an SPI.
+    success = virq_register(GUEST_VCPU_ID, virq, &virtio_virq_default_ack, NULL);
+    assert(success);
+
+    return success;
+}
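
A caller wires this up once at boot, after the virtual interrupt controller is initialised; for example (sketch: the base/size/IRQ values and the ring/channel names are illustrative):

    #define VIRTIO_CONSOLE_BASE 0x130000 /* guest-physical MMIO window */
    #define VIRTIO_CONSOLE_SIZE 0x1000
    #define VIRTIO_CONSOLE_IRQ  74       /* must be an SPI on AArch64 */

    static virtio_device_t virtio_console;

    void vmm_init_virtio(ring_handle_t *serial_rx_ring, ring_handle_t *serial_tx_ring,
                         size_t serial_mux_tx_ch)
    {
        bool success = virtio_mmio_device_init(&virtio_console, CONSOLE,
                                               VIRTIO_CONSOLE_BASE, VIRTIO_CONSOLE_SIZE,
                                               VIRTIO_CONSOLE_IRQ,
                                               serial_rx_ring, serial_tx_ring,
                                               serial_mux_tx_ch);
        assert(success);
    }
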
diff --git a/src/virtio/virtio.h b/src/virtio/virtio.h
new file mode 100644
index 00000000..54c537a8
--- /dev/null
+++ b/src/virtio/virtio.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2023, UNSW
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+#pragma once
+
+#include
+#include
+#include
+#include "virtio/mmio.h"
+
+/*
+ * All terminology used and functionality of the virtIO device implementation
+ * adheres to the following specification:
+ * Virtual I/O Device (VIRTIO) Version 1.2
+ */
+
+/* All the supported virtIO device types. */
+enum virtio_device_type {
+    CONSOLE,
+};
+
+bool virtio_mmio_device_init(virtio_device_t *dev,
+                             enum virtio_device_type type,
+                             uintptr_t region_base,
+                             uintptr_t region_size,
+                             size_t virq,
+                             ring_handle_t *sddf_rx_ring,
+                             ring_handle_t *sddf_tx_ring,
+                             size_t sddf_mux_tx_ch);
diff --git a/src/virtio/virtq.h b/src/virtio/virtq.h
new file mode 100644
index 00000000..ba417aec
--- /dev/null
+++ b/src/virtio/virtq.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+#pragma once
+/* An interface for efficient virtio implementation, currently for use by KVM
+ * and lguest, but hopefully others soon. Do NOT change this since it will
+ * break existing servers and clients.
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright Rusty Russell IBM Corporation 2007. */
+
+#include
+
+/* This marks a buffer as continuing via the next field. */
+#define VIRTQ_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VIRTQ_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VIRTQ_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me when
+ * you add a buffer. It's unreliable, so it's simply an optimization. Guest
+ * will still kick if it's out of buffers. */
+#define VIRTQ_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
+ * when you consume a buffer. It's unreliable, so it's simply an
+ * optimization. */
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field. */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field. */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
+struct virtq_desc {
+    /* Address (guest-physical). */
+    uint64_t addr;
+    /* Length. */
+    uint32_t len;
+    /* The flags as indicated above. */
+    uint16_t flags;
+    /* We chain unused descriptors via this, too */
+    uint16_t next;
+};
+
+struct virtq_avail {
+    uint16_t flags;
+    uint16_t idx;
+    uint16_t ring[];
+};
+
+/* u32 is used here for ids for padding reasons. */
+struct virtq_used_elem {
+    /* Index of start of used descriptor chain. */
+    uint32_t id;
+    /* Total length of the descriptor chain which was used (written to) */
+    uint32_t len;
+};
+
+struct virtq_used {
+    uint16_t flags;
+    uint16_t idx;
+    struct virtq_used_elem ring[];
+};
+
+struct virtq {
+    unsigned int num;
+    struct virtq_desc *desc;
+    struct virtq_avail *avail;
+    struct virtq_used *used;
+};
+
+/* The standard layout for the ring is a continuous chunk of memory which looks
+ * like this. We assume num is a power of 2.
+ *
+ * struct virtq
+ * {
+ *     // The actual descriptors (16 bytes each)
+ *     struct virtq_desc desc[num];
+ *
+ *     // A ring of available descriptor heads with free-running index.
+ *     uint16_t avail_flags;
+ *     uint16_t avail_idx;
+ *     uint16_t available[num];
+ *     uint16_t used_event_idx;
+ *
+ *     // Padding to the next align boundary.
+ *     char pad[];
+ *
+ *     // A ring of used descriptor heads with free-running index.
+ *     uint16_t used_flags;
+ *     uint16_t used_idx;
+ *     struct virtq_used_elem used[num];
+ *     uint16_t avail_event_idx;
+ * };
+ */
+/* We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility. */
+#define virtq_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define virtq_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+static inline void virtq_init(struct virtq *vr, unsigned int num, void *p,
+                              unsigned long align)
+{
+    vr->num = num;
+    vr->desc = p;
+    vr->avail = p + num*sizeof(struct virtq_desc);
+    vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(uint16_t)
+                         + align-1) & ~(align - 1));
+}
+
+static inline unsigned virtq_size(unsigned int num, unsigned long align)
+{
+    return ((sizeof(struct virtq_desc) * num + sizeof(uint16_t) * (3 + num)
+             + align - 1) & ~(align - 1))
+           + sizeof(uint16_t) * 3 + sizeof(struct virtq_used_elem) * num;
+}
+
+/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
+/* Assuming a given event_idx value from the other side, if
+ * we have just incremented index from old to new_idx,
+ * should we trigger an event? */
+static inline int virtq_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+    /* Note: Xen has similar logic for notification hold-off
+     * in include/xen/interface/io/ring.h with req_event and req_prod
+     * corresponding to event_idx + 1 and new_idx respectively.
+     * Note also that req_event and req_prod in Xen start at 1,
+     * event indexes in virtio start at 0. */
+    return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
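
The unsigned wraparound in virtq_need_event() tests whether event_idx fell inside the half-open window (old, new_idx]. For instance, with old = 10 and new_idx = 12: event_idx = 11 gives (12 - 11 - 1) = 0 < (12 - 10) = 2, so notify; event_idx = 13 gives (12 - 13 - 1) = 0xFFFE, which is not < 2, so the notification is suppressed. Device-side use when publishing a used element might look like this (sketch; inject_irq() is a hypothetical stand-in for raising the device's virq):

    extern void inject_irq(void);

    void device_publish_used(struct virtq *vq, uint32_t head, uint32_t written)
    {
        uint16_t old_idx = vq->used->idx;

        vq->used->ring[old_idx % vq->num].id  = head;
        vq->used->ring[old_idx % vq->num].len = written;
        /* Make the element visible before publishing the new index
         * (a real implementation needs a memory barrier here). */
        vq->used->idx = old_idx + 1;

        /* virtq_used_event() is the driver's "interrupt me at this index" hint,
         * published at the end of the avail ring (VIRTIO_RING_F_EVENT_IDX). */
        if (virtq_need_event(virtq_used_event(vq), vq->used->idx, old_idx)) {
            inject_irq();
        }
    }
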