From e408527b4b92de3d6260435dee06f1ec686c5874 Mon Sep 17 00:00:00 2001 From: Eric Chan Date: Thu, 3 Oct 2024 12:41:54 +1000 Subject: [PATCH] CI style Signed-off-by: Eric Chan --- examples/virtio/board/odroidc4/virtio.system | 47 +- examples/virtio/client_vmm.c | 69 +- examples/virtio/include/blk_config.h | 38 +- include/libvmm/virtio/block.h | 87 +- src/virtio/block.c | 1220 +++++++++--------- tools/linux/uio/libuio.c | 428 +++--- tools/linux/uio_drivers/blk/blk.c | 257 ++-- 7 files changed, 1114 insertions(+), 1032 deletions(-) diff --git a/examples/virtio/board/odroidc4/virtio.system b/examples/virtio/board/odroidc4/virtio.system index 8035a01c..035b36da 100644 --- a/examples/virtio/board/odroidc4/virtio.system +++ b/examples/virtio/board/odroidc4/virtio.system @@ -7,7 +7,7 @@ - + @@ -165,20 +165,20 @@ - - - - + + + + - + - + @@ -207,10 +207,12 @@ - - - - + + + + + + @@ -221,19 +223,22 @@ - - - - - - - - + + + + + + + + + - + - + + + diff --git a/examples/virtio/client_vmm.c b/examples/virtio/client_vmm.c index 7987c0e2..f27a6c94 100644 --- a/examples/virtio/client_vmm.c +++ b/examples/virtio/client_vmm.c @@ -74,38 +74,37 @@ uintptr_t blk_storage_info; static struct virtio_blk_device virtio_blk; -_Static_assert(BLK_DATA_REGION_SIZE_CLI0 >= BLK_TRANSFER_SIZE && BLK_DATA_REGION_SIZE_CLI0 % BLK_TRANSFER_SIZE == 0, - "Client0 data region size must be a multiple of the transfer size"); -_Static_assert(BLK_DATA_REGION_SIZE_CLI1 >= BLK_TRANSFER_SIZE && BLK_DATA_REGION_SIZE_CLI1 % BLK_TRANSFER_SIZE == 0, - "Client1 data region size must be a multiple of the transfer size"); +_Static_assert( + BLK_DATA_REGION_SIZE_CLI0 >= BLK_TRANSFER_SIZE && + BLK_DATA_REGION_SIZE_CLI0 % BLK_TRANSFER_SIZE == 0, + "Client0 data region size must be a multiple of the transfer size"); +_Static_assert( + BLK_DATA_REGION_SIZE_CLI1 >= BLK_TRANSFER_SIZE && + BLK_DATA_REGION_SIZE_CLI1 % BLK_TRANSFER_SIZE == 0, + "Client1 data region size must be a multiple of the transfer size"); void init(void) { - blk_storage_info_t *storage_info = (blk_storage_info_t *)blk_storage_info; - - /* Busy wait until blk device is ready */ - while (!blk_storage_is_ready(storage_info)); - - /* Initialise the VMM, the VCPU(s), and start the guest */ - LOG_VMM("starting \"%s\"\n", microkit_name); - /* Place all the binaries in the right locations before starting the guest */ - size_t kernel_size = _guest_kernel_image_end - _guest_kernel_image; - size_t dtb_size = _guest_dtb_image_end - _guest_dtb_image; - size_t initrd_size = _guest_initrd_image_end - _guest_initrd_image; - uintptr_t kernel_pc = linux_setup_images(guest_ram_vaddr, - (uintptr_t) _guest_kernel_image, - kernel_size, - (uintptr_t) _guest_dtb_image, - GUEST_DTB_VADDR, - dtb_size, - (uintptr_t) _guest_initrd_image, - GUEST_INIT_RAM_DISK_VADDR, - initrd_size - ); - if (!kernel_pc) { - LOG_VMM_ERR("Failed to initialise guest images\n"); - return; - } + blk_storage_info_t *storage_info = (blk_storage_info_t *)blk_storage_info; + + /* Busy wait until blk device is ready */ + while (!blk_storage_is_ready(storage_info)) + ; + + /* Initialise the VMM, the VCPU(s), and start the guest */ + LOG_VMM("starting \"%s\"\n", microkit_name); + /* Place all the binaries in the right locations before starting the guest */ + size_t kernel_size = _guest_kernel_image_end - _guest_kernel_image; + size_t dtb_size = _guest_dtb_image_end - _guest_dtb_image; + size_t initrd_size = _guest_initrd_image_end - _guest_initrd_image; + uintptr_t kernel_pc = linux_setup_images( + guest_ram_vaddr, 
(uintptr_t)_guest_kernel_image, kernel_size, + (uintptr_t)_guest_dtb_image, GUEST_DTB_VADDR, dtb_size, + (uintptr_t)_guest_initrd_image, GUEST_INIT_RAM_DISK_VADDR, initrd_size); + if (!kernel_pc) { + LOG_VMM_ERR("Failed to initialise guest images\n"); + return; + } /* Initialise the virtual GIC driver */ bool success = virq_controller_init(GUEST_VCPU_ID); @@ -135,14 +134,10 @@ void init(void) blk_cli_queue_size(microkit_name)); /* Initialise virtIO block device */ - success = virtio_mmio_blk_init(&virtio_blk, - VIRTIO_BLK_BASE, VIRTIO_BLK_SIZE, VIRTIO_BLK_IRQ, - blk_data, - BLK_DATA_SIZE, - storage_info, - &blk_queue_h, - blk_cli_queue_size(microkit_name), - BLK_CH); + success = virtio_mmio_blk_init(&virtio_blk, VIRTIO_BLK_BASE, + VIRTIO_BLK_SIZE, VIRTIO_BLK_IRQ, blk_data, + BLK_DATA_SIZE, storage_info, &blk_queue_h, + blk_cli_queue_size(microkit_name), BLK_CH); assert(success); /* Finally start the guest */ diff --git a/examples/virtio/include/blk_config.h b/examples/virtio/include/blk_config.h index 229fca10..4557259a 100644 --- a/examples/virtio/include/blk_config.h +++ b/examples/virtio/include/blk_config.h @@ -15,11 +15,12 @@ #define BLK_NAME_CLI0 "CLIENT_VMM-1" #define BLK_NAME_CLI1 "CLIENT_VMM-2" -#define BLK_QUEUE_CAPACITY_CLI0 1024 -#define BLK_QUEUE_CAPACITY_CLI1 1024 -#define BLK_QUEUE_CAPACITY_DRIV (BLK_QUEUE_CAPACITY_CLI0 + BLK_QUEUE_CAPACITY_CLI1) +#define BLK_QUEUE_CAPACITY_CLI0 1024 +#define BLK_QUEUE_CAPACITY_CLI1 1024 +#define BLK_QUEUE_CAPACITY_DRIV \ + (BLK_QUEUE_CAPACITY_CLI0 + BLK_QUEUE_CAPACITY_CLI1) -#define BLK_REGION_SIZE 0x200000 +#define BLK_REGION_SIZE 0x200000 #define BLK_DATA_REGION_SIZE_CLI0 BLK_REGION_SIZE #define BLK_DATA_REGION_SIZE_CLI1 BLK_REGION_SIZE #define BLK_DATA_REGION_SIZE_DRIV BLK_REGION_SIZE @@ -30,16 +31,17 @@ static const int blk_partition_mapping[BLK_NUM_CLIENTS] = { 0, 1 }; -static inline blk_storage_info_t *blk_virt_cli_storage_info(blk_storage_info_t *info, unsigned int id) -{ - switch (id) { - case 0: - return info; - case 1: - return (blk_storage_info_t *)((uintptr_t)info + BLK_STORAGE_INFO_REGION_SIZE); - default: - return NULL; - } +static inline blk_storage_info_t * +blk_virt_cli_storage_info(blk_storage_info_t *info, unsigned int id) { + switch (id) { + case 0: + return info; + case 1: + return (blk_storage_info_t *)((uintptr_t)info + + BLK_STORAGE_INFO_REGION_SIZE); + default: + return NULL; + } } static inline uintptr_t blk_virt_cli_data_region(uintptr_t data, unsigned int id) @@ -94,9 +96,9 @@ static inline uint32_t blk_virt_cli_queue_size(unsigned int id) { switch (id) { case 0: - return BLK_QUEUE_CAPACITY_CLI0; + return BLK_QUEUE_CAPACITY_CLI0; case 1: - return BLK_QUEUE_CAPACITY_CLI1; + return BLK_QUEUE_CAPACITY_CLI1; default: return 0; } @@ -105,9 +107,9 @@ static inline uint32_t blk_virt_cli_queue_size(unsigned int id) static inline uint32_t blk_cli_queue_size(char *pd_name) { if (!sddf_strcmp(pd_name, BLK_NAME_CLI0)) { - return BLK_QUEUE_CAPACITY_CLI0; + return BLK_QUEUE_CAPACITY_CLI0; } else if (!sddf_strcmp(pd_name, BLK_NAME_CLI1)) { - return BLK_QUEUE_CAPACITY_CLI1; + return BLK_QUEUE_CAPACITY_CLI1; } else { return 0; } diff --git a/include/libvmm/virtio/block.h b/include/libvmm/virtio/block.h index d4968bd0..88fbeda2 100644 --- a/include/libvmm/virtio/block.h +++ b/include/libvmm/virtio/block.h @@ -150,56 +150,53 @@ struct virtio_blk_outhdr { * virtio response from sddf response. 
*/ typedef struct reqbk { - // /* For writing response byte in virtio descriptor */ - // uint8_t *virtio_resp_byte; - /* Descriptor head of the virtio request */ - uint16_t virtio_desc_head; - /* For enqueuing sddf req/resp */ - uintptr_t sddf_data_cell_base; - uint16_t sddf_count; - uint32_t sddf_block_number; - uintptr_t sddf_data; - /* The size of data contained in virtio request */ - uint32_t virtio_body_size_bytes; - /* Indicates this request is an unaligned write from virtIO. When not true, - * this request is the "read" part of the read-modify-write. A subsequent - * write request will be enqueued to complete the read-modify-write. - */ - bool aligned; + // /* For writing response byte in virtio descriptor */ + // uint8_t *virtio_resp_byte; + /* Descriptor head of the virtio request */ + uint16_t virtio_desc_head; + /* For enqueuing sddf req/resp */ + uintptr_t sddf_data_cell_base; + uint16_t sddf_count; + uint32_t sddf_block_number; + uintptr_t sddf_data; + /* The size of data contained in virtio request */ + uint32_t virtio_body_size_bytes; + /* Indicates this request is an unaligned write from virtIO. When not true, + * this request is the "read" part of the read-modify-write. A subsequent + * write request will be enqueued to complete the read-modify-write. + */ + bool aligned; } reqbk_t; struct virtio_blk_device { - struct virtio_device virtio_device; - struct virtio_blk_config config; - struct virtio_queue_handler vqs[VIRTIO_BLK_NUM_VIRTQ]; - /* Request bookkeep indexed by the request id */ - reqbk_t reqsbk[SDDF_MAX_QUEUE_CAPACITY]; - /* Data struct that handles allocation and freeing of fixed size data cells - * in sDDF memory region */ - fsmalloc_t fsmalloc; - bitarray_t fsmalloc_avail_bitarr; - word_t fsmalloc_avail_bitarr_words[roundup_bits2words64(SDDF_MAX_DATA_CELLS)]; - /* Index allocator for sddf request ids */ - ialloc_t ialloc; - uint32_t ialloc_idxlist[SDDF_MAX_QUEUE_CAPACITY]; - /* Sddf structures */ - blk_storage_info_t *storage_info; - blk_queue_handle_t queue_h; - uint32_t queue_capacity; - uintptr_t data_region; - /* Channel to notify microkit component serving this client */ - int server_ch; + struct virtio_device virtio_device; + struct virtio_blk_config config; + struct virtio_queue_handler vqs[VIRTIO_BLK_NUM_VIRTQ]; + /* Request bookkeep indexed by the request id */ + reqbk_t reqsbk[SDDF_MAX_QUEUE_CAPACITY]; + /* Data struct that handles allocation and freeing of fixed size data cells + * in sDDF memory region */ + fsmalloc_t fsmalloc; + bitarray_t fsmalloc_avail_bitarr; + word_t fsmalloc_avail_bitarr_words[roundup_bits2words64(SDDF_MAX_DATA_CELLS)]; + /* Index allocator for sddf request ids */ + ialloc_t ialloc; + uint32_t ialloc_idxlist[SDDF_MAX_QUEUE_CAPACITY]; + /* Sddf structures */ + blk_storage_info_t *storage_info; + blk_queue_handle_t queue_h; + uint32_t queue_capacity; + uintptr_t data_region; + /* Channel to notify microkit component serving this client */ + int server_ch; }; bool virtio_mmio_blk_init(struct virtio_blk_device *blk_dev, - uintptr_t region_base, - uintptr_t region_size, - size_t virq, - uintptr_t data_region, - size_t data_region_size, - blk_storage_info_t *storage_info, - blk_queue_handle_t *queue_h, - uint32_t queue_capacity, - int server_ch); + uintptr_t region_base, uintptr_t region_size, + size_t virq, uintptr_t data_region, + size_t data_region_size, + blk_storage_info_t *storage_info, + blk_queue_handle_t *queue_h, uint32_t queue_capacity, + int server_ch); bool virtio_blk_handle_resp(struct virtio_blk_device *blk_dev); diff 
--git a/src/virtio/block.c b/src/virtio/block.c index b584c1c5..e7a30280 100644 --- a/src/virtio/block.c +++ b/src/virtio/block.c @@ -33,607 +33,674 @@ static inline struct virtio_blk_device *device_state(struct virtio_device *dev) return (struct virtio_blk_device *)dev->device_data; } -static inline void virtio_blk_mmio_reset(struct virtio_device *dev) -{ - dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].ready = false; - dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].last_idx = 0; +static inline void virtio_blk_mmio_reset(struct virtio_device *dev) { + dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].ready = false; + dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].last_idx = 0; } -static inline bool virtio_blk_mmio_get_device_features(struct virtio_device *dev, uint32_t *features) -{ - if (dev->data.Status & VIRTIO_CONFIG_S_FEATURES_OK) { - LOG_BLOCK_ERR("driver somehow wants to read device features after FEATURES_OK\n"); - } - - switch (dev->data.DeviceFeaturesSel) { - /* feature bits 0 to 31 */ - case 0: - *features = BIT_LOW(VIRTIO_BLK_F_FLUSH); - *features = *features | BIT_LOW(VIRTIO_BLK_F_BLK_SIZE); - break; - /* features bits 32 to 63 */ - case 1: - *features = BIT_HIGH(VIRTIO_F_VERSION_1); - break; - default: - LOG_BLOCK_ERR("driver sets DeviceFeaturesSel to 0x%x, which doesn't make sense\n", - dev->data.DeviceFeaturesSel); - return false; - } - - return true; +static inline bool +virtio_blk_mmio_get_device_features(struct virtio_device *dev, + uint32_t *features) { + if (dev->data.Status & VIRTIO_CONFIG_S_FEATURES_OK) { + LOG_BLOCK_ERR( + "driver somehow wants to read device features after FEATURES_OK\n"); + } + + switch (dev->data.DeviceFeaturesSel) { + /* feature bits 0 to 31 */ + case 0: + *features = BIT_LOW(VIRTIO_BLK_F_FLUSH); + *features = *features | BIT_LOW(VIRTIO_BLK_F_BLK_SIZE); + break; + /* features bits 32 to 63 */ + case 1: + *features = BIT_HIGH(VIRTIO_F_VERSION_1); + break; + default: + LOG_BLOCK_ERR( + "driver sets DeviceFeaturesSel to 0x%x, which doesn't make sense\n", + dev->data.DeviceFeaturesSel); + return false; + } + + return true; } -static inline bool virtio_blk_mmio_set_driver_features(struct virtio_device *dev, uint32_t features) -{ - /* According to virtio initialisation protocol, - this should check what device features were set, and return the subset of features understood - by the driver. */ - bool success = false; - - uint32_t device_features = 0; - device_features |= BIT_LOW(VIRTIO_BLK_F_FLUSH); - device_features |= BIT_LOW(VIRTIO_BLK_F_BLK_SIZE); - - switch (dev->data.DriverFeaturesSel) { - /* feature bits 0 to 31 */ - case 0: - success = (features == device_features); - break; - /* features bits 32 to 63 */ - case 1: - success = (features == BIT_HIGH(VIRTIO_F_VERSION_1)); - break; - default: - LOG_BLOCK_ERR("driver sets DriverFeaturesSel to 0x%x, which doesn't make sense\n", - dev->data.DriverFeaturesSel); - return false; - } - - if (success) { - dev->data.features_happy = 1; - } - - return success; +static inline bool +virtio_blk_mmio_set_driver_features(struct virtio_device *dev, + uint32_t features) { + /* According to virtio initialisation protocol, + this should check what device features were set, and return the subset of + features understood by the driver. 
*/ + bool success = false; + + uint32_t device_features = 0; + device_features |= BIT_LOW(VIRTIO_BLK_F_FLUSH); + device_features |= BIT_LOW(VIRTIO_BLK_F_BLK_SIZE); + + switch (dev->data.DriverFeaturesSel) { + /* feature bits 0 to 31 */ + case 0: + success = (features == device_features); + break; + /* features bits 32 to 63 */ + case 1: + success = (features == BIT_HIGH(VIRTIO_F_VERSION_1)); + break; + default: + LOG_BLOCK_ERR( + "driver sets DriverFeaturesSel to 0x%x, which doesn't make sense\n", + dev->data.DriverFeaturesSel); + return false; + } + + if (success) { + dev->data.features_happy = 1; + } + + return success; } -static inline bool virtio_blk_mmio_get_device_config(struct virtio_device *dev, uint32_t offset, uint32_t *ret_val) -{ - struct virtio_blk_device *state = device_state(dev); - - uintptr_t config_base_addr = (uintptr_t)&state->config; - uintptr_t config_field_offset = (uintptr_t)(offset - REG_VIRTIO_MMIO_CONFIG); - uint32_t *config_field_addr = (uint32_t *)(config_base_addr + config_field_offset); - *ret_val = *config_field_addr; - LOG_BLOCK("get device config with base_addr 0x%x and field_address 0x%x has value %d\n", - config_base_addr, config_field_addr, *ret_val); - - return true; +static inline bool virtio_blk_mmio_get_device_config(struct virtio_device *dev, + uint32_t offset, + uint32_t *ret_val) { + struct virtio_blk_device *state = device_state(dev); + + uintptr_t config_base_addr = (uintptr_t)&state->config; + uintptr_t config_field_offset = (uintptr_t)(offset - REG_VIRTIO_MMIO_CONFIG); + uint32_t *config_field_addr = + (uint32_t *)(config_base_addr + config_field_offset); + *ret_val = *config_field_addr; + LOG_BLOCK("get device config with base_addr 0x%x and field_address 0x%x has " + "value %d\n", + config_base_addr, config_field_addr, *ret_val); + + return true; } -static inline bool virtio_blk_mmio_set_device_config(struct virtio_device *dev, uint32_t offset, uint32_t val) -{ - struct virtio_blk_device *state = device_state(dev); - - uintptr_t config_base_addr = (uintptr_t)&state->config; - uintptr_t config_field_offset = (uintptr_t)(offset - REG_VIRTIO_MMIO_CONFIG); - uint32_t *config_field_addr = (uint32_t *)(config_base_addr + config_field_offset); - *config_field_addr = val; - LOG_BLOCK("set device config with base_addr 0x%x and field_address 0x%x with value %d\n", - config_base_addr, config_field_addr, val); - - return true; +static inline bool virtio_blk_mmio_set_device_config(struct virtio_device *dev, + uint32_t offset, + uint32_t val) { + struct virtio_blk_device *state = device_state(dev); + + uintptr_t config_base_addr = (uintptr_t)&state->config; + uintptr_t config_field_offset = (uintptr_t)(offset - REG_VIRTIO_MMIO_CONFIG); + uint32_t *config_field_addr = + (uint32_t *)(config_base_addr + config_field_offset); + *config_field_addr = val; + LOG_BLOCK("set device config with base_addr 0x%x and field_address 0x%x with " + "value %d\n", + config_base_addr, config_field_addr, val); + + return true; } -static inline void virtio_blk_used_buffer(struct virtio_device *dev, uint16_t desc) -{ - struct virtq *virtq = &dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].virtq; - struct virtq_used_elem used_elem = {desc, 0}; +static inline void virtio_blk_used_buffer(struct virtio_device *dev, + uint16_t desc) { + struct virtq *virtq = &dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].virtq; + struct virtq_used_elem used_elem = {desc, 0}; - virtq->used->ring[virtq->used->idx % virtq->num] = used_elem; - virtq->used->idx++; + virtq->used->ring[virtq->used->idx % virtq->num] = used_elem; + 
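/* The two stores on either side of this point implement the split-virtqueue
 * "used ring" hand-back. A minimal, self-contained sketch of that protocol
 * follows; the types are simplified stand-ins for the virtio spec
 * definitions rather than libvmm's own, so treat it as illustration only. */
#include <stdint.h>

struct vq_used_elem { uint32_t id; uint32_t len; };
struct vq_used {
    uint16_t flags;
    uint16_t idx;                 /* free-running; the driver consumes idx % num */
    struct vq_used_elem ring[];   /* one slot per queue entry */
};

/* Return descriptor chain `desc_head` to the driver, reporting `len` bytes
 * written by the device (this device always reports 0, as above). */
static void vq_used_push(struct vq_used *used, uint16_t num,
                         uint16_t desc_head, uint32_t len)
{
    used->ring[used->idx % num] = (struct vq_used_elem){ desc_head, len };
    /* A device running concurrently with its driver needs a release barrier
     * here so the entry is visible before the index; a trap-and-emulate VMM
     * executes synchronously with the guest, so plain stores suffice. */
    used->idx++;
}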
virtq->used->idx++; } -static inline bool virtio_blk_virq_inject(struct virtio_device *dev) -{ - return virq_inject(GUEST_VCPU_ID, dev->virq); +static inline bool virtio_blk_virq_inject(struct virtio_device *dev) { + return virq_inject(GUEST_VCPU_ID, dev->virq); } static inline void virtio_blk_set_interrupt_status(struct virtio_device *dev, - bool used_buffer, - bool config_change) -{ - /* Set the reason of the irq. - bit 0: used buffer - bit 1: configuration change */ - dev->data.InterruptStatus = used_buffer | (config_change << 1); + bool used_buffer, + bool config_change) { + /* Set the reason of the irq. + bit 0: used buffer + bit 1: configuration change */ + dev->data.InterruptStatus = used_buffer | (config_change << 1); } -static inline void virtio_blk_set_req_fail(struct virtio_device *dev, uint16_t desc) -{ - struct virtq *virtq = &dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].virtq; - - /* Loop to the final byte of the final descriptor and write response code there */ - uint16_t curr_desc = desc; - while(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT) { - curr_desc = virtq->desc[curr_desc].next; - } - assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_WRITE); - *(uint8_t *)(virtq->desc[curr_desc].addr + virtq->desc[curr_desc].len - 1) = VIRTIO_BLK_S_IOERR; +static inline void virtio_blk_set_req_fail(struct virtio_device *dev, + uint16_t desc) { + struct virtq *virtq = &dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].virtq; + + /* Loop to the final byte of the final descriptor and write response code + * there */ + uint16_t curr_desc = desc; + while (virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT) { + curr_desc = virtq->desc[curr_desc].next; + } + assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_WRITE); + *(uint8_t *)(virtq->desc[curr_desc].addr + virtq->desc[curr_desc].len - 1) = + VIRTIO_BLK_S_IOERR; } -static inline void virtio_blk_set_req_success(struct virtio_device *dev, uint16_t desc) -{ - struct virtq *virtq = &dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].virtq; +static inline void virtio_blk_set_req_success(struct virtio_device *dev, + uint16_t desc) { + struct virtq *virtq = &dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].virtq; - uint16_t curr_desc = desc; - while(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT) { - curr_desc = virtq->desc[curr_desc].next; - } - assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_WRITE); - *((uint8_t *)virtq->desc[curr_desc].addr) = VIRTIO_BLK_S_OK; + uint16_t curr_desc = desc; + while (virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT) { + curr_desc = virtq->desc[curr_desc].next; + } + assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_WRITE); + *((uint8_t *)virtq->desc[curr_desc].addr) = VIRTIO_BLK_S_OK; } -static inline bool sddf_make_req_check(struct virtio_blk_device *state, uint16_t sddf_count) -{ - /* Check if ialloc is full, if data region is full, if req queue is full. - If these all pass then this request can be handled successfully */ - if (ialloc_full(&state->ialloc)) { - LOG_BLOCK_ERR("Request bookkeeping array is full\n"); - return false; - } - - if (blk_queue_full_req(&state->queue_h)) { - LOG_BLOCK_ERR("Request queue is full\n"); - return false; - } - - if (fsmalloc_full(&state->fsmalloc, sddf_count)) { - LOG_BLOCK_ERR("Data region is full\n"); - return false; - } - - return true; +static inline bool sddf_make_req_check(struct virtio_blk_device *state, + uint16_t sddf_count) { + /* Check if ialloc is full, if data region is full, if req queue is full. 
+ If these all pass then this request can be handled successfully */ + if (ialloc_full(&state->ialloc)) { + LOG_BLOCK_ERR("Request bookkeeping array is full\n"); + return false; + } + + if (blk_queue_full_req(&state->queue_h)) { + LOG_BLOCK_ERR("Request queue is full\n"); + return false; + } + + if (fsmalloc_full(&state->fsmalloc, sddf_count)) { + LOG_BLOCK_ERR("Data region is full\n"); + return false; + } + + return true; } static bool virtio_blk_mmio_queue_notify(struct virtio_device *dev) { - int err = 0; - /* If multiqueue feature bit negotiated, should read which queue from dev->QueueNotify, - but for now we just assume it's the one and only default queue */ - virtio_queue_handler_t *vq = &dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ]; - struct virtq *virtq = &vq->virtq; - - struct virtio_blk_device *state = device_state(dev); - - /* If any request has to be dropped due to any number of reasons, this becomes true */ - bool has_dropped = false; - - bool virt_notify = false; - - /* Handle available requests beginning from the last handled request */ - uint16_t last_handled_avail_idx = vq->last_idx; - - LOG_BLOCK("------------- Driver notified device -------------\n"); - for (; last_handled_avail_idx != virtq->avail->idx; last_handled_avail_idx++) { - uint16_t desc_head = virtq->avail->ring[last_handled_avail_idx % virtq->num]; - uint16_t curr_desc = desc_head; - uint32_t curr_desc_bytes_read = 0; - - /* There are three parts with each block request. The header, body (which contains the data) and reply. */ - uint32_t header_bytes_read = 0; - struct virtio_blk_outhdr virtio_req_header; - for (; header_bytes_read < sizeof(struct virtio_blk_outhdr); curr_desc = virtq->desc[curr_desc].next) { - /* Header is device read only */ - assert(!(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_WRITE)); - /* We can guarantee existence of next descriptor as footer is write only */ - assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT); - if (header_bytes_read + virtq->desc[curr_desc].len > sizeof(struct virtio_blk_outhdr)) { - memcpy(&virtio_req_header, (void *)virtq->desc[curr_desc].addr, sizeof(struct virtio_blk_outhdr) - header_bytes_read); - curr_desc_bytes_read = sizeof(struct virtio_blk_outhdr) - header_bytes_read; - header_bytes_read += sizeof(struct virtio_blk_outhdr) - header_bytes_read; - /* Don't go to the next descriptor yet, we're not done processing with current one */ - break; - } else { - memcpy(&virtio_req_header, (void *)virtq->desc[curr_desc].addr, virtq->desc[curr_desc].len); - header_bytes_read += virtq->desc[curr_desc].len; - } - } - - LOG_BLOCK("----- Request type is 0x%x -----\n", virtio_req_header.type); - - switch (virtio_req_header.type) { - case VIRTIO_BLK_T_IN: { - LOG_BLOCK("Request type is VIRTIO_BLK_T_IN\n"); - LOG_BLOCK("Sector (read/write offset) is %d\n", virtio_req_header.sector); - - /* Converting virtio sector number to sddf block number, we are rounding down */ - uint32_t sddf_block_number = (virtio_req_header.sector * VIRTIO_BLK_SECTOR_SIZE) / BLK_TRANSFER_SIZE; - - /* Figure out how many bytes are in the body of the request */ - uint32_t body_size_bytes = 0; - uint32_t tmp_curr_desc_bytes_read = curr_desc_bytes_read; - for (uint16_t tmp_curr_desc = curr_desc; virtq->desc[tmp_curr_desc].flags & VIRTQ_DESC_F_NEXT; tmp_curr_desc = virtq->desc[tmp_curr_desc].next) { - if (tmp_curr_desc_bytes_read != 0) { - body_size_bytes += virtq->desc[tmp_curr_desc].len - tmp_curr_desc_bytes_read; - tmp_curr_desc_bytes_read = 0; - } else { - body_size_bytes += virtq->desc[tmp_curr_desc].len; 
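/* Every read and write handled here goes through the same unit conversion:
 * virtio-blk addresses 512-byte sectors while sDDF transfers fixed
 * BLK_TRANSFER_SIZE blocks. A standalone sketch of that arithmetic follows;
 * the 4096-byte transfer size is an assumption for illustration, not a
 * value taken from this codebase. */
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_BLK_SECTOR_SIZE 512u
#define BLK_TRANSFER_SIZE      4096u   /* illustrative; use the real constant */

int main(void)
{
    uint64_t sector = 9;                                /* virtio start sector */
    uint32_t body_size_bytes = 3 * VIRTIO_BLK_SECTOR_SIZE;

    /* Start block: round the byte offset down to a transfer boundary. */
    uint32_t block = (sector * VIRTIO_BLK_SECTOR_SIZE) / BLK_TRANSFER_SIZE;
    /* Block count: round the byte length up to whole transfers. */
    uint16_t count = (body_size_bytes + BLK_TRANSFER_SIZE - 1) / BLK_TRANSFER_SIZE;
    /* Offset of the virtio data within the first transfer block; a non-zero
     * offset is what forces the read-modify-write path for writes. */
    uint32_t offset = (sector * VIRTIO_BLK_SECTOR_SIZE) % BLK_TRANSFER_SIZE;

    printf("block %u, count %u, offset %u\n",
           (unsigned)block, (unsigned)count, (unsigned)offset);
    return 0;   /* prints: block 1, count 1, offset 512 */
}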
- } - if (!(virtq->desc[tmp_curr_desc].flags & VIRTQ_DESC_F_WRITE) || virtq->desc[tmp_curr_desc].len < VIRTIO_BLK_SECTOR_SIZE) { - break; - } - } - - /* Converting bytes to the number of blocks, we are rounding up */ - uint16_t sddf_count = (body_size_bytes + BLK_TRANSFER_SIZE - 1) / BLK_TRANSFER_SIZE; - - if (!sddf_make_req_check(state, sddf_count)) { - virtio_blk_set_req_fail(dev, curr_desc); - has_dropped = true; - break; - } + int err = 0; + /* If multiqueue feature bit negotiated, should read which queue from + dev->QueueNotify, but for now we just assume it's the one and only default + queue */ + virtio_queue_handler_t *vq = &dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ]; + struct virtq *virtq = &vq->virtq; + + struct virtio_blk_device *state = device_state(dev); + + /* If any request has to be dropped due to any number of reasons, this becomes + * true */ + bool has_dropped = false; + + bool virt_notify = false; + + /* Handle available requests beginning from the last handled request */ + uint16_t last_handled_avail_idx = vq->last_idx; + + LOG_BLOCK("------------- Driver notified device -------------\n"); + for (; last_handled_avail_idx != virtq->avail->idx; + last_handled_avail_idx++) { + uint16_t desc_head = + virtq->avail->ring[last_handled_avail_idx % virtq->num]; + uint16_t curr_desc = desc_head; + uint32_t curr_desc_bytes_read = 0; + + /* There are three parts with each block request. The header, body (which + * contains the data) and reply. */ + uint32_t header_bytes_read = 0; + struct virtio_blk_outhdr virtio_req_header; + for (; header_bytes_read < sizeof(struct virtio_blk_outhdr); + curr_desc = virtq->desc[curr_desc].next) { + /* Header is device read only */ + assert(!(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_WRITE)); + /* We can guarantee existence of next descriptor as footer is write only + */ + assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT); + if (header_bytes_read + virtq->desc[curr_desc].len > + sizeof(struct virtio_blk_outhdr)) { + memcpy(&virtio_req_header, (void *)virtq->desc[curr_desc].addr, + sizeof(struct virtio_blk_outhdr) - header_bytes_read); + curr_desc_bytes_read = + sizeof(struct virtio_blk_outhdr) - header_bytes_read; + header_bytes_read += + sizeof(struct virtio_blk_outhdr) - header_bytes_read; + /* Don't go to the next descriptor yet, we're not done processing with + * current one */ + break; + } else { + memcpy(&virtio_req_header, (void *)virtq->desc[curr_desc].addr, + virtq->desc[curr_desc].len); + header_bytes_read += virtq->desc[curr_desc].len; + } + } - /* Allocate data cells from sddf data region based on sddf_count */ - uintptr_t sddf_data_cell_base; - fsmalloc_alloc(&state->fsmalloc, &sddf_data_cell_base, sddf_count); - - /* Find address within the data cells for reading/writing virtio data */ - uintptr_t sddf_data = sddf_data_cell_base + (virtio_req_header.sector * VIRTIO_BLK_SECTOR_SIZE) % BLK_TRANSFER_SIZE; - - /* Generate sddf request id and bookkeep the request */ - uint32_t req_id; - err = ialloc_alloc(&state->ialloc, &req_id); - assert(!err); - state->reqsbk[req_id] = (reqbk_t) { - desc_head, - sddf_data_cell_base, - sddf_count, - sddf_block_number, - sddf_data, - body_size_bytes, - false - }; - - uintptr_t sddf_offset = sddf_data_cell_base - ((struct virtio_blk_device *)dev->device_data)->data_region; - err = blk_enqueue_req(&state->queue_h, BLK_REQ_READ, sddf_offset, sddf_block_number, sddf_count, req_id); - assert(!err); - virt_notify = true; - break; + LOG_BLOCK("----- Request type is 0x%x -----\n", virtio_req_header.type); + + 
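/* The header-reading loop above gathers a fixed-size struct that may be
 * split across several read-only descriptors. A self-contained sketch of
 * that gather, with simplified stand-in types; note the `out + read`
 * destination offset, which matters if the header genuinely straddles a
 * descriptor boundary (the loop above always copies to offset 0, which
 * appears to rely on the first descriptor covering the whole header). */
#include <stdint.h>
#include <string.h>

struct vq_desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };

/* Copy `need` bytes starting at descriptor `d` into `out`. Returns the
 * descriptor holding the first unread byte; *leftover is how many of its
 * bytes the header consumed (0 if the header ended on a boundary). */
static uint16_t gather_header(const struct vq_desc *desc, uint16_t d,
                              void *out, uint32_t need, uint32_t *leftover)
{
    uint32_t read = 0;
    *leftover = 0;
    while (read < need) {
        uint32_t remaining = need - read;
        if (desc[d].len > remaining) {
            /* Header ends inside this descriptor: take what we need and
             * report where the request body starts within it. */
            memcpy((uint8_t *)out + read, (void *)(uintptr_t)desc[d].addr,
                   remaining);
            *leftover = remaining;
            return d;
        }
        memcpy((uint8_t *)out + read, (void *)(uintptr_t)desc[d].addr,
               desc[d].len);
        read += desc[d].len;
        d = desc[d].next;   /* safe: the write-only footer guarantees a next */
    }
    return d;
}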
switch (virtio_req_header.type) { + case VIRTIO_BLK_T_IN: { + LOG_BLOCK("Request type is VIRTIO_BLK_T_IN\n"); + LOG_BLOCK("Sector (read/write offset) is %d\n", virtio_req_header.sector); + + /* Converting virtio sector number to sddf block number, we are rounding + * down */ + uint32_t sddf_block_number = + (virtio_req_header.sector * VIRTIO_BLK_SECTOR_SIZE) / + BLK_TRANSFER_SIZE; + + /* Figure out how many bytes are in the body of the request */ + uint32_t body_size_bytes = 0; + uint32_t tmp_curr_desc_bytes_read = curr_desc_bytes_read; + for (uint16_t tmp_curr_desc = curr_desc; + virtq->desc[tmp_curr_desc].flags & VIRTQ_DESC_F_NEXT; + tmp_curr_desc = virtq->desc[tmp_curr_desc].next) { + if (tmp_curr_desc_bytes_read != 0) { + body_size_bytes += + virtq->desc[tmp_curr_desc].len - tmp_curr_desc_bytes_read; + tmp_curr_desc_bytes_read = 0; + } else { + body_size_bytes += virtq->desc[tmp_curr_desc].len; } - case VIRTIO_BLK_T_OUT: { - LOG_BLOCK("Request type is VIRTIO_BLK_T_OUT\n"); - LOG_BLOCK("Sector (read/write offset) is %d\n", virtio_req_header.sector); - - /* Converting virtio sector number to sddf block number, we are rounding down */ - uint32_t sddf_block_number = (virtio_req_header.sector * VIRTIO_BLK_SECTOR_SIZE) / BLK_TRANSFER_SIZE; - - /* Figure out how many bytes are in the body of the request */ - uint32_t body_size_bytes = 0; - uint32_t tmp_curr_desc_bytes_read = curr_desc_bytes_read; - for (uint16_t tmp_curr_desc = curr_desc; virtq->desc[tmp_curr_desc].flags & VIRTQ_DESC_F_NEXT; tmp_curr_desc = virtq->desc[tmp_curr_desc].next) { - if (tmp_curr_desc_bytes_read != 0) { - body_size_bytes += virtq->desc[tmp_curr_desc].len - tmp_curr_desc_bytes_read; - tmp_curr_desc_bytes_read = 0; - } else { - body_size_bytes += virtq->desc[tmp_curr_desc].len; - } - if (!(virtq->desc[tmp_curr_desc].flags & VIRTQ_DESC_F_WRITE) || virtq->desc[tmp_curr_desc].len < VIRTIO_BLK_SECTOR_SIZE) { - break; - } - } - - /* Converting bytes to the number of blocks, we are rounding up */ - uint16_t sddf_count = (body_size_bytes + BLK_TRANSFER_SIZE - 1) / BLK_TRANSFER_SIZE; - - if (!sddf_make_req_check(state, sddf_count)) { - virtio_blk_set_req_fail(dev, curr_desc); - has_dropped = true; - break; - } - - /* If the write request is not aligned on the sddf transfer size, we need to do a read-modify-write: - * we need to first read the surrounding memory, overwrite the memory on the unaligned areas, and - * then write the entire memory back to disk. 
- */ - bool aligned = ((virtio_req_header.sector % (BLK_TRANSFER_SIZE / VIRTIO_BLK_SECTOR_SIZE)) == 0); - if (!aligned) { - /* Allocate data buffer from data region based on sddf_count */ - uintptr_t sddf_data_cell_base; - fsmalloc_alloc(&state->fsmalloc, &sddf_data_cell_base, sddf_count); - /* Find address within the data cells for reading/writing virtio data */ - uintptr_t sddf_data = sddf_data_cell_base + (virtio_req_header.sector * VIRTIO_BLK_SECTOR_SIZE) % BLK_TRANSFER_SIZE; - /* Generate sddf request id and bookkeep the request */ - uint32_t req_id; - ialloc_alloc(&state->ialloc, &req_id); - state->reqsbk[req_id] = (reqbk_t) { - desc_head, - sddf_data_cell_base, - sddf_count, - sddf_block_number, - sddf_data, - body_size_bytes, - aligned - }; - - uintptr_t sddf_offset = sddf_data_cell_base - ((struct virtio_blk_device *)dev->device_data)->data_region; - err = blk_enqueue_req(&state->queue_h, BLK_REQ_READ, sddf_offset, sddf_block_number, sddf_count, req_id); - assert(!err); - } else { - /* Handle normal write request */ - /* Allocate data buffer from data region based on sddf_count */ - uintptr_t sddf_data_cell_base; - fsmalloc_alloc(&state->fsmalloc, &sddf_data_cell_base, sddf_count); - /* Find address within the data cells for reading/writing virtio data */ - uintptr_t sddf_data = sddf_data_cell_base + (virtio_req_header.sector * VIRTIO_BLK_SECTOR_SIZE) % BLK_TRANSFER_SIZE; - /* Copy data from virtio buffer to sddf buffer */ - uint32_t body_bytes_read = 0; - for (; body_bytes_read < body_size_bytes; curr_desc = virtq->desc[curr_desc].next) { - /* For write requests, the body is a read descriptor, and the footer is a write descriptor, we know - * there must be a descriptor cut-off at the end. - */ - assert(body_bytes_read + virtq->desc[curr_desc].len <= body_size_bytes); - assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT); - if (curr_desc_bytes_read != 0) { - memcpy((void *)sddf_data + body_bytes_read, - (void *)virtq->desc[curr_desc].addr + curr_desc_bytes_read, - virtq->desc[curr_desc].len - curr_desc_bytes_read); - body_bytes_read += virtq->desc[curr_desc].len - curr_desc_bytes_read; - curr_desc_bytes_read = 0; - } else { - memcpy((void *)sddf_data + body_bytes_read, (void *)virtq->desc[curr_desc].addr, virtq->desc[curr_desc].len); - body_bytes_read += virtq->desc[curr_desc].len; - } - } - - /* Generate sddf request id and bookkeep the request */ - uint32_t req_id; - ialloc_alloc(&state->ialloc, &req_id); - state->reqsbk[req_id] = (reqbk_t) { - desc_head, - sddf_data_cell_base, - sddf_count, - sddf_block_number, - sddf_data, - body_size_bytes, - aligned - }; - - uintptr_t sddf_offset = sddf_data_cell_base - ((struct virtio_blk_device *)dev->device_data)->data_region; - err = blk_enqueue_req(&state->queue_h, BLK_REQ_WRITE, sddf_offset, sddf_block_number, sddf_count, req_id); - assert(!err); - } - virt_notify = true; - break; + if (!(virtq->desc[tmp_curr_desc].flags & VIRTQ_DESC_F_WRITE) || + virtq->desc[tmp_curr_desc].len < VIRTIO_BLK_SECTOR_SIZE) { + break; } - case VIRTIO_BLK_T_FLUSH: { - LOG_BLOCK("Request type is VIRTIO_BLK_T_FLUSH\n"); - - if (!sddf_make_req_check(state, 0)) { - virtio_blk_set_req_fail(dev, curr_desc); - has_dropped = true; - break; - } + } - /* Bookkeep the request */ - uint32_t req_id; - ialloc_alloc(&state->ialloc, &req_id); - /* except for virtio desc, nothing else needs to be retrieved later - * so leave as 0 */ - state->reqsbk[req_id] = (reqbk_t) { - desc_head, 0, 0, 0, 0, 0, false - }; + /* Converting bytes to the number of blocks, we are 
rounding up */ + uint16_t sddf_count = + (body_size_bytes + BLK_TRANSFER_SIZE - 1) / BLK_TRANSFER_SIZE; - err = blk_enqueue_req(&state->queue_h, BLK_REQ_FLUSH, 0, 0, 0, req_id); - break; - virt_notify = true; - } - default: { - LOG_BLOCK_ERR( - "Handling VirtIO block request, but virtIO request type is not recognised: %d\n", - virtio_req_header.type); - virtio_blk_set_req_fail(dev, curr_desc); - has_dropped = true; - break; + if (!sddf_make_req_check(state, sddf_count)) { + virtio_blk_set_req_fail(dev, curr_desc); + has_dropped = true; + break; + } + + /* Allocate data cells from sddf data region based on sddf_count */ + uintptr_t sddf_data_cell_base; + fsmalloc_alloc(&state->fsmalloc, &sddf_data_cell_base, sddf_count); + + /* Find address within the data cells for reading/writing virtio data */ + uintptr_t sddf_data = sddf_data_cell_base + (virtio_req_header.sector * + VIRTIO_BLK_SECTOR_SIZE) % + BLK_TRANSFER_SIZE; + + /* Generate sddf request id and bookkeep the request */ + uint32_t req_id; + err = ialloc_alloc(&state->ialloc, &req_id); + assert(!err); + state->reqsbk[req_id] = (reqbk_t){ + desc_head, sddf_data_cell_base, sddf_count, sddf_block_number, + sddf_data, body_size_bytes, false}; + + uintptr_t sddf_offset = + sddf_data_cell_base - + ((struct virtio_blk_device *)dev->device_data)->data_region; + err = blk_enqueue_req(&state->queue_h, BLK_REQ_READ, sddf_offset, + sddf_block_number, sddf_count, req_id); + assert(!err); + virt_notify = true; + break; + } + case VIRTIO_BLK_T_OUT: { + LOG_BLOCK("Request type is VIRTIO_BLK_T_OUT\n"); + LOG_BLOCK("Sector (read/write offset) is %d\n", virtio_req_header.sector); + + /* Converting virtio sector number to sddf block number, we are rounding + * down */ + uint32_t sddf_block_number = + (virtio_req_header.sector * VIRTIO_BLK_SECTOR_SIZE) / + BLK_TRANSFER_SIZE; + + /* Figure out how many bytes are in the body of the request */ + uint32_t body_size_bytes = 0; + uint32_t tmp_curr_desc_bytes_read = curr_desc_bytes_read; + for (uint16_t tmp_curr_desc = curr_desc; + virtq->desc[tmp_curr_desc].flags & VIRTQ_DESC_F_NEXT; + tmp_curr_desc = virtq->desc[tmp_curr_desc].next) { + if (tmp_curr_desc_bytes_read != 0) { + body_size_bytes += + virtq->desc[tmp_curr_desc].len - tmp_curr_desc_bytes_read; + tmp_curr_desc_bytes_read = 0; + } else { + body_size_bytes += virtq->desc[tmp_curr_desc].len; } + if (!(virtq->desc[tmp_curr_desc].flags & VIRTQ_DESC_F_WRITE) || + virtq->desc[tmp_curr_desc].len < VIRTIO_BLK_SECTOR_SIZE) { + break; } - } + } - /* Update virtq index to the next available request to be handled */ - vq->last_idx = last_handled_avail_idx; + /* Converting bytes to the number of blocks, we are rounding up */ + uint16_t sddf_count = + (body_size_bytes + BLK_TRANSFER_SIZE - 1) / BLK_TRANSFER_SIZE; - /* If any request has to be dropped due to any number of reasons, we inject an interrupt */ - bool virq_inject_success = true; - if (has_dropped) { - virtio_blk_set_interrupt_status(dev, true, false); - virq_inject_success = virtio_blk_virq_inject(dev); + if (!sddf_make_req_check(state, sddf_count)) { + virtio_blk_set_req_fail(dev, curr_desc); + has_dropped = true; + break; + } + + /* If the write request is not aligned on the sddf transfer size, we need + * to do a read-modify-write: we need to first read the surrounding + * memory, overwrite the memory on the unaligned areas, and then write the + * entire memory back to disk. 
+ */ + bool aligned = ((virtio_req_header.sector % + (BLK_TRANSFER_SIZE / VIRTIO_BLK_SECTOR_SIZE)) == 0); + if (!aligned) { + /* Allocate data buffer from data region based on sddf_count */ + uintptr_t sddf_data_cell_base; + fsmalloc_alloc(&state->fsmalloc, &sddf_data_cell_base, sddf_count); + /* Find address within the data cells for reading/writing virtio data */ + uintptr_t sddf_data = sddf_data_cell_base + (virtio_req_header.sector * + VIRTIO_BLK_SECTOR_SIZE) % + BLK_TRANSFER_SIZE; + /* Generate sddf request id and bookkeep the request */ + uint32_t req_id; + ialloc_alloc(&state->ialloc, &req_id); + state->reqsbk[req_id] = (reqbk_t){ + desc_head, sddf_data_cell_base, sddf_count, sddf_block_number, + sddf_data, body_size_bytes, aligned}; + + uintptr_t sddf_offset = + sddf_data_cell_base - + ((struct virtio_blk_device *)dev->device_data)->data_region; + err = blk_enqueue_req(&state->queue_h, BLK_REQ_READ, sddf_offset, + sddf_block_number, sddf_count, req_id); + assert(!err); + } else { + /* Handle normal write request */ + /* Allocate data buffer from data region based on sddf_count */ + uintptr_t sddf_data_cell_base; + fsmalloc_alloc(&state->fsmalloc, &sddf_data_cell_base, sddf_count); + /* Find address within the data cells for reading/writing virtio data */ + uintptr_t sddf_data = sddf_data_cell_base + (virtio_req_header.sector * + VIRTIO_BLK_SECTOR_SIZE) % + BLK_TRANSFER_SIZE; + /* Copy data from virtio buffer to sddf buffer */ + uint32_t body_bytes_read = 0; + for (; body_bytes_read < body_size_bytes; + curr_desc = virtq->desc[curr_desc].next) { + /* For write requests, the body is a read descriptor, and the footer + * is a write descriptor, we know there must be a descriptor cut-off + * at the end. + */ + assert(body_bytes_read + virtq->desc[curr_desc].len <= + body_size_bytes); + assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT); + if (curr_desc_bytes_read != 0) { + memcpy((void *)sddf_data + body_bytes_read, + (void *)virtq->desc[curr_desc].addr + curr_desc_bytes_read, + virtq->desc[curr_desc].len - curr_desc_bytes_read); + body_bytes_read += + virtq->desc[curr_desc].len - curr_desc_bytes_read; + curr_desc_bytes_read = 0; + } else { + memcpy((void *)sddf_data + body_bytes_read, + (void *)virtq->desc[curr_desc].addr, + virtq->desc[curr_desc].len); + body_bytes_read += virtq->desc[curr_desc].len; + } + } + + /* Generate sddf request id and bookkeep the request */ + uint32_t req_id; + ialloc_alloc(&state->ialloc, &req_id); + state->reqsbk[req_id] = (reqbk_t){ + desc_head, sddf_data_cell_base, sddf_count, sddf_block_number, + sddf_data, body_size_bytes, aligned}; + + uintptr_t sddf_offset = + sddf_data_cell_base - + ((struct virtio_blk_device *)dev->device_data)->data_region; + err = blk_enqueue_req(&state->queue_h, BLK_REQ_WRITE, sddf_offset, + sddf_block_number, sddf_count, req_id); + assert(!err); + } + virt_notify = true; + break; } + case VIRTIO_BLK_T_FLUSH: { + LOG_BLOCK("Request type is VIRTIO_BLK_T_FLUSH\n"); - if (virt_notify && !blk_queue_plugged_req(&state->queue_h)) { - microkit_notify(state->server_ch); + if (!sddf_make_req_check(state, 0)) { + virtio_blk_set_req_fail(dev, curr_desc); + has_dropped = true; + break; + } + + /* Bookkeep the request */ + uint32_t req_id; + ialloc_alloc(&state->ialloc, &req_id); + /* except for virtio desc, nothing else needs to be retrieved later + * so leave as 0 */ + state->reqsbk[req_id] = (reqbk_t){desc_head, 0, 0, 0, 0, 0, false}; + + err = blk_enqueue_req(&state->queue_h, BLK_REQ_FLUSH, 0, 0, 0, req_id); + break; + virt_notify 
= true; } + default: { + LOG_BLOCK_ERR("Handling VirtIO block request, but virtIO request type is " + "not recognised: %d\n", + virtio_req_header.type); + virtio_blk_set_req_fail(dev, curr_desc); + has_dropped = true; + break; + } + } + } + + /* Update virtq index to the next available request to be handled */ + vq->last_idx = last_handled_avail_idx; + + /* If any request has to be dropped due to any number of reasons, we inject an + * interrupt */ + bool virq_inject_success = true; + if (has_dropped) { + virtio_blk_set_interrupt_status(dev, true, false); + virq_inject_success = virtio_blk_virq_inject(dev); + } - return virq_inject_success; + if (virt_notify && !blk_queue_plugged_req(&state->queue_h)) { + microkit_notify(state->server_ch); + } + + return virq_inject_success; } bool virtio_blk_handle_resp(struct virtio_blk_device *state) { - int err = 0; - struct virtio_device *dev = &state->virtio_device; - - blk_resp_status_t sddf_ret_status; - uint16_t sddf_ret_success_count; - uint32_t sddf_ret_id; - - bool virt_notify = false; - bool resp_handled = false; - while (!blk_queue_empty_resp(&state->queue_h)) { - err = blk_dequeue_resp(&state->queue_h, - &sddf_ret_status, - &sddf_ret_success_count, - &sddf_ret_id); - assert(!err); + int err = 0; + struct virtio_device *dev = &state->virtio_device; - /* Retrieve request bookkeep information and free allocated id */ - reqbk_t *reqbk = &state->reqsbk[sddf_ret_id]; - err = ialloc_free(&state->ialloc, sddf_ret_id); - assert(!err); + blk_resp_status_t sddf_ret_status; + uint16_t sddf_ret_success_count; + uint32_t sddf_ret_id; - struct virtq *virtq = &dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].virtq; + bool virt_notify = false; + bool resp_handled = false; + while (!blk_queue_empty_resp(&state->queue_h)) { + err = blk_dequeue_resp(&state->queue_h, &sddf_ret_status, + &sddf_ret_success_count, &sddf_ret_id); + assert(!err); - uint16_t curr_desc = reqbk->virtio_desc_head; - uint32_t curr_desc_bytes_read = 0; + /* Retrieve request bookkeep information and free allocated id */ + reqbk_t *reqbk = &state->reqsbk[sddf_ret_id]; + err = ialloc_free(&state->ialloc, sddf_ret_id); + assert(!err); - uint32_t header_bytes_read = 0; - struct virtio_blk_outhdr virtio_req_header; - for (; header_bytes_read < sizeof(struct virtio_blk_outhdr); curr_desc = virtq->desc[curr_desc].next) { - /* Header is device read only */ - assert(!(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_WRITE)); - /* We can always guarantee existence of next descriptor as footer is write only */ - assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT); - if (header_bytes_read + virtq->desc[curr_desc].len > sizeof(struct virtio_blk_outhdr)) { - memcpy(&virtio_req_header, (void *)virtq->desc[curr_desc].addr, sizeof(struct virtio_blk_outhdr) - header_bytes_read); - curr_desc_bytes_read = sizeof(struct virtio_blk_outhdr) - header_bytes_read; - header_bytes_read += sizeof(struct virtio_blk_outhdr) - header_bytes_read; - /* Don't go to the next descriptor yet, we're not done processing with current one */ - break; - } else { - memcpy(&virtio_req_header, (void *)virtq->desc[curr_desc].addr, virtq->desc[curr_desc].len); - header_bytes_read += virtq->desc[curr_desc].len; - } - } + struct virtq *virtq = &dev->vqs[VIRTIO_BLK_DEFAULT_VIRTQ].virtq; - bool resp_success = false; - if (sddf_ret_status == BLK_RESP_OK) { - resp_success = true; - switch (virtio_req_header.type) { - case VIRTIO_BLK_T_IN: { - /* Going from read (header) to write (body) descriptor, there should be a descriptor cut-off at the beginning. 
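/* virtio_blk_set_req_success()/_fail() above both finish a request by
 * chasing the chain to its final, device-writable descriptor and writing
 * the one-byte status there. A standalone sketch, using the same simplified
 * stand-in types as the earlier sketches; the VIRTIO_BLK_S_* values are the
 * spec-defined status codes. */
#include <stdint.h>

#define VQ_DESC_F_NEXT     1u
#define VIRTIO_BLK_S_OK    0u
#define VIRTIO_BLK_S_IOERR 1u

struct vq_desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };

/* Usage: write_status(desc_table, desc_head, VIRTIO_BLK_S_OK); */
static void write_status(const struct vq_desc *desc, uint16_t head,
                         uint8_t status)
{
    uint16_t d = head;
    while (desc[d].flags & VQ_DESC_F_NEXT) {
        d = desc[d].next;
    }
    /* For the 1-byte footer Linux's driver submits, first and last byte
     * coincide; writing the final byte (as the fail path above does) also
     * covers a driver that folds the status into a larger writable buffer. */
    *(uint8_t *)((uintptr_t)desc[d].addr + desc[d].len - 1) = status;
}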
*/ - assert(curr_desc_bytes_read == 0); - uint32_t body_bytes_read = 0; - for (; body_bytes_read < reqbk->virtio_body_size_bytes; curr_desc = virtq->desc[curr_desc].next) { - if (body_bytes_read + virtq->desc[curr_desc].len > reqbk->virtio_body_size_bytes) { - memcpy((void *)virtq->desc[curr_desc].addr, (void *)reqbk->sddf_data + body_bytes_read, reqbk->virtio_body_size_bytes - body_bytes_read); - body_bytes_read += reqbk->virtio_body_size_bytes - body_bytes_read; - /* This is the final descriptor if we get into this condition, don't go to next descriptor */ - LOG_VMM("virtq->desc[curr_desc].len: %d\n", virtq->desc[curr_desc].len); - assert(!(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT)); - break; - } else { - memcpy((void *)virtq->desc[curr_desc].addr, (void *)reqbk->sddf_data + body_bytes_read, virtq->desc[curr_desc].len); - body_bytes_read += virtq->desc[curr_desc].len; - /* Because there is still the footer, we are guaranteed next descriptor exists */ - assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT); - } - } - break; - } - case VIRTIO_BLK_T_OUT: { - if (!reqbk->aligned) { - /* Handling read-modify-write procedure, copy virtio write data to the correct offset - * in the same sddf data region allocated to do the surrounding read. - */ - uint32_t body_bytes_read = 0; - for (; body_bytes_read < reqbk->virtio_body_size_bytes; curr_desc = virtq->desc[curr_desc].next) { - /* For write requests, the body is a read descriptor and the footer is a write descriptor, - * there must be a descriptor cut-off at the end - */ - assert(body_bytes_read + virtq->desc[curr_desc].len <= reqbk->virtio_body_size_bytes); - assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT); - if (curr_desc_bytes_read != 0) { - memcpy((void *)reqbk->sddf_data + body_bytes_read, - (void *)virtq->desc[curr_desc].addr + curr_desc_bytes_read, - virtq->desc[curr_desc].len - curr_desc_bytes_read); - body_bytes_read += virtq->desc[curr_desc].len - curr_desc_bytes_read; - curr_desc_bytes_read = 0; - } else { - memcpy((void *)reqbk->sddf_data + body_bytes_read, (void *)virtq->desc[curr_desc].addr, virtq->desc[curr_desc].len); - body_bytes_read += virtq->desc[curr_desc].len; - } - } - - uint32_t new_sddf_id; - err = ialloc_alloc(&state->ialloc, &new_sddf_id); - assert(!err); - state->reqsbk[new_sddf_id] = (reqbk_t) { - reqbk->virtio_desc_head, - reqbk->sddf_data_cell_base, - reqbk->sddf_count, - reqbk->sddf_block_number, - 0, /* unused */ - 0, /* unused */ - true, - }; - - err = blk_enqueue_req(&state->queue_h, - BLK_REQ_WRITE, - reqbk->sddf_data_cell_base - state->data_region, - reqbk->sddf_block_number, - reqbk->sddf_count, - new_sddf_id); - assert(!err); - virt_notify = true; - /* The virtIO request is not complete yet so we don't tell the driver (just skip over to next request) */ - continue; - } - break; - } - case VIRTIO_BLK_T_FLUSH: - break; - default: { - LOG_BLOCK_ERR( - "Retrieving sDDF block response, but virtIO request type is not recognised: %d\n", - virtio_req_header.type); - resp_success = false; - break; - } - } - } + uint16_t curr_desc = reqbk->virtio_desc_head; + uint32_t curr_desc_bytes_read = 0; + + uint32_t header_bytes_read = 0; + struct virtio_blk_outhdr virtio_req_header; + for (; header_bytes_read < sizeof(struct virtio_blk_outhdr); + curr_desc = virtq->desc[curr_desc].next) { + /* Header is device read only */ + assert(!(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_WRITE)); + /* We can always guarantee existence of next descriptor as footer is write + * only */ + 
assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT); + if (header_bytes_read + virtq->desc[curr_desc].len > + sizeof(struct virtio_blk_outhdr)) { + memcpy(&virtio_req_header, (void *)virtq->desc[curr_desc].addr, + sizeof(struct virtio_blk_outhdr) - header_bytes_read); + curr_desc_bytes_read = + sizeof(struct virtio_blk_outhdr) - header_bytes_read; + header_bytes_read += + sizeof(struct virtio_blk_outhdr) - header_bytes_read; + /* Don't go to the next descriptor yet, we're not done processing with + * current one */ + break; + } else { + memcpy(&virtio_req_header, (void *)virtq->desc[curr_desc].addr, + virtq->desc[curr_desc].len); + header_bytes_read += virtq->desc[curr_desc].len; + } + } - if (resp_success) { - virtio_blk_set_req_success(dev, curr_desc); - } else { - virtio_blk_set_req_fail(dev, curr_desc); + bool resp_success = false; + if (sddf_ret_status == BLK_RESP_OK) { + resp_success = true; + switch (virtio_req_header.type) { + case VIRTIO_BLK_T_IN: { + /* Going from read (header) to write (body) descriptor, there should be + * a descriptor cut-off at the beginning. */ + assert(curr_desc_bytes_read == 0); + uint32_t body_bytes_read = 0; + for (; body_bytes_read < reqbk->virtio_body_size_bytes; + curr_desc = virtq->desc[curr_desc].next) { + if (body_bytes_read + virtq->desc[curr_desc].len > + reqbk->virtio_body_size_bytes) { + memcpy((void *)virtq->desc[curr_desc].addr, + (void *)reqbk->sddf_data + body_bytes_read, + reqbk->virtio_body_size_bytes - body_bytes_read); + body_bytes_read += reqbk->virtio_body_size_bytes - body_bytes_read; + /* This is the final descriptor if we get into this condition, don't + * go to next descriptor */ + LOG_VMM("virtq->desc[curr_desc].len: %d\n", + virtq->desc[curr_desc].len); + assert(!(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT)); + break; + } else { + memcpy((void *)virtq->desc[curr_desc].addr, + (void *)reqbk->sddf_data + body_bytes_read, + virtq->desc[curr_desc].len); + body_bytes_read += virtq->desc[curr_desc].len; + /* Because there is still the footer, we are guaranteed next + * descriptor exists */ + assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT); + } } - - /* Free corresponding bookkeeping structures regardless of the request's - * success status. - */ - if (virtio_req_header.type == VIRTIO_BLK_T_IN || virtio_req_header.type == VIRTIO_BLK_T_OUT) { - fsmalloc_free(&state->fsmalloc, reqbk->sddf_data_cell_base, reqbk->sddf_count); + break; + } + case VIRTIO_BLK_T_OUT: { + if (!reqbk->aligned) { + /* Handling read-modify-write procedure, copy virtio write data to the + * correct offset in the same sddf data region allocated to do the + * surrounding read. 
+ */ + uint32_t body_bytes_read = 0; + for (; body_bytes_read < reqbk->virtio_body_size_bytes; + curr_desc = virtq->desc[curr_desc].next) { + /* For write requests, the body is a read descriptor and the footer + * is a write descriptor, there must be a descriptor cut-off at the + * end + */ + assert(body_bytes_read + virtq->desc[curr_desc].len <= + reqbk->virtio_body_size_bytes); + assert(virtq->desc[curr_desc].flags & VIRTQ_DESC_F_NEXT); + if (curr_desc_bytes_read != 0) { + memcpy((void *)reqbk->sddf_data + body_bytes_read, + (void *)virtq->desc[curr_desc].addr + curr_desc_bytes_read, + virtq->desc[curr_desc].len - curr_desc_bytes_read); + body_bytes_read += + virtq->desc[curr_desc].len - curr_desc_bytes_read; + curr_desc_bytes_read = 0; + } else { + memcpy((void *)reqbk->sddf_data + body_bytes_read, + (void *)virtq->desc[curr_desc].addr, + virtq->desc[curr_desc].len); + body_bytes_read += virtq->desc[curr_desc].len; + } + } + + uint32_t new_sddf_id; + err = ialloc_alloc(&state->ialloc, &new_sddf_id); + assert(!err); + state->reqsbk[new_sddf_id] = (reqbk_t){ + reqbk->virtio_desc_head, + reqbk->sddf_data_cell_base, + reqbk->sddf_count, + reqbk->sddf_block_number, + 0, /* unused */ + 0, /* unused */ + true, + }; + + err = blk_enqueue_req(&state->queue_h, BLK_REQ_WRITE, + reqbk->sddf_data_cell_base - state->data_region, + reqbk->sddf_block_number, reqbk->sddf_count, + new_sddf_id); + assert(!err); + virt_notify = true; + /* The virtIO request is not complete yet so we don't tell the driver + * (just skip over to next request) */ + continue; } + break; + } + case VIRTIO_BLK_T_FLUSH: + break; + default: { + LOG_BLOCK_ERR("Retrieving sDDF block response, but virtIO request type " + "is not recognised: %d\n", + virtio_req_header.type); + resp_success = false; + break; + } + } + } - virtio_blk_used_buffer(dev, reqbk->virtio_desc_head); - - resp_handled = true; + if (resp_success) { + virtio_blk_set_req_success(dev, curr_desc); + } else { + virtio_blk_set_req_fail(dev, curr_desc); } - /* We need to know if we handled any responses, if we did, we inject an - * interrupt, if we didn't we don't inject. + /* Free corresponding bookkeeping structures regardless of the request's + * success status. */ - bool virq_inject_success = true; - if (resp_handled) { - virtio_blk_set_interrupt_status(dev, true, false); - virq_inject_success = virtio_blk_virq_inject(dev); + if (virtio_req_header.type == VIRTIO_BLK_T_IN || + virtio_req_header.type == VIRTIO_BLK_T_OUT) { + fsmalloc_free(&state->fsmalloc, reqbk->sddf_data_cell_base, + reqbk->sddf_count); } - if (virt_notify) { - microkit_notify(state->server_ch); - } + virtio_blk_used_buffer(dev, reqbk->virtio_desc_head); - return virq_inject_success; + resp_handled = true; + } + + /* We need to know if we handled any responses, if we did, we inject an + * interrupt, if we didn't we don't inject. 
+ */ + bool virq_inject_success = true; + if (resp_handled) { + virtio_blk_set_interrupt_status(dev, true, false); + virq_inject_success = virtio_blk_virq_inject(dev); + } + + if (virt_notify) { + microkit_notify(state->server_ch); + } + + return virq_inject_success; } -static inline void virtio_blk_config_init(struct virtio_blk_device *blk_dev) -{ - blk_storage_info_t *storage_info = blk_dev->storage_info; +static inline void virtio_blk_config_init(struct virtio_blk_device *blk_dev) { + blk_storage_info_t *storage_info = blk_dev->storage_info; - blk_dev->config.capacity = (BLK_TRANSFER_SIZE / VIRTIO_BLK_SECTOR_SIZE) * storage_info->capacity; - if (storage_info->block_size != 0) { - blk_dev->config.blk_size = storage_info->block_size * BLK_TRANSFER_SIZE; - } else { - blk_dev->config.blk_size = storage_info->sector_size; - } + blk_dev->config.capacity = + (BLK_TRANSFER_SIZE / VIRTIO_BLK_SECTOR_SIZE) * storage_info->capacity; + if (storage_info->block_size != 0) { + blk_dev->config.blk_size = storage_info->block_size * BLK_TRANSFER_SIZE; + } else { + blk_dev->config.blk_size = storage_info->sector_size; + } } static virtio_device_funs_t functions = { @@ -646,46 +713,41 @@ static virtio_device_funs_t functions = { }; bool virtio_mmio_blk_init(struct virtio_blk_device *blk_dev, - uintptr_t region_base, - uintptr_t region_size, - size_t virq, - uintptr_t data_region, + uintptr_t region_base, uintptr_t region_size, + size_t virq, uintptr_t data_region, size_t data_region_size, blk_storage_info_t *storage_info, - blk_queue_handle_t *queue_h, - uint32_t queue_capacity, - int server_ch) -{ - struct virtio_device *dev = &blk_dev->virtio_device; - - dev->data.DeviceID = DEVICE_ID_VIRTIO_BLOCK; - dev->data.VendorID = VIRTIO_MMIO_DEV_VENDOR_ID; - dev->funs = &functions; - dev->vqs = blk_dev->vqs; - dev->num_vqs = VIRTIO_BLK_NUM_VIRTQ; - dev->virq = virq; - dev->device_data = blk_dev; - - blk_dev->storage_info = storage_info; - blk_dev->queue_h = *queue_h; - blk_dev->data_region = data_region; - blk_dev->queue_capacity = queue_capacity; - blk_dev->server_ch = server_ch; - - size_t num_sddf_cells = (data_region_size / BLK_TRANSFER_SIZE) < SDDF_MAX_DATA_CELLS ? - (data_region_size / BLK_TRANSFER_SIZE) : SDDF_MAX_DATA_CELLS; - - virtio_blk_config_init(blk_dev); - - fsmalloc_init(&blk_dev->fsmalloc, - data_region, - BLK_TRANSFER_SIZE, - num_sddf_cells, - &blk_dev->fsmalloc_avail_bitarr, - blk_dev->fsmalloc_avail_bitarr_words, - roundup_bits2words64(num_sddf_cells)); - - ialloc_init(&blk_dev->ialloc, blk_dev->ialloc_idxlist, num_sddf_cells); - - return virtio_mmio_register_device(dev, region_base, region_size, virq); + blk_queue_handle_t *queue_h, uint32_t queue_capacity, + int server_ch) { + struct virtio_device *dev = &blk_dev->virtio_device; + + dev->data.DeviceID = DEVICE_ID_VIRTIO_BLOCK; + dev->data.VendorID = VIRTIO_MMIO_DEV_VENDOR_ID; + dev->funs = &functions; + dev->vqs = blk_dev->vqs; + dev->num_vqs = VIRTIO_BLK_NUM_VIRTQ; + dev->virq = virq; + dev->device_data = blk_dev; + + blk_dev->storage_info = storage_info; + blk_dev->queue_h = *queue_h; + blk_dev->data_region = data_region; + blk_dev->queue_capacity = queue_capacity; + blk_dev->server_ch = server_ch; + + size_t num_sddf_cells = + (data_region_size / BLK_TRANSFER_SIZE) < SDDF_MAX_DATA_CELLS + ? 
(data_region_size / BLK_TRANSFER_SIZE) + : SDDF_MAX_DATA_CELLS; + + virtio_blk_config_init(blk_dev); + + fsmalloc_init(&blk_dev->fsmalloc, data_region, BLK_TRANSFER_SIZE, + num_sddf_cells, &blk_dev->fsmalloc_avail_bitarr, + blk_dev->fsmalloc_avail_bitarr_words, + roundup_bits2words64(num_sddf_cells)); + + ialloc_init(&blk_dev->ialloc, blk_dev->ialloc_idxlist, num_sddf_cells); + + return virtio_mmio_register_device(dev, region_base, region_size, virq); } diff --git a/tools/linux/uio/libuio.c b/tools/linux/uio/libuio.c index 6029dd22..b9eb167c 100644 --- a/tools/linux/uio/libuio.c +++ b/tools/linux/uio/libuio.c @@ -68,256 +68,264 @@ __attribute__((weak)) void driver_notified() void uio_notify() { - /* Writing 1 to the UIO device ACKs the IRQ (which transfers execution to the VMM) - * and also re-enables the interrupt. - */ - int32_t one = 1; - int ret = write(main_uio_fd, &one, 4); - if (ret < 0) { - LOG_UIO_ERR("writing 1 to device failed with ret val: %d, errno: %d\n", ret, errno); - } - fsync(main_uio_fd); + /* Writing 1 to the UIO device ACKs the IRQ (which transfers execution to the + * VMM) and also re-enables the interrupt. + */ + int32_t one = 1; + int ret = write(main_uio_fd, &one, 4); + if (ret < 0) { + LOG_UIO_ERR("writing 1 to device failed with ret val: %d, errno: %d\n", ret, + errno); + } + fsync(main_uio_fd); } -static int uio_num_maps(int uio_num) -{ - DIR *dir; - struct dirent *entry; - struct stat statbuf; - regex_t regex; - int count = 0; - - char path[MAX_PATHNAME]; - int len = snprintf(path, sizeof(path), "/sys/class/uio/uio%d/maps", uio_num); - if (len < 0 || len >= sizeof(path)) { - LOG_UIO_ERR("Failed to create maps path string\n"); - return -1; - } - - /* Compile regex that searches for maps */ - if (regcomp(®ex, "^map[0-9]+$", REG_EXTENDED) != 0) { - LOG_UIO_ERR("Could not compile regex\n"); - return -1; - } +static int uio_num_maps(int uio_num) { + DIR *dir; + struct dirent *entry; + struct stat statbuf; + regex_t regex; + int count = 0; + + char path[MAX_PATHNAME]; + int len = snprintf(path, sizeof(path), "/sys/class/uio/uio%d/maps", uio_num); + if (len < 0 || len >= sizeof(path)) { + LOG_UIO_ERR("Failed to create maps path string\n"); + return -1; + } - dir = opendir(path); - if (dir == NULL) { - LOG_UIO_ERR("Failed to open uio maps directory\n"); - return -1; - } + /* Compile regex that searches for maps */ + if (regcomp(®ex, "^map[0-9]+$", REG_EXTENDED) != 0) { + LOG_UIO_ERR("Could not compile regex\n"); + return -1; + } - /* Read directory entries */ - while ((entry = readdir(dir)) != NULL) { - char fullPath[MAX_PATHNAME]; - - int len = snprintf(fullPath, sizeof(fullPath), "%s/%s", path, entry->d_name); - if (len < 0 || len >= sizeof(fullPath)) { - LOG_UIO_ERR("Failed to create full uio maps path\n"); - return -1; - }; - - /* Check if entry is a directory */ - if (stat(fullPath, &statbuf) == 0 && S_ISDIR(statbuf.st_mode)) { - /* Skip over . and .. 
*/ - if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) { - continue; - } - - /* Check if directory name matches regex */ - if (regexec(®ex, entry->d_name, 0, NULL, 0) == 0) { - count++; - LOG_UIO("Map found: %s\n", entry->d_name); - } - } + dir = opendir(path); + if (dir == NULL) { + LOG_UIO_ERR("Failed to open uio maps directory\n"); + return -1; + } + + /* Read directory entries */ + while ((entry = readdir(dir)) != NULL) { + char fullPath[MAX_PATHNAME]; + + int len = + snprintf(fullPath, sizeof(fullPath), "%s/%s", path, entry->d_name); + if (len < 0 || len >= sizeof(fullPath)) { + LOG_UIO_ERR("Failed to create full uio maps path\n"); + return -1; + }; + + /* Check if entry is a directory */ + if (stat(fullPath, &statbuf) == 0 && S_ISDIR(statbuf.st_mode)) { + /* Skip over . and .. */ + if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) { + continue; + } + + /* Check if directory name matches regex */ + if (regexec(®ex, entry->d_name, 0, NULL, 0) == 0) { + count++; + LOG_UIO("Map found: %s\n", entry->d_name); + } } + } - LOG_UIO("Total directories matching 'map[0-9]+': %d\n", count); + LOG_UIO("Total directories matching 'map[0-9]+': %d\n", count); - closedir(dir); - regfree(®ex); + closedir(dir); + regfree(®ex); - return count; + return count; } -static int uio_map_size(int uio_num, int map_num) -{ - char path[MAX_PATHNAME]; - char buf[MAX_PATHNAME]; +static int uio_map_size(int uio_num, int map_num) { + char path[MAX_PATHNAME]; + char buf[MAX_PATHNAME]; - int len = snprintf(path, sizeof(path), "/sys/class/uio/uio%d/maps/map%d/size", uio_num, map_num); - if (len < 0 || len >= sizeof(path)) { - LOG_UIO_ERR("Failed to create uio%d map%d size path string\n", uio_num, map_num); - return -1; - } + int len = snprintf(path, sizeof(path), "/sys/class/uio/uio%d/maps/map%d/size", + uio_num, map_num); + if (len < 0 || len >= sizeof(path)) { + LOG_UIO_ERR("Failed to create uio%d map%d size path string\n", uio_num, + map_num); + return -1; + } - int fd = open(path, O_RDONLY); - if (fd < 0) { - LOG_UIO_ERR("Failed to open %s\n", path); - return -1; - } - ssize_t ret = read(fd, buf, sizeof(buf)); - if (ret < 0) { - LOG_UIO_ERR("Failed to read map%d size\n", map_num); - return -1; - } - close(fd); + int fd = open(path, O_RDONLY); + if (fd < 0) { + LOG_UIO_ERR("Failed to open %s\n", path); + return -1; + } + ssize_t ret = read(fd, buf, sizeof(buf)); + if (ret < 0) { + LOG_UIO_ERR("Failed to read map%d size\n", map_num); + return -1; + } + close(fd); - int size = strtoul(buf, NULL, 0); - if (size == 0 || size == ULONG_MAX) { - LOG_UIO_ERR("Failed to convert map%d size to integer\n", map_num); - return -1; - } + int size = strtoul(buf, NULL, 0); + if (size == 0 || size == ULONG_MAX) { + LOG_UIO_ERR("Failed to convert map%d size to integer\n", map_num); + return -1; + } - return size; + return size; } -static int uio_map_addr(int uio_num, int map_num, uintptr_t *addr) -{ - char path[MAX_PATHNAME]; - char buf[MAX_PATHNAME]; +static int uio_map_addr(int uio_num, int map_num, uintptr_t *addr) { + char path[MAX_PATHNAME]; + char buf[MAX_PATHNAME]; - int len = snprintf(path, sizeof(path), "/sys/class/uio/uio%d/maps/map%d/addr", uio_num, map_num); - if (len < 0 || len >= sizeof(path)) { - LOG_UIO_ERR("Failed to create uio%d map%d addr path string\n", uio_num, map_num); - return -1; - } + int len = snprintf(path, sizeof(path), "/sys/class/uio/uio%d/maps/map%d/addr", + uio_num, map_num); + if (len < 0 || len >= sizeof(path)) { + LOG_UIO_ERR("Failed to create uio%d map%d 
addr path string\n", uio_num, + map_num); + return -1; + } - int fd = open(path, O_RDONLY); - if (fd < 0) { - LOG_UIO_ERR("Failed to open %s\n", path); - return -1; - } - ssize_t ret = read(fd, buf, sizeof(buf)); - if (ret < 0) { - LOG_UIO_ERR("Failed to read map%d addr\n", map_num); - return -1; - } - close(fd); + int fd = open(path, O_RDONLY); + if (fd < 0) { + LOG_UIO_ERR("Failed to open %s\n", path); + return -1; + } + ssize_t ret = read(fd, buf, sizeof(buf)); + if (ret < 0) { + LOG_UIO_ERR("Failed to read map%d addr\n", map_num); + return -1; + } + close(fd); - uintptr_t ret_addr = strtoul(buf, NULL, 0); - if (ret_addr == 0 || ret_addr == ULONG_MAX) { - LOG_UIO_ERR("Failed to convert map%d addr to integer\n", map_num); - return -1; - } + uintptr_t ret_addr = strtoul(buf, NULL, 0); + if (ret_addr == 0 || ret_addr == ULONG_MAX) { + LOG_UIO_ERR("Failed to convert map%d addr to integer\n", map_num); + return -1; + } - *addr = ret_addr; + *addr = ret_addr; - return 0; + return 0; } -static int uio_map_init(int uio_fd, int uio_num) -{ - LOG_UIO("Initialising UIO device %d mappings\n", uio_num); +static int uio_map_init(int uio_fd, int uio_num) { + LOG_UIO("Initialising UIO device %d mappings\n", uio_num); - int curr_num_maps = uio_num_maps(uio_num); - if (curr_num_maps < 0) { - LOG_UIO_ERR("Failed to get number of maps\n"); - return -1; + int curr_num_maps = uio_num_maps(uio_num); + if (curr_num_maps < 0) { + LOG_UIO_ERR("Failed to get number of maps\n"); + return -1; + } + if (curr_num_maps == 0) { + LOG_UIO_ERR("No maps found\n"); + return -1; + } + + num_maps += curr_num_maps; + + for (int i = 0; i < curr_num_maps; i++) { + if (curr_map >= UIO_MAX_MAPS) { + LOG_UIO_ERR("too many maps, maximum is %d\n", UIO_MAX_MAPS); + close(uio_fd); + return -1; } - if (curr_num_maps == 0) { - LOG_UIO_ERR("No maps found\n"); - return -1; + + int size = uio_map_size(uio_num, i); + if (size < 0) { + LOG_UIO_ERR("Failed to get size of map%d\n", i); + close(uio_fd); + return -1; } - num_maps += curr_num_maps; - - for (int i = 0; i < curr_num_maps; i++) { - if (curr_map >= UIO_MAX_MAPS) { - LOG_UIO_ERR("too many maps, maximum is %d\n", UIO_MAX_MAPS); - close(uio_fd); - return -1; - } - - int size = uio_map_size(uio_num, i); - if (size < 0) { - LOG_UIO_ERR("Failed to get size of map%d\n", i); - close(uio_fd); - return -1; - } - - if (uio_map_addr(uio_num, i, &maps_phys[curr_map]) != 0) { - LOG_UIO_ERR("Failed to get addr of map%d\n", i); - close(uio_fd); - return -1; - } - - if ((maps[curr_map] = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, uio_fd, i * getpagesize())) == NULL) { - LOG_UIO_ERR("mmap failed, errno: %d\n", errno); - close(uio_fd); - return -1; - } - - LOG_UIO("mmaped map%d (driver map%d) with 0x%x bytes at %p\n", i, curr_map, size, maps[curr_map]); - - curr_map++; + if (uio_map_addr(uio_num, i, &maps_phys[curr_map]) != 0) { + LOG_UIO_ERR("Failed to get addr of map%d\n", i); + close(uio_fd); + return -1; } - return 0; + if ((maps[curr_map] = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, + uio_fd, i * getpagesize())) == NULL) { + LOG_UIO_ERR("mmap failed, errno: %d\n", errno); + close(uio_fd); + return -1; + } + + LOG_UIO("mmaped map%d (driver map%d) with 0x%x bytes at %p\n", i, curr_map, + size, maps[curr_map]); + + curr_map++; + } + + return 0; } int main(int argc, char **argv) { - if (argc < 1) { - printf("Usage: %s [driver_args...]\n", argv[0]); - return 1; + if (argc < 1) { + printf("Usage: %s [driver_args...]\n", argv[0]); + return 1; + } + + for (int uio_num = 0; uio_num < 
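       /* probe /dev/uio0, /dev/uio1, ... in order; the loop below breaks at
        * the first device node that fails to open */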
UIO_MAX_DEV; uio_num++) { + /* Append UIO_NUM to "/dev/uio" to get the full path of the uio device, e.g. + * "/dev/uio0" */ + char uio_device_name[MAX_PATHNAME]; + int len = snprintf(uio_device_name, sizeof(uio_device_name), "/dev/uio%d", + uio_num); + if (len < 0 || len >= sizeof(uio_device_name)) { + LOG_UIO_ERR("Failed to create uio device name string\n"); + return 1; } - for (int uio_num = 0; uio_num < UIO_MAX_DEV; uio_num++) { - /* Append UIO_NUM to "/dev/uio" to get the full path of the uio device, e.g. "/dev/uio0" */ - char uio_device_name[MAX_PATHNAME]; - int len = snprintf(uio_device_name, sizeof(uio_device_name), "/dev/uio%d", uio_num); - if (len < 0 || len >= sizeof(uio_device_name)) { - LOG_UIO_ERR("Failed to create uio device name string\n"); - return 1; - } - - int uio_fd = open(uio_device_name, O_RDWR); - if (uio_fd < 0) { - LOG_UIO("Failed to open %s\n", uio_device_name); - if (uio_num == MAIN_UIO_NUM) { - LOG_UIO_ERR("Could not open main UIO device, failing\n"); - return 1; - } else { - LOG_UIO("Assuming no more UIO devices\n"); - } - break; - } - - /* Initialise uio device mappings. This reads into /sys/class/uio to determine - * the number of associated devices, their maps and their sizes. - */ - if (uio_map_init(uio_fd, uio_num) != 0) { - LOG_UIO_ERR("Failed to initialise UIO device mappings\n"); - return 1; - } - - /* Set /dev/uio0 as the interrupt */ - if (uio_num == MAIN_UIO_NUM) { - LOG_UIO("Setting main uio device to %s\n", uio_device_name); - main_uio_fd = uio_fd; - } + int uio_fd = open(uio_device_name, O_RDWR); + if (uio_fd < 0) { + LOG_UIO("Failed to open %s\n", uio_device_name); + if (uio_num == MAIN_UIO_NUM) { + LOG_UIO_ERR("Could not open main UIO device, failing\n"); + return 1; + } else { + LOG_UIO("Assuming no more UIO devices\n"); + } + break; } - /* Enable uio interrupt on initialisation. */ - uio_notify(); + /* Initialise uio device mappings. This reads into /sys/class/uio to + * determine the number of associated devices, their maps and their sizes. + */ + if (uio_map_init(uio_fd, uio_num) != 0) { + LOG_UIO_ERR("Failed to initialise UIO device mappings\n"); + return 1; + } - /* Initialise driver */ - /* We pass the UIO device mappings to the driver, skipping the first one which only contains UIO's irq status */ - LOG_UIO("Initialising driver with %d maps\n", num_maps - 1); - if (driver_init(maps + 1, maps_phys + 1, num_maps - 1, argc - 1, argv + 1) != 0) { - LOG_UIO_ERR("Failed to initialise driver\n"); - return 1; + /* Set /dev/uio0 as the interrupt */ + if (uio_num == MAIN_UIO_NUM) { + LOG_UIO("Setting main uio device to %s\n", uio_device_name); + main_uio_fd = uio_fd; } - + } + + /* Enable uio interrupt on initialisation. 
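   * Writing 1 to the UIO fd both ACKs any pending IRQ and re-arms it (see
   * uio_notify() above), so a VMM notification raised while the driver was
   * still starting up is not lost.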
*/
+  uio_notify();
+
+  /* Initialise driver */
+  /* We pass the UIO device mappings to the driver, skipping the first one which
+   * only contains UIO's irq status */
+  LOG_UIO("Initialising driver with %d maps\n", num_maps - 1);
+  if (driver_init(maps + 1, maps_phys + 1, num_maps - 1, argc - 1, argv + 1) !=
+      0) {
+    LOG_UIO_ERR("Failed to initialise driver\n");
+    return 1;
+  }
+
     while (true) {
-        int irq_count;
-        int read_ret = read(main_uio_fd, &irq_count, sizeof(irq_count));
-        _unused(read_ret);
-        assert(read_ret >= 0);
-        LOG_UIO("received irq, count: %d\n", irq_count);
-
-        /* wake the guest driver up to do some real works */
-        driver_notified();
+    int irq_count;
+    int read_ret = read(main_uio_fd, &irq_count, sizeof(irq_count));
+    _unused(read_ret);
+    assert(read_ret >= 0);
+    LOG_UIO("received irq, count: %d\n", irq_count);
+
+    /* wake the guest driver up to do some real work */
+    driver_notified();
     }
 
     return 0;
diff --git a/tools/linux/uio_drivers/blk/blk.c b/tools/linux/uio_drivers/blk/blk.c
index 65597ed4..935cd59d 100644
--- a/tools/linux/uio_drivers/blk/blk.c
+++ b/tools/linux/uio_drivers/blk/blk.c
@@ -3,18 +3,18 @@
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
-#include 
-#include 
-#include 
+#include 
+#include 
 #include 
+#include 
+#include 
+#include 
 #include 
-#include 
-#include 
 #include 
-#include 
 #include 
+#include 
 #include 
-#include 
+#include 
 #include 
 #include 
@@ -50,12 +50,13 @@ int driver_init(void **maps, uintptr_t *maps_phys, int num_maps, int argc, char
 {
     LOG_UIO_BLOCK("Initialising...\n");
 
-    /* Expects a storage_info map, request queue map, response queue map, virt data map,
-     * and BLK_NUM_CLIENT data mappings.
+    /* Expects a storage_info map, request queue map, response queue map, virt
+     * data map, and BLK_NUM_CLIENT data mappings.
      */
     if (num_maps != BLK_NUM_CLIENTS + 4) {
-        LOG_UIO_BLOCK_ERR("Expecting %d maps, got %d\n", BLK_NUM_CLIENTS + 3, num_maps);
-        return -1;
+    LOG_UIO_BLOCK_ERR("Expecting %d maps, got %d\n", BLK_NUM_CLIENTS + 4,
+                      num_maps);
+    return -1;
     }
 
     if (argc != 1) {
@@ -71,18 +72,20 @@ int driver_init(void **maps, uintptr_t *maps_phys, int num_maps, int argc, char
     blk_data = (uintptr_t)maps[3];
     blk_data_phys = (uintptr_t)maps_phys[3];
 
-    LOG_UIO_BLOCK("Storage info phys addr: 0x%lx, Request queue phys addr: 0x%lx, Response queue phys addr: 0x%lx, Virt data io addr: 0x%lx\n",
-                  maps_phys[0], maps_phys[1], maps_phys[2], maps_phys[3]);
+    LOG_UIO_BLOCK(
+        "Storage info phys addr: 0x%lx, Request queue phys addr: 0x%lx, "
+        "Response queue phys addr: 0x%lx, Virt data io addr: 0x%lx\n",
+        maps_phys[0], maps_phys[1], maps_phys[2], maps_phys[3]);
 
     for (int i = 0; i < BLK_NUM_CLIENTS; i++) {
-        client_data[i] = (uintptr_t)maps[4 + i];
-        client_data_phys[i] = (uintptr_t)maps_phys[4 + i];
+    client_data[i] = (uintptr_t)maps[4 + i];
+    client_data_phys[i] = (uintptr_t)maps_phys[4 + i];
     }
 
 #ifdef DEBUG_UIO_BLOCK
     LOG_UIO_BLOCK("Client data io addr:\n");
     for (int i = 0; i < BLK_NUM_CLIENTS; i++) {
-        printf("   client %i: 0x%lx\n", i, client_data_phys[i]);
+    printf("   client %i: 0x%lx\n", i, client_data_phys[i]);
     }
 #endif
 
@@ -90,8 +93,8 @@ int driver_init(void **maps, uintptr_t *maps_phys, int num_maps, int argc, char
 
     storage_fd = open(storage_path, O_RDWR);
     if (storage_fd < 0) {
-        LOG_UIO_BLOCK_ERR("Failed to open storage driver: %s\n", strerror(errno));
-        return -1;
+    LOG_UIO_BLOCK_ERR("Failed to open storage driver: %s\n", strerror(errno));
+    return -1;
     }
     LOG_UIO_BLOCK("Opened storage drive: %s\n", storage_path);
 
@@ -137,7 +140,9 @@ int driver_init(void **maps, uintptr_t *maps_phys, 
int num_maps, int argc, char } blk_storage_info->capacity = size / BLK_TRANSFER_SIZE; - LOG_UIO_BLOCK("Raw block device: read_only=%d, sector_size=%d, capacity(sectors)=%ld\n", (int)blk_storage_info->read_only, + LOG_UIO_BLOCK("Raw block device: read_only=%d, sector_size=%d, " + "capacity(sectors)=%ld\n", + (int)blk_storage_info->read_only, blk_storage_info->sector_size, blk_storage_info->capacity); /* Optimal size */ @@ -152,116 +157,124 @@ int driver_init(void **maps, uintptr_t *maps_phys, int num_maps, int argc, char return 0; } -/* The virtualiser gives us an io address. We need to figure out which uio mapping - * this corresponds to, so that we can fetch the corresponding mmaped virt address. +/* The virtualiser gives us an io address. We need to figure out which uio + * mapping this corresponds to, so that we can fetch the corresponding mmaped + * virt address. */ -static inline uintptr_t io_to_virt(uintptr_t io_addr) -{ - if (io_addr - blk_data_phys < BLK_DATA_REGION_SIZE_DRIV) { - return blk_data + (io_addr - blk_data_phys); - } else { - for (int i = 0; i < BLK_NUM_CLIENTS; i++) { - if (io_addr - client_data_phys[i] < blk_virt_cli_data_region_size(i)) { - return client_data[i] + (io_addr - client_data_phys[i]); - } - } +static inline uintptr_t io_to_virt(uintptr_t io_addr) { + if (io_addr - blk_data_phys < BLK_DATA_REGION_SIZE_DRIV) { + return blk_data + (io_addr - blk_data_phys); + } else { + for (int i = 0; i < BLK_NUM_CLIENTS; i++) { + if (io_addr - client_data_phys[i] < blk_virt_cli_data_region_size(i)) { + return client_data[i] + (io_addr - client_data_phys[i]); + } } - assert(false); - return 0; + } + assert(false); + return 0; } void driver_notified() { - int err = 0; - _unused(err); - blk_req_code_t req_code; - uintptr_t req_io; - uint32_t req_block_number; - uint16_t req_count; - uint32_t req_id; - - while (!blk_queue_empty_req(&h)) { - err = blk_dequeue_req(&h, &req_code, &req_io, &req_block_number, &req_count, &req_id); - assert(!err); - LOG_UIO_BLOCK("Received command: code=%d, io=0x%lx, block_number=%d, count=%d, id=%d\n", req_code, req_io, - req_block_number, req_count, req_id); - - blk_resp_status_t status = BLK_RESP_OK; - uint16_t success_count = 0; - - switch (req_code) { - case BLK_REQ_READ: { - int ret = lseek(storage_fd, (off_t)req_block_number * BLK_TRANSFER_SIZE, SEEK_SET); - if (ret < 0) { - LOG_UIO_BLOCK_ERR("Failed to seek in storage: %s\n", strerror(errno)); - status = BLK_RESP_ERR_UNSPEC; - success_count = 0; - break; - } - LOG_UIO_BLOCK("Reading from storage, io address: 0x%lx\n", req_io); - - int bytes_read = read(storage_fd, (void *)io_to_virt(req_io), req_count * BLK_TRANSFER_SIZE); - LOG_UIO_BLOCK("Read from storage successfully: %d bytes\n", bytes_read); - if (bytes_read < 0) { - LOG_UIO_BLOCK_ERR("Failed to read from storage: %s\n", strerror(errno)); - status = BLK_RESP_ERR_UNSPEC; - success_count = 0; - } else { - status = BLK_RESP_OK; - success_count = bytes_read / BLK_TRANSFER_SIZE; - } - break; - } - case BLK_REQ_WRITE: { - int ret = lseek(storage_fd, (off_t)req_block_number * BLK_TRANSFER_SIZE, SEEK_SET); - if (ret < 0) { - LOG_UIO_BLOCK_ERR("Failed to seek in storage: %s\n", strerror(errno)); - status = BLK_RESP_ERR_UNSPEC; - success_count = 0; - break; - } - LOG_UIO_BLOCK("Writing to storage, io address: 0x%lx\n", req_io); - int bytes_written = write(storage_fd, (void *)io_to_virt(req_io), req_count * BLK_TRANSFER_SIZE); - LOG_UIO_BLOCK("Wrote to storage successfully: %d bytes\n", bytes_written); - if (bytes_written < 0) { - 
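        /* a negative return means the write failed outright; a short but
         * successful write is still reported via success_count below */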
LOG_UIO_BLOCK_ERR("Failed to write to storage: %s\n", strerror(errno)); - status = BLK_RESP_ERR_UNSPEC; - success_count = 0; - } else { - status = BLK_RESP_OK; - success_count = bytes_written / BLK_TRANSFER_SIZE; - } - break; - } - case BLK_REQ_FLUSH: { - int ret = fsync(storage_fd); - if (ret != 0) { - LOG_UIO_BLOCK_ERR("Failed to flush storage: %s\n", strerror(errno)); - status = BLK_RESP_ERR_UNSPEC; - } else { - status = BLK_RESP_OK; - } - break; - } - case BLK_REQ_BARRIER: { - int ret = fsync(storage_fd); - if (ret != 0) { - LOG_UIO_BLOCK_ERR("Failed to flush storage: %s\n", strerror(errno)); - status = BLK_RESP_ERR_UNSPEC; - } else { - status = BLK_RESP_OK; - } - break; - } - default: - LOG_UIO_BLOCK_ERR("Unknown command code: %d\n", req_code); - assert(false); - } - /* Response queue is never full if number of inflight requests <= response queue capacity */ - err = blk_enqueue_resp(&h, status, success_count, req_id); - assert(!err); - LOG_UIO_BLOCK("Enqueued response: status=%d, success_count=%d, id=%d\n", status, success_count, req_id); + int err = 0; + _unused(err); + blk_req_code_t req_code; + uintptr_t req_io; + uint32_t req_block_number; + uint16_t req_count; + uint32_t req_id; + + while (!blk_queue_empty_req(&h)) { + err = blk_dequeue_req(&h, &req_code, &req_io, &req_block_number, &req_count, + &req_id); + assert(!err); + LOG_UIO_BLOCK("Received command: code=%d, io=0x%lx, block_number=%d, " + "count=%d, id=%d\n", + req_code, req_io, req_block_number, req_count, req_id); + + blk_resp_status_t status = BLK_RESP_OK; + uint16_t success_count = 0; + + switch (req_code) { + case BLK_REQ_READ: { + int ret = lseek(storage_fd, (off_t)req_block_number * BLK_TRANSFER_SIZE, + SEEK_SET); + if (ret < 0) { + LOG_UIO_BLOCK_ERR("Failed to seek in storage: %s\n", strerror(errno)); + status = BLK_RESP_ERR_UNSPEC; + success_count = 0; + break; + } + LOG_UIO_BLOCK("Reading from storage, io address: 0x%lx\n", req_io); + + int bytes_read = read(storage_fd, (void *)io_to_virt(req_io), + req_count * BLK_TRANSFER_SIZE); + LOG_UIO_BLOCK("Read from storage successfully: %d bytes\n", bytes_read); + if (bytes_read < 0) { + LOG_UIO_BLOCK_ERR("Failed to read from storage: %s\n", strerror(errno)); + status = BLK_RESP_ERR_UNSPEC; + success_count = 0; + } else { + status = BLK_RESP_OK; + success_count = bytes_read / BLK_TRANSFER_SIZE; + } + break; + } + case BLK_REQ_WRITE: { + int ret = lseek(storage_fd, (off_t)req_block_number * BLK_TRANSFER_SIZE, + SEEK_SET); + if (ret < 0) { + LOG_UIO_BLOCK_ERR("Failed to seek in storage: %s\n", strerror(errno)); + status = BLK_RESP_ERR_UNSPEC; + success_count = 0; + break; + } + LOG_UIO_BLOCK("Writing to storage, io address: 0x%lx\n", req_io); + int bytes_written = write(storage_fd, (void *)io_to_virt(req_io), + req_count * BLK_TRANSFER_SIZE); + LOG_UIO_BLOCK("Wrote to storage successfully: %d bytes\n", bytes_written); + if (bytes_written < 0) { + LOG_UIO_BLOCK_ERR("Failed to write to storage: %s\n", strerror(errno)); + status = BLK_RESP_ERR_UNSPEC; + success_count = 0; + } else { + status = BLK_RESP_OK; + success_count = bytes_written / BLK_TRANSFER_SIZE; + } + break; + } + case BLK_REQ_FLUSH: { + int ret = fsync(storage_fd); + if (ret != 0) { + LOG_UIO_BLOCK_ERR("Failed to flush storage: %s\n", strerror(errno)); + status = BLK_RESP_ERR_UNSPEC; + } else { + status = BLK_RESP_OK; + } + break; + } + case BLK_REQ_BARRIER: { + int ret = fsync(storage_fd); + if (ret != 0) { + LOG_UIO_BLOCK_ERR("Failed to flush storage: %s\n", strerror(errno)); + status = BLK_RESP_ERR_UNSPEC; + 
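        /* on success the barrier is complete: fsync() does not return until
         * earlier writes have reached the backing device */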
} else { + status = BLK_RESP_OK; + } + break; + } + default: + LOG_UIO_BLOCK_ERR("Unknown command code: %d\n", req_code); + assert(false); } + /* Response queue is never full if number of inflight requests <= response + * queue capacity */ + err = blk_enqueue_resp(&h, status, success_count, req_id); + assert(!err); + LOG_UIO_BLOCK("Enqueued response: status=%d, success_count=%d, id=%d\n", + status, success_count, req_id); + } uio_notify(); LOG_UIO_BLOCK("Notified other side\n");
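  /* Reviewer aside (illustrative only, not part of this patch): the restyle
   * does not change the driver contract libuio expects. A minimal client
   * provides the two entry points invoked by main() in libuio.c above and
   * receives every UIO map except map 0, the IRQ status page, which libuio
   * strips before the call:
   *
   *   #include <stdint.h>
   *
   *   void uio_notify(void);  // provided by libuio
   *
   *   int driver_init(void **maps, uintptr_t *maps_phys, int num_maps,
   *                   int argc, char **argv) {
   *     // inspect maps[0..num_maps-1] and maps_phys[0..num_maps-1] here
   *     return 0;  // non-zero makes libuio's main() exit with an error
   *   }
   *
   *   void driver_notified() {
   *     // called from libuio's read() loop each time the interrupt fires;
   *     // do the work, then ACK and re-arm the IRQ
   *     uio_notify();
   *   }
   */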