import RT-Thread@9217865c without bsp, libcpu and components/net

Zihao Yu 2023-05-20 16:23:33 +08:00
commit e2376a3709
1414 changed files with 390370 additions and 0 deletions


@@ -0,0 +1,11 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = Glob('*.c')
CPPPATH = [cwd]
group = DefineGroup('DeviceDrivers', src, depend = ['RT_USING_VIRTIO'], CPPPATH = CPPPATH)
Return('group')
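The group above is only compiled when RT_USING_VIRTIO is enabled in the build configuration. A minimal sketch of the corresponding rtconfig.h switches, using the option names that appear in the guards of the sources below (which ones are needed depends on the BSP):

#define RT_USING_VIRTIO
#define RT_USING_VIRTIO10
#define RT_USING_VIRTIO_BLK
#define RT_USING_VIRTIO_CONSOLE
#define RT_USING_VIRTIO_GPU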


@@ -0,0 +1,324 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-11 GuEe-GUI the first version
*/
#include <rtthread.h>
#include <cpuport.h>
#include <virtio.h>
rt_inline void _virtio_dev_check(struct virtio_device *dev)
{
RT_ASSERT(dev != RT_NULL);
RT_ASSERT(dev->mmio_config != RT_NULL);
}
void virtio_reset_device(struct virtio_device *dev)
{
_virtio_dev_check(dev);
dev->mmio_config->status = 0;
}
void virtio_status_acknowledge_driver(struct virtio_device *dev)
{
_virtio_dev_check(dev);
dev->mmio_config->status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER;
}
void virtio_status_driver_ok(struct virtio_device *dev)
{
_virtio_dev_check(dev);
dev->mmio_config->status |= VIRTIO_STATUS_FEATURES_OK | VIRTIO_STATUS_DRIVER_OK;
}
void virtio_interrupt_ack(struct virtio_device *dev)
{
rt_uint32_t status;
_virtio_dev_check(dev);
status = dev->mmio_config->interrupt_status;
if (status != 0)
{
dev->mmio_config->interrupt_ack = status;
}
}
rt_bool_t virtio_has_feature(struct virtio_device *dev, rt_uint32_t feature_bit)
{
_virtio_dev_check(dev);
return !!(dev->mmio_config->device_features & (1UL << feature_bit));
}
rt_err_t virtio_queues_alloc(struct virtio_device *dev, rt_size_t queues_num)
{
_virtio_dev_check(dev);
dev->queues = rt_malloc(sizeof(struct virtq) * queues_num);
if (dev->queues != RT_NULL)
{
dev->queues_num = queues_num;
return RT_EOK;
}
return -RT_ENOMEM;
}
void virtio_queues_free(struct virtio_device *dev)
{
if (dev->queues != RT_NULL)
{
dev->queues_num = 0;
rt_free(dev->queues);
}
}
rt_err_t virtio_queue_init(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t ring_size)
{
int i;
void *pages;
rt_size_t pages_total_size;
struct virtq *queue;
_virtio_dev_check(dev);
RT_ASSERT(dev->mmio_config->queue_num_max > 0);
RT_ASSERT(dev->mmio_config->queue_num_max > queue_index);
/* ring_size must be a power of 2 */
RT_ASSERT(ring_size > 0);
RT_ASSERT(((ring_size - 1) & ring_size) == 0);
queue = &dev->queues[queue_index];
pages_total_size = VIRTIO_PAGE_ALIGN(
VIRTQ_DESC_TOTAL_SIZE(ring_size) + VIRTQ_AVAIL_TOTAL_SIZE(ring_size)) + VIRTQ_USED_TOTAL_SIZE(ring_size);
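/*
 * Legacy MMIO layout: the descriptor table and the available ring share the
 * first page-aligned region, and the used ring starts on the next page
 * boundary, matching the desc/avail/used pointers set up below
 */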
pages = rt_malloc_align(pages_total_size, VIRTIO_PAGE_SIZE);
if (pages == RT_NULL)
{
return -RT_ENOMEM;
}
queue->free = rt_malloc(sizeof(rt_bool_t) * ring_size);
if (queue->free == RT_NULL)
{
rt_free_align(pages);
return -RT_ENOMEM;
}
rt_memset(pages, 0, pages_total_size);
dev->mmio_config->guest_page_size = VIRTIO_PAGE_SIZE;
dev->mmio_config->queue_sel = queue_index;
dev->mmio_config->queue_num = ring_size;
dev->mmio_config->queue_align = VIRTIO_PAGE_SIZE;
dev->mmio_config->queue_pfn = VIRTIO_VA2PA(pages) >> VIRTIO_PAGE_SHIFT;
queue->num = ring_size;
queue->desc = (struct virtq_desc *)((rt_ubase_t)pages);
queue->avail = (struct virtq_avail *)(((rt_ubase_t)pages) + VIRTQ_DESC_TOTAL_SIZE(ring_size));
queue->used = (struct virtq_used *)VIRTIO_PAGE_ALIGN(
(rt_ubase_t)&queue->avail->ring[ring_size] + VIRTQ_AVAIL_RES_SIZE);
queue->used_idx = 0;
/* All descriptors start out unused */
for (i = 0; i < ring_size; ++i)
{
queue->free[i] = RT_TRUE;
}
queue->free_count = ring_size;
return RT_EOK;
}
void virtio_queue_destroy(struct virtio_device *dev, rt_uint32_t queue_index)
{
struct virtq *queue;
_virtio_dev_check(dev);
RT_ASSERT(dev->mmio_config->queue_num_max > 0);
RT_ASSERT(dev->mmio_config->queue_num_max > queue_index);
queue = &dev->queues[queue_index];
RT_ASSERT(queue->num > 0);
rt_free(queue->free);
rt_free_align((void *)queue->desc);
dev->mmio_config->queue_sel = queue_index;
dev->mmio_config->queue_pfn = RT_NULL;
queue->num = 0;
queue->desc = RT_NULL;
queue->avail = RT_NULL;
queue->used = RT_NULL;
}
void virtio_queue_notify(struct virtio_device *dev, rt_uint32_t queue_index)
{
_virtio_dev_check(dev);
dev->mmio_config->queue_notify = queue_index;
}
void virtio_submit_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
{
rt_size_t ring_size;
struct virtq *queue;
_virtio_dev_check(dev);
queue = &dev->queues[queue_index];
ring_size = queue->num;
/* Tell the device the first index in our chain of descriptors */
queue->avail->ring[queue->avail->idx % ring_size] = desc_index;
rt_hw_dsb();
/* Tell the device another avail ring entry is available */
queue->avail->idx++;
rt_hw_dsb();
}
rt_uint16_t virtio_alloc_desc(struct virtio_device *dev, rt_uint32_t queue_index)
{
int i;
struct virtq *queue;
_virtio_dev_check(dev);
RT_ASSERT(queue_index < dev->queues_num);
queue = &dev->queues[queue_index];
if (queue->free_count > 0)
{
rt_size_t ring_size = queue->num;
for (i = 0; i < ring_size; ++i)
{
if (queue->free[i])
{
queue->free[i] = RT_FALSE;
queue->free_count--;
return (rt_uint16_t)i;
}
}
}
return VIRTQ_INVALID_DESC_ID;
}
void virtio_free_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
{
struct virtq *queue;
_virtio_dev_check(dev);
queue = &dev->queues[queue_index];
RT_ASSERT(queue_index < dev->queues_num);
RT_ASSERT(!queue->free[desc_index]);
queue->desc[desc_index].addr = 0;
queue->desc[desc_index].len = 0;
queue->desc[desc_index].flags = 0;
queue->desc[desc_index].next = 0;
queue->free[desc_index] = RT_TRUE;
queue->free_count++;
}
rt_err_t virtio_alloc_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t count,
rt_uint16_t *indexs)
{
int i, j;
_virtio_dev_check(dev);
RT_ASSERT(indexs != RT_NULL);
if (dev->queues[queue_index].free_count < count)
{
return -RT_ERROR;
}
for (i = 0; i < count; ++i)
{
indexs[i] = virtio_alloc_desc(dev, queue_index);
if (indexs[i] == VIRTQ_INVALID_DESC_ID)
{
for (j = 0; j < i; ++j)
{
virtio_free_desc(dev, queue_index, indexs[j]);
}
return -RT_ERROR;
}
}
return RT_EOK;
}
void virtio_free_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
{
rt_uint16_t flags, next;
struct virtq_desc *desc;
_virtio_dev_check(dev);
desc = &dev->queues[queue_index].desc[0];
for (;;)
{
flags = desc[desc_index].flags;
next = desc[desc_index].next;
virtio_free_desc(dev, queue_index, desc_index);
if (flags & VIRTQ_DESC_F_NEXT)
{
desc_index = next;
}
else
{
break;
}
}
}
void virtio_fill_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index,
rt_uint64_t addr, rt_uint32_t len, rt_uint16_t flags, rt_uint16_t next)
{
struct virtq_desc *desc;
_virtio_dev_check(dev);
desc = &dev->queues[queue_index].desc[desc_index];
desc->addr = addr;
desc->len = len;
desc->flags = flags;
desc->next = next;
}


@@ -0,0 +1,155 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-9-16 GuEe-GUI the first version
* 2021-11-11 GuEe-GUI modify to virtio common interface
*/
#ifndef __VIRTIO_H__
#define __VIRTIO_H__
#include <rthw.h>
#include <rtdef.h>
#ifdef RT_USING_SMART
#include <mmu.h>
#include <ioremap.h>
#endif
#if RT_NAME_MAX < 16
#error "Please set RT_NAME_MAX to at lest 16"
#endif
#ifdef RT_USING_VIRTIO10
#define RT_USING_VIRTIO_VERSION 0x1
#endif
#include <virtio_mmio.h>
#include <virtio_queue.h>
#define VIRTIO_MAGIC_VALUE 0x74726976 /* "virt" */
#define VIRTIO_STATUS_ACKNOWLEDGE (1 << 0)
#define VIRTIO_STATUS_DRIVER (1 << 1)
#define VIRTIO_STATUS_DRIVER_OK (1 << 2)
#define VIRTIO_STATUS_FEATURES_OK (1 << 3)
#define VIRTIO_STATUS_NEEDS_RESET (1 << 6)
#define VIRTIO_STATUS_FAILED (1 << 7)
#define VIRTIO_F_NOTIFY_ON_EMPTY 24
#define VIRTIO_F_ANY_LAYOUT 27
#define VIRTIO_F_RING_INDIRECT_DESC 28
#define VIRTIO_F_RING_EVENT_IDX 29
#define VIRTIO_F_VERSION_1 32
#define VIRTIO_F_RING_PACKED 34
#ifdef RT_USING_SMART
#define VIRTIO_VA2PA(vaddr) ((rt_ubase_t)rt_kmem_v2p(vaddr))
#define VIRTIO_PA2VA(paddr) ((rt_ubase_t)rt_ioremap((void *)paddr, ARCH_PAGE_SIZE))
#else
#define VIRTIO_VA2PA(vaddr) ((rt_ubase_t)vaddr)
#define VIRTIO_PA2VA(paddr) ((rt_ubase_t)paddr)
#endif /* RT_USING_SMART */
#define VIRTIO_PAGE_SHIFT 12
#define VIRTIO_PAGE_SIZE (1 << VIRTIO_PAGE_SHIFT)
#define VIRTIO_PAGE_ALIGN(addr) (RT_ALIGN(addr, VIRTIO_PAGE_SIZE))
enum
{
/* virtio 1.0 */
VIRTIO_DEVICE_ID_INVALID = 0, /* Invalid device */
VIRTIO_DEVICE_ID_NET = 1, /* Net */
VIRTIO_DEVICE_ID_BLOCK = 2, /* Block */
VIRTIO_DEVICE_ID_CONSOLE = 3, /* Console */
VIRTIO_DEVICE_ID_RNG = 4, /* Rng */
VIRTIO_DEVICE_ID_BALLOON = 5, /* Balloon */
VIRTIO_DEVICE_ID_IOMEM = 6, /* IO memory */
VIRTIO_DEVICE_ID_RPMSG = 7, /* Remote processor messaging */
VIRTIO_DEVICE_ID_SCSI = 8, /* SCSI */
VIRTIO_DEVICE_ID_9P = 9, /* 9p console */
VIRTIO_DEVICE_ID_MAC80211_WLAN = 10, /* Mac80211 wlan */
VIRTIO_DEVICE_ID_RPROC_SERIAL = 11, /* Remoteproc serial link */
VIRTIO_DEVICE_ID_CAIF = 12, /* CAIF */
VIRTIO_DEVICE_ID_MEM_BALLOON = 13, /* Memory balloon */
VIRTIO_DEVICE_ID_GPU = 16, /* GPU */
VIRTIO_DEVICE_ID_TIME = 17, /* Timer/clock device */
VIRTIO_DEVICE_ID_INPUT = 18, /* Input */
/* virtio 1.1 */
VIRTIO_DEVICE_ID_SOCKET = 19, /* Socket device */
VIRTIO_DEVICE_ID_CRYPTO = 20, /* Crypto device */
VIRTIO_DEVICE_ID_SIG_DIS_MOD = 21, /* Signal Distribution Module */
VIRTIO_DEVICE_ID_PSTORE = 22, /* Pstore device */
VIRTIO_DEVICE_ID_IOMMU = 23, /* IOMMU device */
VIRTIO_DEVICE_ID_MEM = 24, /* Memory device */
/* virtio 1.2 */
VIRTIO_DEVICE_ID_AUDIO = 25, /* Audio device */
VIRTIO_DEVICE_ID_FS = 26, /* File system device */
VIRTIO_DEVICE_ID_PMEM = 27, /* PMEM device */
VIRTIO_DEVICE_ID_RPMB = 28, /* RPMB device */
VIRTIO_DEVICE_ID_MAC80211_HWSIM = 29, /* Mac80211 hwsim wireless simulation device */
VIRTIO_DEVICE_ID_VIDEO_ENCODER = 30, /* Video encoder device */
VIRTIO_DEVICE_ID_VIDEO_DECODER = 31, /* Video decoder device */
VIRTIO_DEVICE_ID_SCMI = 32, /* SCMI device */
VIRTIO_DEVICE_ID_NITRO_SEC_MOD = 33, /* NitroSecureModule */
VIRTIO_DEVICE_ID_I2C_ADAPTER = 34, /* I2C adapter */
VIRTIO_DEVICE_ID_WATCHDOG = 35, /* Watchdog */
VIRTIO_DEVICE_ID_CAN = 36, /* CAN device */
VIRTIO_DEVICE_ID_DMABUF = 37, /* Virtio dmabuf */
VIRTIO_DEVICE_ID_PARAM_SERV = 38, /* Parameter Server */
VIRTIO_DEVICE_ID_AUDIO_POLICY = 39, /* Audio policy device */
VIRTIO_DEVICE_ID_BT = 40, /* Bluetooth device */
VIRTIO_DEVICE_ID_GPIO = 41, /* GPIO device */
VIRTIO_DEVICE_ID_RDMA = 42, /* RDMA device */
VIRTIO_DEVICE_TYPE_SIZE
};
struct virtio_device
{
rt_uint32_t irq;
struct virtq *queues;
rt_size_t queues_num;
union
{
rt_ubase_t *mmio_base;
struct virtio_mmio_config *mmio_config;
};
#ifdef RT_USING_SMP
struct rt_spinlock spinlock;
#endif
void *priv;
};
typedef rt_err_t (*virtio_device_init_handler)(rt_ubase_t *mmio_base, rt_uint32_t irq);
void virtio_reset_device(struct virtio_device *dev);
void virtio_status_acknowledge_driver(struct virtio_device *dev);
void virtio_status_driver_ok(struct virtio_device *dev);
void virtio_interrupt_ack(struct virtio_device *dev);
rt_bool_t virtio_has_feature(struct virtio_device *dev, rt_uint32_t feature_bit);
rt_err_t virtio_queues_alloc(struct virtio_device *dev, rt_size_t queues_num);
void virtio_queues_free(struct virtio_device *dev);
rt_err_t virtio_queue_init(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t ring_size);
void virtio_queue_destroy(struct virtio_device *dev, rt_uint32_t queue_index);
void virtio_queue_notify(struct virtio_device *dev, rt_uint32_t queue_index);
void virtio_submit_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index);
rt_uint16_t virtio_alloc_desc(struct virtio_device *dev, rt_uint32_t queue_index);
void virtio_free_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index);
rt_err_t virtio_alloc_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t count,
rt_uint16_t *indexs);
void virtio_free_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index);
void virtio_fill_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index,
rt_uint64_t addr, rt_uint32_t len, rt_uint16_t flags, rt_uint16_t next);
#endif /* __VIRTIO_H__ */
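All of the drivers below follow the same bring-up order against this interface. A condensed sketch of that flow (hypothetical probe function; device-specific setup, interrupt installation and error paths trimmed):

#include <virtio.h>

static rt_err_t example_virtio_probe(rt_ubase_t *mmio_base, rt_uint32_t irq)
{
    static struct virtio_device dev;

    dev.irq = irq;
    dev.mmio_base = mmio_base;

    /* Reset, then acknowledge the device and declare a driver for it */
    virtio_reset_device(&dev);
    virtio_status_acknowledge_driver(&dev);

    /* Negotiate features by masking out what this driver does not support */
    dev.mmio_config->driver_features = dev.mmio_config->device_features &
        ~((1 << VIRTIO_F_RING_EVENT_IDX) | (1 << VIRTIO_F_RING_INDIRECT_DESC));
    virtio_status_driver_ok(&dev);

    /* One queue with an 8-entry ring; afterwards the device accepts requests */
    if (virtio_queues_alloc(&dev, 1) != RT_EOK ||
        virtio_queue_init(&dev, 0, 8) != RT_EOK)
    {
        return -RT_ENOMEM;
    }

    return RT_EOK;
}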


@@ -0,0 +1,253 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-9-16 GuEe-GUI the first version
* 2021-11-11 GuEe-GUI using virtio common interface
*/
#include <rthw.h>
#include <rtthread.h>
#include <cpuport.h>
#ifdef RT_USING_VIRTIO_BLK
#include <virtio_blk.h>
static void virtio_blk_rw(struct virtio_blk_device *virtio_blk_dev, rt_off_t pos, void *buffer, rt_size_t count,
int flags)
{
rt_uint16_t idx[3];
rt_size_t size = count * virtio_blk_dev->config->blk_size;
struct virtio_device *virtio_dev = &virtio_blk_dev->virtio_dev;
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
/* Allocate 3 descriptors */
while (virtio_alloc_desc_chain(virtio_dev, 0, 3, idx))
{
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
rt_thread_yield();
#ifdef RT_USING_SMP
level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
}
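/*
 * Build a three-descriptor chain: idx[0] carries the request header,
 * idx[1] points at the data buffer (device-writable for reads) and
 * idx[2] is the one-byte status the device writes back
 */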
virtio_blk_dev->info[idx[0]].status = 0xff;
virtio_blk_dev->info[idx[0]].valid = RT_TRUE;
virtio_blk_dev->info[idx[0]].req.type = flags;
virtio_blk_dev->info[idx[0]].req.ioprio = 0;
virtio_blk_dev->info[idx[0]].req.sector = pos * (virtio_blk_dev->config->blk_size / 512);
flags = flags == VIRTIO_BLK_T_OUT ? 0 : VIRTQ_DESC_F_WRITE;
virtio_fill_desc(virtio_dev, VIRTIO_BLK_QUEUE, idx[0],
VIRTIO_VA2PA(&virtio_blk_dev->info[idx[0]].req), sizeof(struct virtio_blk_req), VIRTQ_DESC_F_NEXT, idx[1]);
virtio_fill_desc(virtio_dev, VIRTIO_BLK_QUEUE, idx[1],
VIRTIO_VA2PA(buffer), size, flags | VIRTQ_DESC_F_NEXT, idx[2]);
virtio_fill_desc(virtio_dev, VIRTIO_BLK_QUEUE, idx[2],
VIRTIO_VA2PA(&virtio_blk_dev->info[idx[0]].status), sizeof(rt_uint8_t), VIRTQ_DESC_F_WRITE, 0);
virtio_submit_chain(virtio_dev, VIRTIO_BLK_QUEUE, idx[0]);
virtio_queue_notify(virtio_dev, VIRTIO_BLK_QUEUE);
/* Wait for virtio_blk_isr() to finish */
while (virtio_blk_dev->info[idx[0]].valid)
{
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
rt_thread_yield();
#ifdef RT_USING_SMP
level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
}
virtio_free_desc_chain(virtio_dev, VIRTIO_BLK_QUEUE, idx[0]);
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}
static rt_ssize_t virtio_blk_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t count)
{
virtio_blk_rw((struct virtio_blk_device *)dev, pos, buffer, count, VIRTIO_BLK_T_IN);
return count;
}
static rt_ssize_t virtio_blk_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t count)
{
virtio_blk_rw((struct virtio_blk_device *)dev, pos, (void *)buffer, count, VIRTIO_BLK_T_OUT);
return count;
}
static rt_err_t virtio_blk_control(rt_device_t dev, int cmd, void *args)
{
rt_err_t status = RT_EOK;
struct virtio_blk_device *virtio_blk_dev = (struct virtio_blk_device *)dev;
switch (cmd)
{
case RT_DEVICE_CTRL_BLK_GETGEOME:
{
struct rt_device_blk_geometry *geometry = (struct rt_device_blk_geometry *)args;
if (geometry == RT_NULL)
{
status = -RT_ERROR;
break;
}
geometry->bytes_per_sector = VIRTIO_BLK_BYTES_PER_SECTOR;
geometry->block_size = virtio_blk_dev->config->blk_size;
geometry->sector_count = virtio_blk_dev->config->capacity;
}
break;
default:
status = -RT_EINVAL;
break;
}
return status;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops virtio_blk_ops =
{
RT_NULL,
RT_NULL,
RT_NULL,
virtio_blk_read,
virtio_blk_write,
virtio_blk_control
};
#endif
static void virtio_blk_isr(int irqno, void *param)
{
rt_uint32_t id;
struct virtio_blk_device *virtio_blk_dev = (struct virtio_blk_device *)param;
struct virtio_device *virtio_dev = &virtio_blk_dev->virtio_dev;
struct virtq *queue = &virtio_dev->queues[VIRTIO_BLK_QUEUE];
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
virtio_interrupt_ack(virtio_dev);
rt_hw_dsb();
/* The device increments queue->used->idx when it adds an entry to the used ring */
while (queue->used_idx != queue->used->idx)
{
rt_hw_dsb();
id = queue->used->ring[queue->used_idx % queue->num].id;
RT_ASSERT(virtio_blk_dev->info[id].status == 0);
/* Done with buffer */
virtio_blk_dev->info[id].valid = RT_FALSE;
queue->used_idx++;
}
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}
rt_err_t rt_virtio_blk_init(rt_ubase_t *mmio_base, rt_uint32_t irq)
{
static int dev_no = 0;
char dev_name[RT_NAME_MAX];
struct virtio_device *virtio_dev;
struct virtio_blk_device *virtio_blk_dev;
virtio_blk_dev = rt_malloc(sizeof(struct virtio_blk_device));
if (virtio_blk_dev == RT_NULL)
{
return -RT_ENOMEM;
}
virtio_dev = &virtio_blk_dev->virtio_dev;
virtio_dev->irq = irq;
virtio_dev->mmio_base = mmio_base;
virtio_blk_dev->config = (struct virtio_blk_config *)virtio_dev->mmio_config->config;
#ifdef RT_USING_SMP
rt_spin_lock_init(&virtio_dev->spinlock);
#endif
virtio_reset_device(virtio_dev);
virtio_status_acknowledge_driver(virtio_dev);
/* Negotiate features */
virtio_dev->mmio_config->driver_features = virtio_dev->mmio_config->device_features & ~(
(1 << VIRTIO_BLK_F_RO) |
(1 << VIRTIO_BLK_F_MQ) |
(1 << VIRTIO_BLK_F_SCSI) |
(1 << VIRTIO_BLK_F_CONFIG_WCE) |
(1 << VIRTIO_F_ANY_LAYOUT) |
(1 << VIRTIO_F_RING_EVENT_IDX) |
(1 << VIRTIO_F_RING_INDIRECT_DESC));
/* Tell device that feature negotiation is complete and we're completely ready */
virtio_status_driver_ok(virtio_dev);
if (virtio_queues_alloc(virtio_dev, 1) != RT_EOK)
{
goto _alloc_fail;
}
/* Initialize queue 0 */
if (virtio_queue_init(virtio_dev, 0, VIRTIO_BLK_QUEUE_RING_SIZE) != RT_EOK)
{
goto _alloc_fail;
}
virtio_blk_dev->parent.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
virtio_blk_dev->parent.ops = &virtio_blk_ops;
#else
virtio_blk_dev->parent.init = RT_NULL;
virtio_blk_dev->parent.open = RT_NULL;
virtio_blk_dev->parent.close = RT_NULL;
virtio_blk_dev->parent.read = virtio_blk_read;
virtio_blk_dev->parent.write = virtio_blk_write;
virtio_blk_dev->parent.control = virtio_blk_control;
#endif
rt_snprintf(dev_name, RT_NAME_MAX, "virtio-blk%d", dev_no++);
rt_hw_interrupt_install(irq, virtio_blk_isr, virtio_blk_dev, dev_name);
rt_hw_interrupt_umask(irq);
return rt_device_register((rt_device_t)virtio_blk_dev, dev_name, RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE);
_alloc_fail:
if (virtio_blk_dev != RT_NULL)
{
virtio_queues_free(virtio_dev);
rt_free(virtio_blk_dev);
}
return -RT_ENOMEM;
}
#endif /* RT_USING_VIRTIO_BLK */


@@ -0,0 +1,105 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-9-16 GuEe-GUI the first version
* 2021-11-11 GuEe-GUI using virtio common interface
*/
#ifndef __VIRTIO_BLK_H__
#define __VIRTIO_BLK_H__
#include <rtdef.h>
#include <virtio.h>
#define VIRTIO_BLK_QUEUE 0
#define VIRTIO_BLK_BYTES_PER_SECTOR 512
#define VIRTIO_BLK_QUEUE_RING_SIZE 4
#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
#define VIRTIO_BLK_F_MQ 12 /* Support more than one vq */
#define VIRTIO_BLK_T_IN 0 /* Read the blk */
#define VIRTIO_BLK_T_OUT 1 /* Write the blk */
#define VIRTIO_BLK_T_SCSI_CMD 2
#define VIRTIO_BLK_T_SCSI_CMD_OUT 3
#define VIRTIO_BLK_T_FLUSH 4
#define VIRTIO_BLK_T_FLUSH_OUT 5
struct virtio_blk_req
{
rt_uint32_t type;
rt_uint32_t ioprio;
rt_uint64_t sector;
};
struct virtio_blk_config
{
rt_uint64_t capacity; /* The capacity (in 512-byte sectors). */
rt_uint32_t size_max; /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */
rt_uint32_t seg_max; /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
/* Geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
struct virtio_blk_geometry
{
rt_uint16_t cylinders;
rt_uint8_t heads;
rt_uint8_t sectors;
} geometry;
rt_uint32_t blk_size; /* Block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
struct virtio_blk_topology
{
/* # Of logical blocks per physical block (log2) */
rt_uint8_t physical_block_exp;
/* Offset of first aligned logical block */
rt_uint8_t alignment_offset;
/* Suggested minimum I/O size in blocks */
rt_uint16_t min_io_size;
/* Optimal (suggested maximum) I/O size in blocks */
rt_uint32_t opt_io_size;
} topology;
rt_uint8_t writeback;
rt_uint8_t unused0;
rt_uint16_t num_queues;
rt_uint32_t max_discard_sectors;
rt_uint32_t max_discard_seg;
rt_uint32_t discard_sector_alignment;
rt_uint32_t max_write_zeroes_sectors;
rt_uint32_t max_write_zeroes_seg;
rt_uint8_t write_zeroes_may_unmap;
rt_uint8_t unused1[3];
rt_uint32_t max_secure_erase_sectors;
rt_uint32_t max_secure_erase_seg;
rt_uint32_t secure_erase_sector_alignment;
} __attribute__((packed));
struct virtio_blk_device
{
struct rt_device parent;
struct virtio_device virtio_dev;
struct virtio_blk_config *config;
struct
{
rt_bool_t valid;
rt_uint8_t status;
struct virtio_blk_req req;
} info[VIRTIO_BLK_QUEUE_RING_SIZE];
};
rt_err_t rt_virtio_blk_init(rt_ubase_t *mmio_base, rt_uint32_t irq);
#endif /* __VIRTIO_BLK_H__ */
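A minimal consumer sketch for the registered block device, assuming the first instance is named "virtio-blk0" and going through the generic RT-Thread device API (error handling and buffer sizing against the reported geometry are omitted):

#include <rtthread.h>

static void example_blk_read_first_block(void)
{
    static rt_uint8_t buffer[4096];
    rt_device_t dev = rt_device_find("virtio-blk0");

    if (dev != RT_NULL && rt_device_open(dev, RT_DEVICE_OFLAG_RDWR) == RT_EOK)
    {
        /* pos and count are in units of the device block size */
        rt_device_read(dev, 0, buffer, 1);
        rt_device_close(dev);
    }
}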


@@ -0,0 +1,778 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-11 GuEe-GUI the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <cpuport.h>
#ifdef RT_USING_VIRTIO_CONSOLE
#include <virtio_console.h>
struct port_device
{
struct rt_device parent;
rt_list_t node;
rt_uint32_t port_id;
rt_bool_t rx_notify;
rt_bool_t need_destroy;
struct virtio_console_device *console;
struct virtq *queue_rx, *queue_tx;
rt_uint32_t queue_rx_index, queue_tx_index;
#ifdef RT_USING_SMP
struct rt_spinlock spinlock_rx, spinlock_tx;
#endif
struct rt_device_notify rx_notify_helper;
struct
{
char rx_char, tx_char;
} info[VIRTIO_CONSOLE_QUEUE_SIZE];
};
static void virtio_console_send_ctrl(struct virtio_console_device *virtio_console_dev,
struct virtio_console_control *ctrl)
{
rt_uint16_t id;
struct virtio_device *virtio_dev = &virtio_console_dev->virtio_dev;
struct virtq *queue_ctrl_tx;
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
queue_ctrl_tx = &virtio_dev->queues[VIRTIO_CONSOLE_QUEUE_CTRL_TX];
id = queue_ctrl_tx->avail->idx % queue_ctrl_tx->num;
rt_memcpy(&virtio_console_dev->info[id].tx_ctrl, ctrl, sizeof(struct virtio_console_control));
virtio_free_desc(virtio_dev, VIRTIO_CONSOLE_QUEUE_CTRL_TX, id);
virtio_fill_desc(virtio_dev, VIRTIO_CONSOLE_QUEUE_CTRL_TX, id,
virtio_console_dev->info[id].tx_ctrl_paddr, sizeof(struct virtio_console_control), 0, 0);
virtio_submit_chain(virtio_dev, VIRTIO_CONSOLE_QUEUE_CTRL_TX, id);
virtio_queue_notify(virtio_dev, VIRTIO_CONSOLE_QUEUE_CTRL_TX);
virtio_alloc_desc(virtio_dev, VIRTIO_CONSOLE_QUEUE_CTRL_TX);
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}
static rt_err_t virtio_console_port_init(rt_device_t dev);
static rt_err_t virtio_console_port_open(rt_device_t dev, rt_uint16_t oflag);
static rt_err_t virtio_console_port_close(rt_device_t dev);
static rt_ssize_t virtio_console_port_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size);
static rt_ssize_t virtio_console_port_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size);
static rt_err_t virtio_console_port_control(rt_device_t dev, int cmd, void *args);
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops virtio_console_port_ops =
{
virtio_console_port_init,
virtio_console_port_open,
virtio_console_port_close,
virtio_console_port_read,
virtio_console_port_write,
virtio_console_port_control
};
#endif
static rt_err_t virtio_console_port_create(struct virtio_console_device *virtio_console_dev)
{
rt_uint32_t port_id;
char dev_name[RT_NAME_MAX];
struct port_device *port_dev, *prev_port_dev = RT_NULL;
struct virtio_device *virtio_dev = &virtio_console_dev->virtio_dev;
if (virtio_console_dev->port_nr > 0 && !virtio_has_feature(virtio_dev, VIRTIO_CONSOLE_F_MULTIPORT))
{
return -RT_ENOSYS;
}
if (virtio_console_dev->port_nr >= virtio_console_dev->max_port_nr)
{
return -RT_EFULL;
}
port_id = 0;
/* The port device list is always kept ordered, so just find the next free number for the id */
rt_list_for_each_entry(port_dev, &virtio_console_dev->port_head, node)
{
if (port_dev->port_id != port_id)
{
break;
}
++port_id;
prev_port_dev = port_dev;
}
port_dev = rt_malloc(sizeof(struct port_device));
if (port_dev == RT_NULL)
{
return -RT_ENOMEM;
}
port_dev->parent.type = RT_Device_Class_Char;
#ifdef RT_USING_DEVICE_OPS
port_dev->parent.ops = &virtio_console_port_ops;
#else
port_dev->parent.init = virtio_console_port_init;
port_dev->parent.open = virtio_console_port_open;
port_dev->parent.close = virtio_console_port_close;
port_dev->parent.read = virtio_console_port_read;
port_dev->parent.write = virtio_console_port_write;
port_dev->parent.control = virtio_console_port_control;
#endif
port_dev->parent.rx_indicate = RT_NULL;
port_dev->parent.tx_complete = RT_NULL;
rt_list_init(&port_dev->node);
port_dev->port_id = port_id;
port_dev->need_destroy = RT_FALSE;
port_dev->rx_notify = RT_TRUE;
port_dev->console = virtio_console_dev;
port_dev->queue_rx_index = VIRTIO_CONSOLE_PORT_QUEUE_INDEX(port_dev->port_id, VIRTIO_CONSOLE_QUEUE_DATA_RX);
port_dev->queue_tx_index = VIRTIO_CONSOLE_PORT_QUEUE_INDEX(port_dev->port_id, VIRTIO_CONSOLE_QUEUE_DATA_TX);
port_dev->queue_rx = &virtio_dev->queues[port_dev->queue_rx_index];
port_dev->queue_tx = &virtio_dev->queues[port_dev->queue_tx_index];
#ifdef RT_USING_SMP
rt_spin_lock_init(&port_dev->spinlock_rx);
rt_spin_lock_init(&port_dev->spinlock_tx);
#endif
rt_snprintf(dev_name, RT_NAME_MAX, "vport%dp%d", virtio_console_dev->console_id, port_id);
if (rt_device_register((rt_device_t)port_dev, dev_name, RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX) != RT_EOK)
{
rt_free(port_dev);
return -RT_ERROR;
}
if (prev_port_dev != RT_NULL)
{
rt_list_insert_after(&prev_port_dev->node, &port_dev->node);
}
else
{
/* Port0 */
rt_list_insert_after(&virtio_console_dev->port_head, &port_dev->node);
}
virtio_console_dev->port_nr++;
return RT_EOK;
}
static void virtio_console_port_destroy(struct virtio_console_device *virtio_console_dev,
struct port_device *port_dev)
{
struct virtio_console_control set_ctrl;
set_ctrl.id = port_dev->port_id;
set_ctrl.event = VIRTIO_CONSOLE_PORT_OPEN;
set_ctrl.value = 0;
virtio_console_send_ctrl(virtio_console_dev, &set_ctrl);
virtio_console_dev->port_nr--;
rt_list_remove(&port_dev->node);
rt_device_unregister((rt_device_t)port_dev);
rt_free(port_dev);
}
static rt_err_t virtio_console_port_init(rt_device_t dev)
{
rt_uint16_t id;
rt_uint16_t idx[VIRTIO_CONSOLE_QUEUE_SIZE];
rt_uint16_t rx_queue_index, tx_queue_index;
struct port_device *port_dev = (struct port_device *)dev;
struct virtio_console_device *virtio_console_dev = port_dev->console;
struct virtio_device *virtio_dev = &virtio_console_dev->virtio_dev;
struct virtq *queue_rx, *queue_tx;
rx_queue_index = VIRTIO_CONSOLE_PORT_QUEUE_INDEX(port_dev->port_id, VIRTIO_CONSOLE_QUEUE_DATA_RX);
tx_queue_index = VIRTIO_CONSOLE_PORT_QUEUE_INDEX(port_dev->port_id, VIRTIO_CONSOLE_QUEUE_DATA_TX);
queue_rx = &virtio_dev->queues[rx_queue_index];
queue_tx = &virtio_dev->queues[tx_queue_index];
virtio_alloc_desc_chain(virtio_dev, rx_queue_index, queue_rx->num, idx);
virtio_alloc_desc_chain(virtio_dev, tx_queue_index, queue_tx->num, idx);
for (id = 0; id < queue_rx->num; ++id)
{
void *addr = &port_dev->info[id].rx_char;
virtio_fill_desc(virtio_dev, rx_queue_index, id,
VIRTIO_VA2PA(addr), sizeof(char), VIRTQ_DESC_F_WRITE, 0);
queue_rx->avail->ring[id] = id;
}
rt_hw_dsb();
queue_rx->avail->flags = 0;
queue_rx->avail->idx = queue_rx->num;
queue_rx->used_idx = queue_rx->used->idx;
queue_tx->avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
queue_tx->avail->idx = 0;
virtio_queue_notify(virtio_dev, rx_queue_index);
if (virtio_has_feature(virtio_dev, VIRTIO_CONSOLE_F_MULTIPORT))
{
struct virtio_console_control set_ctrl;
set_ctrl.id = VIRTIO_CONSOLE_PORT_BAD_ID;
set_ctrl.event = VIRTIO_CONSOLE_DEVICE_READY;
set_ctrl.value = 1;
virtio_console_send_ctrl(virtio_console_dev, &set_ctrl);
}
return RT_EOK;
}
static rt_err_t virtio_console_port_open(rt_device_t dev, rt_uint16_t oflag)
{
struct port_device *port_dev = (struct port_device *)dev;
if (port_dev->port_id == 0 && virtio_has_feature(&port_dev->console->virtio_dev, VIRTIO_CONSOLE_F_MULTIPORT))
{
/* Port0 is reserved in multiport mode */
return -RT_ERROR;
}
port_dev->rx_notify = RT_TRUE;
return RT_EOK;
}
static rt_err_t virtio_console_port_close(rt_device_t dev)
{
struct port_device *port_dev = (struct port_device *)dev;
if (port_dev->need_destroy)
{
virtio_console_port_destroy(port_dev->console, port_dev);
/*
* The device memory was released in virtio_console_port_destroy() while
* rt_device_close() has not finished yet, so return -RT_EEMPTY to keep
* rt_device_close() from accessing the freed device memory.
*/
return -RT_EEMPTY;
}
port_dev->rx_notify = RT_FALSE;
return RT_EOK;
}
static rt_ssize_t virtio_console_port_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
rt_off_t i = 0;
rt_uint16_t id;
rt_uint32_t len;
struct port_device *port_dev = (struct port_device *)dev;
struct virtio_device *virtio_dev = &port_dev->console->virtio_dev;
rt_uint32_t queue_rx_index = port_dev->queue_rx_index;
struct virtq *queue_rx = port_dev->queue_rx;
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&port_dev->spinlock_rx);
#endif
while (i < size)
{
if (queue_rx->used_idx == queue_rx->used->idx)
{
break;
}
rt_hw_dsb();
id = queue_rx->used->ring[queue_rx->used_idx % queue_rx->num].id;
len = queue_rx->used->ring[queue_rx->used_idx % queue_rx->num].len;
if (len > sizeof(char))
{
rt_kprintf("%s: Receive buffer's size = %u is too big!\n", port_dev->parent.parent.name, len);
len = sizeof(char);
}
*((char *)buffer + i) = port_dev->info[id].rx_char;
queue_rx->used_idx++;
virtio_submit_chain(virtio_dev, queue_rx_index, id);
virtio_queue_notify(virtio_dev, queue_rx_index);
i += len;
}
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&port_dev->spinlock_rx, level);
#endif
size = i;
return size;
}
static rt_ssize_t virtio_console_port_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
char ch = 0;
rt_off_t i = 0;
rt_uint16_t id;
struct port_device *port_dev = (struct port_device *)dev;
struct virtio_device *virtio_dev = &port_dev->console->virtio_dev;
rt_uint32_t queue_tx_index = port_dev->queue_tx_index;
struct virtq *queue_tx = port_dev->queue_tx;
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&port_dev->spinlock_tx);
#endif
while (i < size || ch == '\r')
{
id = queue_tx->avail->idx % queue_tx->num;
/* Append '\r' after '\n'; keep this behavior until newline handling is unified */
if (ch != '\r')
{
ch = *((const char *)buffer + i);
}
else
{
i -= sizeof(char);
}
port_dev->info[id].tx_char = ch;
ch = (ch == '\n' ? '\r' : 0);
virtio_free_desc(virtio_dev, queue_tx_index, id);
virtio_fill_desc(virtio_dev, queue_tx_index, id,
VIRTIO_VA2PA(&port_dev->info[id].tx_char), sizeof(char), 0, 0);
virtio_submit_chain(virtio_dev, queue_tx_index, id);
virtio_queue_notify(virtio_dev, queue_tx_index);
virtio_alloc_desc(virtio_dev, queue_tx_index);
i += sizeof(char);
}
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&port_dev->spinlock_tx, level);
#endif
return size;
}
static rt_err_t virtio_console_port_control(rt_device_t dev, int cmd, void *args)
{
rt_err_t status = RT_EOK;
struct port_device *port_dev = (struct port_device *)dev;
switch (cmd)
{
case RT_DEVICE_CTRL_NOTIFY_SET:
if (args == RT_NULL)
{
status = -RT_ERROR;
break;
}
rt_memcpy(&port_dev->rx_notify_helper, args, sizeof(port_dev->rx_notify_helper));
break;
case RT_DEVICE_CTRL_CLR_INT:
/* Disable RX */
port_dev->rx_notify = RT_FALSE;
break;
case RT_DEVICE_CTRL_SET_INT:
/* Enable RX */
port_dev->rx_notify = RT_TRUE;
break;
case VIRTIO_DEVICE_CTRL_CONSOLE_PORT_DESTROY:
{
port_dev->need_destroy = RT_TRUE;
port_dev->rx_notify = RT_FALSE;
}
break;
default:
status = -RT_EINVAL;
break;
}
return status;
}
static rt_err_t virtio_console_init(rt_device_t dev)
{
struct virtio_console_device *virtio_console_dev = (struct virtio_console_device *)dev;
struct virtio_device *virtio_dev = &virtio_console_dev->virtio_dev;
if (virtio_has_feature(virtio_dev, VIRTIO_CONSOLE_F_MULTIPORT))
{
rt_uint16_t id;
rt_uint16_t idx[VIRTIO_CONSOLE_QUEUE_SIZE];
struct virtq *queue_ctrl_rx, *queue_ctrl_tx;
queue_ctrl_rx = &virtio_dev->queues[VIRTIO_CONSOLE_QUEUE_CTRL_RX];
queue_ctrl_tx = &virtio_dev->queues[VIRTIO_CONSOLE_QUEUE_CTRL_TX];
virtio_alloc_desc_chain(virtio_dev, VIRTIO_CONSOLE_QUEUE_CTRL_RX, queue_ctrl_rx->num, idx);
virtio_alloc_desc_chain(virtio_dev, VIRTIO_CONSOLE_QUEUE_CTRL_TX, queue_ctrl_tx->num, idx);
for (id = 0; id < queue_ctrl_rx->num; ++id)
{
void *addr = &virtio_console_dev->info[id].rx_ctrl;
virtio_fill_desc(virtio_dev, VIRTIO_CONSOLE_QUEUE_CTRL_RX, id,
VIRTIO_VA2PA(addr), sizeof(struct virtio_console_control), VIRTQ_DESC_F_WRITE, 0);
queue_ctrl_rx->avail->ring[id] = id;
}
rt_hw_dsb();
for (id = 0; id < queue_ctrl_tx->num; ++id)
{
virtio_console_dev->info[id].tx_ctrl_paddr = VIRTIO_VA2PA(&virtio_console_dev->info[id].tx_ctrl);
}
queue_ctrl_rx->avail->flags = 0;
queue_ctrl_rx->avail->idx = queue_ctrl_rx->num;
queue_ctrl_rx->used_idx = queue_ctrl_rx->used->idx;
queue_ctrl_tx->avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
queue_ctrl_tx->avail->idx = 0;
virtio_queue_notify(virtio_dev, VIRTIO_CONSOLE_QUEUE_CTRL_RX);
}
return virtio_console_port_create(virtio_console_dev);
}
static rt_err_t virtio_console_control(rt_device_t dev, int cmd, void *args)
{
rt_err_t status = RT_EOK;
struct virtio_console_device *virtio_console_dev = (struct virtio_console_device *)dev;
switch (cmd)
{
case VIRTIO_DEVICE_CTRL_CONSOLE_PORT_CREATE:
status = virtio_console_port_create(virtio_console_dev);
break;
default:
status = -RT_EINVAL;
break;
}
return status;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops virtio_console_ops =
{
virtio_console_init,
RT_NULL,
RT_NULL,
RT_NULL,
RT_NULL,
virtio_console_control
};
#endif
static void virtio_console_isr(int irqno, void *param)
{
rt_uint32_t id;
rt_uint32_t len;
struct port_device *port_dev;
struct virtio_console_device *virtio_console_dev = (struct virtio_console_device *)param;
struct virtio_device *virtio_dev = &virtio_console_dev->virtio_dev;
const char *dev_name = virtio_console_dev->parent.parent.name;
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
virtio_interrupt_ack(virtio_dev);
rt_hw_dsb();
do {
struct virtq *queue_rx;
struct virtio_console_control *ctrl, set_ctrl;
if (!virtio_has_feature(virtio_dev, VIRTIO_CONSOLE_F_MULTIPORT))
{
break;
}
queue_rx = &virtio_dev->queues[VIRTIO_CONSOLE_QUEUE_CTRL_RX];
if (queue_rx->used_idx == queue_rx->used->idx)
{
break;
}
rt_hw_dsb();
id = queue_rx->used->ring[queue_rx->used_idx % queue_rx->num].id;
len = queue_rx->used->ring[queue_rx->used_idx % queue_rx->num].len;
queue_rx->used_idx++;
if (len != sizeof(struct virtio_console_control))
{
rt_kprintf("%s: Invalid ctrl!\n", dev_name);
break;
}
ctrl = &virtio_console_dev->info[id].rx_ctrl;
switch (ctrl->event)
{
case VIRTIO_CONSOLE_PORT_ADD:
{
set_ctrl.id = ctrl->id;
set_ctrl.event = VIRTIO_CONSOLE_PORT_READY;
set_ctrl.value = 1;
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
virtio_console_send_ctrl(virtio_console_dev, &set_ctrl);
#ifdef RT_USING_SMP
level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
}
break;
case VIRTIO_CONSOLE_PORT_REMOVE:
break;
case VIRTIO_CONSOLE_RESIZE:
break;
case VIRTIO_CONSOLE_PORT_OPEN:
{
set_ctrl.id = ctrl->id;
set_ctrl.event = VIRTIO_CONSOLE_PORT_OPEN;
set_ctrl.value = 1;
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
virtio_console_send_ctrl(virtio_console_dev, &set_ctrl);
#ifdef RT_USING_SMP
level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
}
break;
case VIRTIO_CONSOLE_PORT_NAME:
break;
default:
rt_kprintf("%s: Unsupport ctrl[id: %d, event: %d, value: %d]!\n",
dev_name, ctrl->id, ctrl->event, ctrl->value);
break;
}
} while (0);
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
rt_list_for_each_entry(port_dev, &virtio_console_dev->port_head, node)
{
rt_uint32_t queue_rx_index = port_dev->queue_rx_index;
struct virtq *queue_rx = port_dev->queue_rx;
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&port_dev->spinlock_rx);
#endif
if (queue_rx->used_idx != queue_rx->used->idx)
{
rt_hw_dsb();
id = queue_rx->used->ring[queue_rx->used_idx % queue_rx->num].id;
len = queue_rx->used->ring[queue_rx->used_idx % queue_rx->num].len;
if (port_dev->rx_notify)
{
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&port_dev->spinlock_rx, level);
#endif
/* virtio_console_port_read() will be called later to increment used_idx */
if (port_dev->parent.rx_indicate != RT_NULL)
{
port_dev->parent.rx_indicate(&port_dev->parent, len);
}
if (port_dev->rx_notify_helper.notify != RT_NULL)
{
port_dev->rx_notify_helper.notify(port_dev->rx_notify_helper.dev);
}
#ifdef RT_USING_SMP
level = rt_spin_lock_irqsave(&port_dev->spinlock_rx);
#endif
}
else
{
queue_rx->used_idx++;
virtio_submit_chain(virtio_dev, queue_rx_index, id);
virtio_queue_notify(virtio_dev, queue_rx_index);
}
}
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&port_dev->spinlock_rx, level);
#endif
}
}
rt_err_t rt_virtio_console_init(rt_ubase_t *mmio_base, rt_uint32_t irq)
{
int i;
rt_size_t queues_num;
static int dev_no = 0;
char dev_name[RT_NAME_MAX];
struct virtio_device *virtio_dev;
struct virtio_console_device *virtio_console_dev;
RT_ASSERT(RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR > 0);
virtio_console_dev = rt_malloc(sizeof(struct virtio_console_device));
if (virtio_console_dev == RT_NULL)
{
goto _alloc_fail;
}
virtio_dev = &virtio_console_dev->virtio_dev;
virtio_dev->irq = irq;
virtio_dev->mmio_base = mmio_base;
virtio_console_dev->config = (struct virtio_console_config *)virtio_dev->mmio_config->config;
#ifdef RT_USING_SMP
rt_spin_lock_init(&virtio_dev->spinlock);
#endif
virtio_reset_device(virtio_dev);
virtio_status_acknowledge_driver(virtio_dev);
virtio_dev->mmio_config->driver_features = virtio_dev->mmio_config->device_features & ~(
(1 << VIRTIO_F_RING_EVENT_IDX) |
(1 << VIRTIO_F_RING_INDIRECT_DESC));
virtio_status_driver_ok(virtio_dev);
if (!virtio_has_feature(virtio_dev, VIRTIO_CONSOLE_F_MULTIPORT))
{
virtio_console_dev->max_port_nr = 1;
queues_num = 2;
}
else
{
if (virtio_console_dev->config->max_nr_ports > RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR)
{
virtio_console_dev->max_port_nr = RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR;
virtio_console_dev->config->max_nr_ports = virtio_console_dev->max_port_nr;
}
else
{
virtio_console_dev->max_port_nr = virtio_console_dev->config->max_nr_ports;
}
queues_num = VIRTIO_CONSOLE_PORT_QUEUE_INDEX(virtio_console_dev->max_port_nr, VIRTIO_CONSOLE_QUEUE_DATA_RX);
}
if (virtio_queues_alloc(virtio_dev, queues_num) != RT_EOK)
{
goto _alloc_fail;
}
for (i = 0; i < virtio_dev->queues_num; ++i)
{
if (virtio_queue_init(virtio_dev, i, VIRTIO_CONSOLE_QUEUE_SIZE) != RT_EOK)
{
for (; i >= 0; --i)
{
virtio_queue_destroy(virtio_dev, i);
}
goto _alloc_fail;
}
}
virtio_console_dev->parent.type = RT_Device_Class_Char;
#ifdef RT_USING_DEVICE_OPS
virtio_console_dev->parent.ops = &virtio_console_ops;
#else
virtio_console_dev->parent.init = virtio_console_init;
virtio_console_dev->parent.open = RT_NULL;
virtio_console_dev->parent.close = RT_NULL;
virtio_console_dev->parent.read = RT_NULL;
virtio_console_dev->parent.write = RT_NULL;
virtio_console_dev->parent.control = virtio_console_control;
#endif
virtio_console_dev->parent.rx_indicate = RT_NULL;
virtio_console_dev->parent.tx_complete = RT_NULL;
virtio_console_dev->console_id = dev_no;
virtio_console_dev->port_nr = 0;
rt_list_init(&virtio_console_dev->port_head);
rt_snprintf(dev_name, RT_NAME_MAX, "virtio-console%d", dev_no++);
rt_hw_interrupt_install(irq, virtio_console_isr, virtio_console_dev, dev_name);
rt_hw_interrupt_umask(irq);
return rt_device_register((rt_device_t)virtio_console_dev, dev_name, RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX);
_alloc_fail:
if (virtio_console_dev != RT_NULL)
{
virtio_queues_free(virtio_dev);
rt_free(virtio_console_dev);
}
return -RT_ENOMEM;
}
#endif /* RT_USING_VIRTIO_CONSOLE */


@@ -0,0 +1,97 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-11 GuEe-GUI the first version
*/
#ifndef __VIRTIO_CONSOLE_H__
#define __VIRTIO_CONSOLE_H__
#include <rtdef.h>
#include <virtio.h>
#ifndef RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR
#define RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR 4
#endif
#define VIRTIO_CONSOLE_QUEUE_DATA_RX 0
#define VIRTIO_CONSOLE_QUEUE_DATA_TX 1
#define VIRTIO_CONSOLE_QUEUE_CTRL_RX 2
#define VIRTIO_CONSOLE_QUEUE_CTRL_TX 3
#define VIRTIO_CONSOLE_QUEUE_SIZE 64
/* Every port has data rx & tx, and port0 has ctrl rx & tx in multiport */
#define VIRTIO_CONSOLE_PORT_QUEUE_INDEX(id, queue) ((id) * 2 + (!!(id)) * 2 + (queue))
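/*
 * e.g. port0 uses queues 0/1 for data and the fixed queues 2/3 for ctrl,
 * port1 uses queues 4/5, port2 uses queues 6/7, and so on
 */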
#define VIRTIO_CONSOLE_PORT_BAD_ID (~(rt_uint32_t)0)
#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
#define VIRTIO_CONSOLE_F_EMERG_WRITE 2 /* Does host support emergency write? */
struct virtio_console_config
{
rt_uint16_t cols;
rt_uint16_t rows;
rt_uint32_t max_nr_ports;
rt_uint32_t emerg_wr;
} __attribute__((packed));
struct virtio_console_control
{
rt_uint32_t id; /* Port number */
rt_uint16_t event; /* The kind of control event */
rt_uint16_t value; /* Extra information for the event */
};
enum virtio_console_control_event
{
VIRTIO_CONSOLE_DEVICE_READY = 0,
VIRTIO_CONSOLE_PORT_ADD,
VIRTIO_CONSOLE_PORT_REMOVE,
VIRTIO_CONSOLE_PORT_READY,
VIRTIO_CONSOLE_CONSOLE_PORT,
VIRTIO_CONSOLE_RESIZE,
VIRTIO_CONSOLE_PORT_OPEN,
VIRTIO_CONSOLE_PORT_NAME,
};
struct virtio_console_resize
{
rt_uint16_t cols;
rt_uint16_t rows;
};
struct virtio_console_device
{
struct rt_device parent;
struct virtio_device virtio_dev;
rt_uint32_t console_id;
rt_size_t port_nr;
rt_size_t max_port_nr;
rt_list_t port_head;
struct virtio_console_config *config;
struct
{
rt_ubase_t tx_ctrl_paddr;
struct virtio_console_control rx_ctrl, tx_ctrl;
} info[VIRTIO_CONSOLE_QUEUE_SIZE];
};
rt_err_t rt_virtio_console_init(rt_ubase_t *mmio_base, rt_uint32_t irq);
enum
{
VIRTIO_DEVICE_CTRL_CONSOLE_PORT_CREATE = 0x20,
VIRTIO_DEVICE_CTRL_CONSOLE_PORT_DESTROY,
};
#endif /* __VIRTIO_CONSOLE_H__ */
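A rough usage sketch for the multiport case, assuming the first console registered as "virtio-console0" so the newly created port appears as "vport0p1":

#include <rtthread.h>
#include <virtio_console.h>

static void example_console_extra_port(void)
{
    rt_device_t console = rt_device_find("virtio-console0");
    rt_device_t port;

    if (console == RT_NULL)
    {
        return;
    }

    /* Ask the console device for another port, then use it as a plain char device */
    rt_device_control(console, VIRTIO_DEVICE_CTRL_CONSOLE_PORT_CREATE, RT_NULL);

    port = rt_device_find("vport0p1");

    if (port != RT_NULL && rt_device_open(port, RT_DEVICE_OFLAG_RDWR | RT_DEVICE_FLAG_INT_RX) == RT_EOK)
    {
        rt_device_write(port, 0, "hello\n", 6);
        rt_device_close(port);
    }
}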


@@ -0,0 +1,934 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-11 GuEe-GUI the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <cpuport.h>
#ifdef RT_USING_VIRTIO_GPU
#include <virtio_gpu.h>
static struct virtio_gpu_device *_primary_virtio_gpu_dev = RT_NULL;
static rt_ubase_t _pixel_format_convert(rt_ubase_t format, rt_bool_t to_virtio_gpu_format)
{
rt_ubase_t ret = 0;
if (to_virtio_gpu_format)
{
switch (format)
{
case RTGRAPHIC_PIXEL_FORMAT_RGB888:
ret = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
break;
case RTGRAPHIC_PIXEL_FORMAT_ARGB888:
ret = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
break;
case RTGRAPHIC_PIXEL_FORMAT_ABGR888:
ret = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
break;
default:
break;
}
}
else
{
switch (format)
{
case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
ret = RTGRAPHIC_PIXEL_FORMAT_RGB888;
break;
case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
ret = RTGRAPHIC_PIXEL_FORMAT_ARGB888;
break;
case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
ret = RTGRAPHIC_PIXEL_FORMAT_ABGR888;
break;
default:
break;
}
}
return ret;
}
static void virtio_gpu_ctrl_send_command(struct virtio_gpu_device *virtio_gpu_dev,
const void *cmd, rt_size_t cmd_len, void *res, rt_size_t res_len)
{
rt_uint16_t idx[2];
void *addr = &virtio_gpu_dev->gpu_request;
void *ret_res = ((rt_uint8_t *)addr + cmd_len);
struct virtio_device *virtio_dev = &virtio_gpu_dev->virtio_dev;
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
while (virtio_alloc_desc_chain(virtio_dev, VIRTIO_GPU_QUEUE_CTRL, 2, idx))
{
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
rt_thread_yield();
#ifdef RT_USING_SMP
level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
}
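/*
 * Two-descriptor exchange: idx[0] carries the command copied into gpu_request,
 * idx[1] is the device-writable response placed right behind it; the response
 * is copied out once ctrl_valid is cleared on completion
 */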
rt_memcpy(&virtio_gpu_dev->gpu_request, cmd, cmd_len);
virtio_fill_desc(virtio_dev, VIRTIO_GPU_QUEUE_CTRL, idx[0],
VIRTIO_VA2PA(addr), cmd_len, VIRTQ_DESC_F_NEXT, idx[1]);
virtio_fill_desc(virtio_dev, VIRTIO_GPU_QUEUE_CTRL, idx[1],
VIRTIO_VA2PA(addr) + cmd_len, res_len, VIRTQ_DESC_F_WRITE, 0);
rt_memset(ret_res, 0, res_len);
virtio_gpu_dev->info[idx[0]].ctrl_valid = RT_TRUE;
virtio_submit_chain(virtio_dev, VIRTIO_GPU_QUEUE_CTRL, idx[0]);
virtio_queue_notify(virtio_dev, VIRTIO_GPU_QUEUE_CTRL);
while (virtio_gpu_dev->info[idx[0]].ctrl_valid)
{
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
rt_thread_yield();
#ifdef RT_USING_SMP
level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
}
virtio_free_desc_chain(virtio_dev, VIRTIO_GPU_QUEUE_CTRL, idx[0]);
rt_memcpy(res, ret_res, res_len);
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}
static void virtio_gpu_cursor_send_command(struct virtio_gpu_device *virtio_gpu_dev,
const void *cmd, rt_size_t cmd_len)
{
rt_uint16_t id;
void *addr;
struct virtio_device *virtio_dev = &virtio_gpu_dev->virtio_dev;
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
while ((id = virtio_alloc_desc(virtio_dev, VIRTIO_GPU_QUEUE_CURSOR)) == VIRTQ_INVALID_DESC_ID)
{
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
rt_thread_yield();
#ifdef RT_USING_SMP
level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
}
addr = &virtio_gpu_dev->info[id].cursor_cmd;
virtio_gpu_dev->info[id].cursor_valid = RT_TRUE;
rt_memcpy(addr, cmd, cmd_len);
virtio_fill_desc(virtio_dev, VIRTIO_GPU_QUEUE_CURSOR, id, VIRTIO_VA2PA(addr), cmd_len, 0, 0);
virtio_submit_chain(virtio_dev, VIRTIO_GPU_QUEUE_CURSOR, id);
virtio_queue_notify(virtio_dev, VIRTIO_GPU_QUEUE_CURSOR);
while (virtio_gpu_dev->info[id].cursor_valid)
{
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
rt_thread_yield();
#ifdef RT_USING_SMP
level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
}
virtio_free_desc(virtio_dev, VIRTIO_GPU_QUEUE_CURSOR, id);
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}
static rt_err_t virtio_gpu_create_2d_resource(struct virtio_gpu_device *virtio_gpu_dev, enum virtio_gpu_formats format,
rt_uint32_t *resource_id, rt_uint32_t width, rt_uint32_t height)
{
struct virtio_gpu_ctrl_hdr res;
struct virtio_gpu_resource_create_2d req;
*resource_id = ++virtio_gpu_dev->next_resource_id;
req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D;
req.resource_id = *resource_id;
req.format = format;
req.width = width;
req.height = height;
virtio_gpu_ctrl_send_command(virtio_gpu_dev, &req, sizeof(req), &res, sizeof(res));
if (res.type == VIRTIO_GPU_RESP_OK_NODATA)
{
return RT_EOK;
}
return -RT_ERROR;
}
static rt_err_t virtio_gpu_unref_resource(struct virtio_gpu_device *virtio_gpu_dev, rt_uint32_t resource_id)
{
struct virtio_gpu_ctrl_hdr res;
struct virtio_gpu_resource_unref req;
rt_memset(&req, 0, sizeof(req));
req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_UNREF;
req.resource_id = resource_id;
virtio_gpu_ctrl_send_command(virtio_gpu_dev, &req, sizeof(req), &res, sizeof(res));
if (res.type == VIRTIO_GPU_RESP_OK_NODATA)
{
return RT_EOK;
}
return -RT_ERROR;
}
static rt_err_t virtio_gpu_attach_backing_resource(struct virtio_gpu_device *virtio_gpu_dev, rt_uint32_t resource_id,
void *buffer, rt_size_t size)
{
struct virtio_gpu_ctrl_hdr res;
struct
{
struct virtio_gpu_resource_attach_backing req;
struct virtio_gpu_mem_entry mem;
} req;
rt_memset(&req, 0, sizeof(req));
req.req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING;
req.req.resource_id = resource_id;
req.req.nr_entries = 1;
req.mem.addr = VIRTIO_VA2PA(buffer);
req.mem.length = size;
virtio_gpu_ctrl_send_command(virtio_gpu_dev, &req, sizeof(req), &res, sizeof(res));
if (res.type == VIRTIO_GPU_RESP_OK_NODATA)
{
return RT_EOK;
}
return -RT_ERROR;
}
static rt_err_t virtio_gpu_set_scanout(struct virtio_gpu_device *virtio_gpu_dev, rt_uint32_t scanout_id,
rt_uint32_t resource_id, rt_uint32_t width, rt_uint32_t height)
{
struct virtio_gpu_ctrl_hdr res;
struct virtio_gpu_set_scanout req;
rt_memset(&req, 0, sizeof(req));
req.hdr.type = VIRTIO_GPU_CMD_SET_SCANOUT;
req.r.x = 0;
req.r.y = 0;
req.r.width = width;
req.r.height = height;
req.scanout_id = scanout_id;
req.resource_id = resource_id;
virtio_gpu_ctrl_send_command(virtio_gpu_dev, &req, sizeof(req), &res, sizeof(res));
if (res.type == VIRTIO_GPU_RESP_OK_NODATA)
{
return RT_EOK;
}
return -RT_ERROR;
}
static rt_err_t virtio_gpu_flush_resource(struct virtio_gpu_device *virtio_gpu_dev, rt_uint32_t resource_id,
rt_uint32_t x, rt_uint32_t y, rt_uint32_t width, rt_uint32_t height)
{
struct virtio_gpu_ctrl_hdr res;
struct virtio_gpu_resource_flush req;
rt_memset(&req, 0, sizeof(req));
req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
req.r.x = x;
req.r.y = y;
req.r.width = width;
req.r.height = height;
req.resource_id = resource_id;
virtio_gpu_ctrl_send_command(virtio_gpu_dev, &req, sizeof(req), &res, sizeof(res));
if (res.type == VIRTIO_GPU_RESP_OK_NODATA)
{
return RT_EOK;
}
return -RT_ERROR;
}
static rt_err_t virtio_gpu_transfer_to_host_2d(struct virtio_gpu_device *virtio_gpu_dev, rt_uint32_t resource_id,
rt_uint32_t x, rt_uint32_t y, rt_uint32_t width, rt_uint32_t height, rt_uint32_t offset)
{
struct virtio_gpu_ctrl_hdr res;
struct virtio_gpu_transfer_to_host_2d req;
rt_memset(&req, 0, sizeof(req));
req.hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
req.r.x = x;
req.r.y = y;
req.r.width = width;
req.r.height = height;
req.offset = offset;
req.resource_id = resource_id;
virtio_gpu_ctrl_send_command(virtio_gpu_dev, &req, sizeof(req), &res, sizeof(res));
if (res.type == VIRTIO_GPU_RESP_OK_NODATA)
{
return RT_EOK;
}
return -RT_ERROR;
}
static rt_err_t virtio_gpu_gfx_flush_2d(struct virtio_gpu_device *virtio_gpu_dev, rt_uint32_t resource_id,
rt_uint32_t x, rt_uint32_t y, rt_uint32_t width, rt_uint32_t height)
{
rt_err_t status = virtio_gpu_transfer_to_host_2d(virtio_gpu_dev, resource_id, x, y, width, height, 0);
if (status == RT_EOK)
{
status = virtio_gpu_flush_resource(virtio_gpu_dev, resource_id, x, y, width, height);
}
return status;
}
static rt_err_t virtio_gpu_update_cursor(struct virtio_gpu_device *virtio_gpu_dev, rt_uint32_t scanout_id,
rt_uint32_t resource_id, rt_uint32_t hot_x, rt_uint32_t hot_y)
{
struct virtio_gpu_update_cursor req;
rt_memset(&req, 0, sizeof(req));
req.hdr.type = VIRTIO_GPU_CMD_UPDATE_CURSOR;
req.pos.scanout_id = scanout_id;
req.resource_id = resource_id;
req.hot_x = hot_x;
req.hot_y = hot_y;
virtio_gpu_cursor_send_command(virtio_gpu_dev, &req, sizeof(req));
return RT_EOK;
}
static rt_err_t virtio_gpu_cursor_move(struct virtio_gpu_device *virtio_gpu_dev, rt_uint32_t scanout_id,
rt_uint32_t resource_id, rt_uint32_t x, rt_uint32_t y)
{
struct virtio_gpu_update_cursor req;
rt_memset(&req, 0, sizeof(req));
req.hdr.type = VIRTIO_GPU_CMD_MOVE_CURSOR;
req.pos.scanout_id = scanout_id;
req.pos.x = x;
req.pos.y = y;
req.resource_id = resource_id;
virtio_gpu_cursor_send_command(virtio_gpu_dev, &req, sizeof(req));
return RT_EOK;
}
static rt_err_t virtio_gpu_cursor_set_img(struct virtio_gpu_device *virtio_gpu_dev, void *img)
{
rt_err_t status;
rt_memcpy(virtio_gpu_dev->cursor_img, img, VIRTIO_GPU_CURSOR_IMG_SIZE);
status = virtio_gpu_attach_backing_resource(virtio_gpu_dev,
virtio_gpu_dev->cursor_resource_id, virtio_gpu_dev->cursor_img, VIRTIO_GPU_CURSOR_IMG_SIZE);
if (status != RT_EOK)
{
return status;
}
status = virtio_gpu_transfer_to_host_2d(virtio_gpu_dev, virtio_gpu_dev->cursor_resource_id,
0, 0, VIRTIO_GPU_CURSOR_WIDTH, VIRTIO_GPU_CURSOR_HEIGHT, 0);
return status;
}
static rt_err_t virtio_gpu_get_display_info(struct virtio_gpu_device *virtio_gpu_dev)
{
int i;
struct virtio_gpu_ctrl_hdr req;
struct virtio_gpu_resp_display_info info;
rt_memset(&req, 0, sizeof(req));
req.type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO;
virtio_gpu_ctrl_send_command(virtio_gpu_dev, &req, sizeof(req), &info, sizeof(info));
if (info.hdr.type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO)
{
return -RT_ERROR;
}
for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; ++i)
{
if (info.pmodes[i].enabled)
{
if (virtio_gpu_dev->pmode_id == VIRTIO_GPU_INVALID_PMODE_ID)
{
rt_memcpy(&virtio_gpu_dev->pmode, &info.pmodes[i], sizeof(virtio_gpu_dev->pmode));
virtio_gpu_dev->pmode_id = i;
}
}
}
return RT_EOK;
}
static rt_err_t virtio_gpu_init(rt_device_t dev)
{
rt_err_t status;
struct virtio_gpu_device *virtio_gpu_dev = (struct virtio_gpu_device *)dev;
struct virtio_device *virtio_dev = &virtio_gpu_dev->virtio_dev;
struct virtq *queue_ctrl, *queue_cursor;
queue_ctrl = &virtio_dev->queues[VIRTIO_GPU_QUEUE_CTRL];
queue_cursor = &virtio_dev->queues[VIRTIO_GPU_QUEUE_CURSOR];
queue_ctrl->avail->flags = 0;
queue_cursor->avail->flags = 0;
status = virtio_gpu_get_display_info(virtio_gpu_dev);
if (virtio_gpu_dev->pmode_id != VIRTIO_GPU_INVALID_PMODE_ID && _primary_virtio_gpu_dev == RT_NULL)
{
/* This device is ready */
_primary_virtio_gpu_dev = virtio_gpu_dev;
}
return status;
}
static rt_ssize_t virtio_gpu_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
struct virtio_gpu_device *virtio_gpu_dev = (struct virtio_gpu_device *)dev;
if (virtio_gpu_dev->framebuffer == RT_NULL || pos + size >= virtio_gpu_dev->smem_len)
{
return 0;
}
rt_mutex_take(&virtio_gpu_dev->rw_mutex, RT_WAITING_FOREVER);
rt_memcpy(buffer, (rt_uint8_t *)virtio_gpu_dev->framebuffer + pos, size);
rt_mutex_release(&virtio_gpu_dev->rw_mutex);
return size;
}
static rt_ssize_t virtio_gpu_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
struct virtio_gpu_device *virtio_gpu_dev = (struct virtio_gpu_device *)dev;
if (virtio_gpu_dev->framebuffer == RT_NULL || pos + size >= virtio_gpu_dev->smem_len)
{
return 0;
}
rt_mutex_take(&virtio_gpu_dev->rw_mutex, RT_WAITING_FOREVER);
rt_memcpy((rt_uint8_t *)virtio_gpu_dev->framebuffer + pos, buffer, size);
rt_mutex_release(&virtio_gpu_dev->rw_mutex);
return size;
}
static rt_err_t virtio_gpu_control(rt_device_t dev, int cmd, void *args)
{
rt_err_t status = RT_EOK;
struct virtio_gpu_device *virtio_gpu_dev = (struct virtio_gpu_device *)dev;
switch (cmd)
{
case VIRTIO_DEVICE_CTRL_GPU_SET_PRIMARY:
_primary_virtio_gpu_dev = virtio_gpu_dev;
return status;
}
if (args == RT_NULL)
{
return -RT_ERROR;
}
switch (cmd)
{
case RTGRAPHIC_CTRL_RECT_UPDATE:
{
struct rt_device_rect_info *info = (struct rt_device_rect_info *)args;
if (virtio_gpu_dev->pmode_id == VIRTIO_GPU_INVALID_PMODE_ID)
{
status = -RT_ERROR;
break;
}
status = virtio_gpu_gfx_flush_2d(virtio_gpu_dev, virtio_gpu_dev->display_resource_id,
info->x, info->y, info->width, info->height);
}
break;
case RTGRAPHIC_CTRL_GET_INFO:
{
struct rt_device_graphic_info *info = (struct rt_device_graphic_info *)args;
info->pixel_format = _pixel_format_convert(virtio_gpu_dev->format, RT_FALSE);
info->bits_per_pixel = VIRTIO_GPU_FORMAT_BPP;
info->pitch = virtio_gpu_dev->pmode.r.width * VIRTIO_GPU_FORMAT_PIXEL;
info->width = virtio_gpu_dev->pmode.r.width;
info->height = virtio_gpu_dev->pmode.r.height;
info->framebuffer = virtio_gpu_dev->framebuffer;
info->smem_len = virtio_gpu_dev->smem_len;
}
break;
case VIRTIO_DEVICE_CTRL_GPU_CREATE_2D:
virtio_gpu_dev->format = _pixel_format_convert((rt_ubase_t)args, RT_TRUE);
if (virtio_gpu_dev->format == 0 || virtio_gpu_dev->pmode_id == VIRTIO_GPU_INVALID_PMODE_ID)
{
status = -RT_ERROR;
break;
}
status = virtio_gpu_create_2d_resource(virtio_gpu_dev, virtio_gpu_dev->format,
&virtio_gpu_dev->display_resource_id, virtio_gpu_dev->pmode.r.width, virtio_gpu_dev->pmode.r.height);
if (status != RT_EOK)
{
break;
}
virtio_gpu_dev->smem_len =
virtio_gpu_dev->pmode.r.width * virtio_gpu_dev->pmode.r.height * VIRTIO_GPU_FORMAT_PIXEL;
virtio_gpu_dev->smem_len = RT_ALIGN(virtio_gpu_dev->smem_len, VIRTIO_PAGE_SIZE);
virtio_gpu_dev->framebuffer = rt_malloc_align(virtio_gpu_dev->smem_len, VIRTIO_PAGE_SIZE);
if (virtio_gpu_dev->framebuffer == RT_NULL)
{
virtio_gpu_unref_resource(virtio_gpu_dev, virtio_gpu_dev->display_resource_id);
status = -RT_ENOMEM;
break;
}
status = virtio_gpu_attach_backing_resource(virtio_gpu_dev,
virtio_gpu_dev->display_resource_id, virtio_gpu_dev->framebuffer, virtio_gpu_dev->smem_len);
if (status != RT_EOK)
{
break;
}
status = virtio_gpu_set_scanout(virtio_gpu_dev, virtio_gpu_dev->pmode_id, virtio_gpu_dev->display_resource_id,
virtio_gpu_dev->pmode.r.width, virtio_gpu_dev->pmode.r.height);
break;
case VIRTIO_DEVICE_CTRL_CURSOR_SETUP:
if (virtio_gpu_dev->pmode_id == VIRTIO_GPU_INVALID_PMODE_ID)
{
status = -RT_ERROR;
break;
}
rt_mutex_take(&virtio_gpu_dev->ops_mutex, RT_WAITING_FOREVER);
status = virtio_gpu_create_2d_resource(virtio_gpu_dev, virtio_gpu_dev->format,
&virtio_gpu_dev->cursor_resource_id, VIRTIO_GPU_CURSOR_WIDTH, VIRTIO_GPU_CURSOR_HEIGHT);
if (status != RT_EOK)
{
goto _cursor_setup_end;
}
status = virtio_gpu_cursor_set_img(virtio_gpu_dev, args);
if (status != RT_EOK)
{
goto _cursor_setup_end;
}
virtio_gpu_dev->cursor_x = 0;
virtio_gpu_dev->cursor_y = 0;
status = virtio_gpu_update_cursor(virtio_gpu_dev, virtio_gpu_dev->pmode_id, virtio_gpu_dev->cursor_resource_id,
virtio_gpu_dev->cursor_x, virtio_gpu_dev->cursor_y);
if (status == RT_EOK)
{
virtio_gpu_dev->cursor_enable = RT_TRUE;
}
_cursor_setup_end:
rt_mutex_release(&virtio_gpu_dev->ops_mutex);
break;
case VIRTIO_DEVICE_CTRL_CURSOR_SET_IMG:
if (virtio_gpu_dev->pmode_id == VIRTIO_GPU_INVALID_PMODE_ID || !virtio_gpu_dev->cursor_enable)
{
status = -RT_ERROR;
break;
}
rt_mutex_take(&virtio_gpu_dev->ops_mutex, RT_WAITING_FOREVER);
status = virtio_gpu_cursor_set_img(virtio_gpu_dev, args);
if (status != RT_EOK)
{
goto _cursor_set_img_end;
}
status = virtio_gpu_update_cursor(virtio_gpu_dev, virtio_gpu_dev->pmode_id, virtio_gpu_dev->cursor_resource_id,
virtio_gpu_dev->cursor_x, virtio_gpu_dev->cursor_y);
_cursor_set_img_end:
rt_mutex_release(&virtio_gpu_dev->ops_mutex);
break;
case VIRTIO_DEVICE_CTRL_CURSOR_MOVE:
if (virtio_gpu_dev->pmode_id == VIRTIO_GPU_INVALID_PMODE_ID || !virtio_gpu_dev->cursor_enable)
{
status = -RT_ERROR;
break;
}
rt_mutex_take(&virtio_gpu_dev->ops_mutex, RT_WAITING_FOREVER);
virtio_gpu_dev->cursor_x = ((rt_uint32_t *)args)[0];
virtio_gpu_dev->cursor_y = ((rt_uint32_t *)args)[1];
status = virtio_gpu_cursor_move(virtio_gpu_dev, virtio_gpu_dev->pmode_id, virtio_gpu_dev->cursor_resource_id,
virtio_gpu_dev->cursor_x, virtio_gpu_dev->cursor_y);
rt_mutex_release(&virtio_gpu_dev->ops_mutex);
break;
default:
status = -RT_EINVAL;
break;
}
return status;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops virtio_gpu_ops =
{
virtio_gpu_init,
RT_NULL,
RT_NULL,
virtio_gpu_read,
virtio_gpu_write,
virtio_gpu_control
};
#endif
static void virtio_gpu_set_pixel(const char *pixel, int x, int y)
{
rt_uint8_t *fb;
struct virtio_gpu_device *virtio_gpu_dev = _primary_virtio_gpu_dev;
if (virtio_gpu_dev == RT_NULL || virtio_gpu_dev->pmode_id == VIRTIO_GPU_INVALID_PMODE_ID)
{
return;
}
fb = (rt_uint8_t *)virtio_gpu_dev->framebuffer;
fb += (y * virtio_gpu_dev->pmode.r.width + x) * VIRTIO_GPU_FORMAT_PIXEL;
*((rt_uint32_t *)fb) = *((rt_uint32_t *)pixel);
}
static void virtio_gpu_get_pixel(char *pixel, int x, int y)
{
rt_uint8_t *fb;
struct virtio_gpu_device *virtio_gpu_dev = _primary_virtio_gpu_dev;
if (virtio_gpu_dev == RT_NULL || virtio_gpu_dev->pmode_id == VIRTIO_GPU_INVALID_PMODE_ID)
{
return;
}
fb = (rt_uint8_t *)virtio_gpu_dev->framebuffer;
    *((rt_uint32_t *)pixel) = *(rt_uint32_t *)(fb + (y * virtio_gpu_dev->pmode.r.width + x) * VIRTIO_GPU_FORMAT_PIXEL);
}
static void virtio_gpu_draw_hline(const char *pixel, int x1, int x2, int y)
{
int i;
rt_uint8_t *fb;
rt_uint32_t color = *((rt_uint32_t *)pixel);
struct virtio_gpu_device *virtio_gpu_dev = _primary_virtio_gpu_dev;
if (virtio_gpu_dev == RT_NULL || virtio_gpu_dev->pmode_id == VIRTIO_GPU_INVALID_PMODE_ID ||
x1 < 0 || x2 < 0 || y < 0)
{
return;
}
if (x1 > x2)
{
x1 ^= x2;
x2 ^= x1;
x1 ^= x2;
}
fb = (rt_uint8_t *)virtio_gpu_dev->framebuffer;
fb += (y * virtio_gpu_dev->pmode.r.width + x1) * VIRTIO_GPU_FORMAT_PIXEL;
for (i = x1; i < x2; ++i)
{
*((rt_uint32_t *)fb) = color;
fb += VIRTIO_GPU_FORMAT_PIXEL;
}
}
static void virtio_gpu_draw_vline(const char *pixel, int x, int y1, int y2)
{
int i;
rt_uint8_t *fb;
rt_uint16_t pitch;
rt_uint32_t color = *((rt_uint32_t *)pixel);
struct virtio_gpu_device *virtio_gpu_dev = _primary_virtio_gpu_dev;
if (virtio_gpu_dev == RT_NULL || virtio_gpu_dev->pmode_id == VIRTIO_GPU_INVALID_PMODE_ID ||
x < 0 || y1 < 0 || y2 < 0)
{
return;
}
if (y1 > y2)
{
y1 ^= y2;
y2 ^= y1;
y1 ^= y2;
}
fb = (rt_uint8_t *)virtio_gpu_dev->framebuffer;
fb += (y1 * virtio_gpu_dev->pmode.r.width + x) * VIRTIO_GPU_FORMAT_PIXEL;
pitch = virtio_gpu_dev->pmode.r.width * VIRTIO_GPU_FORMAT_PIXEL;
for (i = y1; i < y2; ++i)
{
*((rt_uint32_t *)fb) = color;
fb += pitch;
}
}
static void virtio_gpu_blit_line(const char *pixel, int x, int y, rt_size_t size)
{
int i;
rt_uint8_t *fb;
rt_uint32_t *colors = (rt_uint32_t *)pixel;
struct virtio_gpu_device *virtio_gpu_dev = _primary_virtio_gpu_dev;
if (virtio_gpu_dev == RT_NULL || virtio_gpu_dev->pmode_id == VIRTIO_GPU_INVALID_PMODE_ID || x < 0 || y < 0)
{
return;
}
fb = (rt_uint8_t *)virtio_gpu_dev->framebuffer;
fb += (y * virtio_gpu_dev->pmode.r.width + x) * VIRTIO_GPU_FORMAT_PIXEL;
for (i = 0; i < size; ++i)
{
*((rt_uint32_t *)fb) = *colors++;
fb += VIRTIO_GPU_FORMAT_PIXEL;
}
}
static struct rt_device_graphic_ops virtio_gpu_graphic_ops =
{
virtio_gpu_set_pixel,
virtio_gpu_get_pixel,
virtio_gpu_draw_hline,
virtio_gpu_draw_vline,
virtio_gpu_blit_line
};
static void virtio_gpu_isr(int irqno, void *param)
{
rt_uint16_t id;
struct virtio_gpu_device *virtio_gpu_dev = (struct virtio_gpu_device *)param;
struct virtio_device *virtio_dev = &virtio_gpu_dev->virtio_dev;
struct virtq *queue_ctrl = &virtio_dev->queues[VIRTIO_GPU_QUEUE_CTRL];
struct virtq *queue_cursor = &virtio_dev->queues[VIRTIO_GPU_QUEUE_CURSOR];
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
virtio_interrupt_ack(virtio_dev);
rt_hw_dsb();
while (queue_ctrl->used_idx != queue_ctrl->used->idx)
{
rt_hw_dsb();
id = queue_ctrl->used->ring[queue_ctrl->used_idx % queue_ctrl->num].id;
virtio_gpu_dev->info[id].ctrl_valid = RT_FALSE;
queue_ctrl->used_idx++;
}
while (queue_cursor->used_idx != queue_cursor->used->idx)
{
rt_hw_dsb();
id = queue_cursor->used->ring[queue_cursor->used_idx % queue_cursor->num].id;
virtio_gpu_dev->info[id].cursor_valid = RT_FALSE;
queue_cursor->used_idx++;
}
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}
rt_err_t rt_virtio_gpu_init(rt_ubase_t *mmio_base, rt_uint32_t irq)
{
static int dev_no = 0;
char dev_name[RT_NAME_MAX];
struct virtio_device *virtio_dev;
struct virtio_gpu_device *virtio_gpu_dev;
virtio_gpu_dev = rt_malloc(sizeof(struct virtio_gpu_device));
if (virtio_gpu_dev == RT_NULL)
{
goto _alloc_fail;
}
virtio_dev = &virtio_gpu_dev->virtio_dev;
virtio_dev->irq = irq;
virtio_dev->mmio_base = mmio_base;
virtio_gpu_dev->pmode_id = VIRTIO_GPU_INVALID_PMODE_ID;
virtio_gpu_dev->display_resource_id = 0;
virtio_gpu_dev->cursor_resource_id = 0;
virtio_gpu_dev->next_resource_id = 0;
virtio_gpu_dev->framebuffer = RT_NULL;
virtio_gpu_dev->smem_len = 0;
virtio_gpu_dev->cursor_enable = RT_FALSE;
#ifdef RT_USING_SMP
rt_spin_lock_init(&virtio_dev->spinlock);
#endif
virtio_reset_device(virtio_dev);
virtio_status_acknowledge_driver(virtio_dev);
virtio_dev->mmio_config->driver_features = virtio_dev->mmio_config->device_features & ~(
(1 << VIRTIO_F_RING_EVENT_IDX) |
(1 << VIRTIO_F_RING_INDIRECT_DESC));
virtio_status_driver_ok(virtio_dev);
if (virtio_queues_alloc(virtio_dev, 2) != RT_EOK)
{
goto _alloc_fail;
}
if (virtio_queue_init(virtio_dev, VIRTIO_GPU_QUEUE_CTRL, VIRTIO_GPU_QUEUE_SIZE) != RT_EOK)
{
goto _alloc_fail;
}
if (virtio_queue_init(virtio_dev, VIRTIO_GPU_QUEUE_CURSOR, VIRTIO_GPU_QUEUE_SIZE) != RT_EOK)
{
virtio_queue_destroy(virtio_dev, VIRTIO_GPU_QUEUE_CTRL);
goto _alloc_fail;
}
virtio_gpu_dev->parent.type = RT_Device_Class_Graphic;
#ifdef RT_USING_DEVICE_OPS
virtio_gpu_dev->parent.ops = &virtio_gpu_ops;
#else
virtio_gpu_dev->parent.init = virtio_gpu_init;
virtio_gpu_dev->parent.open = RT_NULL;
virtio_gpu_dev->parent.close = RT_NULL;
virtio_gpu_dev->parent.read = virtio_gpu_read;
virtio_gpu_dev->parent.write = virtio_gpu_write;
virtio_gpu_dev->parent.control = virtio_gpu_control;
#endif
virtio_gpu_dev->parent.user_data = &virtio_gpu_graphic_ops;
rt_snprintf(dev_name, RT_NAME_MAX, "virtio-gpu%d", dev_no++);
rt_mutex_init(&virtio_gpu_dev->rw_mutex, dev_name, RT_IPC_FLAG_PRIO);
rt_mutex_init(&virtio_gpu_dev->ops_mutex, dev_name, RT_IPC_FLAG_PRIO);
rt_hw_interrupt_install(irq, virtio_gpu_isr, virtio_gpu_dev, dev_name);
rt_hw_interrupt_umask(irq);
return rt_device_register((rt_device_t)virtio_gpu_dev, dev_name, RT_DEVICE_FLAG_RDWR);
_alloc_fail:
if (virtio_gpu_dev != RT_NULL)
{
virtio_queues_free(virtio_dev);
rt_free(virtio_gpu_dev);
}
return -RT_ENOMEM;
}
#endif /* RT_USING_VIRTIO_GPU */
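A minimal usage sketch for the driver above, assuming the first probed instance registers as "virtio-gpu0" (see rt_virtio_gpu_init()) and that RTGRAPHIC_PIXEL_FORMAT_RGB888 is one of the formats _pixel_format_convert() accepts; error handling is reduced to the essentials and the full-screen fill is illustrative only.

#include <rtthread.h>
#include <virtio_gpu.h>

/* Illustrative sketch: create the 2D display resource, then flush one frame. */
static int virtio_gpu_demo(void)
{
    struct rt_device_graphic_info info;
    struct rt_device_rect_info rect;
    rt_device_t gpu = rt_device_find("virtio-gpu0");    /* first registered instance */

    if (gpu == RT_NULL || rt_device_open(gpu, RT_DEVICE_OFLAG_RDWR) != RT_EOK)
    {
        return -RT_ERROR;
    }

    /* The pixel format is passed by value through the args pointer, see virtio_gpu_control() */
    if (rt_device_control(gpu, VIRTIO_DEVICE_CTRL_GPU_CREATE_2D,
            (void *)(rt_ubase_t)RTGRAPHIC_PIXEL_FORMAT_RGB888) != RT_EOK)
    {
        return -RT_ERROR;
    }

    /* Query resolution, pitch and the framebuffer attached by CREATE_2D */
    rt_device_control(gpu, RTGRAPHIC_CTRL_GET_INFO, &info);

    /* Draw into the framebuffer, then ask the host to redraw the dirty rectangle */
    rt_memset(info.framebuffer, 0xff, info.smem_len);
    rect.x = 0;
    rect.y = 0;
    rect.width = info.width;
    rect.height = info.height;
    rt_device_control(gpu, RTGRAPHIC_CTRL_RECT_UPDATE, &rect);

    return RT_EOK;
}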

View file

@@ -0,0 +1,412 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-11 GuEe-GUI the first version
*/
#ifndef __VIRTIO_GPU_H__
#define __VIRTIO_GPU_H__
#include <rtdef.h>
#include <virtio.h>
#define VIRTIO_GPU_QUEUE_CTRL 0
#define VIRTIO_GPU_QUEUE_CURSOR 1
#define VIRTIO_GPU_QUEUE_SIZE 32
#define VIRTIO_GPU_F_VIRGL 0 /* VIRTIO_GPU_CMD_CTX_*, VIRTIO_GPU_CMD_*_3D */
#define VIRTIO_GPU_F_EDID 1 /* VIRTIO_GPU_CMD_GET_EDID */
#define VIRTIO_GPU_F_RESOURCE_UUID 2 /* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID */
#define VIRTIO_GPU_F_RESOURCE_BLOB 3 /* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */
#define VIRTIO_GPU_F_CONTEXT_INIT 4 /* VIRTIO_GPU_CMD_CREATE_CONTEXT with context_init and multiple timelines */
#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
#define VIRTIO_GPU_FORMAT_BPP 32
#define VIRTIO_GPU_FORMAT_PIXEL 4
#define VIRTIO_GPU_CURSOR_WIDTH 64
#define VIRTIO_GPU_CURSOR_HEIGHT 64
#define VIRTIO_GPU_CURSOR_IMG_SIZE (VIRTIO_GPU_CURSOR_WIDTH * VIRTIO_GPU_CURSOR_HEIGHT * VIRTIO_GPU_FORMAT_PIXEL)
#define VIRTIO_GPU_INVALID_PMODE_ID RT_UINT32_MAX
/* GPU control */
struct virtio_gpu_config
{
rt_uint32_t events_read;
rt_uint32_t events_clear;
rt_uint32_t num_scanouts; /* 1 ~ 16 */
rt_uint32_t reserved;
};
enum virtio_gpu_ctrl_type
{
VIRTIO_GPU_UNDEFINED = 0,
/* 2d commands */
VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
VIRTIO_GPU_CMD_RESOURCE_UNREF,
VIRTIO_GPU_CMD_SET_SCANOUT,
VIRTIO_GPU_CMD_RESOURCE_FLUSH,
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
VIRTIO_GPU_CMD_GET_CAPSET_INFO,
VIRTIO_GPU_CMD_GET_CAPSET,
VIRTIO_GPU_CMD_GET_EDID,
VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID,
VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB,
VIRTIO_GPU_CMD_SET_SCANOUT_BLOB,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
VIRTIO_GPU_CMD_CTX_DESTROY,
VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE,
VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE,
VIRTIO_GPU_CMD_RESOURCE_CREATE_3D,
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
VIRTIO_GPU_CMD_SUBMIT_3D,
VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB,
VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB,
/* cursor commands */
VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
VIRTIO_GPU_CMD_MOVE_CURSOR,
/* success responses */
VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
VIRTIO_GPU_RESP_OK_CAPSET_INFO,
VIRTIO_GPU_RESP_OK_CAPSET,
VIRTIO_GPU_RESP_OK_EDID,
VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
VIRTIO_GPU_RESP_OK_MAP_INFO,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
};
#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
struct virtio_gpu_ctrl_hdr
{
rt_uint32_t type;
rt_uint32_t flags;
rt_uint64_t fence_id;
rt_uint32_t ctx_id;
rt_uint8_t ring_idx;
rt_uint8_t padding[3];
};
#define VIRTIO_GPU_MAX_SCANOUTS 16
struct virtio_gpu_rect
{
rt_uint32_t x;
rt_uint32_t y;
rt_uint32_t width;
rt_uint32_t height;
};
struct virtio_gpu_resp_display_info
{
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_display_one
{
struct virtio_gpu_rect r;
rt_uint32_t enabled;
rt_uint32_t flags;
} pmodes[VIRTIO_GPU_MAX_SCANOUTS];
};
struct virtio_gpu_get_edid
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t scanout;
rt_uint32_t padding;
};
struct virtio_gpu_resp_edid
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t size;
rt_uint32_t padding;
rt_uint8_t edid[1024];
};
enum virtio_gpu_formats
{
VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1,
VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM = 2,
VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM = 3,
VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM = 4,
VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67,
VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM = 68,
VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121,
VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
};
struct virtio_gpu_resource_create_2d
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t resource_id;
rt_uint32_t format;
rt_uint32_t width;
rt_uint32_t height;
};
struct virtio_gpu_resource_unref
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t resource_id;
rt_uint32_t padding;
};
struct virtio_gpu_set_scanout
{
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
rt_uint32_t scanout_id;
rt_uint32_t resource_id;
};
struct virtio_gpu_resource_flush
{
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
rt_uint32_t resource_id;
rt_uint32_t padding;
};
struct virtio_gpu_transfer_to_host_2d
{
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
rt_uint64_t offset;
rt_uint32_t resource_id;
rt_uint32_t padding;
};
struct virtio_gpu_resource_attach_backing
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t resource_id;
rt_uint32_t nr_entries;
};
struct virtio_gpu_mem_entry
{
rt_uint64_t addr;
rt_uint32_t length;
rt_uint32_t padding;
};
struct virtio_gpu_resource_detach_backing
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t resource_id;
rt_uint32_t padding;
};
struct virtio_gpu_get_capset_info
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t capset_index;
rt_uint32_t padding;
};
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#define VIRTIO_GPU_CAPSET_GFXSTREAM 3
#define VIRTIO_GPU_CAPSET_VENUS 4
#define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5
struct virtio_gpu_resp_capset_info
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t capset_id;
rt_uint32_t capset_max_version;
rt_uint32_t capset_max_size;
rt_uint32_t padding;
};
struct virtio_gpu_get_capset
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t capset_id;
rt_uint32_t capset_version;
};
struct virtio_gpu_resp_capset
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint8_t capset_data[];
};
struct virtio_gpu_resource_assign_uuid
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t resource_id;
rt_uint32_t padding;
};
struct virtio_gpu_resp_resource_uuid
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint8_t uuid[16];
};
#define VIRTIO_GPU_BLOB_MEM_GUEST 0x0001
#define VIRTIO_GPU_BLOB_MEM_HOST3D 0x0002
#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST 0x0003
#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE 0x0001
#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE 0x0002
#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
struct virtio_gpu_resource_create_blob
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t resource_id;
rt_uint32_t blob_mem;
rt_uint32_t blob_flags;
rt_uint32_t nr_entries;
rt_uint64_t blob_id;
rt_uint64_t size;
};
struct virtio_gpu_set_scanout_blob
{
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_rect r;
rt_uint32_t scanout_id;
rt_uint32_t resource_id;
rt_uint32_t width;
rt_uint32_t height;
rt_uint32_t format;
rt_uint32_t padding;
rt_uint32_t strides[4];
rt_uint32_t offsets[4];
};
#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff
struct virtio_gpu_ctx_create
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t nlen;
rt_uint32_t context_init;
char debug_name[64];
};
struct virtio_gpu_resource_map_blob
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t resource_id;
rt_uint32_t padding;
rt_uint64_t offset;
};
#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f
#define VIRTIO_GPU_MAP_CACHE_NONE 0x00
#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01
#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02
#define VIRTIO_GPU_MAP_CACHE_WC 0x03
struct virtio_gpu_resp_map_info
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t map_info;
rt_uint32_t padding;
};
struct virtio_gpu_resource_unmap_blob
{
struct virtio_gpu_ctrl_hdr hdr;
rt_uint32_t resource_id;
rt_uint32_t padding;
};
/* GPU cursor */
struct virtio_gpu_cursor_pos
{
rt_uint32_t scanout_id;
rt_uint32_t x;
rt_uint32_t y;
rt_uint32_t padding;
};
struct virtio_gpu_update_cursor
{
struct virtio_gpu_ctrl_hdr hdr;
struct virtio_gpu_cursor_pos pos;
rt_uint32_t resource_id;
rt_uint32_t hot_x;
rt_uint32_t hot_y;
rt_uint32_t padding;
};
struct virtio_gpu_device
{
struct rt_device parent;
struct virtio_device virtio_dev;
/* Current display's info */
struct virtio_gpu_display_one pmode;
enum virtio_gpu_formats format;
rt_uint32_t pmode_id;
rt_uint32_t cursor_x, cursor_y;
rt_uint32_t display_resource_id;
rt_uint32_t cursor_resource_id;
rt_uint32_t next_resource_id;
/* Display framebuffer */
struct rt_mutex rw_mutex;
void *framebuffer;
rt_uint32_t smem_len;
/* Cursor image info */
rt_bool_t cursor_enable;
struct rt_mutex ops_mutex;
rt_uint8_t cursor_img[VIRTIO_GPU_CURSOR_IMG_SIZE];
/* GPU request info */
struct virtio_gpu_resp_display_info gpu_request;
struct
{
rt_bool_t ctrl_valid;
rt_bool_t cursor_valid;
struct virtio_gpu_update_cursor cursor_cmd;
} info[VIRTIO_GPU_QUEUE_SIZE];
};
rt_err_t rt_virtio_gpu_init(rt_ubase_t *mmio_base, rt_uint32_t irq);
enum
{
VIRTIO_DEVICE_CTRL_GPU_SET_PRIMARY = 0x20,
VIRTIO_DEVICE_CTRL_GPU_CREATE_2D,
VIRTIO_DEVICE_CTRL_CURSOR_SETUP,
VIRTIO_DEVICE_CTRL_CURSOR_SET_IMG,
VIRTIO_DEVICE_CTRL_CURSOR_MOVE,
};
#endif /* __VIRTIO_GPU_H__ */
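As a reading aid only: a control-queue request built from the structures above would be laid out as below. The resource id, format and mode are arbitrary illustration values; the driver in virtio_gpu.c builds and queues these requests itself.

#include <rtthread.h>
#include <virtio_gpu.h>

/* Illustrative only: how a RESOURCE_CREATE_2D request body is laid out. */
static void fill_create_2d_example(struct virtio_gpu_resource_create_2d *req)
{
    rt_memset(req, 0, sizeof(*req));

    req->hdr.type    = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D;
    req->hdr.flags   = 0;                                /* no VIRTIO_GPU_FLAG_FENCE */
    req->resource_id = 1;                                /* arbitrary non-zero id */
    req->format      = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
    req->width       = 1024;                             /* arbitrary scanout mode */
    req->height      = 768;
}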

View file

@@ -0,0 +1,449 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-11 GuEe-GUI the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <cpuport.h>
#ifdef RT_USING_VIRTIO_INPUT
#include <virtio_input.h>
static void _set_bit(rt_uint32_t nr, volatile rt_ubase_t *addr)
{
rt_ubase_t mask = BIT_MASK(nr);
rt_ubase_t *p = ((rt_ubase_t *)addr) + BIT_WORD(nr);
*p |= mask;
}
static rt_ssize_t virtio_input_cfg_select(struct virtio_input_device *virtio_input_dev,
rt_uint8_t select, rt_uint8_t subsel)
{
struct virtio_input_config *config = virtio_input_dev->config;
rt_hw_dsb();
config->select = select;
config->subsel = subsel;
rt_hw_dsb();
return config->size;
}
static void virtio_input_cfg_bits(struct virtio_input_device *virtio_input_dev,
rt_uint8_t select, rt_uint8_t subsel, rt_ubase_t *bits, rt_uint32_t bitcount)
{
int i;
rt_uint32_t bit;
rt_uint8_t bytes;
rt_uint8_t *virtio_bits;
void *config_base = virtio_input_dev->config;
    rt_off_t offset = (rt_size_t)&((struct virtio_input_config *)0)->bitmap; /* offsetof(struct virtio_input_config, bitmap) */
bytes = virtio_input_cfg_select(virtio_input_dev, select, subsel);
if (bytes == 0)
{
return;
}
if (bitcount > bytes * 8)
{
bitcount = bytes * 8;
}
/*
* Bitmap in virtio config space is a simple stream of bytes,
* with the first byte carrying bits 0-7, second bits 8-15 and
* so on.
*/
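    /*
     * Illustrative example (not read from a real device): if a keyboard
     * advertises key codes 2 and 17 under VIRTIO_INPUT_CFG_EV_BITS / EV_KEY,
     * byte 0 of the bitmap has bit 2 set and byte 2 has bit 1 set
     * (17 = 2 * 8 + 1), so the extraction loop below ends up calling
     * _set_bit(2, bits) and _set_bit(17, bits).
     */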
virtio_bits = rt_malloc(bytes);
if (virtio_bits == RT_NULL)
{
return;
}
for (i = 0; i < bytes; ++i)
{
void *buffer = (void *)virtio_bits + i;
        if (virtio_input_dev->virtio_dev.mmio_config->version == 1)
        {
            *((rt_uint8_t *)buffer) = HWREG8(config_base + offset + i);
        }
        else
        {
            rt_memcpy(buffer, config_base + offset + i, sizeof(rt_uint8_t));
        }
}
for (bit = 0; bit < bitcount; ++bit)
{
if (virtio_bits[bit / 8] & (1 << (bit % 8)))
{
_set_bit(bit, bits);
}
}
rt_free(virtio_bits);
if (select == VIRTIO_INPUT_CFG_EV_BITS)
{
_set_bit(subsel, virtio_input_dev->ev_bit);
}
}
static rt_err_t virtio_input_init(rt_device_t dev)
{
int i;
rt_uint16_t idx[VIRTIO_INPUT_QUEUE_MAX_SIZE];
struct virtio_input_device *virtio_input_dev = (struct virtio_input_device *)dev;
struct virtio_device *virtio_dev = &virtio_input_dev->virtio_dev;
struct virtq *queue_event, *queue_status;
virtio_input_cfg_bits(virtio_input_dev, VIRTIO_INPUT_CFG_EV_BITS, EV_KEY, virtio_input_dev->key_bit, KEY_CNT);
virtio_input_cfg_bits(virtio_input_dev, VIRTIO_INPUT_CFG_EV_BITS, EV_REL, virtio_input_dev->rel_bit, REL_CNT);
virtio_input_cfg_bits(virtio_input_dev, VIRTIO_INPUT_CFG_EV_BITS, EV_ABS, virtio_input_dev->abs_bit, ABS_CNT);
queue_event = &virtio_dev->queues[VIRTIO_INPUT_QUEUE_EVENT];
queue_status = &virtio_dev->queues[VIRTIO_INPUT_QUEUE_STATUS];
virtio_alloc_desc_chain(virtio_dev, VIRTIO_INPUT_QUEUE_EVENT, queue_event->num, idx);
virtio_alloc_desc_chain(virtio_dev, VIRTIO_INPUT_QUEUE_STATUS, queue_status->num, idx);
for (i = 0; i < queue_event->num; ++i)
{
rt_uint16_t id = i;
void *addr = &virtio_input_dev->recv_events[i];
virtio_fill_desc(virtio_dev, VIRTIO_INPUT_QUEUE_EVENT, id,
VIRTIO_VA2PA(addr), sizeof(struct virtio_input_event), VIRTQ_DESC_F_WRITE, 0);
virtio_submit_chain(virtio_dev, VIRTIO_INPUT_QUEUE_EVENT, id);
}
rt_hw_dsb();
queue_event->avail->flags = 0;
queue_status->avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
virtio_queue_notify(virtio_dev, VIRTIO_INPUT_QUEUE_EVENT);
return RT_EOK;
}
static rt_ssize_t virtio_input_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
struct virtio_input_device *virtio_input_dev = (struct virtio_input_device *)dev;
if (buffer == RT_NULL || pos + size >= virtio_input_dev->virtio_dev.queues[VIRTIO_INPUT_QUEUE_EVENT].num)
{
return 0;
}
rt_mutex_take(&virtio_input_dev->rw_mutex, RT_WAITING_FOREVER);
rt_memcpy(buffer, &virtio_input_dev->bcst_events[pos], size);
rt_mutex_release(&virtio_input_dev->rw_mutex);
return size;
}
static rt_ssize_t virtio_input_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
struct virtio_input_device *virtio_input_dev = (struct virtio_input_device *)dev;
if (buffer == RT_NULL || pos + size >= virtio_input_dev->virtio_dev.queues[VIRTIO_INPUT_QUEUE_EVENT].num)
{
return 0;
}
rt_mutex_take(&virtio_input_dev->rw_mutex, RT_WAITING_FOREVER);
rt_memcpy(&virtio_input_dev->bcst_events[pos], buffer, size);
rt_mutex_release(&virtio_input_dev->rw_mutex);
return size;
}
static rt_err_t virtio_input_control(rt_device_t dev, int cmd, void *args)
{
rt_err_t status = RT_EOK;
struct virtio_input_device *virtio_input_dev = (struct virtio_input_device *)dev;
struct virtio_device *virtio_dev = &virtio_input_dev->virtio_dev;
struct virtio_input_config *config = virtio_input_dev->config;
if (args == RT_NULL)
{
return -RT_ERROR;
}
switch (cmd)
{
case VIRTIO_DEVICE_CTRL_INPUT_GET_TYPE:
*(enum virtio_input_type *)args = virtio_input_dev->type;
break;
case VIRTIO_DEVICE_CTRL_INPUT_BIND_BSCT_HANDLER:
virtio_input_dev->bsct_handler = args;
break;
case VIRTIO_DEVICE_CTRL_INPUT_GET_ABS_X_INFO:
virtio_input_cfg_select(virtio_input_dev, VIRTIO_INPUT_CFG_ABS_INFO, VIRTIO_INPUT_ABS_AXIS_X);
rt_memcpy(args, config, sizeof(struct virtio_input_config));
break;
case VIRTIO_DEVICE_CTRL_INPUT_GET_ABS_Y_INFO:
virtio_input_cfg_select(virtio_input_dev, VIRTIO_INPUT_CFG_ABS_INFO, VIRTIO_INPUT_ABS_AXIS_Y);
rt_memcpy(args, config, sizeof(struct virtio_input_config));
break;
case VIRTIO_DEVICE_CTRL_INPUT_SET_STATUS:
{
rt_uint16_t id;
void *addr;
struct virtq *queue_status = &virtio_dev->queues[VIRTIO_INPUT_QUEUE_STATUS];
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
id = queue_status->avail->idx % queue_status->num;
addr = &virtio_input_dev->xmit_events[id];
rt_memcpy(addr, args, sizeof(struct virtio_input_event));
virtio_free_desc(virtio_dev, VIRTIO_INPUT_QUEUE_STATUS, id);
virtio_fill_desc(virtio_dev, VIRTIO_INPUT_QUEUE_STATUS, id,
VIRTIO_VA2PA(addr), sizeof(struct virtio_input_event), 0, 0);
virtio_submit_chain(virtio_dev, VIRTIO_INPUT_QUEUE_STATUS, id);
virtio_queue_notify(virtio_dev, VIRTIO_INPUT_QUEUE_STATUS);
virtio_alloc_desc(virtio_dev, VIRTIO_INPUT_QUEUE_STATUS);
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}
break;
case VIRTIO_DEVICE_CTRL_INPUT_GET_EV_BIT:
rt_memcpy(args, virtio_input_dev->ev_bit, sizeof(virtio_input_dev->ev_bit));
break;
case VIRTIO_DEVICE_CTRL_INPUT_GET_KEY_BIT:
rt_memcpy(args, virtio_input_dev->key_bit, sizeof(virtio_input_dev->key_bit));
break;
case VIRTIO_DEVICE_CTRL_INPUT_GET_REL_BIT:
rt_memcpy(args, virtio_input_dev->rel_bit, sizeof(virtio_input_dev->rel_bit));
break;
case VIRTIO_DEVICE_CTRL_INPUT_GET_ABS_BIT:
rt_memcpy(args, virtio_input_dev->abs_bit, sizeof(virtio_input_dev->abs_bit));
break;
default:
status = -RT_EINVAL;
break;
}
return status;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops virtio_input_ops =
{
virtio_input_init,
RT_NULL,
RT_NULL,
virtio_input_read,
virtio_input_write,
virtio_input_control
};
#endif
static void virtio_input_isr(int irqno, void *param)
{
struct virtio_input_device *virtio_input_dev = (struct virtio_input_device *)param;
struct virtio_device *virtio_dev = &virtio_input_dev->virtio_dev;
struct virtq *event_queue = &virtio_dev->queues[VIRTIO_INPUT_QUEUE_EVENT];
const char *dev_name = virtio_input_dev->parent.parent.name;
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
virtio_interrupt_ack(virtio_dev);
rt_hw_dsb();
while (event_queue->used_idx != event_queue->used->idx)
{
rt_uint16_t id = event_queue->used->ring[event_queue->used_idx % event_queue->num].id;
rt_uint32_t len = event_queue->used->ring[event_queue->used_idx % event_queue->num].len;
if (len == sizeof(struct virtio_input_event))
{
struct virtio_input_event *recv_events = &virtio_input_dev->recv_events[id];
struct virtio_input_event *bcst_events = &virtio_input_dev->bcst_events[id];
if (recv_events->type >= EV_SYN && recv_events->type <= EV_ABS)
{
bcst_events->type = recv_events->type;
bcst_events->code = recv_events->code;
bcst_events->value = recv_events->value;
if (virtio_input_dev->bsct_handler != RT_NULL)
{
virtio_input_dev->bsct_handler(*bcst_events);
}
}
else
{
rt_kprintf("%s: Unsupport event[type: %02x, code: %02x, value: %08x]!\n",
dev_name, recv_events->type, recv_events->code, recv_events->value);
}
}
else
{
rt_kprintf("%s: Invalid event!\n", dev_name);
}
event_queue->used_idx++;
virtio_submit_chain(virtio_dev, VIRTIO_INPUT_QUEUE_EVENT, id);
virtio_queue_notify(virtio_dev, VIRTIO_INPUT_QUEUE_EVENT);
}
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}
rt_err_t rt_virtio_input_init(rt_ubase_t *mmio_base, rt_uint32_t irq)
{
rt_uint32_t flag;
static int dev_no = 0;
char dev_name[RT_NAME_MAX];
struct virtio_device *virtio_dev;
struct virtio_input_device *virtio_input_dev;
virtio_input_dev = rt_malloc(sizeof(struct virtio_input_device));
if (virtio_input_dev == RT_NULL)
{
goto _alloc_fail;
}
virtio_dev = &virtio_input_dev->virtio_dev;
virtio_dev->irq = irq;
virtio_dev->mmio_base = mmio_base;
virtio_input_dev->config = (struct virtio_input_config *)virtio_dev->mmio_config->config;
virtio_input_dev->bsct_handler = RT_NULL;
#ifdef RT_USING_SMP
rt_spin_lock_init(&virtio_dev->spinlock);
#endif
virtio_reset_device(virtio_dev);
virtio_status_acknowledge_driver(virtio_dev);
virtio_dev->mmio_config->driver_features = virtio_dev->mmio_config->device_features & ~(
(1 << VIRTIO_F_RING_EVENT_IDX) |
(1 << VIRTIO_F_RING_INDIRECT_DESC));
virtio_status_driver_ok(virtio_dev);
if (virtio_queues_alloc(virtio_dev, 2) != RT_EOK)
{
goto _alloc_fail;
}
if (virtio_queue_init(virtio_dev, VIRTIO_INPUT_QUEUE_EVENT, VIRTIO_INPUT_EVENT_QUEUE_SIZE) != RT_EOK)
{
goto _alloc_fail;
}
if (virtio_queue_init(virtio_dev, VIRTIO_INPUT_QUEUE_STATUS, VIRTIO_INPUT_STATUS_QUEUE_SIZE) != RT_EOK)
{
virtio_queue_destroy(virtio_dev, VIRTIO_INPUT_QUEUE_EVENT);
goto _alloc_fail;
}
virtio_input_cfg_select(virtio_input_dev, VIRTIO_INPUT_CFG_ID_DEVIDS, 0);
if (virtio_input_dev->config->ids.product == EV_ABS)
{
virtio_input_dev->type = VIRTIO_INPUT_TYPE_TABLET;
virtio_input_dev->parent.type = RT_Device_Class_Touch;
flag = RT_DEVICE_FLAG_STANDALONE | RT_DEVICE_FLAG_INT_RX;
}
else
{
if (virtio_input_dev->config->ids.product == EV_KEY)
{
virtio_input_dev->type = VIRTIO_INPUT_TYPE_KEYBOARD;
}
else
{
virtio_input_dev->type = VIRTIO_INPUT_TYPE_MOUSE;
}
        /* Replace with a dedicated "Keyboard" or "Mouse" device class if one is supported in the future */
virtio_input_dev->parent.type = RT_Device_Class_Miscellaneous;
flag = RT_DEVICE_FLAG_RDWR;
}
#ifdef RT_USING_DEVICE_OPS
virtio_input_dev->parent.ops = &virtio_input_ops;
#else
virtio_input_dev->parent.init = virtio_input_init;
virtio_input_dev->parent.open = RT_NULL;
virtio_input_dev->parent.close = RT_NULL;
virtio_input_dev->parent.read = virtio_input_read;
virtio_input_dev->parent.write = virtio_input_write;
virtio_input_dev->parent.control = virtio_input_control;
#endif
rt_snprintf(dev_name, RT_NAME_MAX, "virtio-input%d", dev_no++);
rt_mutex_init(&virtio_input_dev->rw_mutex, dev_name, RT_IPC_FLAG_PRIO);
rt_hw_interrupt_install(irq, virtio_input_isr, virtio_input_dev, dev_name);
rt_hw_interrupt_umask(irq);
return rt_device_register((rt_device_t)virtio_input_dev, dev_name, flag);
_alloc_fail:
if (virtio_input_dev != RT_NULL)
{
virtio_queues_free(virtio_dev);
rt_free(virtio_input_dev);
}
return -RT_ENOMEM;
}
#endif /* RT_USING_VIRTIO_INPUT */
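A minimal usage sketch for the input driver above, assuming the first probed instance registers as "virtio-input0"; the handler body and the caps-lock LED example are illustrative. Note that the bound broadcast handler is invoked from virtio_input_isr(), i.e. in interrupt context, so it should stay short.

#include <rtthread.h>
#include <virtio_input.h>

/* Illustrative only: print every event broadcast by the driver above. */
static void demo_event_handler(struct virtio_input_event event)
{
    rt_kprintf("event type=%04x code=%04x value=%08x\n",
               event.type, event.code, event.value);
}

static int virtio_input_demo(void)
{
    enum virtio_input_type type;
    struct virtio_input_event led;
    rt_device_t dev = rt_device_find("virtio-input0");  /* first registered instance */

    if (dev == RT_NULL || rt_device_open(dev, RT_DEVICE_OFLAG_RDWR) != RT_EOK)
    {
        return -RT_ERROR;
    }

    rt_device_control(dev, VIRTIO_DEVICE_CTRL_INPUT_GET_TYPE, &type);
    rt_kprintf("device type: %d\n", type);

    /* Called from virtio_input_isr() for each EV_SYN..EV_ABS event */
    rt_device_control(dev, VIRTIO_DEVICE_CTRL_INPUT_BIND_BSCT_HANDLER, (void *)demo_event_handler);

    /* Push a status event back to the host, e.g. light the caps-lock LED */
    led.type  = EV_LED;
    led.code  = LED_CAPSL;
    led.value = 1;
    rt_device_control(dev, VIRTIO_DEVICE_CTRL_INPUT_SET_STATUS, &led);

    return RT_EOK;
}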

View file

@@ -0,0 +1,145 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-11 GuEe-GUI the first version
*/
#ifndef __VIRTIO_INPUT_H__
#define __VIRTIO_INPUT_H__
#include <rtdef.h>
#include <virtio.h>
#include <virtio_input_event_codes.h>
#define VIRTIO_INPUT_QUEUE_EVENT 0
#define VIRTIO_INPUT_QUEUE_STATUS 1
#define VIRTIO_INPUT_EVENT_QUEUE_SIZE 64
#define VIRTIO_INPUT_STATUS_QUEUE_SIZE 8
#define VIRTIO_INPUT_QUEUE_MAX_SIZE (VIRTIO_INPUT_EVENT_QUEUE_SIZE > VIRTIO_INPUT_STATUS_QUEUE_SIZE ? \
VIRTIO_INPUT_EVENT_QUEUE_SIZE : VIRTIO_INPUT_STATUS_QUEUE_SIZE)
#define VIRTIO_INPUT_ABS_AXIS_X 0
#define VIRTIO_INPUT_ABS_AXIS_Y 1
enum virtio_input_type
{
VIRTIO_INPUT_TYPE_KEYBOARD,
VIRTIO_INPUT_TYPE_MOUSE,
VIRTIO_INPUT_TYPE_TABLET,
VIRTIO_INPUT_TYPE_SIZE,
};
enum virtio_input_config_select
{
VIRTIO_INPUT_CFG_UNSET = 0x00,
VIRTIO_INPUT_CFG_ID_NAME = 0x01,
VIRTIO_INPUT_CFG_ID_SERIAL = 0x02,
VIRTIO_INPUT_CFG_ID_DEVIDS = 0x03,
VIRTIO_INPUT_CFG_PROP_BITS = 0x10,
VIRTIO_INPUT_CFG_EV_BITS = 0x11,
VIRTIO_INPUT_CFG_ABS_INFO = 0x12,
};
struct virtio_input_absinfo
{
rt_uint32_t min; /* Minimum value for the axis */
rt_uint32_t max; /* Maximum value for the axis */
rt_uint32_t fuzz; /* Fuzz value that is used to filter noise from the event stream */
    rt_uint32_t flat;  /* Values within this range will be discarded by the joydev interface and reported as 0 instead */
rt_uint32_t res; /* Resolution for the values reported for the axis */
};
struct virtio_input_devids
{
rt_uint16_t bustype;
rt_uint16_t vendor;
rt_uint16_t product;
rt_uint16_t version;
};
struct virtio_input_config
{
rt_uint8_t select;
rt_uint8_t subsel;
rt_uint8_t size;
rt_uint8_t reserved[5];
union
{
char string[128];
rt_uint8_t bitmap[128];
struct virtio_input_absinfo abs;
struct virtio_input_devids ids;
};
} __attribute__((packed));
struct virtio_input_event
{
rt_uint16_t type;
rt_uint16_t code;
rt_uint32_t value;
};
#ifdef ARCH_CPU_64BIT
#define BITS_PER_LONG 64
#else
#define BITS_PER_LONG 32
#endif
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BITS_PER_BYTE 8
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
struct virtio_input_device
{
struct rt_device parent;
struct virtio_device virtio_dev;
rt_ubase_t ev_bit[BITS_TO_LONGS(EV_CNT)];
rt_ubase_t key_bit[BITS_TO_LONGS(KEY_CNT)];
rt_ubase_t rel_bit[BITS_TO_LONGS(REL_CNT)];
rt_ubase_t abs_bit[BITS_TO_LONGS(ABS_CNT)];
enum virtio_input_type type;
struct virtio_input_config *config;
/* Broadcast events */
struct rt_mutex rw_mutex;
void (*bsct_handler)(struct virtio_input_event event);
struct virtio_input_event bcst_events[VIRTIO_INPUT_EVENT_QUEUE_SIZE];
/* Receive events */
struct virtio_input_event recv_events[VIRTIO_INPUT_EVENT_QUEUE_SIZE];
/* Transmit status */
struct virtio_input_event xmit_events[VIRTIO_INPUT_STATUS_QUEUE_SIZE];
};
enum
{
VIRTIO_DEVICE_CTRL_INPUT_GET_TYPE = 0x20,
VIRTIO_DEVICE_CTRL_INPUT_BIND_BSCT_HANDLER,
VIRTIO_DEVICE_CTRL_INPUT_GET_ABS_X_INFO,
VIRTIO_DEVICE_CTRL_INPUT_GET_ABS_Y_INFO,
VIRTIO_DEVICE_CTRL_INPUT_SET_STATUS,
VIRTIO_DEVICE_CTRL_INPUT_GET_EV_BIT,
VIRTIO_DEVICE_CTRL_INPUT_GET_KEY_BIT,
VIRTIO_DEVICE_CTRL_INPUT_GET_REL_BIT,
VIRTIO_DEVICE_CTRL_INPUT_GET_ABS_BIT,
};
rt_err_t rt_virtio_input_init(rt_ubase_t *mmio_base, rt_uint32_t irq);
#endif /* __VIRTIO_INPUT_H__ */
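The ev/key/rel/abs bitmaps above are plain rt_ubase_t arrays, so a caller can copy one out with the *_GET_*_BIT control commands and test individual codes with BIT_WORD()/BIT_MASK(). A minimal sketch, assuming a device handle obtained elsewhere:

#include <rtthread.h>
#include <virtio_input.h>

/* Illustrative only: check whether a virtio-input device advertises a key/button code. */
static rt_bool_t input_has_key(rt_device_t dev, rt_uint32_t code)
{
    rt_ubase_t key_bit[BITS_TO_LONGS(KEY_CNT)];

    if (rt_device_control(dev, VIRTIO_DEVICE_CTRL_INPUT_GET_KEY_BIT, key_bit) != RT_EOK)
    {
        return RT_FALSE;
    }

    return (key_bit[BIT_WORD(code)] & BIT_MASK(code)) ? RT_TRUE : RT_FALSE;
}

/* e.g. input_has_key(dev, KEY_ENTER) or input_has_key(dev, BTN_LEFT) */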

View file

@@ -0,0 +1,932 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-11 GuEe-GUI the first version
*/
#ifndef __VIRTIO_INPUT_EVENT_CODES__
#define __VIRTIO_INPUT_EVENT_CODES__
/* Device properties and quirks */
#define INPUT_PROP_POINTER 0x00 /* Needs a pointer */
#define INPUT_PROP_DIRECT 0x01 /* Direct input devices */
#define INPUT_PROP_BUTTONPAD 0x02 /* Has button(s) under pad */
#define INPUT_PROP_SEMI_MT 0x03 /* Touch rectangle only */
#define INPUT_PROP_TOPBUTTONPAD 0x04 /* Softbuttons at top of pad */
#define INPUT_PROP_POINTING_STICK 0x05 /* Is a pointing stick */
#define INPUT_PROP_ACCELEROMETER 0x06 /* Has accelerometer */
#define INPUT_PROP_MAX 0x1f
#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1)
/* Event types */
#define EV_SYN 0x00 /* Synchronization events */
#define EV_KEY 0x01 /* Keys and buttons type */
#define EV_REL 0x02 /* Relative axes events */
#define EV_ABS 0x03 /* Absolute axes events */
#define EV_MSC 0x04 /* Misc events */
#define EV_SW 0x05 /* Switch events */
#define EV_LED 0x11 /* LEDs events */
#define EV_SND 0x12 /* Sounds events */
#define EV_REP 0x14 /* Repeat events */
#define EV_FF 0x15 /* Force feedback events */
#define EV_PWR 0x16 /* Power management events */
#define EV_FF_STATUS 0x17 /* Force feedback state */
#define EV_MAX 0x1f /* Maximum number of events */
#define EV_CNT (EV_MAX + 1)/* Event count */
/* Synchronization events.*/
#define SYN_REPORT 0
#define SYN_CONFIG 1
#define SYN_MT_REPORT 2
#define SYN_DROPPED 3
#define SYN_MAX 0xf
#define SYN_CNT (SYN_MAX + 1)
/*
* Keys and buttons
*
* Most of the keys/buttons are modeled after USB HUT 1.12
* (see http://www.usb.org/developers/hidpage).
* Abbreviations in the comments:
* AC - Application Control
* AL - Application Launch Button
* SC - System Control
*/
#define KEY_RESERVED 0
#define KEY_ESC 1
#define KEY_1 2
#define KEY_2 3
#define KEY_3 4
#define KEY_4 5
#define KEY_5 6
#define KEY_6 7
#define KEY_7 8
#define KEY_8 9
#define KEY_9 10
#define KEY_0 11
#define KEY_MINUS 12
#define KEY_EQUAL 13
#define KEY_BACKSPACE 14
#define KEY_TAB 15
#define KEY_Q 16
#define KEY_W 17
#define KEY_E 18
#define KEY_R 19
#define KEY_T 20
#define KEY_Y 21
#define KEY_U 22
#define KEY_I 23
#define KEY_O 24
#define KEY_P 25
#define KEY_LEFTBRACE 26
#define KEY_RIGHTBRACE 27
#define KEY_ENTER 28
#define KEY_LEFTCTRL 29
#define KEY_A 30
#define KEY_S 31
#define KEY_D 32
#define KEY_F 33
#define KEY_G 34
#define KEY_H 35
#define KEY_J 36
#define KEY_K 37
#define KEY_L 38
#define KEY_SEMICOLON 39
#define KEY_APOSTROPHE 40
#define KEY_GRAVE 41
#define KEY_LEFTSHIFT 42
#define KEY_BACKSLASH 43
#define KEY_Z 44
#define KEY_X 45
#define KEY_C 46
#define KEY_V 47
#define KEY_B 48
#define KEY_N 49
#define KEY_M 50
#define KEY_COMMA 51
#define KEY_DOT 52
#define KEY_SLASH 53
#define KEY_RIGHTSHIFT 54
#define KEY_KPASTERISK 55
#define KEY_LEFTALT 56
#define KEY_SPACE 57
#define KEY_CAPSLOCK 58
#define KEY_F1 59
#define KEY_F2 60
#define KEY_F3 61
#define KEY_F4 62
#define KEY_F5 63
#define KEY_F6 64
#define KEY_F7 65
#define KEY_F8 66
#define KEY_F9 67
#define KEY_F10 68
#define KEY_NUMLOCK 69
#define KEY_SCROLLLOCK 70
#define KEY_KP7 71
#define KEY_KP8 72
#define KEY_KP9 73
#define KEY_KPMINUS 74
#define KEY_KP4 75
#define KEY_KP5 76
#define KEY_KP6 77
#define KEY_KPPLUS 78
#define KEY_KP1 79
#define KEY_KP2 80
#define KEY_KP3 81
#define KEY_KP0 82
#define KEY_KPDOT 83
#define KEY_ZENKAKUHANKAKU 85
#define KEY_102ND 86
#define KEY_F11 87
#define KEY_F12 88
#define KEY_RO 89
#define KEY_KATAKANA 90
#define KEY_HIRAGANA 91
#define KEY_HENKAN 92
#define KEY_KATAKANAHIRAGANA 93
#define KEY_MUHENKAN 94
#define KEY_KPJPCOMMA 95
#define KEY_KPENTER 96
#define KEY_RIGHTCTRL 97
#define KEY_KPSLASH 98
#define KEY_SYSRQ 99
#define KEY_RIGHTALT 100
#define KEY_LINEFEED 101
#define KEY_HOME 102
#define KEY_UP 103
#define KEY_PAGEUP 104
#define KEY_LEFT 105
#define KEY_RIGHT 106
#define KEY_END 107
#define KEY_DOWN 108
#define KEY_PAGEDOWN 109
#define KEY_INSERT 110
#define KEY_DELETE 111
#define KEY_MACRO 112
#define KEY_MUTE 113
#define KEY_VOLUMEDOWN 114
#define KEY_VOLUMEUP 115
#define KEY_POWER 116 /* SC System Power Down */
#define KEY_KPEQUAL 117
#define KEY_KPPLUSMINUS 118
#define KEY_PAUSE 119
#define KEY_SCALE 120 /* AL Compiz Scale (Expose) */
#define KEY_KPCOMMA 121
#define KEY_HANGEUL 122
#define KEY_HANGUEL KEY_HANGEUL
#define KEY_HANJA 123
#define KEY_YEN 124
#define KEY_LEFTMETA 125
#define KEY_RIGHTMETA 126
#define KEY_COMPOSE 127
#define KEY_STOP 128 /* AC Stop */
#define KEY_AGAIN 129
#define KEY_PROPS 130 /* AC Properties */
#define KEY_UNDO 131 /* AC Undo */
#define KEY_FRONT 132
#define KEY_COPY 133 /* AC Copy */
#define KEY_OPEN 134 /* AC Open */
#define KEY_PASTE 135 /* AC Paste */
#define KEY_FIND 136 /* AC Search */
#define KEY_CUT 137 /* AC Cut */
#define KEY_HELP 138 /* AL Integrated Help Center */
#define KEY_MENU 139 /* Menu (show menu) */
#define KEY_CALC 140 /* AL Calculator */
#define KEY_SETUP 141
#define KEY_SLEEP 142 /* SC System Sleep */
#define KEY_WAKEUP 143 /* System Wake Up */
#define KEY_FILE 144 /* AL Local Machine Browser */
#define KEY_SENDFILE 145
#define KEY_DELETEFILE 146
#define KEY_XFER 147
#define KEY_PROG1 148
#define KEY_PROG2 149
#define KEY_WWW 150 /* AL Internet Browser */
#define KEY_MSDOS 151
#define KEY_COFFEE 152 /* AL Terminal Lock/Screensaver */
#define KEY_SCREENLOCK KEY_COFFEE
#define KEY_ROTATE_DISPLAY 153 /* Display orientation for e.g. tablets */
#define KEY_DIRECTION KEY_ROTATE_DISPLAY
#define KEY_CYCLEWINDOWS 154
#define KEY_MAIL 155
#define KEY_BOOKMARKS 156 /* AC Bookmarks */
#define KEY_COMPUTER 157
#define KEY_BACK 158 /* AC Back */
#define KEY_FORWARD 159 /* AC Forward */
#define KEY_CLOSECD 160
#define KEY_EJECTCD 161
#define KEY_EJECTCLOSECD 162
#define KEY_NEXTSONG 163
#define KEY_PLAYPAUSE 164
#define KEY_PREVIOUSSONG 165
#define KEY_STOPCD 166
#define KEY_RECORD 167
#define KEY_REWIND 168
#define KEY_PHONE 169 /* Media Select Telephone */
#define KEY_ISO 170
#define KEY_CONFIG 171 /* AL Consumer Control Configuration */
#define KEY_HOMEPAGE 172 /* AC Home */
#define KEY_REFRESH 173 /* AC Refresh */
#define KEY_EXIT 174 /* AC Exit */
#define KEY_MOVE 175
#define KEY_EDIT 176
#define KEY_SCROLLUP 177
#define KEY_SCROLLDOWN 178
#define KEY_KPLEFTPAREN 179
#define KEY_KPRIGHTPAREN 180
#define KEY_NEW 181 /* AC New */
#define KEY_REDO 182 /* AC Redo/Repeat */
#define KEY_F13 183
#define KEY_F14 184
#define KEY_F15 185
#define KEY_F16 186
#define KEY_F17 187
#define KEY_F18 188
#define KEY_F19 189
#define KEY_F20 190
#define KEY_F21 191
#define KEY_F22 192
#define KEY_F23 193
#define KEY_F24 194
#define KEY_PLAYCD 200
#define KEY_PAUSECD 201
#define KEY_PROG3 202
#define KEY_PROG4 203
#define KEY_ALL_APPLICATIONS 204 /* AC Desktop Show All Applications */
#define KEY_DASHBOARD KEY_ALL_APPLICATIONS
#define KEY_SUSPEND 205
#define KEY_CLOSE 206 /* AC Close */
#define KEY_PLAY 207
#define KEY_FASTFORWARD 208
#define KEY_BASSBOOST 209
#define KEY_PRINT 210 /* AC Print */
#define KEY_HP 211
#define KEY_CAMERA 212
#define KEY_SOUND 213
#define KEY_QUESTION 214
#define KEY_EMAIL 215
#define KEY_CHAT 216
#define KEY_SEARCH 217
#define KEY_CONNECT 218
#define KEY_FINANCE 219 /* AL Checkbook/Finance */
#define KEY_SPORT 220
#define KEY_SHOP 221
#define KEY_ALTERASE 222
#define KEY_CANCEL 223 /* AC Cancel */
#define KEY_BRIGHTNESSDOWN 224
#define KEY_BRIGHTNESSUP 225
#define KEY_MEDIA 226
#define KEY_SWITCHVIDEOMODE 227 /* Cycle between available video outputs (Monitor/LCD/TV-out/etc) */
#define KEY_KBDILLUMTOGGLE 228
#define KEY_KBDILLUMDOWN 229
#define KEY_KBDILLUMUP 230
#define KEY_SEND 231 /* AC Send */
#define KEY_REPLY 232 /* AC Reply */
#define KEY_FORWARDMAIL 233 /* AC Forward Msg */
#define KEY_SAVE 234 /* AC Save */
#define KEY_DOCUMENTS 235
#define KEY_BATTERY 236
#define KEY_BLUETOOTH 237
#define KEY_WLAN 238
#define KEY_UWB 239
#define KEY_UNKNOWN 240
#define KEY_VIDEO_NEXT 241 /* Drive next video source */
#define KEY_VIDEO_PREV 242 /* Drive previous video source */
#define KEY_BRIGHTNESS_CYCLE 243 /* Brightness up, after max is min */
#define KEY_BRIGHTNESS_AUTO 244 /* Set Auto Brightness: manual brightness control is off, rely on ambient */
#define KEY_BRIGHTNESS_ZERO KEY_BRIGHTNESS_AUTO
#define KEY_DISPLAY_OFF 245 /* Display device to off state */
#define KEY_WWAN 246 /* Wireless WAN (LTE, UMTS, GSM, etc.) */
#define KEY_WIMAX KEY_WWAN
#define KEY_RFKILL 247 /* Key that controls all radios */
#define KEY_MICMUTE 248 /* Mute / unmute the microphone */
/* Code 255 is reserved for special needs of AT keyboard driver */
#define BTN_MISC 0x100
#define BTN_0 0x100
#define BTN_1 0x101
#define BTN_2 0x102
#define BTN_3 0x103
#define BTN_4 0x104
#define BTN_5 0x105
#define BTN_6 0x106
#define BTN_7 0x107
#define BTN_8 0x108
#define BTN_9 0x109
#define BTN_MOUSE 0x110
#define BTN_LEFT 0x110
#define BTN_RIGHT 0x111
#define BTN_MIDDLE 0x112
#define BTN_SIDE 0x113
#define BTN_EXTRA 0x114
#define BTN_FORWARD 0x115
#define BTN_BACK 0x116
#define BTN_TASK 0x117
#define BTN_JOYSTICK 0x120
#define BTN_TRIGGER 0x120
#define BTN_THUMB 0x121
#define BTN_THUMB2 0x122
#define BTN_TOP 0x123
#define BTN_TOP2 0x124
#define BTN_PINKIE 0x125
#define BTN_BASE 0x126
#define BTN_BASE2 0x127
#define BTN_BASE3 0x128
#define BTN_BASE4 0x129
#define BTN_BASE5 0x12a
#define BTN_BASE6 0x12b
#define BTN_DEAD 0x12f
#define BTN_GAMEPAD 0x130
#define BTN_SOUTH 0x130
#define BTN_A BTN_SOUTH
#define BTN_EAST 0x131
#define BTN_B BTN_EAST
#define BTN_C 0x132
#define BTN_NORTH 0x133
#define BTN_X BTN_NORTH
#define BTN_WEST 0x134
#define BTN_Y BTN_WEST
#define BTN_Z 0x135
#define BTN_TL 0x136
#define BTN_TR 0x137
#define BTN_TL2 0x138
#define BTN_TR2 0x139
#define BTN_SELECT 0x13a
#define BTN_START 0x13b
#define BTN_MODE 0x13c
#define BTN_THUMBL 0x13d
#define BTN_THUMBR 0x13e
#define BTN_DIGI 0x140
#define BTN_TOOL_PEN 0x140
#define BTN_TOOL_RUBBER 0x141
#define BTN_TOOL_BRUSH 0x142
#define BTN_TOOL_PENCIL 0x143
#define BTN_TOOL_AIRBRUSH 0x144
#define BTN_TOOL_FINGER 0x145
#define BTN_TOOL_MOUSE 0x146
#define BTN_TOOL_LENS 0x147
#define BTN_TOOL_QUINTTAP 0x148 /* Five fingers on trackpad */
#define BTN_STYLUS3 0x149
#define BTN_TOUCH 0x14a
#define BTN_STYLUS 0x14b
#define BTN_STYLUS2 0x14c
#define BTN_TOOL_DOUBLETAP 0x14d
#define BTN_TOOL_TRIPLETAP 0x14e
#define BTN_TOOL_QUADTAP 0x14f /* Four fingers on trackpad */
#define BTN_WHEEL 0x150
#define BTN_GEAR_DOWN 0x150
#define BTN_GEAR_UP 0x151
#define KEY_OK 0x160
#define KEY_SELECT 0x161
#define KEY_GOTO 0x162
#define KEY_CLEAR 0x163
#define KEY_POWER2 0x164
#define KEY_OPTION 0x165
#define KEY_INFO 0x166 /* AL OEM Features/Tips/Tutorial */
#define KEY_TIME 0x167
#define KEY_VENDOR 0x168
#define KEY_ARCHIVE 0x169
#define KEY_PROGRAM 0x16a /* Media Select Program Guide */
#define KEY_CHANNEL 0x16b
#define KEY_FAVORITES 0x16c
#define KEY_EPG 0x16d
#define KEY_PVR 0x16e /* Media Select Home */
#define KEY_MHP 0x16f
#define KEY_LANGUAGE 0x170
#define KEY_TITLE 0x171
#define KEY_SUBTITLE 0x172
#define KEY_ANGLE 0x173
#define KEY_FULL_SCREEN 0x174 /* AC View Toggle */
#define KEY_ZOOM KEY_FULL_SCREEN
#define KEY_MODE 0x175
#define KEY_KEYBOARD 0x176
#define KEY_ASPECT_RATIO 0x177 /* HUTRR37: Aspect */
#define KEY_SCREEN KEY_ASPECT_RATIO
#define KEY_PC 0x178 /* Media Select Computer */
#define KEY_TV 0x179 /* Media Select TV */
#define KEY_TV2 0x17a /* Media Select Cable */
#define KEY_VCR 0x17b /* Media Select VCR */
#define KEY_VCR2 0x17c /* VCR Plus */
#define KEY_SAT 0x17d /* Media Select Satellite */
#define KEY_SAT2 0x17e
#define KEY_CD 0x17f /* Media Select CD */
#define KEY_TAPE 0x180 /* Media Select Tape */
#define KEY_RADIO 0x181
#define KEY_TUNER 0x182 /* Media Select Tuner */
#define KEY_PLAYER 0x183
#define KEY_TEXT 0x184
#define KEY_DVD 0x185 /* Media Select DVD */
#define KEY_AUX 0x186
#define KEY_MP3 0x187
#define KEY_AUDIO 0x188 /* AL Audio Browser */
#define KEY_VIDEO 0x189 /* AL Movie Browser */
#define KEY_DIRECTORY 0x18a
#define KEY_LIST 0x18b
#define KEY_MEMO 0x18c /* Media Select Messages */
#define KEY_CALENDAR 0x18d
#define KEY_RED 0x18e
#define KEY_GREEN 0x18f
#define KEY_YELLOW 0x190
#define KEY_BLUE 0x191
#define KEY_CHANNELUP 0x192 /* Channel Increment */
#define KEY_CHANNELDOWN 0x193 /* Channel Decrement */
#define KEY_FIRST 0x194
#define KEY_LAST 0x195 /* Recall Last */
#define KEY_AB 0x196
#define KEY_NEXT 0x197
#define KEY_RESTART 0x198
#define KEY_SLOW 0x199
#define KEY_SHUFFLE 0x19a
#define KEY_BREAK 0x19b
#define KEY_PREVIOUS 0x19c
#define KEY_DIGITS 0x19d
#define KEY_TEEN 0x19e
#define KEY_TWEN 0x19f
#define KEY_VIDEOPHONE 0x1a0 /* Media Select Video Phone */
#define KEY_GAMES 0x1a1 /* Media Select Games */
#define KEY_ZOOMIN 0x1a2 /* AC Zoom In */
#define KEY_ZOOMOUT 0x1a3 /* AC Zoom Out */
#define KEY_ZOOMRESET 0x1a4 /* AC Zoom */
#define KEY_WORDPROCESSOR 0x1a5 /* AL Word Processor */
#define KEY_EDITOR 0x1a6 /* AL Text Editor */
#define KEY_SPREADSHEET 0x1a7 /* AL Spreadsheet */
#define KEY_GRAPHICSEDITOR 0x1a8 /* AL Graphics Editor */
#define KEY_PRESENTATION 0x1a9 /* AL Presentation App */
#define KEY_DATABASE 0x1aa /* AL Database App */
#define KEY_NEWS 0x1ab /* AL Newsreader */
#define KEY_VOICEMAIL 0x1ac /* AL Voicemail */
#define KEY_ADDRESSBOOK 0x1ad /* AL Contacts/Address Book */
#define KEY_MESSENGER 0x1ae /* AL Instant Messaging */
#define KEY_DISPLAYTOGGLE 0x1af /* Turn display (LCD) on and off */
#define KEY_BRIGHTNESS_TOGGLE KEY_DISPLAYTOGGLE
#define KEY_SPELLCHECK 0x1b0 /* AL Spell Check */
#define KEY_LOGOFF 0x1b1 /* AL Logoff */
#define KEY_DOLLAR 0x1b2
#define KEY_EURO 0x1b3
#define KEY_FRAMEBACK 0x1b4 /* Consumer - transport controls */
#define KEY_FRAMEFORWARD 0x1b5
#define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */
#define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */
#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
#define KEY_IMAGES 0x1ba /* AL Image Browser */
#define KEY_NOTIFICATION_CENTER 0x1bc /* Show/hide the notification center */
#define KEY_PICKUP_PHONE 0x1bd /* Answer incoming call */
#define KEY_HANGUP_PHONE 0x1be /* Decline incoming call */
#define KEY_DEL_EOL 0x1c0
#define KEY_DEL_EOS 0x1c1
#define KEY_INS_LINE 0x1c2
#define KEY_DEL_LINE 0x1c3
#define KEY_FN 0x1d0
#define KEY_FN_ESC 0x1d1
#define KEY_FN_F1 0x1d2
#define KEY_FN_F2 0x1d3
#define KEY_FN_F3 0x1d4
#define KEY_FN_F4 0x1d5
#define KEY_FN_F5 0x1d6
#define KEY_FN_F6 0x1d7
#define KEY_FN_F7 0x1d8
#define KEY_FN_F8 0x1d9
#define KEY_FN_F9 0x1da
#define KEY_FN_F10 0x1db
#define KEY_FN_F11 0x1dc
#define KEY_FN_F12 0x1dd
#define KEY_FN_1 0x1de
#define KEY_FN_2 0x1df
#define KEY_FN_D 0x1e0
#define KEY_FN_E 0x1e1
#define KEY_FN_F 0x1e2
#define KEY_FN_S 0x1e3
#define KEY_FN_B 0x1e4
#define KEY_FN_RIGHT_SHIFT 0x1e5
#define KEY_BRL_DOT1 0x1f1
#define KEY_BRL_DOT2 0x1f2
#define KEY_BRL_DOT3 0x1f3
#define KEY_BRL_DOT4 0x1f4
#define KEY_BRL_DOT5 0x1f5
#define KEY_BRL_DOT6 0x1f6
#define KEY_BRL_DOT7 0x1f7
#define KEY_BRL_DOT8 0x1f8
#define KEY_BRL_DOT9 0x1f9
#define KEY_BRL_DOT10 0x1fa
#define KEY_NUMERIC_0 0x200 /* Used by phones, remote controls, and other keypads */
#define KEY_NUMERIC_1 0x201
#define KEY_NUMERIC_2 0x202
#define KEY_NUMERIC_3 0x203
#define KEY_NUMERIC_4 0x204
#define KEY_NUMERIC_5 0x205
#define KEY_NUMERIC_6 0x206
#define KEY_NUMERIC_7 0x207
#define KEY_NUMERIC_8 0x208
#define KEY_NUMERIC_9 0x209
#define KEY_NUMERIC_STAR 0x20a
#define KEY_NUMERIC_POUND 0x20b
#define KEY_NUMERIC_A 0x20c /* Phone key A - HUT Telephony 0xb9 */
#define KEY_NUMERIC_B 0x20d
#define KEY_NUMERIC_C 0x20e
#define KEY_NUMERIC_D 0x20f
#define KEY_CAMERA_FOCUS 0x210
#define KEY_WPS_BUTTON 0x211 /* WiFi Protected Setup key */
#define KEY_TOUCHPAD_TOGGLE 0x212 /* Request switch touchpad on or off */
#define KEY_TOUCHPAD_ON 0x213
#define KEY_TOUCHPAD_OFF 0x214
#define KEY_CAMERA_ZOOMIN 0x215
#define KEY_CAMERA_ZOOMOUT 0x216
#define KEY_CAMERA_UP 0x217
#define KEY_CAMERA_DOWN 0x218
#define KEY_CAMERA_LEFT 0x219
#define KEY_CAMERA_RIGHT 0x21a
#define KEY_ATTENDANT_ON 0x21b
#define KEY_ATTENDANT_OFF 0x21c
#define KEY_ATTENDANT_TOGGLE 0x21d /* Attendant call on or off */
#define KEY_LIGHTS_TOGGLE 0x21e /* Reading light on or off */
#define BTN_DPAD_UP 0x220
#define BTN_DPAD_DOWN 0x221
#define BTN_DPAD_LEFT 0x222
#define BTN_DPAD_RIGHT 0x223
#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
#define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */
#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */
#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */
#define KEY_JOURNAL 0x242 /* AL Log/Journal/Timecard */
#define KEY_CONTROLPANEL 0x243 /* AL Control Panel */
#define KEY_APPSELECT 0x244 /* AL Select Task/Application */
#define KEY_SCREENSAVER 0x245 /* AL Screen Saver */
#define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */
#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */
#define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */
#define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */
#define KEY_DICTATE 0x24a /* Start or Stop Voice Dictation Session (HUTRR99) */
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
#define KEY_KBDINPUTASSIST_PREV 0x260
#define KEY_KBDINPUTASSIST_NEXT 0x261
#define KEY_KBDINPUTASSIST_PREVGROUP 0x262
#define KEY_KBDINPUTASSIST_NEXTGROUP 0x263
#define KEY_KBDINPUTASSIST_ACCEPT 0x264
#define KEY_KBDINPUTASSIST_CANCEL 0x265
/* Diagonal movement keys */
#define KEY_RIGHT_UP 0x266
#define KEY_RIGHT_DOWN 0x267
#define KEY_LEFT_UP 0x268
#define KEY_LEFT_DOWN 0x269
#define KEY_ROOT_MENU 0x26a /* Show Device's Root Menu */
/* Show Top Menu of the Media (e.g. DVD) */
#define KEY_MEDIA_TOP_MENU 0x26b
#define KEY_NUMERIC_11 0x26c
#define KEY_NUMERIC_12 0x26d
/*
* Toggle Audio Description: refers to an audio service that helps blind and
* visually impaired consumers understand the action in a program. Note: in
* some countries this is referred to as "Video Description".
*/
#define KEY_AUDIO_DESC 0x26e
#define KEY_3D_MODE 0x26f
#define KEY_NEXT_FAVORITE 0x270
#define KEY_STOP_RECORD 0x271
#define KEY_PAUSE_RECORD 0x272
#define KEY_VOD 0x273 /* Video on Demand */
#define KEY_UNMUTE 0x274
#define KEY_FASTREVERSE 0x275
#define KEY_SLOWREVERSE 0x276
/*
* Control a data application associated with the currently viewed channel,
* e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
*/
#define KEY_DATA 0x277
#define KEY_ONSCREEN_KEYBOARD 0x278
/* Electronic privacy screen control */
#define KEY_PRIVACY_SCREEN_TOGGLE 0x279
/* Select an area of screen to be copied */
#define KEY_SELECTIVE_SCREENSHOT 0x27a
/* Move the focus to the next or previous user controllable element within a UI container */
#define KEY_NEXT_ELEMENT 0x27b
#define KEY_PREVIOUS_ELEMENT 0x27c
/* Toggle Autopilot engagement */
#define KEY_AUTOPILOT_ENGAGE_TOGGLE 0x27d
/* Shortcut Keys */
#define KEY_MARK_WAYPOINT 0x27e
#define KEY_SOS 0x27f
#define KEY_NAV_CHART 0x280
#define KEY_FISHING_CHART 0x281
#define KEY_SINGLE_RANGE_RADAR 0x282
#define KEY_DUAL_RANGE_RADAR 0x283
#define KEY_RADAR_OVERLAY 0x284
#define KEY_TRADITIONAL_SONAR 0x285
#define KEY_CLEARVU_SONAR 0x286
#define KEY_SIDEVU_SONAR 0x287
#define KEY_NAV_INFO 0x288
#define KEY_BRIGHTNESS_MENU 0x289
/*
* Some keyboards have keys which do not have a defined meaning, these keys
* are intended to be programmed / bound to macros by the user. For most
* keyboards with these macro-keys the key-sequence to inject, or action to
* take, is all handled by software on the host side. So from the kernel's
* point of view these are just normal keys.
*
* The KEY_MACRO# codes below are intended for such keys, which may be labeled
* e.g. G1-G18, or S1 - S30. The KEY_MACRO# codes MUST NOT be used for keys
* where the marking on the key does indicate a defined meaning / purpose.
*
* The KEY_MACRO# codes MUST also NOT be used as fallback for when no existing
* KEY_FOO define matches the marking / purpose. In this case a new KEY_FOO
* define MUST be added.
*/
#define KEY_MACRO1 0x290
#define KEY_MACRO2 0x291
#define KEY_MACRO3 0x292
#define KEY_MACRO4 0x293
#define KEY_MACRO5 0x294
#define KEY_MACRO6 0x295
#define KEY_MACRO7 0x296
#define KEY_MACRO8 0x297
#define KEY_MACRO9 0x298
#define KEY_MACRO10 0x299
#define KEY_MACRO11 0x29a
#define KEY_MACRO12 0x29b
#define KEY_MACRO13 0x29c
#define KEY_MACRO14 0x29d
#define KEY_MACRO15 0x29e
#define KEY_MACRO16 0x29f
#define KEY_MACRO17 0x2a0
#define KEY_MACRO18 0x2a1
#define KEY_MACRO19 0x2a2
#define KEY_MACRO20 0x2a3
#define KEY_MACRO21 0x2a4
#define KEY_MACRO22 0x2a5
#define KEY_MACRO23 0x2a6
#define KEY_MACRO24 0x2a7
#define KEY_MACRO25 0x2a8
#define KEY_MACRO26 0x2a9
#define KEY_MACRO27 0x2aa
#define KEY_MACRO28 0x2ab
#define KEY_MACRO29 0x2ac
#define KEY_MACRO30 0x2ad
/*
* Some keyboards with the macro-keys described above have some extra keys
* for controlling the host-side software responsible for the macro handling:
* -A macro recording start/stop key. Note that not all keyboards which emit
* KEY_MACRO_RECORD_START will also emit KEY_MACRO_RECORD_STOP if
* KEY_MACRO_RECORD_STOP is not advertised, then KEY_MACRO_RECORD_START
* should be interpreted as a recording start/stop toggle;
* -Keys for switching between different macro (pre)sets, either a key for
* cycling through the configured presets or keys to directly select a preset.
*/
#define KEY_MACRO_RECORD_START 0x2b0
#define KEY_MACRO_RECORD_STOP 0x2b1
#define KEY_MACRO_PRESET_CYCLE 0x2b2
#define KEY_MACRO_PRESET1 0x2b3
#define KEY_MACRO_PRESET2 0x2b4
#define KEY_MACRO_PRESET3 0x2b5
/*
 * Some keyboards have a built-in LCD panel where the contents are controlled
* by the host. Often these have a number of keys directly below the LCD
* intended for controlling a menu shown on the LCD. These keys often don't
* have any labeling so we just name them KEY_KBD_LCD_MENU#
*/
#define KEY_KBD_LCD_MENU1 0x2b8
#define KEY_KBD_LCD_MENU2 0x2b9
#define KEY_KBD_LCD_MENU3 0x2ba
#define KEY_KBD_LCD_MENU4 0x2bb
#define KEY_KBD_LCD_MENU5 0x2bc
#define BTN_TRIGGER_HAPPY 0x2c0
#define BTN_TRIGGER_HAPPY1 0x2c0
#define BTN_TRIGGER_HAPPY2 0x2c1
#define BTN_TRIGGER_HAPPY3 0x2c2
#define BTN_TRIGGER_HAPPY4 0x2c3
#define BTN_TRIGGER_HAPPY5 0x2c4
#define BTN_TRIGGER_HAPPY6 0x2c5
#define BTN_TRIGGER_HAPPY7 0x2c6
#define BTN_TRIGGER_HAPPY8 0x2c7
#define BTN_TRIGGER_HAPPY9 0x2c8
#define BTN_TRIGGER_HAPPY10 0x2c9
#define BTN_TRIGGER_HAPPY11 0x2ca
#define BTN_TRIGGER_HAPPY12 0x2cb
#define BTN_TRIGGER_HAPPY13 0x2cc
#define BTN_TRIGGER_HAPPY14 0x2cd
#define BTN_TRIGGER_HAPPY15 0x2ce
#define BTN_TRIGGER_HAPPY16 0x2cf
#define BTN_TRIGGER_HAPPY17 0x2d0
#define BTN_TRIGGER_HAPPY18 0x2d1
#define BTN_TRIGGER_HAPPY19 0x2d2
#define BTN_TRIGGER_HAPPY20 0x2d3
#define BTN_TRIGGER_HAPPY21 0x2d4
#define BTN_TRIGGER_HAPPY22 0x2d5
#define BTN_TRIGGER_HAPPY23 0x2d6
#define BTN_TRIGGER_HAPPY24 0x2d7
#define BTN_TRIGGER_HAPPY25 0x2d8
#define BTN_TRIGGER_HAPPY26 0x2d9
#define BTN_TRIGGER_HAPPY27 0x2da
#define BTN_TRIGGER_HAPPY28 0x2db
#define BTN_TRIGGER_HAPPY29 0x2dc
#define BTN_TRIGGER_HAPPY30 0x2dd
#define BTN_TRIGGER_HAPPY31 0x2de
#define BTN_TRIGGER_HAPPY32 0x2df
#define BTN_TRIGGER_HAPPY33 0x2e0
#define BTN_TRIGGER_HAPPY34 0x2e1
#define BTN_TRIGGER_HAPPY35 0x2e2
#define BTN_TRIGGER_HAPPY36 0x2e3
#define BTN_TRIGGER_HAPPY37 0x2e4
#define BTN_TRIGGER_HAPPY38 0x2e5
#define BTN_TRIGGER_HAPPY39 0x2e6
#define BTN_TRIGGER_HAPPY40 0x2e7
/* We avoid low common keys in module aliases so they don't get huge. */
#define KEY_MIN_INTERESTING KEY_MUTE
#define KEY_MAX 0x2ff
#define KEY_CNT (KEY_MAX + 1)
/* Relative axes */
#define REL_X 0x00
#define REL_Y 0x01
#define REL_Z 0x02
#define REL_RX 0x03
#define REL_RY 0x04
#define REL_RZ 0x05
#define REL_HWHEEL 0x06
#define REL_DIAL 0x07
#define REL_WHEEL 0x08
#define REL_MISC 0x09
/*
* 0x0a is reserved and should not be used in input drivers.
* It was used by HID as REL_MISC + 1 and userspace needs to detect if
* the next REL_* event is correct or is just REL_MISC + n.
* We define here REL_RESERVED so userspace can rely on it and detect
* the situation described above.
*/
#define REL_RESERVED 0x0a
#define REL_WHEEL_HI_RES 0x0b
#define REL_HWHEEL_HI_RES 0x0c
#define REL_MAX 0x0f
#define REL_CNT (REL_MAX + 1)
/* Absolute axes */
#define ABS_X 0x00
#define ABS_Y 0x01
#define ABS_Z 0x02
#define ABS_RX 0x03
#define ABS_RY 0x04
#define ABS_RZ 0x05
#define ABS_THROTTLE 0x06
#define ABS_RUDDER 0x07
#define ABS_WHEEL 0x08
#define ABS_GAS 0x09
#define ABS_BRAKE 0x0a
#define ABS_HAT0X 0x10
#define ABS_HAT0Y 0x11
#define ABS_HAT1X 0x12
#define ABS_HAT1Y 0x13
#define ABS_HAT2X 0x14
#define ABS_HAT2Y 0x15
#define ABS_HAT3X 0x16
#define ABS_HAT3Y 0x17
#define ABS_PRESSURE 0x18
#define ABS_DISTANCE 0x19
#define ABS_TILT_X 0x1a
#define ABS_TILT_Y 0x1b
#define ABS_TOOL_WIDTH 0x1c
#define ABS_VOLUME 0x20
#define ABS_MISC 0x28
/*
* 0x2e is reserved and should not be used in input drivers.
 * It was used by HID as ABS_MISC + 6, and userspace needs to detect whether
 * the next ABS_* event is genuine or just ABS_MISC + n.
 * ABS_RESERVED is defined here so userspace can rely on it to detect
 * that situation.
*/
#define ABS_RESERVED 0x2e
#define ABS_MT_SLOT 0x2f /* MT slot being modified */
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
#define ABS_MT_WIDTH_MAJOR 0x32 /* Major axis of approaching ellipse */
#define ABS_MT_WIDTH_MINOR 0x33 /* Minor axis (omit if circular) */
#define ABS_MT_ORIENTATION 0x34 /* Ellipse orientation */
#define ABS_MT_POSITION_X 0x35 /* Center X touch position */
#define ABS_MT_POSITION_Y 0x36 /* Center Y touch position */
#define ABS_MT_TOOL_TYPE 0x37 /* Type of touching device */
#define ABS_MT_BLOB_ID 0x38 /* Group a set of packets as a blob */
#define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */
#define ABS_MT_PRESSURE 0x3a /* Pressure on contact area */
#define ABS_MT_DISTANCE 0x3b /* Contact hover distance */
#define ABS_MT_TOOL_X 0x3c /* Center X tool position */
#define ABS_MT_TOOL_Y 0x3d /* Center Y tool position */
#define ABS_MAX 0x3f
#define ABS_CNT (ABS_MAX + 1)
/* Switch events */
#define SW_LID 0x00 /* Set = lid shut */
#define SW_TABLET_MODE 0x01 /* Set = tablet mode */
#define SW_HEADPHONE_INSERT 0x02 /* Set = inserted */
#define SW_RFKILL_ALL 0x03 /* rfkill master switch, type "any" set = radio enabled */
#define SW_RADIO SW_RFKILL_ALL /* deprecated */
#define SW_MICROPHONE_INSERT 0x04 /* Set = inserted */
#define SW_DOCK 0x05 /* Set = plugged into dock */
#define SW_LINEOUT_INSERT 0x06 /* Set = inserted */
#define SW_JACK_PHYSICAL_INSERT 0x07 /* Set = mechanical switch set */
#define SW_VIDEOOUT_INSERT 0x08 /* Set = inserted */
#define SW_CAMERA_LENS_COVER 0x09 /* Set = lens covered */
#define SW_KEYPAD_SLIDE 0x0a /* Set = keypad slide out */
#define SW_FRONT_PROXIMITY 0x0b /* Set = front proximity sensor active */
#define SW_ROTATE_LOCK 0x0c /* Set = rotate locked/disabled */
#define SW_LINEIN_INSERT 0x0d /* Set = inserted */
#define SW_MUTE_DEVICE 0x0e /* Set = device disabled */
#define SW_PEN_INSERTED 0x0f /* Set = pen inserted */
#define SW_MACHINE_COVER 0x10 /* Set = cover closed */
#define SW_MAX 0x10
#define SW_CNT (SW_MAX + 1)
/* Misc events */
#define MSC_SERIAL 0x00
#define MSC_PULSELED 0x01
#define MSC_GESTURE 0x02
#define MSC_RAW 0x03
#define MSC_SCAN 0x04
#define MSC_TIMESTAMP 0x05
#define MSC_MAX 0x07
#define MSC_CNT (MSC_MAX + 1)
/* LEDs */
#define LED_NUML 0x00
#define LED_CAPSL 0x01
#define LED_SCROLLL 0x02
#define LED_COMPOSE 0x03
#define LED_KANA 0x04
#define LED_SLEEP 0x05
#define LED_SUSPEND 0x06
#define LED_MUTE 0x07
#define LED_MISC 0x08
#define LED_MAIL 0x09
#define LED_CHARGING 0x0a
#define LED_MAX 0x0f
#define LED_CNT (LED_MAX + 1)
/* Autorepeat values */
#define REP_DELAY 0x00
#define REP_PERIOD 0x01
#define REP_MAX 0x01
#define REP_CNT (REP_MAX + 1)
/* Sounds */
#define SND_CLICK 0x00
#define SND_BELL 0x01
#define SND_TONE 0x02
#define SND_MAX 0x07
#define SND_CNT (SND_MAX + 1)
#endif /* __VIRTIO_INPUT_EVENT_CODES__ */
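
The recording-key rule spelled out near the KEY_MACRO_RECORD_* definitions above (KEY_MACRO_RECORD_START doubles as a start/stop toggle whenever KEY_MACRO_RECORD_STOP is not advertised) is easy to implement backwards, so a minimal sketch follows. The handler function, the stop_advertised flag and the header name in the include are hypothetical illustrations; only the KEY_MACRO_RECORD_* codes come from the header above.

#include <rtdef.h>
#include <virtio_input_event_codes.h>   /* assumed name of the header above */

/* Hypothetical handler: 'stop_advertised' reflects whether the keyboard
 * reports KEY_MACRO_RECORD_STOP in its supported key set. */
static rt_bool_t macro_recording = RT_FALSE;

static void handle_macro_record_key(rt_uint16_t code, rt_bool_t stop_advertised)
{
    if (code == KEY_MACRO_RECORD_START)
    {
        if (stop_advertised)
        {
            macro_recording = RT_TRUE;          /* a dedicated stop key will end it */
        }
        else
        {
            macro_recording = !macro_recording; /* start key acts as a toggle */
        }
    }
    else if (code == KEY_MACRO_RECORD_STOP)
    {
        macro_recording = RT_FALSE;
    }
}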

View file

@ -0,0 +1,67 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-9-16 GuEe-GUI the first version
* 2021-11-11 GuEe-GUI modify to virtio common interface
*/
#ifndef __VIRTIO_MMIO_H__
#define __VIRTIO_MMIO_H__
#include <rtdef.h>
struct virtio_mmio_config
{
rt_uint32_t magic; /* [0x00]<RO> Magic value */
rt_uint32_t version; /* [0x04]<RO> Device version number */
rt_uint32_t device_id; /* [0x08]<RO> Virtio Subsystem Device ID */
rt_uint32_t vendor_id; /* [0x0c]<RO> Virtio Subsystem Vendor ID */
rt_uint32_t device_features; /* [0x10]<RO> Flags representing features the device supports */
rt_uint32_t device_features_sel; /* [0x14]<WO> Device (host) features word selection. */
rt_uint32_t res0[2]; /* [0x18] */
rt_uint32_t driver_features; /* [0x20]<WO> Device features understood and activated by the driver */
rt_uint32_t driver_features_sel; /* [0x24]<WO> Activated (guest) features word selection */
    rt_uint32_t guest_page_size; /* [0x28]<WO> Guest page size; must be a power of 2 */
rt_uint32_t res1[1]; /* [0x2c] */
rt_uint32_t queue_sel; /* [0x30]<WO> Virtual queue index */
rt_uint32_t queue_num_max; /* [0x34]<RO> Maximum virtual queue size */
rt_uint32_t queue_num; /* [0x38]<WO> Virtual queue size */
rt_uint32_t queue_align; /* [0x3c]<WO> Used Ring alignment in the virtual queue */
rt_uint32_t queue_pfn; /* [0x40]<RW> Guest physical page number of the virtual queue */
rt_uint32_t queue_ready; /* [0x44]<RW> Virtual queue ready bit */
rt_uint32_t res2[2]; /* [0x48] */
rt_uint32_t queue_notify; /* [0x50]<WO> Queue notifier */
rt_uint32_t res3[3]; /* [0x54] */
rt_uint32_t interrupt_status; /* [0x60]<RO> Interrupt status */
rt_uint32_t interrupt_ack; /* [0x64]<WO> Interrupt acknowledge */
rt_uint32_t res4[2]; /* [0x68] */
rt_uint32_t status; /* [0x70]<RW> Device status */
rt_uint32_t res5[3]; /* [0x74] */
    rt_uint32_t queue_desc_low; /* [0x80]<WO> Virtual queue's Descriptor Area: 64-bit physical address, low 32 bits */
    rt_uint32_t queue_desc_high; /* [0x84]<WO> Virtual queue's Descriptor Area: 64-bit physical address, high 32 bits */
    rt_uint32_t res6[2]; /* [0x88] */
    rt_uint32_t queue_driver_low; /* [0x90]<WO> Virtual queue's Driver Area: 64-bit physical address, low 32 bits */
    rt_uint32_t queue_driver_high; /* [0x94]<WO> Virtual queue's Driver Area: 64-bit physical address, high 32 bits */
    rt_uint32_t res7[2]; /* [0x98] */
    rt_uint32_t queue_device_low; /* [0xa0]<WO> Virtual queue's Device Area: 64-bit physical address, low 32 bits */
    rt_uint32_t queue_device_high; /* [0xa4]<WO> Virtual queue's Device Area: 64-bit physical address, high 32 bits */
rt_uint32_t res8[21]; /* [0xa8] */
rt_uint32_t config_generation; /* [0xfc]<RO> Configuration atomicity value */
rt_uint32_t config[]; /* [0x100+]<RO> Configuration space */
/*
 * Ideally the compiler would be forced not to pad this structure, but packing
 * it makes some compilers emit byte-wise memory accesses that fail on MMIO
 * registers. So whether the structure is packed is left as a user choice via
 * RT_USING_VIRTIO_MMIO_ALIGN.
*/
#ifdef RT_USING_VIRTIO_MMIO_ALIGN
} __attribute__((packed));
#else
};
#endif
#endif /* __VIRTIO_MMIO_H__ */
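
As a rough illustration of how this register block is consumed, the sketch below checks a candidate MMIO window before treating it as a device. The magic value 0x74726976 ("virt") and device ID 1 for a network card come from the virtio specification; the probe wrapper itself and the way the base address is obtained are assumptions, not part of this driver.

#include <rtthread.h>
#include <virtio_mmio.h>

#define VIRTIO_MMIO_MAGIC_VALUE  0x74726976 /* "virt", little-endian ASCII */
#define VIRTIO_DEVICE_ID_NET     1          /* network card, per the virtio spec */

/* Hypothetical probe: returns RT_TRUE if 'base' looks like a virtio-mmio
 * network device that a driver could attach to. */
static rt_bool_t virtio_mmio_probe_net(void *base)
{
    struct virtio_mmio_config *cfg = (struct virtio_mmio_config *)base;

    if (cfg->magic != VIRTIO_MMIO_MAGIC_VALUE)
    {
        return RT_FALSE;    /* no virtio transport behind this window */
    }
    if (cfg->device_id != VIRTIO_DEVICE_ID_NET)
    {
        return RT_FALSE;    /* transport present, but not a network card */
    }

    rt_kprintf("virtio-mmio v%u net device, vendor 0x%08x\n",
               cfg->version, cfg->vendor_id);

    return RT_TRUE;
}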

View file

@ -0,0 +1,315 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-11 GuEe-GUI the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <cpuport.h>
#include <mm_aspace.h>
#ifdef RT_USING_VIRTIO_NET
#include <virtio_net.h>
static rt_err_t virtio_net_tx(rt_device_t dev, struct pbuf *p)
{
rt_uint16_t id;
struct virtio_net_device *virtio_net_dev = (struct virtio_net_device *)dev;
struct virtio_device *virtio_dev = &virtio_net_dev->virtio_dev;
struct virtq *queue_tx = &virtio_dev->queues[VIRTIO_NET_QUEUE_TX];
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
id = (queue_tx->avail->idx * 2) % queue_tx->num;
virtio_net_dev->info[id].hdr.flags = 0;
virtio_net_dev->info[id].hdr.gso_type = 0;
virtio_net_dev->info[id].hdr.hdr_len = 0;
virtio_net_dev->info[id].hdr.gso_size = 0;
virtio_net_dev->info[id].hdr.csum_start = 0;
virtio_net_dev->info[id].hdr.csum_offset = 0;
virtio_net_dev->info[id].hdr.num_buffers = 0;
    pbuf_copy_partial(p, virtio_net_dev->info[id].tx_buffer, p->tot_len, 0);
virtio_free_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id);
virtio_free_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id + 1);
virtio_fill_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id,
VIRTIO_VA2PA(&virtio_net_dev->info[id].hdr), VIRTIO_NET_HDR_SIZE, VIRTQ_DESC_F_NEXT, id + 1);
virtio_fill_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id + 1,
            VIRTIO_VA2PA(virtio_net_dev->info[id].tx_buffer), p->tot_len, 0, 0);
virtio_submit_chain(virtio_dev, VIRTIO_NET_QUEUE_TX, id);
virtio_queue_notify(virtio_dev, VIRTIO_NET_QUEUE_TX);
virtio_alloc_desc(virtio_dev, VIRTIO_NET_QUEUE_TX);
virtio_alloc_desc(virtio_dev, VIRTIO_NET_QUEUE_TX);
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
return RT_EOK;
}
static struct pbuf *virtio_net_rx(rt_device_t dev)
{
rt_uint16_t id;
rt_uint32_t len;
struct pbuf *p = RT_NULL, *new, *ret = RT_NULL;
struct virtio_net_device *virtio_net_dev = (struct virtio_net_device *)dev;
struct virtio_device *virtio_dev = &virtio_net_dev->virtio_dev;
struct virtq *queue_rx = &virtio_dev->queues[VIRTIO_NET_QUEUE_RX];
while (queue_rx->used_idx != queue_rx->used->idx)
{
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
id = (queue_rx->used->ring[queue_rx->used_idx % queue_rx->num].id + 1) % queue_rx->num;
len = queue_rx->used->ring[queue_rx->used_idx % queue_rx->num].len - VIRTIO_NET_HDR_SIZE;
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
if (len > VIRTIO_NET_PAYLOAD_MAX_SIZE)
{
            rt_kprintf("%s: received frame length %u exceeds the receive buffer, truncating\n", virtio_net_dev->parent.parent.parent.name, len);
len = VIRTIO_NET_PAYLOAD_MAX_SIZE;
}
new = pbuf_alloc(PBUF_RAW, len, PBUF_RAM);
if (p != RT_NULL)
{
p->next = new;
p = p->next;
}
else
{
p = new;
ret = p;
}
if (p != RT_NULL)
{
#ifdef RT_USING_SMP
level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
rt_memcpy(p->payload, (void *)queue_rx->desc[id].addr - PV_OFFSET, len);
queue_rx->used_idx++;
virtio_submit_chain(virtio_dev, VIRTIO_NET_QUEUE_RX, id - 1);
virtio_queue_notify(virtio_dev, VIRTIO_NET_QUEUE_RX);
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}
else
{
break;
}
}
return ret;
}
static rt_err_t virtio_net_init(rt_device_t dev)
{
int i;
rt_uint16_t idx[VIRTIO_NET_RTX_QUEUE_SIZE];
struct virtio_net_device *virtio_net_dev = (struct virtio_net_device *)dev;
struct virtio_device *virtio_dev = &virtio_net_dev->virtio_dev;
struct virtq *queue_rx, *queue_tx;
queue_rx = &virtio_dev->queues[VIRTIO_NET_QUEUE_RX];
queue_tx = &virtio_dev->queues[VIRTIO_NET_QUEUE_TX];
virtio_alloc_desc_chain(virtio_dev, VIRTIO_NET_QUEUE_RX, queue_rx->num, idx);
virtio_alloc_desc_chain(virtio_dev, VIRTIO_NET_QUEUE_TX, queue_tx->num, idx);
for (i = 0; i < queue_rx->num; ++i)
{
rt_uint16_t id = (i * 2) % queue_rx->num;
        void *addr = virtio_net_dev->info[i].rx_buffer;
/* Descriptor for net_hdr */
virtio_fill_desc(virtio_dev, VIRTIO_NET_QUEUE_RX, id,
VIRTIO_VA2PA(addr), VIRTIO_NET_HDR_SIZE, VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE, id + 1);
/* Descriptor for data */
virtio_fill_desc(virtio_dev, VIRTIO_NET_QUEUE_RX, id + 1,
VIRTIO_VA2PA(addr) + VIRTIO_NET_HDR_SIZE, VIRTIO_NET_MSS, VIRTQ_DESC_F_WRITE, 0);
queue_rx->avail->ring[i] = id;
}
rt_hw_dsb();
queue_rx->avail->flags = 0;
queue_rx->avail->idx = queue_rx->num;
queue_rx->used_idx = queue_rx->used->idx;
queue_tx->avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
queue_tx->avail->idx = 0;
virtio_queue_notify(virtio_dev, VIRTIO_NET_QUEUE_RX);
return eth_device_linkchange(&virtio_net_dev->parent, RT_TRUE);
}
static rt_err_t virtio_net_control(rt_device_t dev, int cmd, void *args)
{
rt_err_t status = RT_EOK;
struct virtio_net_device *virtio_net_dev = (struct virtio_net_device *)dev;
switch (cmd)
{
case NIOCTL_GADDR:
if (args == RT_NULL)
{
status = -RT_ERROR;
break;
}
rt_memcpy(args, virtio_net_dev->config->mac, sizeof(virtio_net_dev->config->mac));
break;
default:
status = -RT_EINVAL;
break;
}
return status;
}
#ifdef RT_USING_DEVICE_OPS
static const struct rt_device_ops virtio_net_ops =
{
virtio_net_init,
RT_NULL,
RT_NULL,
RT_NULL,
RT_NULL,
virtio_net_control
};
#endif
static void virtio_net_isr(int irqno, void *param)
{
struct virtio_net_device *virtio_net_dev = (struct virtio_net_device *)param;
struct virtio_device *virtio_dev = &virtio_net_dev->virtio_dev;
struct virtq *queue_rx = &virtio_dev->queues[VIRTIO_NET_QUEUE_RX];
#ifdef RT_USING_SMP
rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
#endif
virtio_interrupt_ack(virtio_dev);
rt_hw_dsb();
if (queue_rx->used_idx != queue_rx->used->idx)
{
rt_hw_dsb();
eth_device_ready(&virtio_net_dev->parent);
}
#ifdef RT_USING_SMP
rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level);
#endif
}
rt_err_t rt_virtio_net_init(rt_ubase_t *mmio_base, rt_uint32_t irq)
{
static int dev_no = 0;
char dev_name[RT_NAME_MAX];
struct virtio_device *virtio_dev;
struct virtio_net_device *virtio_net_dev;
virtio_net_dev = rt_malloc(sizeof(struct virtio_net_device));
if (virtio_net_dev == RT_NULL)
{
goto _alloc_fail;
}
virtio_dev = &virtio_net_dev->virtio_dev;
virtio_dev->irq = irq;
virtio_dev->mmio_base = mmio_base;
virtio_net_dev->config = (struct virtio_net_config *)virtio_dev->mmio_config->config;
#ifdef RT_USING_SMP
rt_spin_lock_init(&virtio_dev->spinlock);
#endif
virtio_reset_device(virtio_dev);
virtio_status_acknowledge_driver(virtio_dev);
virtio_dev->mmio_config->driver_features = virtio_dev->mmio_config->device_features & ~(
(1 << VIRTIO_NET_F_CTRL_VQ) |
(1 << VIRTIO_F_RING_EVENT_IDX));
virtio_status_driver_ok(virtio_dev);
if (virtio_queues_alloc(virtio_dev, 2) != RT_EOK)
{
goto _alloc_fail;
}
if (virtio_queue_init(virtio_dev, VIRTIO_NET_QUEUE_RX, VIRTIO_NET_RTX_QUEUE_SIZE) != RT_EOK)
{
goto _alloc_fail;
}
if (virtio_queue_init(virtio_dev, VIRTIO_NET_QUEUE_TX, VIRTIO_NET_RTX_QUEUE_SIZE) != RT_EOK)
{
virtio_queue_destroy(virtio_dev, VIRTIO_NET_QUEUE_RX);
goto _alloc_fail;
}
virtio_net_dev->parent.parent.type = RT_Device_Class_NetIf;
#ifdef RT_USING_DEVICE_OPS
virtio_net_dev->parent.parent.ops = &virtio_net_ops;
#else
virtio_net_dev->parent.parent.init = virtio_net_init;
virtio_net_dev->parent.parent.open = RT_NULL;
virtio_net_dev->parent.parent.close = RT_NULL;
virtio_net_dev->parent.parent.read = RT_NULL;
virtio_net_dev->parent.parent.write = RT_NULL;
virtio_net_dev->parent.parent.control = virtio_net_control;
#endif
virtio_net_dev->parent.eth_tx = virtio_net_tx;
virtio_net_dev->parent.eth_rx = virtio_net_rx;
rt_snprintf(dev_name, RT_NAME_MAX, "virtio-net%d", dev_no++);
rt_hw_interrupt_install(irq, virtio_net_isr, virtio_net_dev, dev_name);
rt_hw_interrupt_umask(irq);
return eth_device_init(&virtio_net_dev->parent, dev_name);
_alloc_fail:
if (virtio_net_dev != RT_NULL)
{
virtio_queues_free(virtio_dev);
rt_free(virtio_net_dev);
}
return -RT_ENOMEM;
}
#endif /* RT_USING_VIRTIO_NET */
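
For context, rt_virtio_net_init() above expects a board port to hand it an MMIO window and an interrupt number; a minimal sketch of that call is shown below. The base address and IRQ are placeholders for whatever the platform's memory map or device tree provides, and the auto-initialization macro is only one of several ways to hook the call in.

#include <rtthread.h>
#include <virtio_net.h>

/* Placeholder values: replace with the transport actually present on the
 * target platform (these numbers are illustrative, not taken from this commit). */
#define PLAT_VIRTIO_NET_MMIO_BASE   0x10001000UL
#define PLAT_VIRTIO_NET_IRQ         1

static int plat_virtio_net_setup(void)
{
    return (int)rt_virtio_net_init((rt_ubase_t *)PLAT_VIRTIO_NET_MMIO_BASE,
                                   PLAT_VIRTIO_NET_IRQ);
}
INIT_DEVICE_EXPORT(plat_virtio_net_setup);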

View file

@ -0,0 +1,119 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-11 GuEe-GUI the first version
*/
#ifndef __VIRTIO_NET_H__
#define __VIRTIO_NET_H__
#ifdef RT_USING_VIRTIO_NET
#include <rtdef.h>
#include <netif/ethernetif.h>
#include <virtio.h>
#define VIRTIO_NET_QUEUE_RX 0
#define VIRTIO_NET_QUEUE_TX 1
#define VIRTIO_NET_RTX_QUEUE_SIZE 16
#define VIRTIO_NET_RTX_BUF_SIZE 2048
#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration */
#define VIRTIO_NET_F_MTU 3 /* Initial MTU advice */
#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address */
#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in */
#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in */
#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in */
#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in */
#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in */
#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in */
#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in */
#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in */
#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */
#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the network */
#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
#define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */
#define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */
#define VIRTIO_NET_F_RSC_EXT 61 /* Extended coalescing info */
#define VIRTIO_NET_F_STANDBY 62 /* Act as standby for another device with the same MAC */
#define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device sets link speed and duplex */
#define VIRTIO_NET_S_LINK_UP (1 << 0)
#define VIRTIO_NET_S_ANNOUNCE (1 << 1)
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1
#define VIRTIO_NET_HDR_F_DATA_VALID 2
#define VIRTIO_NET_HDR_F_RSC_INFO 4
#define VIRTIO_NET_HDR_GSO_NONE 0
#define VIRTIO_NET_HDR_GSO_TCPV4 1
#define VIRTIO_NET_HDR_GSO_UDP 3
#define VIRTIO_NET_HDR_GSO_TCPV6 4
#define VIRTIO_NET_HDR_GSO_ECN 0x80
struct virtio_net_hdr
{
rt_uint8_t flags;
rt_uint8_t gso_type;
rt_uint16_t hdr_len;
rt_uint16_t gso_size;
rt_uint16_t csum_start;
rt_uint16_t csum_offset;
rt_uint16_t num_buffers;
} __attribute__ ((packed));
#define VIRTIO_NET_MSS 1514
#define VIRTIO_NET_HDR_SIZE (sizeof(struct virtio_net_hdr))
#define VIRTIO_NET_PAYLOAD_MAX_SIZE (VIRTIO_NET_HDR_SIZE + VIRTIO_NET_MSS)
struct virtio_net_config
{
rt_uint8_t mac[6];
rt_uint16_t status;
rt_uint16_t max_virtqueue_pairs;
rt_uint16_t mtu;
rt_uint32_t speed;
rt_uint8_t duplex;
rt_uint8_t rss_max_key_size;
rt_uint16_t rss_max_indirection_table_length;
rt_uint32_t supported_hash_types;
} __attribute__((packed));
struct virtio_net_device
{
struct eth_device parent;
struct virtio_device virtio_dev;
struct virtio_net_config *config;
struct
{
/* Transmit hdr */
struct virtio_net_hdr hdr;
/* Transmit buffer */
rt_uint8_t tx_buffer[VIRTIO_NET_PAYLOAD_MAX_SIZE];
/* Receive buffer */
rt_uint8_t rx_buffer[VIRTIO_NET_PAYLOAD_MAX_SIZE];
} info[VIRTIO_NET_RTX_QUEUE_SIZE];
};
rt_err_t rt_virtio_net_init(rt_ubase_t *mmio_base, rt_uint32_t irq);
#endif /* RT_USING_VIRTIO_NET */
#endif /* __VIRTIO_NET_H__ */
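
Because the driver's control hook answers NIOCTL_GADDR with the six-byte MAC taken from virtio_net_config, application code can read the address through the generic device interface, as in the sketch below. Obtaining the rt_device_t handle is left to the caller (the registered name depends on RT_NAME_MAX truncation), so the lookup is deliberately not shown and the wrapper is only an illustration.

#include <rtthread.h>
#include <netif/ethernetif.h>   /* NIOCTL_GADDR */

/* Sketch: 'dev' is assumed to be the device registered by rt_virtio_net_init(). */
static rt_err_t virtio_net_read_mac(rt_device_t dev, rt_uint8_t mac[6])
{
    if (dev == RT_NULL)
    {
        return -RT_EINVAL;
    }

    /* Dispatches to the driver's control handler, which copies config->mac. */
    return rt_device_control(dev, NIOCTL_GADDR, mac);
}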

View file

@ -0,0 +1,97 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-11 GuEe-GUI the first version
*/
#ifndef __VIRTIO_QUEUE_H__
#define __VIRTIO_QUEUE_H__
#include <rtdef.h>
#define VIRTQ_DESC_F_NEXT 1 /* This marks a buffer as continuing via the next field. */
#define VIRTQ_DESC_F_WRITE 2 /* This marks a buffer as write-only (otherwise read-only). */
#define VIRTQ_DESC_F_INDIRECT 4 /* This means the buffer contains a list of buffer descriptors. */
/*
* The device uses this in used->flags to advise the driver: don't kick me
* when you add a buffer. It's unreliable, so it's simply an optimization.
*/
#define VIRTQ_USED_F_NO_NOTIFY 1
/*
* The driver uses this in avail->flags to advise the device: don't
* interrupt me when you consume a buffer. It's unreliable, so it's
* simply an optimization.
*/
#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
/* Virtqueue descriptors: 16 bytes. These can chain together via "next". */
struct virtq_desc
{
rt_uint64_t addr; /* Address (guest-physical). */
rt_uint32_t len; /* Length. */
rt_uint16_t flags; /* The flags as indicated above. */
rt_uint16_t next; /* We chain unused descriptors via this, too */
};
struct virtq_avail
{
rt_uint16_t flags; /* Notifications */
rt_uint16_t idx; /* Where the driver would put the next descriptor entry in the ring (modulo the queue size) */
rt_uint16_t ring[];
/*
* Only if VIRTIO_F_RING_EVENT_IDX
* rt_uint16_t used_event;
*/
};
struct virtq_used_elem
{
rt_uint32_t id; /* Index of start of used descriptor chain. */
rt_uint32_t len; /* Total length of the descriptor chain which was written to. */
};
struct virtq_used
{
rt_uint16_t flags;
rt_uint16_t idx;
struct virtq_used_elem ring[];
/*
* Only if VIRTIO_F_RING_EVENT_IDX
* rt_uint16_t avail_event;
*/
};
struct virtq
{
rt_uint32_t num;
struct virtq_desc *desc;
struct virtq_avail *avail;
struct virtq_used *used;
/* Helper of driver */
rt_uint16_t used_idx;
rt_bool_t *free;
rt_size_t free_count;
};
#define VIRTQ_DESC_TOTAL_SIZE(ring_size) (sizeof(struct virtq_desc) * (ring_size))
/* flags, idx, used_event + ring * ring_size */
#define VIRTQ_AVAIL_TOTAL_SIZE(ring_size) (sizeof(rt_uint16_t) * 3 + sizeof(rt_uint16_t) * (ring_size))
/* flags, idx, avail_event + ring * ring_size */
#define VIRTQ_USED_TOTAL_SIZE(ring_size) (sizeof(rt_uint16_t) * 3 + sizeof(struct virtq_used_elem) * (ring_size))
#define VIRTQ_AVAIL_RES_SIZE (sizeof(rt_uint16_t)) /* used_event */
#define VIRTQ_USED_RES_SIZE (sizeof(rt_uint16_t)) /* avail_event */
#define VIRTQ_INVALID_DESC_ID RT_UINT16_MAX
#endif /* __VIRTIO_QUEUE_H__ */
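
To make the size macros above concrete, the sketch below computes the split-ring layout for a given queue depth, following the legacy virtio-mmio convention in which the descriptor table and available ring share one page-aligned region and the used ring starts at the next page boundary. VIRTIO_PAGE_ALIGN is assumed to be provided by virtio.h.

#include <rtthread.h>
#include <virtio.h>         /* VIRTIO_PAGE_ALIGN (assumed to live here) */
#include <virtio_queue.h>

static void virtq_print_layout(rt_size_t ring_size)
{
    rt_size_t desc_size  = VIRTQ_DESC_TOTAL_SIZE(ring_size);   /* 16 B per entry      */
    rt_size_t avail_size = VIRTQ_AVAIL_TOTAL_SIZE(ring_size);  /* 2 B per entry + 6 B */
    rt_size_t used_size  = VIRTQ_USED_TOTAL_SIZE(ring_size);   /* 8 B per entry + 6 B */

    /* Descriptor table and available ring share the first page-aligned block,
     * the used ring follows on the next page boundary (legacy layout). */
    rt_size_t total = VIRTIO_PAGE_ALIGN(desc_size + avail_size) + used_size;

    rt_kprintf("virtq(%d): desc=%d avail=%d used=%d total=%d bytes\n",
               (int)ring_size, (int)desc_size, (int)avail_size,
               (int)used_size, (int)total);
}

For the 16-entry queues used by the network driver, the macros give 256 bytes of descriptors, 38 bytes of available ring and 134 bytes of used ring before page alignment.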