import RT-Thread@9217865c without bsp, libcpu and components/net

This commit is contained in:
Zihao Yu 2023-05-20 16:23:33 +08:00
commit e2376a3709
1414 changed files with 390370 additions and 0 deletions

15
components/vmm/SConscript Normal file
View file

@ -0,0 +1,15 @@
# RT-Thread building script for component
#
# Builds the VMM component sources when RT_USING_VMM is enabled;
# optionally pulls in the remote file system helper (RT_USING_VMM_RFS).
import os

from building import *

cwd = GetCurrentDir()
src = Glob('*.c')
src += Glob('utilities/rshell.c')

if GetDepend('RT_USING_VMM_RFS'):
    src += Glob('utilities/rfs.c')

# export both the component directory and the guest/host shared headers;
# 'os' is imported explicitly above instead of relying on the star-import
# from 'building' to re-export it
CPPPATH = [cwd, os.path.join(cwd, 'share_hdr')]

group = DefineGroup('VMM', src, depend = ['RT_USING_VMM'], CPPPATH = CPPPATH)

Return('group')

View file

@ -0,0 +1,37 @@
From 848bdea67f5fc201cd05687f207e5f8f42b0990d Mon Sep 17 00:00:00 2001
From: Grissiom <chaos.proton@gmail.com>
Date: Thu, 3 Apr 2014 16:51:58 +0800
Subject: [PATCH 2/2] arm: gic: correct the cpu map on gic_raise_softirq for UP
system
The CPU mask on UP system is empty, so if we want to raise softirq on UP
system, designate CPU0 to the map.
Maybe the more correct way is to fix the gic_get_cpumask.
Signed-off-by: Grissiom <chaos.proton@gmail.com>
---
arch/arm/common/gic.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index a9d7357..5da382b 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -858,6 +858,13 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
*/
dsb();
+ /*
+ * On UP system, realview-pb-a8 for example, the CPU mask is empty. The
+ * softirq are always handled on CPU0.
+ */
+ if (map == 0) {
+ map = 1;
+ }
/* this always happens on GIC0 */
writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
--
1.8.4

172
components/vmm/vmm.c Normal file
View file

@ -0,0 +1,172 @@
/*
* VMM startup file.
*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-06-15     Bernard      the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include "board.h"
#include "vmm.h"
#include "vmm_context.h"
extern void rt_hw_interrupt_init(void);
extern void rt_application_init(void);
void vmm_entry(struct vmm_entry_param* param) rt_section(".vmm_init");
struct rt_thread vmm_thread rt_section(".bss.share.vmm");
extern rt_uint8_t vmm_stack_start;
extern rt_uint8_t vmm_stack_end;
/**
 * Initialize and start the VMM thread, installing it as the current thread.
 *
 * The thread uses the dedicated stack region [vmm_stack_start, vmm_stack_end)
 * provided by the linker. Entry and parameter are RT_NULL — NOTE(review):
 * presumably this context is resumed directly rather than entered through a
 * normal thread entry function; confirm against the context-switch code.
 *
 * @param thread  statically allocated thread control block to initialize
 * @param name    thread name
 */
void vmm_thread_init(struct rt_thread *thread, const char *name)
{
    extern struct rt_thread *rt_current_thread;

    /* lowest usable priority (RT_THREAD_PRIORITY_MAX - 1), 10-tick slice */
    rt_thread_init(thread, name, RT_NULL, RT_NULL,
                   &vmm_stack_start, &vmm_stack_end - &vmm_stack_start,
                   RT_THREAD_PRIORITY_MAX - 1, 10);
    /* set thread to ready status but not switch to */
    rt_thread_startup(thread);
    /* set current thread as vmm thread */
    rt_current_thread = thread;
}
#ifdef VMM_VERIFY_GUEST
/* Monitor thread body: periodically sanity-check the saved guest context. */
static void _verify_guest(void *p)
{
    for (;;)
    {
        /* poll four times per second */
        rt_thread_delay(RT_TICK_PER_SECOND / 4);
        vmm_verify_guest_status(vmm_thread.sp);
    }
}
/* Spawn the "vmon" thread that watches the guest for bad CPSR/IRQ state. */
static void vmm_create_monitor(void)
{
    rt_thread_t monitor;

    monitor = rt_thread_create("vmon", _verify_guest, RT_NULL,
                               1024, /* stack size */
                               8,    /* priority   */
                               20);  /* time slice */
    if (monitor != RT_NULL)
    {
        rt_thread_startup(monitor);
    }
}
#endif
#ifdef RT_VMM_USING_DOMAIN
extern unsigned long guest_domain_val;
extern unsigned long vmm_domain_val;
#endif
/*
 * Glue between VMM initialization and the first scheduling.
 *
 * Placed in its own ".vmm_glue" section; marked noinline because inlining
 * it into vmm_entry() would defeat that section placement.
 */
static void vmm_entry_glue(rt_uint32_t level,
                           unsigned int vmm_domain,
                           unsigned int kernel_domain)
/* inline would make the section setting meaningless */
__attribute__((noinline))
rt_section(".vmm_glue");
static void vmm_entry_glue(rt_uint32_t level,
                           unsigned int vmm_domain,
                           unsigned int kernel_domain)
{
    /* interrupts are still disabled here; they are re-enabled below with
     * the level saved in vmm_entry() */
    rt_schedule();
#ifdef RT_VMM_USING_DOMAIN
    /* protect us from the guest code, but leave the shared region permission
     */
    /* clear the two DACR bits of the vmm domain in the guest's DACR value */
    guest_domain_val &= ~(0x3 << (vmm_domain * 2));
    /* don't touch the guest kernel space */
    vmm_domain_val &= ~(0x3 << (kernel_domain * 2));
#endif
    rt_hw_interrupt_enable(level);
}
/**
 * VMM entry point (placed in the ".vmm_init" section — see the forward
 * declaration above).
 *
 * Brings up the RT-Thread kernel step by step — interrupt controller,
 * board, timers, heap, scheduler, application threads — then installs the
 * current context as the "vmm" thread and enters the scheduler through
 * vmm_entry_glue(). The whole sequence runs with interrupts disabled;
 * the order of the steps below is load-bearing.
 *
 * @param param  iomap table and domain layout supplied by the loader
 */
void vmm_entry(struct vmm_entry_param *param)
{
    rt_base_t level;

    /* saved level is handed to vmm_entry_glue(), which re-enables IRQs */
    level = rt_hw_interrupt_disable();

    /* set iomap */
    vmm_iomap_init(param->iomap);

    /* set VMM context address */
    vmm_context_init(&RT_VMM_SHARE->ctx);

    /* init hardware interrupt */
    rt_hw_interrupt_init();
    vmm_vector_init();

    /* init board */
    rt_hw_board_init();

    /* show version */
    rt_show_version();
    rt_kprintf("share ctx: %p(%x)\n",
               &RT_VMM_SHARE->ctx, sizeof(RT_VMM_SHARE->ctx));

    /* init timer system */
    rt_system_timer_init();

    {
        rt_uint32_t ttbr;
        /* read TTBR0 (CP15 c2) to aid debugging the guest page table */
        asm volatile ("mrc p15, 0, %0, c2, c0, 0\n"
                      : "=r"(ttbr));
        rt_kprintf("Linux TTBR: 0x%08x\n", ttbr);
        /*
         *rt_hw_cpu_dump_page_table((void*)((ttbr & (0xffffc000))
         * - 0x80000000 + 0xC0000000));
         */
        /*rt_hw_cpu_dump_page_table((void*)(0xc0004000));*/
    }

#ifdef RT_VMM_USING_DOMAIN
    vmm_context_init_domain(param->domain);
#endif

    rt_kprintf("heap: 0x%p - 0x%p, %dKi bytes\n",
               (void*)HEAP_BEGIN, (void*)HEAP_END,
               ((int)HEAP_END - (int)HEAP_BEGIN) / 1024);
    /* init heap memory system */
    rt_system_heap_init((void*)HEAP_BEGIN, (void*)HEAP_END);

    /* init scheduler system */
    rt_system_scheduler_init();

    rt_kprintf("user application init.\n");
    /* init application */
    rt_application_init();

#ifdef VMM_VERIFY_GUEST
    vmm_create_monitor();
#endif

    rt_system_timer_thread_init();

    vmm_thread_init(&vmm_thread, "vmm");

#ifdef RT_VMM_USING_DOMAIN
    rt_kprintf("domain protect present\n");
#endif

    /* start scheduler */
    rt_kprintf("do the first scheduling...\n");
    vmm_entry_glue(level,
                   param->domain->vmm,
                   param->domain->kernel);
}

44
components/vmm/vmm.h Normal file
View file

@ -0,0 +1,44 @@
/*
* VMM startup file.
*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-06-15     Bernard      the first version
*/
#ifndef __VMM_H__
#define __VMM_H__

#ifndef __ASSEMBLY__
#include <stddef.h> // for size_t
#endif

/* enable the guest-status monitor thread (see vmm.c) */
#define VMM_VERIFY_GUEST

#include <rtt_api.h>

#ifndef __ASSEMBLY__
/* iomap table: resolve a device name or physical address to the VMM VA */
void vmm_iomap_init(struct vmm_iomap *iomap);
unsigned long vmm_find_iomap(const char *name);
unsigned long vmm_find_iomap_by_pa(unsigned long pa);

/* install the host-side handler for the virtual-IRQ trigger line */
void vmm_vector_init(void);

/* If the rshell is run, we could not rt_kprintf in some situation because
 * write to a vbus channel *Would BLOCK*. So we cannot use it in interrupt
 * context, we cannot use it within the context of idle(vmm). */
/* logging is therefore compiled out by default */
#define vmm_debug(fmt, ...)
#define vmm_verbose(fmt, ...)
#define vmm_info(fmt, ...)
#endif

/* element count of a true array (do NOT use on pointers) */
#define ARRAY_SIZE(ar) (sizeof(ar)/sizeof(ar[0]))

#endif

View file

@ -0,0 +1,317 @@
/*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-11-04 Grissiom add comment
*/
#include <rthw.h>
#include <rtthread.h>
#include <interrupt.h>
#include <log_trace.h>
#include <vmm.h>
#include "vmm_context.h"
/* guest/host shared area, placed by the linker in the ".vmm.share" section */
struct rt_vmm_share_layout rt_vmm_share rt_section(".vmm.share");

/* pointer to the live shared context; volatile because both sides touch it */
volatile struct vmm_context *_vmm_context = RT_NULL;

/**
 * Attach the VMM to the shared context area and reset it.
 *
 * @param context_addr  address of the struct vmm_context shared with the guest
 */
void vmm_context_init(void *context_addr)
{
    _vmm_context = (struct vmm_context *)context_addr;
    rt_memset((void *)_vmm_context, 0x00, sizeof(struct vmm_context));

    /* When loading RT-Thread, the IRQ on the guest should be disabled. */
    _vmm_context->virq_status = 1;
}
#ifdef RT_VMM_USING_DOMAIN
/* cached DACR values, kept in the shared .bss so both worlds see them */
unsigned long guest_domain_val rt_section(".bss.share");
unsigned long vmm_domain_val rt_section(".bss.share");
/* some RT-Thread code need to be called in the guest
 * context(rt_thread_idle_excute for example). To simplify the code, we need a
 * "super" domain mode to have access of both side. The code executed in super
 * domain mode is restricted and should be harmless. */
unsigned long super_domain_val rt_section(".bss.share");

/**
 * Derive the guest/vmm/super DACR values from the domain layout.
 *
 * Starts from the DACR the guest is currently running with, then grants
 * client access (bit pattern 01 per 2-bit domain field) to the VMM-owned
 * domains. When the guest shares a domain with the VMM no separation is
 * possible and all three values collapse to the guest's DACR.
 *
 * @param domain  domain numbers assigned to guest kernel/user/io and
 *                to the VMM and the shared region
 */
void vmm_context_init_domain(struct vmm_domain *domain)
{
    /* read the current DACR (CP15 c3) as the baseline */
    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (guest_domain_val));

    rt_kprintf("Linux domain: kernel: %d, user: %d, io: %d\n"
               "VMM domain: vmm: %d, share: %d\n",
               domain->kernel, domain->user, domain->io,
               domain->vmm, domain->vmm_share);

    if (domain->kernel == domain->vmm ||
        domain->io     == domain->vmm)
    {
        /* no isolation possible: fall back to a single shared DACR value */
        rt_kprintf("VMM and the guest share the same domain\n");
        super_domain_val = vmm_domain_val = guest_domain_val;
        return;
    }

    vmm_domain_val = guest_domain_val;

    /* become client to our own territory */
    vmm_domain_val |= (1 << (domain->vmm * 2)) | (1 << (domain->vmm_share * 2));

    super_domain_val = vmm_domain_val;

    /* super domain has access to both side */
    super_domain_val |= (1 << (domain->kernel * 2)) | (1 << (domain->user * 2));

    rt_kprintf("Original DAC: 0x%08x\n", guest_domain_val);
}
/* Switch the DACR (CP15 c3) to domain_val and return the previous value,
 * so the caller can restore it with vmm_context_restore_domain(). */
unsigned long vmm_context_enter_domain(unsigned long domain_val)
{
    unsigned long prev;

    /* read the current DACR, then install the requested one */
    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (prev));
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");

    return prev;
}
/* Write domain_val back into the DACR (CP15 c3), undoing a previous
 * vmm_context_enter_domain(). */
void vmm_context_restore_domain(unsigned long domain_val)
{
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");
}
#endif
/**
 * Mark virtual IRQ @irq pending for the guest and mask it on the host.
 *
 * Sets bit (irq % 32) of word (irq / 32) in the shared pending bitmap and
 * raises the global "pended" flag read by vmm_virq_update()/vmm_virq_check().
 *
 * @param irq  interrupt number to forward to the guest
 */
void vmm_virq_pending(int irq)
{
    /* when running this piece of code, the guest is already suspended. So it's
     * safe to set the bits without locks. */
    /* 1u, not 1: left-shifting a signed 1 into bit 31 is undefined behavior */
    _vmm_context->virq_pending[irq / 32] |= (1u << (irq % 32));
    _vmm_context->virq_pended = 1;

    /* mask this IRQ in host */
    rt_hw_interrupt_mask(irq);
}
/* Raise the trigger IRQ toward the guest when virtual IRQs are enabled
 * (virq_status == 0) and at least one is pending. */
void vmm_virq_update(void)
{
    int virq_enabled = !_vmm_context->virq_status;

    if (virq_enabled && _vmm_context->virq_pended)
        rt_hw_interrupt_trigger(RT_VMM_VIRQ_TRIGGER);
}
/** check the guest IRQ status
 *
 * @return 0 on guest should handle IRQ, -1 on should restore the guest context
 * normally.
 */
int vmm_virq_check(void)
{
    /* guest has virtual IRQs disabled: nothing to deliver */
    if (_vmm_context->virq_status)
        return -1;

    /* nothing pending either */
    if (!_vmm_context->virq_pended)
        return -1;

    return 0;
}
/* 10 = len("%08x, ") */
static char _vmbuf[10*ARRAY_SIZE(_vmm_context->virq_pending)];

/**
 * Dump the shared virtual-IRQ state: status, pended flag and the pending
 * bitmap. Output goes through vmm_info(), which may be compiled out
 * (see vmm.h).
 */
void vmm_dump_virq(void)
{
    int i, s;

    vmm_info("---- virtual IRQ ----\n");
    vmm_info(" status: %08x, pended: %08x, pending:\n",
             _vmm_context->virq_status, _vmm_context->virq_pended);
    for (s = 0, i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++)
    {
        s += rt_snprintf(_vmbuf+s, sizeof(_vmbuf)-s,
                         "%08x, ", _vmm_context->virq_pending[i]);
    }
    /* the "%.*s" precision argument must be an int; sizeof yields size_t */
    vmm_info("%.*s\n", (int)sizeof(_vmbuf), _vmbuf);
    vmm_info("---- virtual IRQ ----\n");
}
/* Consistency check: the pended flag must be set if and only if at least
 * one bit is set in the pending bitmap. Returns 1 when coherent. */
int vmm_virq_coherence_ok(void)
{
    int i, ok;
    int should_pend = 0;

    /* OR all pending words together: non-zero means something is pending */
    for (i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++)
        should_pend |= _vmm_context->virq_pending[i];

    ok = (_vmm_context->virq_pended == !!should_pend);
    if (!ok)
    {
        vmm_info("--- %x %x, %x\n",
                 _vmm_context->virq_pended, should_pend, !!should_pend);
    }
    return ok;
}
extern struct rt_thread vmm_thread;

/* Print the guest CPSR/PC/LR and stack pointer from the saved exception
 * frame at the top of the vmm thread stack. */
void vmm_show_guest_reg(void)
{
    struct rt_hw_stack *sp = vmm_thread.sp;
#ifdef RT_VMM_USING_DOMAIN
    unsigned long old_domain;

    /* the saved frame may live in guest-owned memory; open both sides */
    old_domain = vmm_context_enter_domain(super_domain_val);
#endif

    /* sp+1: the address just past the saved frame is printed as "SP" —
     * NOTE(review): presumably the guest SP at trap time; confirm against
     * the exception-entry code */
    vmm_info("CPSR: %08x, PC: %08x, LR: %08x, SP: %08x\n",
             sp->cpsr, sp->pc, sp->lr, sp+1);

#ifdef RT_VMM_USING_DOMAIN
    vmm_context_restore_domain(old_domain);
#endif
}
/* Print the live DACR and, when domain protection is compiled in, the
 * three cached domain values. */
void vmm_dump_domain(void)
{
    unsigned long dacr_now;

    /* read the current Domain Access Control Register (CP15 c3) */
    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (dacr_now));
    vmm_info("current DAC: %08x\n", dacr_now);
#ifdef RT_VMM_USING_DOMAIN
    vmm_info("guest DAC: %08x, RTT DAC: %08x, super DAC: %08x\n",
             guest_domain_val, vmm_domain_val, super_domain_val);
#endif
}
/* Dump the full guest diagnostic picture: registers, virtual-IRQ state and
 * domain registers. Exported to the finsh shell as "vmm". */
void vmm_show_guest(void)
{
    vmm_show_guest_reg();
    vmm_dump_virq();
    vmm_dump_domain();
}

#ifdef RT_USING_FINSH
#include <finsh.h>
FINSH_FUNCTION_EXPORT_ALIAS(vmm_show_guest, vmm, show vmm status);
#endif
/* Return 0 when the mode field of @cpsr names a valid ARM processor mode,
 * 1 otherwise. */
static int _bad_cpsr(unsigned long cpsr)
{
    switch (cpsr & MODEMASK)
    {
    case USERMODE:
    case FIQMODE:
    case IRQMODE:
    case SVCMODE:
#ifdef CPU_HAS_MONITOR_MODE
    case MONITORMODE:
#endif
    case ABORTMODE:
#ifdef CPU_HAS_HYP_MODE
    case HYPMODE:
#endif
    case UNDEFMODE:
    case MODEMASK: /* system mode: all mode bits set */
        return 0;
    default:
        return 1;
    }
}
/**
 * Sanity-check the saved guest exception frame and dump diagnostics when
 * anything looks wrong: invalid CPSR mode, IRQ/FIQ disabled in the guest,
 * VIRQ disabled in user mode, or PC/mode mismatches across the kernel/user
 * address boundary.
 *
 * Only the message text was corrected here ("WARING" -> "WARNING");
 * the checking logic is unchanged.
 *
 * @param sp  saved guest register frame (top of the vmm thread stack)
 */
void vmm_verify_guest_status(struct rt_hw_stack *sp)
{
    int dump_vmm = 0;
    unsigned long cpsr;
#ifdef RT_VMM_USING_DOMAIN
    unsigned long old_domain;

    /* the frame may live in guest memory; open both sides while we read it */
    old_domain = vmm_context_enter_domain(super_domain_val);
#endif

    cpsr = sp->cpsr;
    if (_bad_cpsr(cpsr))
    {
        vmm_info("=================================\n");
        vmm_info("VMM WARNING: bad CPSR in guest\n");
        dump_vmm = 1;
    }
    else
    {
        /* "&& 0": this check is deliberately disabled */
        if (cpsr & A_Bit && 0)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: A bit is set in guest\n");
            dump_vmm = 1;
        }
        if ((cpsr & I_Bit) && (sp->pc <= VMM_BEGIN))
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: IRQ disabled in guest\n");
            dump_vmm = 1;
        }
        if (cpsr & F_Bit)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: FIQ disabled in guest\n");
            dump_vmm = 1;
        }
        if ((cpsr & MODEMASK) == USERMODE)
        {
            if (_vmm_context->virq_status & 1)
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARNING: VIRQ disabled in user mode\n");
                dump_vmm = 1;
            }
            if ((sp->pc > 0xbf000000) && (sp->pc < 0xffff0000))
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARNING: executing kernel code in usr mode\n");
                dump_vmm = 1;
            }
            /* FIXME: when the guest is suspended in user mode and its
             * interrupts come, this can be misleading. */
#if 0
            if (_vmm_context->virq_pended)
            {
                vmm_info("=================================\n");
                vmm_info("VMM WARNING: VIRQ pended in user mode\n");
                dump_vmm = 1;
            }
#endif
        }
        else if ((cpsr & MODEMASK) == SVCMODE && sp->pc < 0xbf000000)
        {
            vmm_info("=================================\n");
            vmm_info("VMM WARNING: executing usr code in svc mode\n");
            dump_vmm = 1;
        }
    }

#if 0
    if (!vmm_virq_coherence_ok())
    {
        vmm_info("=================================\n");
        vmm_info("VMM WARNING: bad VIRQ status\n");
        dump_vmm = 1;
    }
#endif

    if (dump_vmm)
    {
        vmm_show_guest();
        vmm_info("=================================\n");
    }

#ifdef RT_VMM_USING_DOMAIN
    vmm_context_restore_domain(old_domain);
#endif
}

View file

@ -0,0 +1,28 @@
/*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-11-04 Grissiom add comment
*/
#ifndef __VMM_CONTEXT_H__
#define __VMM_CONTEXT_H__

#include <armv7.h> // for struct rt_hw_stack
#include "vmm.h"

/* attach the VMM to the guest/host shared context area */
void vmm_context_init(void *context_addr);
#ifdef RT_VMM_USING_DOMAIN
/* derive the guest/vmm/super DACR values from the domain layout */
void vmm_context_init_domain(struct vmm_domain *domain);
#endif
/* mark a virtual IRQ pending for the guest and mask it on the host */
void vmm_virq_pending(int irq);
/* sanity-check the saved guest exception frame (see vmm_context.c) */
void vmm_verify_guest_status(struct rt_hw_stack *sp);
/* dump guest registers, virtual-IRQ state and domain registers */
void vmm_show_guest(void);

#endif

View file

@ -0,0 +1,49 @@
/*
* VMM IO map table
*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-06-15     Bernard      the first version
*/
#include <rtthread.h>
#include "vmm.h"
/* module-local copy of the board-supplied iomap table */
static struct vmm_iomap _vmm_iomap[RT_VMM_IOMAP_MAXNR];

/* Copy the board-supplied iomap table into the module-local table used by
 * the lookups below.
 * NOTE(review): copies sizeof(_vmm_iomap) bytes — the caller's table must
 * hold at least RT_VMM_IOMAP_MAXNR entries; confirm at the call site. */
void vmm_iomap_init(struct vmm_iomap *iomap)
{
    rt_memcpy(_vmm_iomap, iomap, sizeof(_vmm_iomap));
}
/* find virtual address according to name */
unsigned long vmm_find_iomap(const char *name)
{
int i;
for (i = 0; i < ARRAY_SIZE(_vmm_iomap); i++)
{
if (rt_strcmp(_vmm_iomap[i].name, name) == 0)
return (unsigned long)_vmm_iomap[i].va;
}
return 0;
}
/* find virtual address according to physical address */
/* Look up the virtual address mapped at physical address @pa.
 * Returns 0 when no entry matches. */
unsigned long vmm_find_iomap_by_pa(unsigned long pa)
{
    unsigned int idx;

    for (idx = 0; idx < ARRAY_SIZE(_vmm_iomap); idx++)
    {
        if (_vmm_iomap[idx].pa == pa)
            return (unsigned long)_vmm_iomap[idx].va;
    }
    return 0;
}

View file

@ -0,0 +1,31 @@
/*
* VMM vector handle
*
* COPYRIGHT (C) 2011-2021, Real-Thread Information Technology Ltd
* All rights reserved
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-06-15     Bernard      the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <interrupt.h>
#include "vmm.h"
/* Host-side handler for the virtual-IRQ trigger line: acknowledge only. */
void vmm_guest_isr(int irqno, void* parameter)
{
    /* nothing else to do here; the guest OS performs the real handling */
    rt_hw_interrupt_clear(irqno);
}
/* Install and unmask the host handler for the virtual-IRQ trigger line. */
void vmm_vector_init(void)
{
    rt_hw_interrupt_install(RT_VMM_VIRQ_TRIGGER, vmm_guest_isr,
                            RT_NULL, "virq");
    rt_hw_interrupt_umask(RT_VMM_VIRQ_TRIGGER);
}