import RT-Thread@9217865c without bsp, libcpu and components/net

Zihao Yu, 2023-05-20 16:23:33 +08:00
commit e2376a3709
1414 changed files with 390370 additions and 0 deletions


@@ -0,0 +1,121 @@
#include "mm_aspace.h"
#include <rtthread.h>
#include <stdint.h>
#include <string.h>
#include <lwp_elf.h>
#ifdef ARCH_MM_MMU
#include <mmu.h>
#include <page.h>
#endif

typedef struct
{
    Elf32_Word    st_name;
    Elf32_Addr    st_value;
    Elf32_Word    st_size;
    unsigned char st_info;
    unsigned char st_other;
    Elf32_Half    st_shndx;
} Elf32_sym;

#ifdef ARCH_MM_MMU
void arch_elf_reloc(rt_aspace_t aspace, void *text_start, void *rel_dyn_start,
                    size_t rel_dyn_size, void *got_start, size_t got_size,
                    Elf32_sym *dynsym)
{
    size_t rel_off;
    void *addr;

    if (rel_dyn_size && !dynsym)
    {
        return;
    }
    /* each .rel.dyn entry is 8 bytes: r_offset (v1) followed by r_info (v2) */
    for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
    {
        uint32_t v1, v2;

        /*
        memcpy(&v1, rel_dyn_start + rel_off, 4);
        memcpy(&v2, rel_dyn_start + rel_off + 4, 4);
        */
        /* the target aspace is not active here, so translate each user virtual
         * address to its kernel-visible alias before touching it */
        addr = rt_hw_mmu_v2p(aspace, (void *)((char *)rel_dyn_start + rel_off));
        addr = (void *)((char *)addr - PV_OFFSET);
        memcpy(&v1, addr, 4);
        addr = rt_hw_mmu_v2p(aspace, (void *)((char *)rel_dyn_start + rel_off + 4));
        addr = (void *)((char *)addr - PV_OFFSET);
        memcpy(&v2, addr, 4);

        addr = rt_hw_mmu_v2p(aspace, (void *)((char *)text_start + v1));
        addr = (void *)((char *)addr - PV_OFFSET);
        if ((v2 & 0xff) == R_ARM_RELATIVE) /* low byte of r_info is the relocation type */
        {
            // *(uint32_t*)(text_start + v1) += (uint32_t)text_start;
            *(uint32_t *)addr += (uint32_t)text_start;
        }
        else if ((v2 & 0xff) == R_ARM_ABS32)
        {
            uint32_t t;

            t = (v2 >> 8); /* upper 24 bits of r_info are the symbol index */
            if (t) /* 0 is UDF */
            {
                // *(uint32_t*)(text_start + v1) = (uint32_t)(text_start + dynsym[t].st_value);
                *(uint32_t *)addr = (uint32_t)((char *)text_start + dynsym[t].st_value);
            }
        }
    }

    /* modify got */
    if (got_size)
    {
        uint32_t *got_item = (uint32_t *)got_start;

        for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
        {
            //*got_item += (uint32_t)text_start;
            addr = rt_hw_mmu_v2p(aspace, got_item);
            addr = (void *)((char *)addr - PV_OFFSET);
            *(uint32_t *)addr += (uint32_t)text_start;
        }
    }
}
#else
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size,
                    void *got_start, size_t got_size, Elf32_sym *dynsym)
{
    size_t rel_off;

    if (rel_dyn_size && !dynsym)
    {
        return;
    }
    for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
    {
        uint32_t v1, v2;

        memcpy(&v1, (void *)((char *)rel_dyn_start + rel_off), 4);
        memcpy(&v2, (void *)((char *)rel_dyn_start + rel_off + 4), 4);

        if ((v2 & 0xff) == R_ARM_RELATIVE)
        {
            *(uint32_t *)((char *)text_start + v1) += (uint32_t)text_start;
        }
        else if ((v2 & 0xff) == R_ARM_ABS32)
        {
            uint32_t t;

            t = (v2 >> 8);
            if (t) /* 0 is UDF */
            {
                *(uint32_t *)((char *)text_start + v1) = (uint32_t)((char *)text_start + dynsym[t].st_value);
            }
        }
    }

    /* modify got */
    if (got_size)
    {
        uint32_t *got_item = (uint32_t *)got_start;

        for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
        {
            *got_item += (uint32_t)text_start;
        }
    }
}
#endif
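
For reference, the 8-byte stride in the loops above matches the layout of an Elf32_Rel entry from the ELF specification: a 32-bit r_offset (read into v1) followed by a 32-bit r_info (read into v2), whose low byte is the relocation type and whose upper 24 bits index the dynamic symbol table. A minimal decoding sketch, separate from the MMU plumbing above:

#include <stdint.h>

/* Elf32_Rel as defined by the ELF specification */
typedef struct
{
    uint32_t r_offset; /* image-relative address to patch (v1 above) */
    uint32_t r_info;   /* relocation type and symbol index (v2 above) */
} Elf32_Rel;

#define ELF32_R_TYPE(info) ((info) & 0xff) /* e.g. R_ARM_RELATIVE, R_ARM_ABS32 */
#define ELF32_R_SYM(info)  ((info) >> 8)   /* index into dynsym[] */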


@@ -0,0 +1,161 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-10-28 Jesven first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#ifdef ARCH_MM_MMU
#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <lwp_arch.h>
#include <lwp_user_mm.h>
#define KPTE_START (KERNEL_VADDR_START >> ARCH_SECTION_SHIFT)

int arch_user_space_init(struct rt_lwp *lwp)
{
    size_t *mmu_table;

    /* 2^2 pages = 16 KiB, the size of an ARMv7-A first-level page table
     * (4096 entries * 4 bytes) */
    mmu_table = (size_t *)rt_pages_alloc(2);
    if (!mmu_table)
    {
        return -RT_ENOMEM;
    }

    lwp->end_heap = USER_HEAP_VADDR;

    /* share the kernel mappings (entries from KPTE_START up, the last 4 KiB
     * of the table) and clear the user part (the first 12 KiB) */
    rt_memcpy(mmu_table + KPTE_START, (size_t *)rt_kernel_space.page_table + KPTE_START, ARCH_PAGE_SIZE);
    rt_memset(mmu_table, 0, 3 * ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, 4 * ARCH_PAGE_SIZE);

    lwp->aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
    if (!lwp->aspace)
    {
        rt_pages_free(mmu_table, 2); /* don't leak the page table on failure */
        return -RT_ERROR;
    }

    return 0;
}

static struct rt_varea kuser_varea;

void arch_kuser_init(rt_aspace_t aspace, void *vectors)
{
    const size_t kuser_size = 0x1000;
    int err;
    extern char __kuser_helper_start[], __kuser_helper_end[];
    int kuser_sz = __kuser_helper_end - __kuser_helper_start;

    err = rt_aspace_map_static(aspace, &kuser_varea, &vectors, kuser_size,
                               MMU_MAP_U_RO, MMF_MAP_FIXED | MMF_PREFETCH,
                               &rt_mm_dummy_mapper, 0);
    if (err != 0)
        while (1)
            ; /* early failed */

    /* copy the helpers to the top of the vector page, then make the d-cache
     * and i-cache coherent for the freshly written code */
    rt_memcpy((void *)((char *)vectors + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz);
    /*
     * vectors + 0xfe0 = __kuser_get_tls
     * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
     */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
    rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
}

void arch_user_space_free(struct rt_lwp *lwp)
{
    if (lwp)
    {
        RT_ASSERT(lwp->aspace);
        void *pgtbl = lwp->aspace->page_table;
        rt_aspace_delete(lwp->aspace);

        /* must be freed after aspace delete, pgtbl is required for unmap */
        rt_pages_free(pgtbl, 2);
        lwp->aspace = RT_NULL;
    }
    else
    {
        LOG_W("%s: NULL lwp as parameter", __func__);
        RT_ASSERT(0);
    }
}

int arch_expand_user_stack(void *addr)
{
    int ret = 0;
    size_t stack_addr = (size_t)addr;

    stack_addr &= ~ARCH_PAGE_MASK;
    if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
    {
        void *map = lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);

        if (map || lwp_user_accessable(addr, 1))
        {
            ret = 1;
        }
    }
    return ret;
}
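
arch_expand_user_stack() implements on-demand growth of the user stack: given a faulting address inside the stack window, it maps one page there and reports success. A sketch of how an abort path might use it; handle_user_fault() is a hypothetical name for illustration, and only arch_expand_user_stack() comes from this file:

/* hypothetical fragment of a data-abort path */
void handle_user_fault(void *fault_addr)
{
    if (arch_expand_user_stack(fault_addr))
    {
        return; /* a stack page is now mapped (or was already accessible); retry the access */
    }
    /* otherwise deliver a fault signal to the process */
}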

#ifdef LWP_ENABLE_ASID
#define MAX_ASID_BITS 8
#define MAX_ASID (1 << MAX_ASID_BITS)

static uint64_t global_generation = 1;
static char asid_valid_bitmap[MAX_ASID];

unsigned int arch_get_asid(struct rt_lwp *lwp)
{
    if (lwp == RT_NULL)
    {
        /* kernel */
        return 0;
    }

    /* still in the current generation: the cached ASID is valid */
    if (lwp->generation == global_generation)
    {
        return lwp->asid;
    }

    /* reclaim the old ASID if nobody has taken it in this generation */
    if (lwp->asid && !asid_valid_bitmap[lwp->asid])
    {
        asid_valid_bitmap[lwp->asid] = 1;
        return lwp->asid;
    }

    /* otherwise grab the first free ASID; 0 is reserved for the kernel */
    for (unsigned i = 1; i < MAX_ASID; i++)
    {
        if (asid_valid_bitmap[i] == 0)
        {
            asid_valid_bitmap[i] = 1;
            lwp->generation = global_generation;
            lwp->asid = i;
            return lwp->asid;
        }
    }

    /* all ASIDs are in use: start a new generation and flush the TLB */
    global_generation++;
    memset(asid_valid_bitmap, 0, MAX_ASID * sizeof(char));

    asid_valid_bitmap[1] = 1;
    lwp->generation = global_generation;
    lwp->asid = 1;

    asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory"); /* TLBIALL */

    return lwp->asid;
}
#endif
#endif
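
The ASID allocator above uses the classic generation (version) scheme: ASIDs are handed out from a bitmap until all 255 are taken, then a new generation starts, the bitmap is cleared, and the whole TLB is flushed (TLBIALL) so stale translations from the previous generation cannot survive. The returned value would typically be programmed into CONTEXTIDR on a context switch; a sketch under that assumption, with set_context_id() as a hypothetical helper:

/* illustrative only: install an ASID in the ARMv7-A CONTEXTIDR register */
static inline void set_context_id(unsigned int asid)
{
    asm volatile ("mcr p15, 0, %0, c13, c0, 1\n"
                  "isb" :: "r" (asid & 0xff) : "memory");
}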


@@ -0,0 +1,49 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <lwp.h>
#include <lwp_arch_comm.h>
#ifdef ARCH_MM_MMU
#define USER_VADDR_TOP 0xC0000000UL
#define USER_HEAP_VEND 0xB0000000UL
#define USER_HEAP_VADDR 0x80000000UL
#define USER_STACK_VSTART 0x70000000UL
#define USER_STACK_VEND USER_HEAP_VADDR
#define LDSO_LOAD_VADDR 0x60000000UL
#define USER_VADDR_START 0x00100000UL
#define USER_LOAD_VADDR USER_VADDR_START
#ifdef __cplusplus
extern "C" {
#endif
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
    /* find first zero bit: 1-based ffs of the complement, made 0-based */
    return __builtin_ffsl(~x) - 1;
}

rt_inline void icache_invalid_all(void)
{
    asm volatile ("mcr p15, 0, r0, c7, c5, 0\ndsb\nisb" ::: "memory"); /* ICIALLU */
}
unsigned int arch_get_asid(struct rt_lwp *lwp);
#ifdef __cplusplus
}
#endif
#endif
#endif /*LWP_ARCH_H__*/
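
A quick sanity check of rt_hw_ffz(): for x = 0xFF the complement has bits 0-7 clear, __builtin_ffsl() returns 9 (the 1-based index of the lowest set bit), so the result is 8, the first zero bit of x. A host-side test sketch using the same expression:

#include <assert.h>

static unsigned long ffz(unsigned long x)
{
    return __builtin_ffsl(~x) - 1; /* same expression as rt_hw_ffz() above */
}

int main(void)
{
    assert(ffz(0x0) == 0);  /* bit 0 is already clear */
    assert(ffz(0xFF) == 8); /* bits 0-7 are set, bit 8 is the first zero */
    return 0;
}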


@@ -0,0 +1,435 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-12-10 Jesven first version
*/
#include "rtconfig.h"
#include "asm-generic.h"
#define Mode_USR 0x10
#define Mode_FIQ 0x11
#define Mode_IRQ 0x12
#define Mode_SVC 0x13
#define Mode_MON 0x16
#define Mode_ABT 0x17
#define Mode_UDF 0x1B
#define Mode_SYS 0x1F
#define A_Bit 0x100
#define I_Bit 0x80 @; when I bit is set, IRQ is disabled
#define F_Bit 0x40 @; when F bit is set, FIQ is disabled
#define T_Bit 0x20
.cpu cortex-a9
.syntax unified
.text

/*
 * void arch_start_umode(args, text, ustack, kstack);
 */
.global arch_start_umode
.type arch_start_umode, %function
arch_start_umode:
    mrs r9, cpsr
    bic r9, #0x1f
    orr r9, #Mode_USR
    cpsid i
    msr spsr, r9
    mov sp, r3

    mov r3, r2 ;/* user stack top */
    /* set data address. */
    movs pc, r1

/*
 * void arch_crt_start_umode(args, text, ustack, kstack);
 */
.global arch_crt_start_umode
.type arch_crt_start_umode, %function
arch_crt_start_umode:
    cps #Mode_SYS
    sub sp, r2, #16
    ldr r2, =lwp_thread_return
    ldr r4, [r2]
    str r4, [sp]
    ldr r4, [r2, #4]
    str r4, [sp, #4]
    ldr r4, [r2, #8]
    str r4, [sp, #8]

    mov r4, sp
    mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
    add r4, #4
    mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
    add r4, #4
    mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
    dsb
    isb
    mcr p15, 0, r4, c7, c5, 0 ;//iciallu
    dsb
    isb

    mov lr, sp
    cps #Mode_SVC

    mrs r9, cpsr
    bic r9, #0x1f
    orr r9, #Mode_USR
    cpsid i
    msr spsr, r9
    mov sp, r3

    /* set data address. */
    movs pc, r1

/*
 * void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
 */
.global arch_set_thread_context
arch_set_thread_context:
    sub r1, #(10 * 4 + 4 * 4) /* {r4 - r12, lr} , {r4, r5, spsr, u_pc} */
    stmfd r1!, {r0}
    mov r12, #0
    stmfd r1!, {r12}
    stmfd r1!, {r1 - r12}
    stmfd r1!, {r12} /* new thread return value */
    mrs r12, cpsr
    orr r12, #(1 << 7) /* disable irq */
    stmfd r1!, {r12} /* spsr */
    mov r12, #0
    stmfd r1!, {r12} /* now user lr is 0 */
    stmfd r1!, {r2} /* user sp */
#ifdef RT_USING_FPU
    stmfd r1!, {r12} /* not use fpu */
#endif
    str r1, [r3]
    mov pc, lr

.global arch_get_user_sp
arch_get_user_sp:
    cps #Mode_SYS
    mov r0, sp
    cps #Mode_SVC
    mov pc, lr

.global sys_fork
.global sys_vfork
.global arch_fork_exit
sys_fork:
sys_vfork:
    push {r4 - r12, lr}
    bl _sys_fork
arch_fork_exit:
    pop {r4 - r12, lr}
    b arch_syscall_exit

.global sys_clone
.global arch_clone_exit
sys_clone:
    push {r4 - r12, lr}
    bl _sys_clone
arch_clone_exit:
    pop {r4 - r12, lr}
    b arch_syscall_exit

/*
 * void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
 */
.global lwp_exec_user
lwp_exec_user:
    cpsid i
    mov sp, r1
    mov lr, r2
    mov r2, #Mode_USR
    msr spsr_cxsf, r2
    ldr r3, =0x80000000
    b arch_ret_to_user

/*
 * void SVC_Handler(void);
 */
.global vector_swi
.type vector_swi, %function
START_POINT(vector_swi)
    push {lr}
    mrs lr, spsr
    push {r4, r5, lr}

    cpsie i

    push {r0 - r3, r12}
    bl rt_thread_self
    bl lwp_user_setting_save

    /* r7 carries the syscall number; 0xe000/0xf000 are the signal-return
     * and debug-return pseudo syscalls issued by lwp_sigreturn/lwp_debugreturn */
    and r0, r7, #0xf000
    cmp r0, #0xe000
    beq arch_signal_quit

    cmp r0, #0xf000
    beq ret_from_user

    and r0, r7, #0xff
    bl lwp_get_sys_api
    cmp r0, #0 /* r0 = api */
    mov lr, r0

    pop {r0 - r3, r12}
    beq arch_syscall_exit
    blx lr
START_POINT_END(vector_swi)

.global arch_syscall_exit
arch_syscall_exit:
    cpsid i
    pop {r4, r5, lr}
    msr spsr_cxsf, lr
    pop {lr}

.global arch_ret_to_user
arch_ret_to_user:
    push {r0-r3, r12, lr}
    bl lwp_check_debug
    bl lwp_check_exit_request
    cmp r0, #0
    beq 1f
    mov r0, #0
    b sys_exit

1:
    bl lwp_signal_check
    cmp r0, #0
    pop {r0-r3, r12, lr}
    bne user_do_signal

    push {r0}
    ldr r0, =rt_dbg_ops
    ldr r0, [r0]
    cmp r0, #0
    pop {r0}
    beq 2f
    push {r0-r3, r12, lr}
    mov r0, lr
    bl dbg_attach_req
    pop {r0-r3, r12, lr}
2:
    movs pc, lr

#ifdef RT_USING_SMART
.global lwp_check_debug
lwp_check_debug:
    ldr r0, =rt_dbg_ops
    ldr r0, [r0]
    cmp r0, #0
    bne 1f
    bx lr
1:
    push {lr}
    bl dbg_check_suspend
    cmp r0, #0
    beq lwp_check_debug_quit

    cps #Mode_SYS
    sub sp, #8
    ldr r0, =lwp_debugreturn
    ldr r1, [r0]
    str r1, [sp]
    ldr r1, [r0, #4]
    str r1, [sp, #4]

    mov r1, sp
    mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
    add r1, #4
    mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
    dsb
    isb
    mcr p15, 0, r0, c7, c5, 0 ;//iciallu
    dsb
    isb

    mov r0, sp /* lwp_debugreturn */
    cps #Mode_SVC

    mrs r1, spsr
    push {r1}
    mov r1, #Mode_USR
    msr spsr_cxsf, r1
    movs pc, r0
ret_from_user:
    cps #Mode_SYS
    add sp, #8
    cps #Mode_SVC
    /*
    pop {r0 - r3, r12}
    pop {r4 - r6, lr}
    */
    add sp, #(4*9)
    pop {r4}
    msr spsr_cxsf, r4
lwp_check_debug_quit:
    pop {pc}

arch_signal_quit:
    cpsid i
    pop {r0 - r3, r12}
    pop {r4, r5, lr}
    pop {lr}

    bl lwp_signal_restore

    /* r0 is user_ctx : original sp, pc, cpsr */
    ldr r1, [r0]
    ldr r2, [r0, #4]
    ldr r3, [r0, #8]
    msr spsr_cxsf, r3
    mov lr, r2

    cps #Mode_SYS
    mov sp, r1
    pop {r0-r12, lr}
    cps #Mode_SVC

    b arch_ret_to_user

user_do_signal:
    mov r0, r0

    cps #Mode_SYS
    push {r0-r12, lr}
    sub sp, #8
    ldr r0, =lwp_sigreturn
    ldr r1, [r0]
    str r1, [sp]
    ldr r1, [r0, #4]
    str r1, [sp, #4]

    mov r1, sp
    mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
    add r1, #4
    mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
    dsb
    isb
    mcr p15, 0, r0, c7, c5, 0 ;//iciallu
    dsb
    isb

    mov r5, sp ;//if func is 0
    mov lr, sp
    add r0, sp, #8 /* lwp_sigreturn */
    cps #Mode_SVC

    mov r1, lr
    mrs r2, spsr
    bl lwp_signal_backup
    /* r0 is signal */
    mov r4, r0
    bl lwp_sighandler_get
    mov lr, r0
    cmp lr, #0
    moveq lr, r5
    mov r0, r4
    movs pc, lr

lwp_debugreturn:
    mov r7, #0xf000
    svc #0

lwp_sigreturn:
    mov r7, #0xe000
    svc #0

lwp_thread_return:
    mov r0, #0
    mov r7, #0x01
    svc #0
#endif

.global check_vfp
check_vfp:
#ifdef RT_USING_FPU
    vmrs r0, fpexc
    ubfx r0, r0, #30, #1
#else
    mov r0, #0
#endif
    mov pc, lr

.global get_vfp
get_vfp:
#ifdef RT_USING_FPU
    vstmia r0!, {d0-d15}
    vstmia r0!, {d16-d31}
    vmrs r1, fpscr
    str r1, [r0]
#endif
    mov pc, lr

.globl arch_get_tidr
arch_get_tidr:
    mrc p15, 0, r0, c13, c0, 3
    bx lr

.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
    mcr p15, 0, r0, c13, c0, 3
    bx lr

/* kuser support */
.macro kuser_pad, sym, size
    .if (. - \sym) & 3
    .rept 4 - (. - \sym) & 3
    .byte 0
    .endr
    .endif
    .rept (\size - (. - \sym)) / 4
    .word 0xe7fddef1
    .endr
.endm

.align 5
.globl __kuser_helper_start
__kuser_helper_start:

__kuser_cmpxchg64: @ 0xffff0f60
    stmfd sp!, {r4, r5, r6, lr}
    ldmia r0, {r4, r5} @ load old val
    ldmia r1, {r6, lr} @ load new val
1:  ldmia r2, {r0, r1} @ load current val
    eors r3, r0, r4 @ compare with oldval (1)
    eorseq r3, r1, r5 @ compare with oldval (2)
2:  stmiaeq r2, {r6, lr} @ store newval if eq
    rsbs r0, r3, #0 @ set return val and C flag
    ldmfd sp!, {r4, r5, r6, pc}

    kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier: @ 0xffff0fa0
    dmb
    mov pc, lr

    kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg: @ 0xffff0fc0
1:  ldr r3, [r2] @ load current val
    subs r3, r3, r0 @ compare with oldval
2:  streq r1, [r2] @ store newval if eq
    rsbs r0, r3, #0 @ set return val and C flag
    mov pc, lr

    kuser_pad __kuser_cmpxchg, 32

__kuser_get_tls: @ 0xffff0fe0
    mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
    mov pc, lr
    ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
    kuser_pad __kuser_get_tls, 16
    .rep 3
    .word 0 @ 0xffff0ff0 software TLS value, then
    .endr @ pad up to __kuser_helper_version
__kuser_helper_version: @ 0xffff0ffc
    .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
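
These helpers follow the Linux ARM kuser-helper ABI: user code calls them at fixed addresses near the top of the vector page, which is why arch_kuser_init() copies them to the end of that page. A hedged user-space sketch; the addresses come from the @ comments above, and atomic_increment() is purely illustrative:

/* __kuser_cmpxchg returns 0 when *ptr still held oldval and the store succeeded */
typedef int (*kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
typedef unsigned int (*kuser_get_tls_t)(void);

#define __kuser_cmpxchg ((kuser_cmpxchg_t)0xffff0fc0)
#define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)

static int atomic_increment(volatile int *counter)
{
    int old;
    do
    {
        old = *counter;
    } while (__kuser_cmpxchg(old, old + 1, counter) != 0);
    return old + 1;
}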