import RT-Thread@9217865c without bsp, libcpu and components/net
This commit is contained in:
commit
e2376a3709
1414 changed files with 390370 additions and 0 deletions
11
components/lwp/arch/risc-v/rv64/SConscript
Normal file
11
components/lwp/arch/risc-v/rv64/SConscript
Normal file
|
@ -0,0 +1,11 @@
|
|||
# RT-Thread building script for component

from building import *

cwd = GetCurrentDir()

# Collect every C and assembly source in this directory.
src = Glob('*.c')
src += Glob('*.S')

CPPPATH = [cwd]

# Only built when rt-smart userspace support is enabled.
group = DefineGroup('lwp-riscv', src, depend = ['RT_USING_SMART'], CPPPATH = CPPPATH)

Return('group')
|
261
components/lwp/arch/risc-v/rv64/lwp_arch.c
Normal file
261
components/lwp/arch/risc-v/rv64/lwp_arch.c
Normal file
|
@ -0,0 +1,261 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2020-11-18 Jesven first version
|
||||
* 2021-02-03 lizhirui port to riscv64
|
||||
* 2021-02-06 lizhirui add thread filter
|
||||
* 2021-02-19 lizhirui port to new version of rt-smart
|
||||
* 2021-03-02 lizhirui add a auxillary function for interrupt
|
||||
* 2021-03-04 lizhirui delete thread filter
|
||||
* 2021-03-04 lizhirui modify for new version of rt-smart
|
||||
* 2021-11-22 JasonHu add lwp_set_thread_context
|
||||
* 2021-11-30 JasonHu add clone/fork support
|
||||
*/
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
|
||||
#define DBG_TAG "lwp.arch"
|
||||
#define DBG_LVL DBG_INFO
|
||||
#include <rtdbg.h>
|
||||
|
||||
#include <lwp.h>
|
||||
#include <lwp_arch.h>
|
||||
#include <lwp_user_mm.h>
|
||||
#include <page.h>
|
||||
|
||||
#include <cpuport.h>
|
||||
#include <encoding.h>
|
||||
#include <stack.h>
|
||||
|
||||
extern rt_ubase_t MMUTable[];
|
||||
|
||||
void *lwp_copy_return_code_to_user_stack()
|
||||
{
|
||||
void lwp_thread_return();
|
||||
void lwp_thread_return_end();
|
||||
rt_thread_t tid = rt_thread_self();
|
||||
|
||||
if (tid->user_stack != RT_NULL)
|
||||
{
|
||||
rt_size_t size = (rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return;
|
||||
rt_size_t userstack = (rt_size_t)tid->user_stack + tid->user_stack_size - size;
|
||||
rt_memcpy((void *)userstack, lwp_thread_return, size);
|
||||
return (void *)userstack;
|
||||
}
|
||||
|
||||
return RT_NULL;
|
||||
}
|
||||
|
||||
/**
 * Adjust a user stack pointer so it sits below the return-code trampoline
 * placed by lwp_copy_return_code_to_user_stack().
 *
 * @param cursp current user stack pointer; 0 means "no stack".
 * @return adjusted stack pointer, or 0 when cursp is 0.
 */
rt_ubase_t lwp_fix_sp(rt_ubase_t cursp)
{
    void lwp_thread_return();
    void lwp_thread_return_end();

    if (cursp == 0)
    {
        return 0;
    }

    /* reserve room equal to the size of the copied trampoline code */
    rt_size_t reserved = (rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return;
    return cursp - reserved;
}
|
||||
|
||||
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
|
||||
{
|
||||
return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
|
||||
}
|
||||
|
||||
/** Return the address one past the end of @thread's kernel stack. */
void *get_thread_kernel_stack_top(rt_thread_t thread)
{
    rt_size_t top = (rt_size_t)thread->stack_addr + (rt_size_t)thread->stack_size;
    return (void *)top;
}
|
||||
|
||||
void *arch_get_user_sp(void)
|
||||
{
|
||||
/* user sp saved in interrupt context */
|
||||
rt_thread_t self = rt_thread_self();
|
||||
rt_uint8_t *stack_top = (rt_uint8_t *)self->stack_addr + self->stack_size;
|
||||
struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)(stack_top - sizeof(struct rt_hw_stack_frame));
|
||||
|
||||
return (void *)frame->user_sp_exc_stack;
|
||||
}
|
||||
|
||||
int arch_user_space_init(struct rt_lwp *lwp)
|
||||
{
|
||||
rt_ubase_t *mmu_table;
|
||||
|
||||
mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
|
||||
if (!mmu_table)
|
||||
{
|
||||
return -RT_ENOMEM;
|
||||
}
|
||||
|
||||
lwp->end_heap = USER_HEAP_VADDR;
|
||||
|
||||
rt_memcpy(mmu_table, rt_kernel_space.page_table, ARCH_PAGE_SIZE);
|
||||
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
|
||||
|
||||
lwp->aspace = rt_aspace_create(
|
||||
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
|
||||
if (!lwp->aspace)
|
||||
{
|
||||
return -RT_ERROR;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *arch_kernel_mmu_table_get(void)
|
||||
{
|
||||
return (void *)((char *)MMUTable);
|
||||
}
|
||||
|
||||
void arch_user_space_free(struct rt_lwp *lwp)
|
||||
{
|
||||
if (lwp)
|
||||
{
|
||||
RT_ASSERT(lwp->aspace);
|
||||
|
||||
void *pgtbl = lwp->aspace->page_table;
|
||||
rt_aspace_delete(lwp->aspace);
|
||||
|
||||
/* must be freed after aspace delete, pgtbl is required for unmap */
|
||||
rt_pages_free(pgtbl, 0);
|
||||
lwp->aspace = RT_NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_W("%s: NULL lwp as parameter", __func__);
|
||||
RT_ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
/* Implemented by the generic lwp syscall layer; this file only provides
 * the architecture-level entry points. */
long _sys_clone(void *arg[]);

/* clone() syscall entry: delegate unchanged to the generic implementation. */
long sys_clone(void *arg[])
{
    return _sys_clone(arg);
}
|
||||
|
||||
/* Implemented by the generic lwp syscall layer. */
long _sys_fork(void);

/* fork() syscall entry: delegate unchanged to the generic implementation. */
long sys_fork(void)
{
    return _sys_fork();
}
|
||||
|
||||
/* Implemented by the generic lwp syscall layer. */
long _sys_fork(void);

/*
 * vfork() syscall entry.
 *
 * vfork is implemented on top of fork here. The original code declared
 * `_sys_vfork()` but never referenced it while calling `_sys_fork()`;
 * declare the function that is actually used.
 */
long sys_vfork(void)
{
    return _sys_fork();
}
|
||||
|
||||
/**
|
||||
* set exec context for fork/clone.
|
||||
*/
|
||||
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
|
||||
void *user_stack, void **thread_sp)
|
||||
{
|
||||
RT_ASSERT(exit != RT_NULL);
|
||||
RT_ASSERT(user_stack != RT_NULL);
|
||||
RT_ASSERT(new_thread_stack != RT_NULL);
|
||||
RT_ASSERT(thread_sp != RT_NULL);
|
||||
struct rt_hw_stack_frame *syscall_frame;
|
||||
struct rt_hw_stack_frame *thread_frame;
|
||||
|
||||
rt_uint8_t *stk;
|
||||
rt_uint8_t *syscall_stk;
|
||||
|
||||
stk = (rt_uint8_t *)new_thread_stack;
|
||||
/* reserve syscall context, all the registers are copyed from parent */
|
||||
stk -= CTX_REG_NR * REGBYTES;
|
||||
syscall_stk = stk;
|
||||
|
||||
syscall_frame = (struct rt_hw_stack_frame *)stk;
|
||||
|
||||
/* modify user sp */
|
||||
syscall_frame->user_sp_exc_stack = (rt_ubase_t)user_stack;
|
||||
|
||||
/* skip ecall */
|
||||
syscall_frame->epc += 4;
|
||||
|
||||
/* child return value is 0 */
|
||||
syscall_frame->a0 = 0;
|
||||
syscall_frame->a1 = 0;
|
||||
|
||||
/* reset thread area */
|
||||
rt_thread_t thread = rt_container_of((unsigned long)thread_sp, struct rt_thread, sp);
|
||||
syscall_frame->tp = (rt_ubase_t)thread->thread_idr;
|
||||
|
||||
#ifdef ARCH_USING_NEW_CTX_SWITCH
|
||||
extern void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus);
|
||||
rt_ubase_t sstatus = read_csr(sstatus) | SSTATUS_SPP;
|
||||
sstatus &= ~SSTATUS_SIE;
|
||||
|
||||
/* compatible to RESTORE_CONTEXT */
|
||||
stk = (void *)_rt_hw_stack_init((rt_ubase_t *)stk, (rt_ubase_t)exit, sstatus);
|
||||
#else
|
||||
/* build temp thread context */
|
||||
stk -= sizeof(struct rt_hw_stack_frame);
|
||||
|
||||
thread_frame = (struct rt_hw_stack_frame *)stk;
|
||||
|
||||
int i;
|
||||
for (i = 0; i < sizeof(struct rt_hw_stack_frame) / sizeof(rt_ubase_t); i++)
|
||||
{
|
||||
((rt_ubase_t *)thread_frame)[i] = 0xdeadbeaf;
|
||||
}
|
||||
|
||||
/* set pc for thread */
|
||||
thread_frame->epc = (rt_ubase_t)exit;
|
||||
|
||||
/* set old exception mode as supervisor, because in kernel */
|
||||
thread_frame->sstatus = read_csr(sstatus) | SSTATUS_SPP;
|
||||
thread_frame->sstatus &= ~SSTATUS_SIE; /* must disable interrupt */
|
||||
|
||||
/* set stack as syscall stack */
|
||||
thread_frame->user_sp_exc_stack = (rt_ubase_t)syscall_stk;
|
||||
|
||||
#endif /* ARCH_USING_NEW_CTX_SWITCH */
|
||||
/* save new stack top */
|
||||
*thread_sp = (void *)stk;
|
||||
|
||||
/**
|
||||
* The stack for child thread:
|
||||
*
|
||||
* +------------------------+ --> kernel stack top
|
||||
* | syscall stack |
|
||||
* | |
|
||||
* | @sp | --> `user_stack`
|
||||
* | @epc | --> user ecall addr + 4 (skip ecall)
|
||||
* | @a0&a1 | --> 0 (for child return 0)
|
||||
* | |
|
||||
* +------------------------+ --> temp thread stack top
|
||||
* | temp thread stack | ^
|
||||
* | | |
|
||||
* | @sp | ---------/
|
||||
* | @epc | --> `exit` (arch_clone_exit/arch_fork_exit)
|
||||
* | |
|
||||
* +------------------------+ --> thread sp
|
||||
*/
|
||||
}
|
||||
|
||||
/**
 * void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
 *
 * Enter user mode at @user_entry with @args, using the top of the user
 * stack region (USER_STACK_VEND) as the initial user sp and
 * @kernel_stack for subsequent traps.
 */
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
{
    arch_start_umode(args, user_entry, (void *)USER_STACK_VEND, kernel_stack);
}
|
||||
|
||||
/* Extract the saved user stack pointer from a user context. */
void *arch_get_usp_from_uctx(struct rt_user_context *uctx)
{
    return uctx->sp;
}
|
||||
|
||||
#endif /* ARCH_MM_MMU */
|
68
components/lwp/arch/risc-v/rv64/lwp_arch.h
Normal file
68
components/lwp/arch/risc-v/rv64/lwp_arch.h
Normal file
|
@ -0,0 +1,68 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
*/
|
||||
|
||||
#ifndef LWP_ARCH_H__
|
||||
#define LWP_ARCH_H__
|
||||
|
||||
#include <rthw.h>
|
||||
#include <lwp.h>
|
||||
#include <lwp_arch_comm.h>
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
|
||||
#ifdef ARCH_MM_MMU_32BIT_LIMIT
|
||||
#define USER_HEAP_VADDR 0xF0000000UL
|
||||
#define USER_HEAP_VEND 0xFE000000UL
|
||||
#define USER_STACK_VSTART 0xE0000000UL
|
||||
#define USER_STACK_VEND USER_HEAP_VADDR
|
||||
#define USER_VADDR_START 0xC0000000UL
|
||||
#define USER_VADDR_TOP 0xFF000000UL
|
||||
#define USER_LOAD_VADDR 0xD0000000UL
|
||||
#define LDSO_LOAD_VADDR USER_LOAD_VADDR
|
||||
#else
|
||||
#define USER_HEAP_VADDR 0x300000000UL
|
||||
#define USER_HEAP_VEND 0xffffffffffff0000UL
|
||||
#define USER_STACK_VSTART 0x270000000UL
|
||||
#define USER_STACK_VEND USER_HEAP_VADDR
|
||||
#define USER_VADDR_START 0x200000000UL
|
||||
#define USER_VADDR_TOP 0xfffffffffffff000UL
|
||||
#define USER_LOAD_VADDR 0x200000000
|
||||
#define LDSO_LOAD_VADDR 0x200000000
|
||||
#endif
|
||||
|
||||
/* this attribution is cpu specified, and it should be defined in riscv_mmu.h */
|
||||
#ifndef MMU_MAP_U_RWCB
|
||||
#define MMU_MAP_U_RWCB 0
|
||||
#endif
|
||||
|
||||
#ifndef MMU_MAP_U_RW
|
||||
#define MMU_MAP_U_RW 0
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
 * Find the index (0-based, from the least-significant bit) of the first
 * zero bit in x.
 */
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
    unsigned long inverted = ~x;

    /* __builtin_ffsl is 1-based and scans for the first set bit */
    return __builtin_ffsl(inverted) - 1;
}
|
||||
|
||||
/* Invalidate the entire instruction cache (e.g. after writing code pages). */
rt_inline void icache_invalid_all(void)
{
    rt_hw_cpu_icache_invalidate_all();
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#endif /*LWP_ARCH_H__*/
|
348
components/lwp/arch/risc-v/rv64/lwp_gcc.S
Normal file
348
components/lwp/arch/risc-v/rv64/lwp_gcc.S
Normal file
|
@ -0,0 +1,348 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2018-12-10 Jesven first version
|
||||
* 2021-02-03 lizhirui port to riscv64
|
||||
* 2021-02-19 lizhirui port to new version of rt-smart
|
||||
* 2022-11-08 Wangxiaoyao Cleanup codes;
|
||||
* Support new context switch
|
||||
*/
|
||||
|
||||
#include "rtconfig.h"
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#define __ASSEMBLY__
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#include "cpuport.h"
|
||||
#include "encoding.h"
|
||||
#include "stackframe.h"
|
||||
#include "asm-generic.h"
|
||||
|
||||
.section .text.lwp
|
||||
|
||||
/*
 * void arch_start_umode(args, text, ustack, kstack);
 *
 * First entry into user mode: a0=args, a1=user entry, a2=user stack,
 * a3=kernel stack (parked in sscratch for the next trap).
 */
.global arch_start_umode
.type arch_start_umode, % function
arch_start_umode:
    // load kstack for user process
    csrw sscratch, a3
    li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
    csrc sstatus, t0
    li t0, SSTATUS_SPIE // enable interrupt when return to user mode
    csrs sstatus, t0

    csrw sepc, a1
    // NOTE(review): the user stack is moved to a3, not to sp — confirm the
    // user-side entry code expects it there before changing anything here
    mv a3, a2
    sret//enter user mode
|
||||
|
||||
/*
 * void arch_crt_start_umode(args, text, ustack, kstack);
 *
 * Like arch_start_umode, but first copies the lwp_thread_return
 * trampoline onto the user stack and points ra at it, so returning from
 * the user entry function performs the thread-exit syscall.
 */
.global arch_crt_start_umode
.type arch_crt_start_umode, % function
arch_crt_start_umode:
    li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
    csrc sstatus, t0
    li t0, SSTATUS_SPIE // enable interrupt when return to user mode
    csrs sstatus, t0

    csrw sepc, a1
    // preserve the arguments across the two C calls below
    mv s0, a0
    mv s1, a1
    mv s2, a2
    mv s3, a3
    mv a0, s2
    call lwp_copy_return_code_to_user_stack
    mv a0, s2
    call lwp_fix_sp
    // a0 now points just below the copied trampoline
    mv sp, a0//user_sp
    mv ra, a0//return address
    mv a0, s0//args

    csrw sscratch, s3
    sret//enter user mode
|
||||
|
||||
/**
 * Unify exit point from kernel mode to enter user space
 * we handle following things here:
 * 1. restoring user mode debug state (not support yet)
 * 2. handling thread's exit request
 * 3. handling POSIX signal
 * 4. restoring user context
 * 5. jump to user mode
 */
.global arch_ret_to_user
arch_ret_to_user:
    // TODO: we don't support kernel gdb server in risc-v yet
    // so we don't check debug state here and handle debugging business

    call lwp_check_exit_request
    beqz a0, 1f
    // exit was requested: terminate with status 0, never returns
    mv a0, x0
    call sys_exit

1:
    call lwp_signal_check
    beqz a0, ret_to_user_exit
    J user_do_signal

ret_to_user_exit:
    RESTORE_ALL
    // `RESTORE_ALL` also reset sp to user sp, and setup sscratch
    sret
|
||||
|
||||
/**
 * Return path of a user signal handler (sigreturn):
 * restore the user context from the exception frame stored on the user
 * stack and resume normal user execution.
 */
arch_signal_quit:
    call lwp_signal_restore
    call arch_get_usp_from_uctx
    // return value is user sp
    mv sp, a0

    // restore user sp before enter trap
    addi a0, sp, CTX_REG_NR * REGBYTES
    csrw sscratch, a0

    // reload the saved user frame, then immediately re-save it onto the
    // kernel stack so arch_ret_to_user sees a normal trap frame
    RESTORE_ALL
    SAVE_ALL
    j arch_ret_to_user
|
||||
|
||||
/**
 * Prepare and enter a user signal handler:
 * move the user exception frame onto the user stack and install the
 * sigreturn trampoline there, then sret into the handler.
 */
user_do_signal:
    /* prefetch ustack to avoid corrupted status in RESTORE/STORE pair below */
    LOAD t0, FRAME_OFF_SP(sp)
    addi t1, t0, -CTX_REG_NR * REGBYTES
    LOAD t2, (t0)
    li t3, -0x1000
1:
    // touch each page of the destination region (page-sized steps down)
    add t0, t0, t3
    LOAD t2, (t0)
    bgt t0, t1, 1b

    /** restore and backup kernel sp carefully to avoid leaking */
    addi t0, sp, CTX_REG_NR * REGBYTES
    csrw sscratch, t0

    // replay the frame through the user stack: after this pair, the
    // exception frame lives on the user stack and sp points at it
    RESTORE_ALL
    SAVE_ALL

    /**
     * save lwp_sigreturn trampoline in user memory, just below the frame
     */
    mv s0, sp
    la t0, lwp_sigreturn
    la t1, lwp_sigreturn_end
    // t1 <- size
    sub t1, t1, t0
    // s0 <- dst
    sub s0, s0, t1
    mv s2, t1
lwp_sigreturn_copy_loop:
    // byte-wise copy from the end toward the start
    addi t2, t1, -1
    add t3, t0, t2
    add t4, s0, t2
    lb t5, 0(t3)
    sb t5, 0(t4)
    mv t1, t2
    bnez t1, lwp_sigreturn_copy_loop

    /**
     * 1. clear sscratch & restore kernel sp to
     *    enter kernel mode routine
     * 2. storage exp frame address to restore context,
     *    by calling to lwp_signal_backup
     * 3. storage lwp_sigreturn entry address
     * 4. get signal id as param for signal handler
     */
    mv s1, sp
    csrrw sp, sscratch, x0

    /**
     * synchronize dcache & icache if target is
     * a Harvard Architecture machine, otherwise
     * do nothing
     */
    mv a0, s0
    mv a1, s2
    call rt_hw_sync_cache_local

    /**
     * backup user sp (point to saved exception frame, skip sigreturn routine)
     * And get signal id
     * a0: user sp
     * a1: user_pc (not used, marked as 0 to avoid abuse)
     * a2: user_flag (not used, marked as 0 to avoid abuse)
     */
    mv a0, s1
    mv a1, zero
    mv a2, zero
    call lwp_signal_backup

    /**
     * backup signal id in s2,
     * and get sighandler by signal id
     */
    mv s2, a0
    call lwp_sighandler_get

    /**
     * set register RA to the sigreturn trampoline (s0) so the handler
     * returns through it; set sp to user sp & save kernel sp in sscratch
     */
    mv ra, s0
    csrw sscratch, sp
    mv sp, s0

    /**
     * a0 is signal_handler;
     * s1 = (a0 == NULL) ? s0 (sigreturn trampoline) : a0 (handler)
     */
    mv s1, s0
    beqz a0, skip_user_signal_handler
    mv s1, a0

skip_user_signal_handler:
    // enter user mode and enable interrupt when return to user mode
    li t0, SSTATUS_SPP
    csrc sstatus, t0
    li t0, SSTATUS_SPIE
    csrs sstatus, t0

    // sepc <- signal_handler
    csrw sepc, s1
    // a0 <- signal id
    mv a0, s2
    sret
|
||||
|
||||
/*
 * User-mode trampolines. These snippets are copied into user memory and
 * executed there; each issues an ecall with a special syscall id in a7
 * (0xff = debug return, 0xfe = signal return, 1 = thread exit).
 * The *_end labels delimit the code so callers can compute its size.
 */
.align 3
lwp_debugreturn:
    li a7, 0xff
    ecall

.align 3
lwp_sigreturn:
    li a7, 0xfe
    ecall

.align 3
lwp_sigreturn_end:

.align 3
.global lwp_thread_return
lwp_thread_return:
    li a0, 0
    li a7, 1
    ecall

.align 3
.global lwp_thread_return_end
lwp_thread_return_end:
|
||||
|
||||
/* Read the thread pointer register (TLS / thread id register). */
.globl arch_get_tidr
arch_get_tidr:
    mv a0, tp
    ret

/* arch_set_thread_area is an alias of arch_set_tidr: both store a0 into tp. */
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
    mv tp, a0
    ret
|
||||
|
||||
/*
 * First-run entry of a forked/cloned child thread (installed as the
 * resume pc by arch_set_thread_context); both aliases simply take the
 * common syscall exit path back to user mode.
 */
.global arch_clone_exit
.global arch_fork_exit
arch_fork_exit:
arch_clone_exit:
    j arch_syscall_exit
|
||||
|
||||
/*
 * Syscall trap entry: move the trap context onto the thread's kernel
 * stack, divert sigreturn (id 0xfe) to arch_signal_quit, then dispatch
 * to the C syscall handler.
 */
START_POINT(syscall_entry)
#ifndef ARCH_USING_NEW_CTX_SWITCH
    //swap to thread kernel stack
    csrr t0, sstatus
    andi t0, t0, 0x100
    beqz t0, __restore_sp_from_tcb

__restore_sp_from_sscratch: // from kernel
    csrr t0, sscratch
    j __move_stack_context

__restore_sp_from_tcb: // from user
    la a0, rt_current_thread
    LOAD a0, 0(a0)
    jal get_thread_kernel_stack_top
    mv t0, a0

__move_stack_context:
    mv t1, sp//src
    mv sp, t0//switch stack
    addi sp, sp, -CTX_REG_NR * REGBYTES
    //copy context
    li s0, CTX_REG_NR//cnt
    mv t2, sp//dst

copy_context_loop:
    LOAD t0, 0(t1)
    STORE t0, 0(t2)
    addi s0, s0, -1
    addi t1, t1, 8
    addi t2, t2, 8
    bnez s0, copy_context_loop
#endif /* ARCH_USING_NEW_CTX_SWITCH */

    /* fetch SYSCALL ID from the saved a7 slot in the frame */
    LOAD a7, 17 * REGBYTES(sp)
    // id 0xfe is the sigreturn trampoline's ecall
    addi a7, a7, -0xfe
    beqz a7, arch_signal_quit

#ifdef ARCH_MM_MMU
    /* save setting when syscall enter */
    call rt_thread_self
    call lwp_user_setting_save
#endif

    // syscall_handler takes the whole frame; interrupts may run during it
    mv a0, sp
    OPEN_INTERRUPT
    call syscall_handler
    j arch_syscall_exit
START_POINT_END(syscall_entry)
|
||||
|
||||
/*
 * Common syscall exit: if the trap came from user mode, take the full
 * arch_ret_to_user path (exit/signal checks); otherwise restore the
 * kernel context directly.
 */
.global arch_syscall_exit
arch_syscall_exit:
    CLOSE_INTERRUPT

#if defined(ARCH_MM_MMU)
    // saved sstatus.SPP (bit 0x100) tells where the trap came from
    LOAD s0, 2 * REGBYTES(sp)
    andi s0, s0, 0x100
    bnez s0, dont_ret_to_user
    j arch_ret_to_user
#endif
dont_ret_to_user:

#ifdef ARCH_MM_MMU
    /* restore setting when syscall exit */
    call rt_thread_self
    call lwp_user_setting_restore

    /* after restore the reg `tp`, need modify context */
    STORE tp, 4 * REGBYTES(sp)
#endif

    //restore context
    RESTORE_ALL
    csrw sscratch, zero
    sret
|
109
components/lwp/arch/risc-v/rv64/reloc.c
Normal file
109
components/lwp/arch/risc-v/rv64/reloc.c
Normal file
|
@ -0,0 +1,109 @@
|
|||
#include "mm_aspace.h"
|
||||
#include <rtthread.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <elf.h>
|
||||
#ifdef ARCH_MM_MMU
|
||||
#include <mmu.h>
|
||||
#include <page.h>
|
||||
#endif
|
||||
|
||||
/*
 * Symbol-table entry layout consumed by arch_elf_reloc below.
 *
 * NOTE(review): the member order (name, value, size, info, other, shndx)
 * matches the ELF32 symbol layout widened to 64-bit types, not the
 * standard Elf64_Sym order (name, info, other, shndx, value, size) —
 * confirm against the producer of the `dynsym` table before changing.
 */
typedef struct
{
    Elf64_Word st_name;   /* index into the string table */
    Elf64_Addr st_value;  /* symbol value (module-relative here) */
    Elf64_Word st_size;   /* symbol size */
    unsigned char st_info;
    unsigned char st_other;
    Elf64_Half st_shndx;
} Elf64_sym;
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
/*
 * Apply dynamic relocations to a loaded user image (MMU build).
 *
 * Walks the .rel.dyn table (8-byte entries: 4-byte offset + 4-byte
 * info) and the GOT, patching through the physical addresses obtained
 * from rt_hw_mmu_v2p, since the user mappings are not active here.
 *
 * NOTE(review): the relocation type constants are the ARM ones
 * (R_ARM_RELATIVE / R_ARM_ABS32) although this sits under risc-v/rv64 —
 * presumably carried over from the ARM port; confirm against the
 * toolchain producing these images.
 */
void arch_elf_reloc(rt_aspace_t aspace, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
{
    size_t rel_off;
    void* addr;

    /* a relocation table without a symbol table cannot be processed */
    if (rel_dyn_size && !dynsym)
    {
        return;
    }
    for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
    {
        uint32_t v1, v2; /* v1 = place offset, v2 = (sym << 8) | type */

        addr = rt_hw_mmu_v2p(aspace, (void *)(((rt_size_t)rel_dyn_start) + rel_off));
        memcpy(&v1, addr, 4);
        addr = rt_hw_mmu_v2p(aspace, (void *)(((rt_size_t)rel_dyn_start) + rel_off + 4));
        memcpy(&v2, addr, 4);

        /* translate the target slot to its physical address for patching */
        addr = rt_hw_mmu_v2p(aspace, (void *)((rt_size_t)text_start + v1));
        if ((v2 & 0xff) == R_ARM_RELATIVE)
        {
            /* base-relative: add the load base */
            *(rt_size_t*)addr += (rt_size_t)text_start;
        }
        else if ((v2 & 0xff) == R_ARM_ABS32)
        {
            uint32_t t;
            t = (v2 >> 8); /* symbol index */
            if (t) /* 0 is UDF */
            {
                *(rt_size_t*)addr = (((rt_size_t)text_start) + dynsym[t].st_value);
            }
        }
    }
    /* modify got: every GOT entry is rebased by the load address */
    if (got_size)
    {
        uint32_t *got_item = (uint32_t*)got_start;

        for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
        {
            addr = rt_hw_mmu_v2p(aspace, got_item);
            *(rt_size_t *)addr += (rt_size_t)text_start;
        }
    }
}
|
||||
#else
|
||||
|
||||
/*
 * Apply dynamic relocations to a loaded user image (no-MMU build).
 *
 * Same table walk as the MMU variant, but memory is identity-mapped so
 * the image can be patched directly through virtual addresses.
 *
 * NOTE(review): uses ARM relocation constants and 32-bit patch widths in
 * an rv64 directory — presumably inherited from the ARM port; verify
 * against the image producer.
 */
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
{
    size_t rel_off;

    /* a relocation table without a symbol table cannot be processed */
    if (rel_dyn_size && !dynsym)
    {
        return;
    }
    for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
    {
        uint32_t v1, v2; /* v1 = place offset, v2 = (sym << 8) | type */

        memcpy(&v1, ((rt_uint8_t *)rel_dyn_start) + rel_off, 4);
        memcpy(&v2, ((rt_uint8_t *)rel_dyn_start) + rel_off + 4, 4);

        if ((v2 & 0xff) == R_ARM_RELATIVE)
        {
            /* base-relative: add the load base */
            *(uint32_t*)(((rt_size_t)text_start) + v1) += (uint32_t)text_start;
        }
        else if ((v2 & 0xff) == R_ARM_ABS32)
        {
            uint32_t t;
            t = (v2 >> 8); /* symbol index */
            if (t) /* 0 is UDF */
            {
                *(uint32_t*)(((rt_size_t)text_start) + v1) = (uint32_t)(((rt_size_t)text_start) + dynsym[t].st_value);
            }
        }
    }
    /* modify got: every GOT entry is rebased by the load address */
    if (got_size)
    {
        uint32_t *got_item = (uint32_t*)got_start;

        for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
        {
            *got_item += (uint32_t)text_start;
        }
    }
}
|
||||
#endif
|
Loading…
Add table
Add a link
Reference in a new issue