import RT-Thread@9217865c without bsp, libcpu and components/net
This commit is contained in:
commit
e2376a3709
1414 changed files with 390370 additions and 0 deletions
67
components/lwp/Kconfig
Normal file
67
components/lwp/Kconfig
Normal file
|
@ -0,0 +1,67 @@
|
|||
menuconfig RT_USING_LWP
|
||||
bool "light-weight process"
|
||||
depends on RT_USING_SMART
|
||||
default y
|
||||
help
|
||||
The lwP is a light weight process running in user mode.
|
||||
|
||||
if RT_USING_LWP
|
||||
config RT_LWP_MAX_NR
|
||||
int "The max number of light-weight process"
|
||||
default 30
|
||||
|
||||
config LWP_TASK_STACK_SIZE
|
||||
int "The lwp thread kernel stack size"
|
||||
default 16384
|
||||
|
||||
config RT_CH_MSG_MAX_NR
|
||||
int "The maximum number of channel messages"
|
||||
default 1024
|
||||
|
||||
config LWP_CONSOLE_INPUT_BUFFER_SIZE
|
||||
int "The input buffer size of lwp console device"
|
||||
default 1024
|
||||
|
||||
config LWP_TID_MAX_NR
|
||||
int "The maximum number of lwp thread id"
|
||||
default 64
|
||||
|
||||
config LWP_ENABLE_ASID
|
||||
bool "The switch of ASID feature"
|
||||
depends on ARCH_ARM_CORTEX_A
|
||||
default y
|
||||
|
||||
if ARCH_MM_MMU
|
||||
config RT_LWP_SHM_MAX_NR
|
||||
int "The maximum number of shared memory"
|
||||
default 64
|
||||
endif
|
||||
|
||||
if ARCH_MM_MPU
|
||||
config RT_LWP_MPU_MAX_NR
|
||||
int "The maximum number of mpu region"
|
||||
default 2
|
||||
|
||||
config RT_LWP_USING_SHM
|
||||
bool "Enable shared memory"
|
||||
default y
|
||||
endif
|
||||
|
||||
config LWP_UNIX98_PTY
|
||||
bool "The unix98 PTY support"
|
||||
default n
|
||||
|
||||
if LWP_UNIX98_PTY
|
||||
config LWP_PTY_INPUT_BFSZ
|
||||
int "The unix98 PTY input buffer size"
|
||||
default 1024
|
||||
|
||||
config LWP_PTY_PTS_SIZE
|
||||
int "The unix98 PTY device max num"
|
||||
default 3
|
||||
|
||||
config LWP_PTY_USING_DEBUG
|
||||
bool "The unix98 PTY debug output"
|
||||
default n
|
||||
endif
|
||||
endif
|
45
components/lwp/SConscript
Normal file
45
components/lwp/SConscript
Normal file
|
@ -0,0 +1,45 @@
|
|||
Import('rtconfig')
|
||||
from building import *
|
||||
import os
|
||||
|
||||
cwd = GetCurrentDir()
|
||||
src = []
|
||||
CPPPATH = [cwd]
|
||||
|
||||
support_arch = {"arm": ["cortex-m3", "cortex-m4", "cortex-m7", "arm926", "cortex-a"],
|
||||
"aarch64":["cortex-a"],
|
||||
"risc-v": ["rv64"],
|
||||
"x86": ["i386"]}
|
||||
platform_file = {'armcc': 'rvds.S', 'gcc': 'gcc.S', 'iar': 'iar.S'}
|
||||
|
||||
platform = rtconfig.PLATFORM
|
||||
arch = rtconfig.ARCH
|
||||
cpu = rtconfig.CPU
|
||||
|
||||
# fix the cpu for risc-v
|
||||
if arch == 'risc-v':
|
||||
rv64 = ['virt64', 'c906']
|
||||
if cpu in rv64:
|
||||
cpu = 'rv64'
|
||||
|
||||
if GetDepend('LWP_UNIX98_PTY'):
|
||||
# print("LWP_UNIX98_PTY")
|
||||
src += Glob('unix98pty/*.c')
|
||||
CPPPATH += ['unix98pty/']
|
||||
|
||||
if platform in platform_file.keys(): # support platforms
|
||||
if arch in support_arch.keys() and cpu in support_arch[arch]:
|
||||
asm_path = 'arch/' + arch + '/' + cpu + '/*_' + platform_file[platform]
|
||||
arch_common = 'arch/' + arch + '/' + 'common/*.c'
|
||||
if not GetDepend('ARCH_MM_MMU'):
|
||||
excluded_files = ['ioremap.c', 'lwp_futex.c', 'lwp_mm_area.c', 'lwp_pmutex.c', 'lwp_shm.c', 'lwp_user_mm.c']
|
||||
src += [f for f in Glob('*.c') if os.path.basename(str(f)) not in excluded_files] + Glob(asm_path) + Glob(arch_common)
|
||||
else:
|
||||
src += Glob('*.c') + Glob(asm_path) + Glob(arch_common)
|
||||
src += Glob('arch/' + arch + '/' + cpu + '/*.c')
|
||||
CPPPATH = [cwd]
|
||||
CPPPATH += [cwd + '/arch/' + arch + '/' + cpu]
|
||||
|
||||
group = DefineGroup('lwP', src, depend = ['RT_USING_SMART'], CPPPATH = CPPPATH)
|
||||
|
||||
Return('group')
|
28
components/lwp/arch/aarch64/common/reloc.c
Normal file
28
components/lwp/arch/aarch64/common/reloc.c
Normal file
|
@ -0,0 +1,28 @@
|
|||
#include <rtthread.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <elf.h>
|
||||
#ifdef ARCH_MM_MMU
|
||||
#include <mmu.h>
|
||||
#include <page.h>
|
||||
#endif
|
||||
|
||||
#define Elf_Word Elf64_Word
|
||||
#define Elf_Addr Elf64_Addr
|
||||
#define Elf_Half Elf64_Half
|
||||
#define Elf_Ehdr Elf64_Ehdr #define Elf_Phdr Elf64_Phdr
|
||||
#define Elf_Shdr Elf64_Shdr
|
||||
|
||||
typedef struct
|
||||
{
|
||||
Elf_Word st_name;
|
||||
Elf_Addr st_value;
|
||||
Elf_Word st_size;
|
||||
unsigned char st_info;
|
||||
unsigned char st_other;
|
||||
Elf_Half st_shndx;
|
||||
} Elf_sym;
|
||||
|
||||
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym)
|
||||
{
|
||||
}
|
94
components/lwp/arch/aarch64/cortex-a/lwp_arch.c
Normal file
94
components/lwp/arch/aarch64/cortex-a/lwp_arch.c
Normal file
|
@ -0,0 +1,94 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-05-18 Jesven first version
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
|
||||
#define DBG_TAG "lwp.arch"
|
||||
#define DBG_LVL DBG_INFO
|
||||
#include <rtdbg.h>
|
||||
|
||||
#include <lwp_arch.h>
|
||||
#include <lwp_user_mm.h>
|
||||
|
||||
extern size_t MMUTable[];
|
||||
|
||||
int arch_user_space_init(struct rt_lwp *lwp)
|
||||
{
|
||||
size_t *mmu_table;
|
||||
|
||||
mmu_table = (size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
|
||||
if (!mmu_table)
|
||||
{
|
||||
return -RT_ENOMEM;
|
||||
}
|
||||
|
||||
lwp->end_heap = USER_HEAP_VADDR;
|
||||
|
||||
memset(mmu_table, 0, ARCH_PAGE_SIZE);
|
||||
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
|
||||
|
||||
lwp->aspace = rt_aspace_create(
|
||||
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
|
||||
if (!lwp->aspace)
|
||||
{
|
||||
return -RT_ERROR;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *arch_kernel_mmu_table_get(void)
|
||||
{
|
||||
return (void *)NULL;
|
||||
}
|
||||
|
||||
void arch_user_space_free(struct rt_lwp *lwp)
|
||||
{
|
||||
if (lwp)
|
||||
{
|
||||
RT_ASSERT(lwp->aspace);
|
||||
void *pgtbl = lwp->aspace->page_table;
|
||||
rt_aspace_delete(lwp->aspace);
|
||||
|
||||
/* must be freed after aspace delete, pgtbl is required for unmap */
|
||||
rt_pages_free(pgtbl, 0);
|
||||
lwp->aspace = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_W("%s: NULL lwp as parameter", __func__);
|
||||
RT_ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
int arch_expand_user_stack(void *addr)
|
||||
{
|
||||
int ret = 0;
|
||||
size_t stack_addr = (size_t)addr;
|
||||
|
||||
stack_addr &= ~ARCH_PAGE_MASK;
|
||||
if ((stack_addr >= (size_t)USER_STACK_VSTART) &&
|
||||
(stack_addr < (size_t)USER_STACK_VEND))
|
||||
{
|
||||
void *map =
|
||||
lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);
|
||||
|
||||
if (map || lwp_user_accessable(addr, 1))
|
||||
{
|
||||
ret = 1;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif
|
45
components/lwp/arch/aarch64/cortex-a/lwp_arch.h
Normal file
45
components/lwp/arch/aarch64/cortex-a/lwp_arch.h
Normal file
|
@ -0,0 +1,45 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-05-18 Jesven first version
|
||||
*/
|
||||
|
||||
#ifndef LWP_ARCH_H__
|
||||
#define LWP_ARCH_H__
|
||||
|
||||
#include <lwp.h>
|
||||
#include <lwp_arch_comm.h>
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
|
||||
#define USER_VADDR_TOP 0x0001000000000000UL
|
||||
#define USER_HEAP_VEND 0x0000ffffB0000000UL
|
||||
#define USER_HEAP_VADDR 0x0000ffff80000000UL
|
||||
#define USER_STACK_VSTART 0x0000ffff70000000UL
|
||||
#define USER_STACK_VEND USER_HEAP_VADDR
|
||||
#define LDSO_LOAD_VADDR 0x60000000UL
|
||||
#define USER_VADDR_START 0x00200000UL
|
||||
#define USER_LOAD_VADDR USER_VADDR_START
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
unsigned long rt_hw_ffz(unsigned long x);
|
||||
|
||||
rt_inline void icache_invalid_all(void)
|
||||
{
|
||||
asm volatile ("ic ialluis\n\tisb sy":::"memory");
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#endif /*LWP_ARCH_H__*/
|
593
components/lwp/arch/aarch64/cortex-a/lwp_gcc.S
Normal file
593
components/lwp/arch/aarch64/cortex-a/lwp_gcc.S
Normal file
|
@ -0,0 +1,593 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-05-18 Jesven first version
|
||||
*/
|
||||
|
||||
#include "rtconfig.h"
|
||||
#include "asm-generic.h"
|
||||
#include "asm-fpu.h"
|
||||
|
||||
/*********************
|
||||
* SPSR BIT *
|
||||
*********************/
|
||||
|
||||
#define SPSR_Mode(v) ((v) << 0)
|
||||
#define SPSR_A64 (0 << 4)
|
||||
#define SPSR_RESEVRED_5 (0 << 5)
|
||||
#define SPSR_FIQ_MASKED(v) ((v) << 6)
|
||||
#define SPSR_IRQ_MASKED(v) ((v) << 7)
|
||||
#define SPSR_SERROR_MASKED(v) ((v) << 8)
|
||||
#define SPSR_D_MASKED(v) ((v) << 9)
|
||||
#define SPSR_RESEVRED_10_19 (0 << 10)
|
||||
#define SPSR_IL(v) ((v) << 20)
|
||||
#define SPSR_SS(v) ((v) << 21)
|
||||
#define SPSR_RESEVRED_22_27 (0 << 22)
|
||||
#define SPSR_V(v) ((v) << 28)
|
||||
#define SPSR_C(v) ((v) << 29)
|
||||
#define SPSR_Z(v) ((v) << 30)
|
||||
#define SPSR_N(v) ((v) << 31)
|
||||
|
||||
/*********************
|
||||
* CONTEXT_OFFSET *
|
||||
*********************/
|
||||
|
||||
#define CONTEXT_OFFSET_ELR_EL1 0x0
|
||||
#define CONTEXT_OFFSET_SPSR_EL1 0x8
|
||||
#define CONTEXT_OFFSET_SP_EL0 0x10
|
||||
#define CONTEXT_OFFSET_X30 0x18
|
||||
#define CONTEXT_OFFSET_FPCR 0x20
|
||||
#define CONTEXT_OFFSET_FPSR 0x28
|
||||
#define CONTEXT_OFFSET_X28 0x30
|
||||
#define CONTEXT_OFFSET_X29 0x38
|
||||
#define CONTEXT_OFFSET_X26 0x40
|
||||
#define CONTEXT_OFFSET_X27 0x48
|
||||
#define CONTEXT_OFFSET_X24 0x50
|
||||
#define CONTEXT_OFFSET_X25 0x58
|
||||
#define CONTEXT_OFFSET_X22 0x60
|
||||
#define CONTEXT_OFFSET_X23 0x68
|
||||
#define CONTEXT_OFFSET_X20 0x70
|
||||
#define CONTEXT_OFFSET_X21 0x78
|
||||
#define CONTEXT_OFFSET_X18 0x80
|
||||
#define CONTEXT_OFFSET_X19 0x88
|
||||
#define CONTEXT_OFFSET_X16 0x90
|
||||
#define CONTEXT_OFFSET_X17 0x98
|
||||
#define CONTEXT_OFFSET_X14 0xa0
|
||||
#define CONTEXT_OFFSET_X15 0xa8
|
||||
#define CONTEXT_OFFSET_X12 0xb0
|
||||
#define CONTEXT_OFFSET_X13 0xb8
|
||||
#define CONTEXT_OFFSET_X10 0xc0
|
||||
#define CONTEXT_OFFSET_X11 0xc8
|
||||
#define CONTEXT_OFFSET_X8 0xd0
|
||||
#define CONTEXT_OFFSET_X9 0xd8
|
||||
#define CONTEXT_OFFSET_X6 0xe0
|
||||
#define CONTEXT_OFFSET_X7 0xe8
|
||||
#define CONTEXT_OFFSET_X4 0xf0
|
||||
#define CONTEXT_OFFSET_X5 0xf8
|
||||
#define CONTEXT_OFFSET_X2 0x100
|
||||
#define CONTEXT_OFFSET_X3 0x108
|
||||
#define CONTEXT_OFFSET_X0 0x110
|
||||
#define CONTEXT_OFFSET_X1 0x118
|
||||
|
||||
#define CONTEXT_OFFSET_Q15 0x120
|
||||
#define CONTEXT_OFFSET_Q14 0x130
|
||||
#define CONTEXT_OFFSET_Q13 0x140
|
||||
#define CONTEXT_OFFSET_Q12 0x150
|
||||
#define CONTEXT_OFFSET_Q11 0x160
|
||||
#define CONTEXT_OFFSET_Q10 0x170
|
||||
#define CONTEXT_OFFSET_Q9 0x180
|
||||
#define CONTEXT_OFFSET_Q8 0x190
|
||||
#define CONTEXT_OFFSET_Q7 0x1a0
|
||||
#define CONTEXT_OFFSET_Q6 0x1b0
|
||||
#define CONTEXT_OFFSET_Q5 0x1c0
|
||||
#define CONTEXT_OFFSET_Q4 0x1d0
|
||||
#define CONTEXT_OFFSET_Q3 0x1e0
|
||||
#define CONTEXT_OFFSET_Q2 0x1f0
|
||||
#define CONTEXT_OFFSET_Q1 0x200
|
||||
#define CONTEXT_OFFSET_Q0 0x210
|
||||
|
||||
#define CONTEXT_FPU_SIZE 0x100
|
||||
#define CONTEXT_SIZE 0x220
|
||||
|
||||
/**************************************************/
|
||||
|
||||
.text
|
||||
|
||||
/*
|
||||
* void arch_start_umode(args, text, ustack, kstack);
|
||||
*/
|
||||
.global arch_start_umode
|
||||
.type arch_start_umode, % function
|
||||
arch_start_umode:
|
||||
mov sp, x3
|
||||
mov x4, #(SPSR_Mode(0) | SPSR_A64)
|
||||
mov x3, x2 ;/* user stack top */
|
||||
msr daifset, #3
|
||||
dsb sy
|
||||
mrs x30, sp_el0
|
||||
msr spsr_el1, x4
|
||||
msr elr_el1, x1
|
||||
eret
|
||||
|
||||
/*
|
||||
* void arch_crt_start_umode(args, text, ustack, kstack);
|
||||
*/
|
||||
.global arch_crt_start_umode
|
||||
.type arch_crt_start_umode, % function
|
||||
arch_crt_start_umode:
|
||||
sub x4, x2, #0x10
|
||||
adr x2, lwp_thread_return
|
||||
ldr x5, [x2]
|
||||
str x5, [x4]
|
||||
ldr x5, [x2, #4]
|
||||
str x5, [x4, #4]
|
||||
ldr x5, [x2, #8]
|
||||
str x5, [x4, #8]
|
||||
|
||||
mov x5, x4
|
||||
dc cvau, x5
|
||||
add x5, x5, #8
|
||||
dc cvau, x5
|
||||
dsb sy
|
||||
ic ialluis
|
||||
dsb sy
|
||||
|
||||
msr sp_el0, x4
|
||||
|
||||
mov sp, x3
|
||||
mov x4, #(SPSR_Mode(0) | SPSR_A64)
|
||||
msr daifset, #3
|
||||
dsb sy
|
||||
mrs x30, sp_el0
|
||||
msr spsr_el1, x4
|
||||
msr elr_el1, x1
|
||||
eret
|
||||
|
||||
/*
|
||||
void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
|
||||
*/
|
||||
.global arch_set_thread_context
|
||||
arch_set_thread_context:
|
||||
sub x1, x1, #CONTEXT_SIZE
|
||||
str x2, [x1, #CONTEXT_OFFSET_SP_EL0]
|
||||
sub x1, x1, #CONTEXT_SIZE
|
||||
str xzr, [x1, #CONTEXT_OFFSET_X0] /* new thread return 0 */
|
||||
mov x4, #((3 << 6) | 0x4 | 0x1) /* el1h, disable interrupt */
|
||||
str x4, [x1, #CONTEXT_OFFSET_SPSR_EL1]
|
||||
str x0, [x1, #CONTEXT_OFFSET_ELR_EL1]
|
||||
str x1, [x3]
|
||||
ret
|
||||
|
||||
.global arch_get_user_sp
|
||||
arch_get_user_sp:
|
||||
mrs x0, sp_el0
|
||||
ret
|
||||
|
||||
.global arch_fork_exit
|
||||
.global arch_clone_exit
|
||||
arch_fork_exit:
|
||||
arch_clone_exit:
|
||||
b arch_syscall_exit
|
||||
|
||||
/*
|
||||
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
|
||||
*/
|
||||
.global lwp_exec_user
|
||||
lwp_exec_user:
|
||||
mov sp, x1
|
||||
mov x4, #(SPSR_Mode(0) | SPSR_A64)
|
||||
ldr x3, =0x0000ffff80000000
|
||||
msr daifset, #3
|
||||
msr spsr_el1, x4
|
||||
msr elr_el1, x2
|
||||
eret
|
||||
|
||||
/*
|
||||
* void SVC_Handler(regs);
|
||||
* since this routine reset the SP, we take it as a start point
|
||||
*/
|
||||
START_POINT(SVC_Handler)
|
||||
/* x0 is initial sp */
|
||||
mov sp, x0
|
||||
|
||||
msr daifclr, #3 /* enable interrupt */
|
||||
|
||||
bl rt_thread_self
|
||||
bl lwp_user_setting_save
|
||||
|
||||
ldp x8, x9, [sp, #(CONTEXT_OFFSET_X8)]
|
||||
and x0, x8, #0xf000
|
||||
cmp x0, #0xe000
|
||||
beq arch_signal_quit
|
||||
|
||||
cmp x0, #0xf000
|
||||
beq ret_from_user
|
||||
|
||||
uxtb x0, w8
|
||||
bl lwp_get_sys_api
|
||||
cmp x0, xzr
|
||||
mov x30, x0
|
||||
beq arch_syscall_exit
|
||||
ldp x0, x1, [sp, #(CONTEXT_OFFSET_X0)]
|
||||
ldp x2, x3, [sp, #(CONTEXT_OFFSET_X2)]
|
||||
ldp x4, x5, [sp, #(CONTEXT_OFFSET_X4)]
|
||||
ldp x6, x7, [sp, #(CONTEXT_OFFSET_X6)]
|
||||
blr x30
|
||||
/* jump explictly, make this code position independant */
|
||||
b arch_syscall_exit
|
||||
START_POINT_END(SVC_Handler)
|
||||
|
||||
.global arch_syscall_exit
|
||||
arch_syscall_exit:
|
||||
msr daifset, #3
|
||||
|
||||
ldp x2, x3, [sp], #0x10 /* SPSR and ELR. */
|
||||
msr spsr_el1, x3
|
||||
msr elr_el1, x2
|
||||
|
||||
ldp x29, x30, [sp], #0x10
|
||||
msr sp_el0, x29
|
||||
ldp x28, x29, [sp], #0x10
|
||||
msr fpcr, x28
|
||||
msr fpsr, x29
|
||||
ldp x28, x29, [sp], #0x10
|
||||
ldp x26, x27, [sp], #0x10
|
||||
ldp x24, x25, [sp], #0x10
|
||||
ldp x22, x23, [sp], #0x10
|
||||
ldp x20, x21, [sp], #0x10
|
||||
ldp x18, x19, [sp], #0x10
|
||||
ldp x16, x17, [sp], #0x10
|
||||
ldp x14, x15, [sp], #0x10
|
||||
ldp x12, x13, [sp], #0x10
|
||||
ldp x10, x11, [sp], #0x10
|
||||
ldp x8, x9, [sp], #0x10
|
||||
add sp, sp, #0x40
|
||||
RESTORE_FPU sp
|
||||
|
||||
.global arch_ret_to_user
|
||||
arch_ret_to_user:
|
||||
SAVE_FPU sp
|
||||
stp x0, x1, [sp, #-0x10]!
|
||||
stp x2, x3, [sp, #-0x10]!
|
||||
stp x4, x5, [sp, #-0x10]!
|
||||
stp x6, x7, [sp, #-0x10]!
|
||||
stp x8, x9, [sp, #-0x10]!
|
||||
stp x10, x11, [sp, #-0x10]!
|
||||
stp x12, x13, [sp, #-0x10]!
|
||||
stp x14, x15, [sp, #-0x10]!
|
||||
stp x16, x17, [sp, #-0x10]!
|
||||
stp x18, x19, [sp, #-0x10]!
|
||||
stp x20, x21, [sp, #-0x10]!
|
||||
stp x22, x23, [sp, #-0x10]!
|
||||
stp x24, x25, [sp, #-0x10]!
|
||||
stp x26, x27, [sp, #-0x10]!
|
||||
stp x28, x29, [sp, #-0x10]!
|
||||
|
||||
mrs x0, fpcr
|
||||
mrs x1, fpsr
|
||||
stp x0, x1, [sp, #-0x10]!
|
||||
stp x29, x30, [sp, #-0x10]!
|
||||
|
||||
bl lwp_check_debug
|
||||
bl lwp_check_exit_request
|
||||
cbz w0, 1f
|
||||
mov x0, xzr
|
||||
b sys_exit
|
||||
1:
|
||||
ldr x0, =rt_dbg_ops
|
||||
ldr x0, [x0]
|
||||
cbz x0, 3f
|
||||
bl dbg_thread_in_debug
|
||||
mov x1, #(1 << 21)
|
||||
mrs x2, spsr_el1
|
||||
cbz w0, 2f
|
||||
orr x2, x2, x1
|
||||
msr spsr_el1, x2
|
||||
b 3f
|
||||
2:
|
||||
bic x2, x2, x1
|
||||
msr spsr_el1, x2
|
||||
3:
|
||||
bl lwp_signal_check
|
||||
cmp x0, xzr
|
||||
|
||||
ldp x29, x30, [sp], #0x10
|
||||
ldp x0, x1, [sp], #0x10
|
||||
msr fpcr, x0
|
||||
msr fpsr, x1
|
||||
|
||||
ldp x28, x29, [sp], #0x10
|
||||
ldp x26, x27, [sp], #0x10
|
||||
ldp x24, x25, [sp], #0x10
|
||||
ldp x22, x23, [sp], #0x10
|
||||
ldp x20, x21, [sp], #0x10
|
||||
ldp x18, x19, [sp], #0x10
|
||||
ldp x16, x17, [sp], #0x10
|
||||
ldp x14, x15, [sp], #0x10
|
||||
ldp x12, x13, [sp], #0x10
|
||||
ldp x10, x11, [sp], #0x10
|
||||
ldp x8, x9, [sp], #0x10
|
||||
ldp x6, x7, [sp], #0x10
|
||||
ldp x4, x5, [sp], #0x10
|
||||
ldp x2, x3, [sp], #0x10
|
||||
ldp x0, x1, [sp], #0x10
|
||||
RESTORE_FPU sp
|
||||
|
||||
bne user_do_signal
|
||||
|
||||
stp x0, x1, [sp, #-0x10]!
|
||||
ldr x0, =rt_dbg_ops
|
||||
ldr x0, [x0]
|
||||
cmp x0, xzr
|
||||
ldp x0, x1, [sp], #0x10
|
||||
beq 1f
|
||||
SAVE_FPU sp
|
||||
stp x0, x1, [sp, #-0x10]!
|
||||
stp x2, x3, [sp, #-0x10]!
|
||||
stp x4, x5, [sp, #-0x10]!
|
||||
stp x6, x7, [sp, #-0x10]!
|
||||
stp x8, x9, [sp, #-0x10]!
|
||||
stp x10, x11, [sp, #-0x10]!
|
||||
stp x12, x13, [sp, #-0x10]!
|
||||
stp x14, x15, [sp, #-0x10]!
|
||||
stp x16, x17, [sp, #-0x10]!
|
||||
stp x18, x19, [sp, #-0x10]!
|
||||
stp x20, x21, [sp, #-0x10]!
|
||||
stp x22, x23, [sp, #-0x10]!
|
||||
stp x24, x25, [sp, #-0x10]!
|
||||
stp x26, x27, [sp, #-0x10]!
|
||||
stp x28, x29, [sp, #-0x10]!
|
||||
mrs x0, fpcr
|
||||
mrs x1, fpsr
|
||||
stp x0, x1, [sp, #-0x10]!
|
||||
stp x29, x30, [sp, #-0x10]!
|
||||
mrs x0, elr_el1
|
||||
bl dbg_attach_req
|
||||
ldp x29, x30, [sp], #0x10
|
||||
ldp x0, x1, [sp], #0x10
|
||||
msr fpcr, x0
|
||||
msr fpsr, x1
|
||||
ldp x28, x29, [sp], #0x10
|
||||
ldp x26, x27, [sp], #0x10
|
||||
ldp x24, x25, [sp], #0x10
|
||||
ldp x22, x23, [sp], #0x10
|
||||
ldp x20, x21, [sp], #0x10
|
||||
ldp x18, x19, [sp], #0x10
|
||||
ldp x16, x17, [sp], #0x10
|
||||
ldp x14, x15, [sp], #0x10
|
||||
ldp x12, x13, [sp], #0x10
|
||||
ldp x10, x11, [sp], #0x10
|
||||
ldp x8, x9, [sp], #0x10
|
||||
ldp x6, x7, [sp], #0x10
|
||||
ldp x4, x5, [sp], #0x10
|
||||
ldp x2, x3, [sp], #0x10
|
||||
ldp x0, x1, [sp], #0x10
|
||||
RESTORE_FPU sp
|
||||
1:
|
||||
eret
|
||||
|
||||
/*
|
||||
struct rt_hw_exp_stack
|
||||
{
|
||||
unsigned long pc; 0
|
||||
unsigned long cpsr;
|
||||
unsigned long sp_el0; 0x10
|
||||
unsigned long x30;
|
||||
unsigned long fpcr; 0x20
|
||||
unsigned long fpsr;
|
||||
unsigned long x28; 0x30
|
||||
unsigned long x29;
|
||||
unsigned long x26; 0x40
|
||||
unsigned long x27;
|
||||
unsigned long x24; 0x50
|
||||
unsigned long x25;
|
||||
unsigned long x22; 0x60
|
||||
unsigned long x23;
|
||||
unsigned long x20; 0x70
|
||||
unsigned long x21;
|
||||
unsigned long x18; 0x80
|
||||
unsigned long x19;
|
||||
unsigned long x16; 0x90
|
||||
unsigned long x17;
|
||||
unsigned long x14; 0xa0
|
||||
unsigned long x15;
|
||||
unsigned long x12; 0xb0
|
||||
unsigned long x13;
|
||||
unsigned long x10; 0xc0
|
||||
unsigned long x11;
|
||||
unsigned long x8; 0xd0
|
||||
unsigned long x9;
|
||||
unsigned long x6; 0xe0
|
||||
unsigned long x7;
|
||||
unsigned long x4; 0xf0
|
||||
unsigned long x5;
|
||||
unsigned long x2; 0x100
|
||||
unsigned long x3;
|
||||
unsigned long x0; 0x110
|
||||
unsigned long x1;
|
||||
|
||||
unsigned long long fpu[16]; 0x120
|
||||
0x220 = 0x120 + 0x10 * 0x10
|
||||
};
|
||||
*/
|
||||
.global lwp_check_debug
|
||||
lwp_check_debug:
|
||||
ldr x0, =rt_dbg_ops
|
||||
ldr x0, [x0]
|
||||
cbnz x0, 1f
|
||||
ret
|
||||
1:
|
||||
stp x29, x30, [sp, #-0x10]!
|
||||
bl dbg_check_suspend
|
||||
cbz w0, lwp_check_debug_quit
|
||||
|
||||
mrs x2, sp_el0
|
||||
sub x2, x2, #0x10
|
||||
mov x3, x2
|
||||
msr sp_el0, x2
|
||||
ldr x0, =lwp_debugreturn
|
||||
ldr w1, [x0]
|
||||
str w1, [x2]
|
||||
ldr w1, [x0, #4]
|
||||
str w1, [x2, #4]
|
||||
|
||||
dc cvau, x2
|
||||
add x2, x2, #4
|
||||
dc cvau, x2
|
||||
|
||||
dsb sy
|
||||
isb sy
|
||||
|
||||
ic ialluis
|
||||
isb sy
|
||||
|
||||
mrs x0, elr_el1
|
||||
mrs x1, spsr_el1
|
||||
stp x0, x1, [sp, #-0x10]!
|
||||
msr elr_el1, x3 /* lwp_debugreturn */
|
||||
mov x1, #(SPSR_Mode(0) | SPSR_A64)
|
||||
orr x1, x1, #(1 << 21)
|
||||
msr spsr_el1, x1
|
||||
eret
|
||||
ret_from_user:
|
||||
/* sp_el0 += 16 for drop ins lwp_debugreturn */
|
||||
mrs x0, sp_el0
|
||||
add x0, x0, #0x10
|
||||
msr sp_el0, x0
|
||||
/* now is el1, sp is pos(empty) - sizeof(context) */
|
||||
mov x0, sp
|
||||
add x0, x0, #0x220
|
||||
mov sp, x0
|
||||
ldp x0, x1, [sp], #0x10 /* x1 is origin spsr_el1 */
|
||||
msr elr_el1, x0 /* x0 is origin elr_el1 */
|
||||
msr spsr_el1, x1
|
||||
lwp_check_debug_quit:
|
||||
ldp x29, x30, [sp], #0x10
|
||||
ret
|
||||
|
||||
arch_signal_quit:
|
||||
msr daifset, #3
|
||||
/*
|
||||
drop stack data
|
||||
*/
|
||||
add sp, sp, #CONTEXT_SIZE
|
||||
bl lwp_signal_restore
|
||||
/* x0 is user_ctx : ori sp, pc, cpsr */
|
||||
ldr x1, [x0]
|
||||
ldr x2, [x0, #8]
|
||||
ldr x3, [x0, #16]
|
||||
msr spsr_el1, x3
|
||||
msr elr_el1, x2
|
||||
add x1, x1, #16
|
||||
msr sp_el0, x1
|
||||
|
||||
msr spsel, #0
|
||||
|
||||
ldp x29, x30, [sp], #0x10
|
||||
ldp x28, x29, [sp], #0x10
|
||||
msr fpcr, x28
|
||||
msr fpsr, x29
|
||||
ldp x28, x29, [sp], #0x10
|
||||
ldp x26, x27, [sp], #0x10
|
||||
ldp x24, x25, [sp], #0x10
|
||||
ldp x22, x23, [sp], #0x10
|
||||
ldp x20, x21, [sp], #0x10
|
||||
ldp x18, x19, [sp], #0x10
|
||||
ldp x16, x17, [sp], #0x10
|
||||
ldp x14, x15, [sp], #0x10
|
||||
ldp x12, x13, [sp], #0x10
|
||||
ldp x10, x11, [sp], #0x10
|
||||
ldp x8, x9, [sp], #0x10
|
||||
ldp x6, x7, [sp], #0x10
|
||||
ldp x4, x5, [sp], #0x10
|
||||
ldp x2, x3, [sp], #0x10
|
||||
ldp x0, x1, [sp], #0x10
|
||||
RESTORE_FPU sp
|
||||
|
||||
msr spsel, #1
|
||||
|
||||
b arch_ret_to_user
|
||||
|
||||
user_do_signal:
|
||||
msr spsel, #0
|
||||
SAVE_FPU sp
|
||||
stp x0, x1, [sp, #-0x10]!
|
||||
stp x2, x3, [sp, #-0x10]!
|
||||
stp x4, x5, [sp, #-0x10]!
|
||||
stp x6, x7, [sp, #-0x10]!
|
||||
stp x8, x9, [sp, #-0x10]!
|
||||
stp x10, x11, [sp, #-0x10]!
|
||||
stp x12, x13, [sp, #-0x10]!
|
||||
stp x14, x15, [sp, #-0x10]!
|
||||
stp x16, x17, [sp, #-0x10]!
|
||||
stp x18, x19, [sp, #-0x10]!
|
||||
stp x20, x21, [sp, #-0x10]!
|
||||
stp x22, x23, [sp, #-0x10]!
|
||||
stp x24, x25, [sp, #-0x10]!
|
||||
stp x26, x27, [sp, #-0x10]!
|
||||
stp x28, x29, [sp, #-0x10]!
|
||||
mrs x28, fpcr
|
||||
mrs x29, fpsr
|
||||
stp x28, x29, [sp, #-0x10]!
|
||||
stp x29, x30, [sp, #-0x10]!
|
||||
|
||||
sub sp, sp, #0x10
|
||||
adr x0, lwp_sigreturn
|
||||
ldr w1, [x0]
|
||||
str w1, [sp]
|
||||
ldr w1, [x0, #4]
|
||||
str w1, [sp, #4]
|
||||
|
||||
mov x20, sp /* lwp_sigreturn */
|
||||
mov x0, sp
|
||||
|
||||
dc cvau, x0
|
||||
dsb sy
|
||||
ic ialluis
|
||||
dsb sy
|
||||
|
||||
msr spsel, #1
|
||||
|
||||
mrs x1, elr_el1
|
||||
mrs x2, spsr_el1
|
||||
bl lwp_signal_backup
|
||||
/* x0 is signal */
|
||||
mov x19, x0
|
||||
bl lwp_sighandler_get
|
||||
adds x1, x0, xzr
|
||||
mov x0, x19
|
||||
bne 1f
|
||||
mov x1, x20
|
||||
1:
|
||||
msr elr_el1, x1
|
||||
mov x30, x20
|
||||
eret
|
||||
|
||||
lwp_debugreturn:
|
||||
mov x8, 0xf000
|
||||
svc #0
|
||||
|
||||
lwp_sigreturn:
|
||||
mov x8, #0xe000
|
||||
svc #0
|
||||
|
||||
lwp_thread_return:
|
||||
mov x0, xzr
|
||||
mov x8, #0x01
|
||||
svc #0
|
||||
|
||||
.globl arch_get_tidr
|
||||
arch_get_tidr:
|
||||
mrs x0, tpidr_el0
|
||||
ret
|
||||
|
||||
.global arch_set_thread_area
|
||||
arch_set_thread_area:
|
||||
.globl arch_set_tidr
|
||||
arch_set_tidr:
|
||||
msr tpidr_el0, x0
|
||||
ret
|
121
components/lwp/arch/arm/common/reloc.c
Normal file
121
components/lwp/arch/arm/common/reloc.c
Normal file
|
@ -0,0 +1,121 @@
|
|||
#include "mm_aspace.h"
|
||||
#include <rtthread.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <lwp_elf.h>
|
||||
#ifdef ARCH_MM_MMU
|
||||
#include <mmu.h>
|
||||
#include <page.h>
|
||||
#endif
|
||||
|
||||
typedef struct
|
||||
{
|
||||
Elf32_Word st_name;
|
||||
Elf32_Addr st_value;
|
||||
Elf32_Word st_size;
|
||||
unsigned char st_info;
|
||||
unsigned char st_other;
|
||||
Elf32_Half st_shndx;
|
||||
} Elf32_sym;
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
void arch_elf_reloc(rt_aspace_t aspace, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
|
||||
{
|
||||
size_t rel_off;
|
||||
void* addr;
|
||||
|
||||
if (rel_dyn_size && !dynsym)
|
||||
{
|
||||
return;
|
||||
}
|
||||
for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
|
||||
{
|
||||
uint32_t v1, v2;
|
||||
|
||||
/*
|
||||
memcpy(&v1, rel_dyn_start + rel_off, 4);
|
||||
memcpy(&v2, rel_dyn_start + rel_off + 4, 4);
|
||||
*/
|
||||
|
||||
addr = rt_hw_mmu_v2p(aspace, (void*)((char*)rel_dyn_start + rel_off));
|
||||
addr = (void*)((char*)addr - PV_OFFSET);
|
||||
memcpy(&v1, addr, 4);
|
||||
addr = rt_hw_mmu_v2p(aspace, (void*)((char*)rel_dyn_start + rel_off + 4));
|
||||
addr = (void*)((char*)addr - PV_OFFSET);
|
||||
memcpy(&v2, addr, 4);
|
||||
|
||||
addr = rt_hw_mmu_v2p(aspace, (void*)((char*)text_start + v1));
|
||||
addr = (void*)((char*)addr - PV_OFFSET);
|
||||
if ((v2 & 0xff) == R_ARM_RELATIVE)
|
||||
{
|
||||
// *(uint32_t*)(text_start + v1) += (uint32_t)text_start;
|
||||
*(uint32_t*)addr += (uint32_t)text_start;
|
||||
}
|
||||
else if ((v2 & 0xff) == R_ARM_ABS32)
|
||||
{
|
||||
uint32_t t;
|
||||
t = (v2 >> 8);
|
||||
if (t) /* 0 is UDF */
|
||||
{
|
||||
// *(uint32_t*)(text_start + v1) = (uint32_t)(text_start + dynsym[t].st_value);
|
||||
*(uint32_t*)addr = (uint32_t)((char*)text_start + dynsym[t].st_value);
|
||||
}
|
||||
}
|
||||
}
|
||||
/* modify got */
|
||||
if (got_size)
|
||||
{
|
||||
uint32_t *got_item = (uint32_t*)got_start;
|
||||
|
||||
for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
|
||||
{
|
||||
//*got_item += (uint32_t)text_start;
|
||||
addr = rt_hw_mmu_v2p(aspace, got_item);
|
||||
addr = (void*)((char*)addr - PV_OFFSET);
|
||||
*(uint32_t *)addr += (uint32_t)text_start;
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
|
||||
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
|
||||
{
|
||||
size_t rel_off;
|
||||
|
||||
if (rel_dyn_size && !dynsym)
|
||||
{
|
||||
return;
|
||||
}
|
||||
for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
|
||||
{
|
||||
uint32_t v1, v2;
|
||||
|
||||
memcpy(&v1, (void*)((char*)rel_dyn_start + rel_off), 4);
|
||||
memcpy(&v2, (void*)((char*)rel_dyn_start + rel_off + 4), 4);
|
||||
|
||||
if ((v2 & 0xff) == R_ARM_RELATIVE)
|
||||
{
|
||||
*(uint32_t*)((char*)text_start + v1) += (uint32_t)text_start;
|
||||
}
|
||||
else if ((v2 & 0xff) == R_ARM_ABS32)
|
||||
{
|
||||
uint32_t t;
|
||||
t = (v2 >> 8);
|
||||
if (t) /* 0 is UDF */
|
||||
{
|
||||
*(uint32_t*)((char*)text_start + v1) = (uint32_t)((char*)text_start + dynsym[t].st_value);
|
||||
}
|
||||
}
|
||||
}
|
||||
/* modify got */
|
||||
if (got_size)
|
||||
{
|
||||
uint32_t *got_item = (uint32_t*)got_start;
|
||||
|
||||
for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
|
||||
{
|
||||
*got_item += (uint32_t)text_start;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
161
components/lwp/arch/arm/cortex-a/lwp_arch.c
Normal file
161
components/lwp/arch/arm/cortex-a/lwp_arch.c
Normal file
|
@ -0,0 +1,161 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2018, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-10-28 Jesven first version
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
|
||||
#define DBG_TAG "lwp.arch"
|
||||
#define DBG_LVL DBG_INFO
|
||||
#include <rtdbg.h>
|
||||
|
||||
#include <lwp_arch.h>
|
||||
#include <lwp_user_mm.h>
|
||||
|
||||
#define KPTE_START (KERNEL_VADDR_START >> ARCH_SECTION_SHIFT)
|
||||
|
||||
int arch_user_space_init(struct rt_lwp *lwp)
|
||||
{
|
||||
size_t *mmu_table;
|
||||
|
||||
mmu_table = (size_t *)rt_pages_alloc(2);
|
||||
if (!mmu_table)
|
||||
{
|
||||
return -RT_ENOMEM;
|
||||
}
|
||||
|
||||
lwp->end_heap = USER_HEAP_VADDR;
|
||||
|
||||
rt_memcpy(mmu_table + KPTE_START, (size_t *)rt_kernel_space.page_table + KPTE_START, ARCH_PAGE_SIZE);
|
||||
rt_memset(mmu_table, 0, 3 * ARCH_PAGE_SIZE);
|
||||
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, 4 * ARCH_PAGE_SIZE);
|
||||
|
||||
lwp->aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
|
||||
if (!lwp->aspace)
|
||||
{
|
||||
return -RT_ERROR;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct rt_varea kuser_varea;
|
||||
|
||||
void arch_kuser_init(rt_aspace_t aspace, void *vectors)
|
||||
{
|
||||
const size_t kuser_size = 0x1000;
|
||||
int err;
|
||||
extern char __kuser_helper_start[], __kuser_helper_end[];
|
||||
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
|
||||
|
||||
err = rt_aspace_map_static(aspace, &kuser_varea, &vectors, kuser_size,
|
||||
MMU_MAP_U_RO, MMF_MAP_FIXED | MMF_PREFETCH,
|
||||
&rt_mm_dummy_mapper, 0);
|
||||
if (err != 0)
|
||||
while (1)
|
||||
; // early failed
|
||||
|
||||
rt_memcpy((void *)((char *)vectors + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz);
|
||||
/*
|
||||
* vectors + 0xfe0 = __kuser_get_tls
|
||||
* vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
|
||||
*/
|
||||
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
|
||||
rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
|
||||
}
|
||||
|
||||
void arch_user_space_free(struct rt_lwp *lwp)
|
||||
{
|
||||
if (lwp)
|
||||
{
|
||||
RT_ASSERT(lwp->aspace);
|
||||
void *pgtbl = lwp->aspace->page_table;
|
||||
rt_aspace_delete(lwp->aspace);
|
||||
|
||||
/* must be freed after aspace delete, pgtbl is required for unmap */
|
||||
rt_pages_free(pgtbl, 2);
|
||||
lwp->aspace = RT_NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_W("%s: NULL lwp as parameter", __func__);
|
||||
RT_ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
int arch_expand_user_stack(void *addr)
|
||||
{
|
||||
int ret = 0;
|
||||
size_t stack_addr = (size_t)addr;
|
||||
|
||||
stack_addr &= ~ARCH_PAGE_MASK;
|
||||
if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
|
||||
{
|
||||
void *map = lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);
|
||||
|
||||
if (map || lwp_user_accessable(addr, 1))
|
||||
{
|
||||
ret = 1;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef LWP_ENABLE_ASID
#define MAX_ASID_BITS 8
#define MAX_ASID (1 << MAX_ASID_BITS)
/* generation counter: bumped on every ASID-space rollover */
static uint64_t global_generation = 1;
/* asid_valid_bitmap[i] != 0 means ASID i is claimed in this generation */
static char asid_valid_bitmap[MAX_ASID];

/**
 * Return a valid ASID for @lwp, allocating one if necessary.
 *
 * ASID 0 is reserved for the kernel. When all 255 user ASIDs are taken,
 * a new generation is started, the bitmap is cleared and the whole TLB
 * is invalidated.
 */
unsigned int arch_get_asid(struct rt_lwp *lwp)
{
    if (lwp == RT_NULL)
    {
        /* kernel address space always runs with ASID 0 */
        return 0;
    }

    if (lwp->generation == global_generation)
    {
        /* fast path: still valid in the current generation */
        return lwp->asid;
    }

    if (lwp->asid && !asid_valid_bitmap[lwp->asid])
    {
        /* reclaim the previous ASID if no one grabbed it after rollover */
        asid_valid_bitmap[lwp->asid] = 1;
        /* fix: stamp the current generation here as well -- without this,
         * every subsequent call for this lwp missed the fast path, leaked
         * the just-marked slot and allocated a fresh ASID each time,
         * causing premature rollovers and needless TLB flushes */
        lwp->generation = global_generation;
        return lwp->asid;
    }

    for (unsigned i = 1; i < MAX_ASID; i++)
    {
        if (asid_valid_bitmap[i] == 0)
        {
            asid_valid_bitmap[i] = 1;
            lwp->generation = global_generation;
            lwp->asid = i;
            return lwp->asid;
        }
    }

    /* ASID space exhausted: start a new generation */
    global_generation++;
    memset(asid_valid_bitmap, 0, MAX_ASID * sizeof(char));

    asid_valid_bitmap[1] = 1;
    lwp->generation = global_generation;
    lwp->asid = 1;

    /* TLBIALL (CP15 c8,c7,0) + DSB/ISB: drop every stale translation that
     * may still carry an ASID from the previous generation */
    asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");

    return lwp->asid;
}
#endif
|
||||
|
||||
#endif
|
49
components/lwp/arch/arm/cortex-a/lwp_arch.h
Normal file
49
components/lwp/arch/arm/cortex-a/lwp_arch.h
Normal file
|
@ -0,0 +1,49 @@
|
|||
/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 */

#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__

#include <lwp.h>
#include <lwp_arch_comm.h>

#ifdef ARCH_MM_MMU

/* user-mode virtual address space layout (ARM Cortex-A, MMU enabled) */
#define USER_VADDR_TOP    0xC0000000UL     /* end of user-addressable space */
#define USER_HEAP_VEND    0xB0000000UL     /* heap may grow up to here */
#define USER_HEAP_VADDR   0x80000000UL     /* heap start */
#define USER_STACK_VSTART 0x70000000UL     /* stack region bottom */
#define USER_STACK_VEND   USER_HEAP_VADDR  /* stack region top (abuts heap start) */
#define LDSO_LOAD_VADDR   0x60000000UL     /* dynamic linker load base */
#define USER_VADDR_START  0x00100000UL     /* first user-mappable address */
#define USER_LOAD_VADDR   USER_VADDR_START /* default program load base */

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Find the index of the first zero bit in @x.
 * NOTE(review): when x has no zero bit (~x == 0), __builtin_ffsl returns 0
 * and the result underflows to (unsigned long)-1 -- callers must not pass
 * an all-ones value; confirm against call sites.
 */
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
    return __builtin_ffsl(~x) - 1;
}

/* Invalidate the entire instruction cache (ICIALLU), then DSB/ISB barriers. */
rt_inline void icache_invalid_all(void)
{
    asm volatile ("mcr p15, 0, r0, c7, c5, 0\ndsb\nisb":::"memory");//iciallu
}

/* Allocate or refresh the ASID bound to @lwp (see LWP_ENABLE_ASID in lwp_arch.c). */
unsigned int arch_get_asid(struct rt_lwp *lwp);

#ifdef __cplusplus
}
#endif

#endif

#endif /*LWP_ARCH_H__*/
|
435
components/lwp/arch/arm/cortex-a/lwp_gcc.S
Normal file
435
components/lwp/arch/arm/cortex-a/lwp_gcc.S
Normal file
|
@ -0,0 +1,435 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2018-12-10 Jesven first version
|
||||
*/
|
||||
|
||||
#include "rtconfig.h"
|
||||
#include "asm-generic.h"
|
||||
|
||||
#define Mode_USR 0x10
|
||||
#define Mode_FIQ 0x11
|
||||
#define Mode_IRQ 0x12
|
||||
#define Mode_SVC 0x13
|
||||
#define Mode_MON 0x16
|
||||
#define Mode_ABT 0x17
|
||||
#define Mode_UDF 0x1B
|
||||
#define Mode_SYS 0x1F
|
||||
|
||||
#define A_Bit 0x100
|
||||
#define I_Bit 0x80 @; when I bit is set, IRQ is disabled
|
||||
#define F_Bit 0x40 @; when F bit is set, FIQ is disabled
|
||||
#define T_Bit 0x20
|
||||
|
||||
.cpu cortex-a9
|
||||
.syntax unified
|
||||
.text
|
||||
|
||||
/*
|
||||
* void arch_start_umode(args, text, ustack, kstack);
|
||||
*/
|
||||
.global arch_start_umode
|
||||
.type arch_start_umode, % function
|
||||
arch_start_umode:
|
||||
mrs r9, cpsr
|
||||
bic r9, #0x1f
|
||||
orr r9, #Mode_USR
|
||||
cpsid i
|
||||
msr spsr, r9
|
||||
mov sp, r3
|
||||
|
||||
mov r3, r2 ;/* user stack top */
|
||||
/* set data address. */
|
||||
movs pc, r1
|
||||
|
||||
/*
|
||||
* void arch_crt_start_umode(args, text, ustack, kstack);
|
||||
*/
|
||||
.global arch_crt_start_umode
|
||||
.type arch_crt_start_umode, % function
|
||||
arch_crt_start_umode:
|
||||
cps #Mode_SYS
|
||||
sub sp, r2, #16
|
||||
ldr r2, =lwp_thread_return
|
||||
ldr r4, [r2]
|
||||
str r4, [sp]
|
||||
ldr r4, [r2, #4]
|
||||
str r4, [sp, #4]
|
||||
ldr r4, [r2, #8]
|
||||
str r4, [sp, #8]
|
||||
|
||||
mov r4, sp
|
||||
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
|
||||
add r4, #4
|
||||
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
|
||||
add r4, #4
|
||||
mcr p15, 0, r4, c7, c11, 1 ;//dc cmvau
|
||||
dsb
|
||||
isb
|
||||
mcr p15, 0, r4, c7, c5, 0 ;//iciallu
|
||||
dsb
|
||||
isb
|
||||
|
||||
mov lr, sp
|
||||
cps #Mode_SVC
|
||||
|
||||
mrs r9, cpsr
|
||||
bic r9, #0x1f
|
||||
orr r9, #Mode_USR
|
||||
cpsid i
|
||||
msr spsr, r9
|
||||
mov sp, r3
|
||||
|
||||
/* set data address. */
|
||||
movs pc, r1
|
||||
|
||||
/*
|
||||
void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
|
||||
*/
|
||||
.global arch_set_thread_context
|
||||
arch_set_thread_context:
|
||||
sub r1, #(10 * 4 + 4 * 4) /* {r4 - r12, lr} , {r4, r5, spsr, u_pc} */
|
||||
stmfd r1!, {r0}
|
||||
mov r12, #0
|
||||
stmfd r1!, {r12}
|
||||
stmfd r1!, {r1 - r12}
|
||||
stmfd r1!, {r12} /* new thread return value */
|
||||
mrs r12, cpsr
|
||||
orr r12, #(1 << 7) /* disable irq */
|
||||
stmfd r1!, {r12} /* spsr */
|
||||
mov r12, #0
|
||||
stmfd r1!, {r12} /* now user lr is 0 */
|
||||
stmfd r1!, {r2} /* user sp */
|
||||
#ifdef RT_USING_FPU
|
||||
stmfd r1!, {r12} /* not use fpu */
|
||||
#endif
|
||||
str r1, [r3]
|
||||
mov pc, lr
|
||||
|
||||
.global arch_get_user_sp
|
||||
arch_get_user_sp:
|
||||
cps #Mode_SYS
|
||||
mov r0, sp
|
||||
cps #Mode_SVC
|
||||
mov pc, lr
|
||||
|
||||
.global sys_fork
|
||||
.global sys_vfork
|
||||
.global arch_fork_exit
|
||||
sys_fork:
|
||||
sys_vfork:
|
||||
push {r4 - r12, lr}
|
||||
bl _sys_fork
|
||||
arch_fork_exit:
|
||||
pop {r4 - r12, lr}
|
||||
b arch_syscall_exit
|
||||
|
||||
.global sys_clone
|
||||
.global arch_clone_exit
|
||||
sys_clone:
|
||||
push {r4 - r12, lr}
|
||||
bl _sys_clone
|
||||
arch_clone_exit:
|
||||
pop {r4 - r12, lr}
|
||||
b arch_syscall_exit
|
||||
/*
|
||||
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
|
||||
*/
|
||||
.global lwp_exec_user
|
||||
lwp_exec_user:
|
||||
cpsid i
|
||||
mov sp, r1
|
||||
mov lr, r2
|
||||
mov r2, #Mode_USR
|
||||
msr spsr_cxsf, r2
|
||||
ldr r3, =0x80000000
|
||||
b arch_ret_to_user
|
||||
|
||||
/*
 * void SVC_Handler(void);
 *
 * SVC (syscall) entry. The syscall number arrives in r7:
 *   0xExxx -> return path from a user signal handler (arch_signal_quit)
 *   0xFxxx -> return path from the debug stub     (ret_from_user)
 *   others -> low byte indexes the syscall table via lwp_get_sys_api
 */
.global vector_swi
.type vector_swi, % function
START_POINT(vector_swi)
    push {lr}                   @ save return address to user mode
    mrs lr, spsr                @ capture user cpsr
    push {r4, r5, lr}

    cpsie i                     @ syscalls run with interrupts enabled

    push {r0 - r3, r12}         @ preserve caller-saved regs / syscall args

    bl rt_thread_self
    bl lwp_user_setting_save    @ save per-thread user state on entry

    and r0, r7, #0xf000         @ classify the request by the high nibble(s)
    cmp r0, #0xe000
    beq arch_signal_quit

    cmp r0, #0xf000
    beq ret_from_user
    and r0, r7, #0xff           @ plain syscall: number is the low byte
    bl lwp_get_sys_api
    cmp r0, #0 /* r0 = api */   @ NULL means unknown syscall -> just exit
    mov lr, r0

    pop {r0 - r3, r12}          @ restore the original syscall arguments
    beq arch_syscall_exit
    blx lr                      @ call the handler; it falls into arch_syscall_exit
START_POINT_END(vector_swi)
|
||||
|
||||
/*
 * Common syscall epilogue: restore the saved user cpsr/lr pushed by
 * vector_swi, then fall through into arch_ret_to_user.
 */
.global arch_syscall_exit
arch_syscall_exit:
    cpsid i                     @ no interrupts while unwinding the frame
    pop {r4, r5, lr}
    msr spsr_cxsf, lr           @ spsr <- saved user cpsr
    pop {lr}                    @ lr   <- user return address

/*
 * Unified exit point from kernel mode back to user space:
 *  1. give the debugger a chance to suspend us (lwp_check_debug)
 *  2. honor a pending thread-exit request
 *  3. deliver pending POSIX signals (user_do_signal)
 *  4. notify an attached debugger, then drop to user mode
 */
.global arch_ret_to_user
arch_ret_to_user:
    push {r0-r3, r12, lr}
    bl lwp_check_debug
    bl lwp_check_exit_request
    cmp r0, #0
    beq 1f
    mov r0, #0                  @ exit code 0
    b sys_exit                  @ does not return
1:
    bl lwp_signal_check
    cmp r0, #0
    pop {r0-r3, r12, lr}
    bne user_do_signal          @ pending signal -> build a signal frame

    push {r0}
    ldr r0, =rt_dbg_ops         @ debugger hooks installed?
    ldr r0, [r0]
    cmp r0, #0
    pop {r0}
    beq 2f
    push {r0-r3, r12, lr}
    mov r0, lr                  @ report the user resume address
    bl dbg_attach_req
    pop {r0-r3, r12, lr}
2:
    movs pc, lr                 @ spsr -> cpsr: enter user mode
||||
|
||||
#ifdef RT_USING_SMART
|
||||
.global lwp_check_debug
|
||||
lwp_check_debug:
|
||||
ldr r0, =rt_dbg_ops
|
||||
ldr r0, [r0]
|
||||
cmp r0, #0
|
||||
bne 1f
|
||||
bx lr
|
||||
1:
|
||||
push {lr}
|
||||
bl dbg_check_suspend
|
||||
cmp r0, #0
|
||||
beq lwp_check_debug_quit
|
||||
|
||||
cps #Mode_SYS
|
||||
sub sp, #8
|
||||
ldr r0, =lwp_debugreturn
|
||||
ldr r1, [r0]
|
||||
str r1, [sp]
|
||||
ldr r1, [r0, #4]
|
||||
str r1, [sp, #4]
|
||||
|
||||
mov r1, sp
|
||||
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
|
||||
add r1, #4
|
||||
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
|
||||
dsb
|
||||
isb
|
||||
mcr p15, 0, r0, c7, c5, 0 ;//iciallu
|
||||
dsb
|
||||
isb
|
||||
|
||||
mov r0, sp /* lwp_debugreturn */
|
||||
cps #Mode_SVC
|
||||
|
||||
mrs r1, spsr
|
||||
push {r1}
|
||||
mov r1, #Mode_USR
|
||||
msr spsr_cxsf, r1
|
||||
movs pc, r0
|
||||
ret_from_user:
|
||||
cps #Mode_SYS
|
||||
add sp, #8
|
||||
cps #Mode_SVC
|
||||
/*
|
||||
pop {r0 - r3, r12}
|
||||
pop {r4 - r6, lr}
|
||||
*/
|
||||
add sp, #(4*9)
|
||||
pop {r4}
|
||||
msr spsr_cxsf, r4
|
||||
lwp_check_debug_quit:
|
||||
pop {pc}
|
||||
|
||||
arch_signal_quit:
|
||||
cpsid i
|
||||
pop {r0 - r3, r12}
|
||||
pop {r4, r5, lr}
|
||||
pop {lr}
|
||||
bl lwp_signal_restore
|
||||
/* r0 is user_ctx : ori sp, pc, cpsr*/
|
||||
ldr r1, [r0]
|
||||
ldr r2, [r0, #4]
|
||||
ldr r3, [r0, #8]
|
||||
msr spsr_cxsf, r3
|
||||
mov lr, r2
|
||||
cps #Mode_SYS
|
||||
mov sp, r1
|
||||
pop {r0-r12, lr}
|
||||
cps #Mode_SVC
|
||||
b arch_ret_to_user
|
||||
|
||||
user_do_signal:
|
||||
mov r0, r0
|
||||
cps #Mode_SYS
|
||||
push {r0-r12, lr}
|
||||
|
||||
sub sp, #8
|
||||
ldr r0, =lwp_sigreturn
|
||||
ldr r1, [r0]
|
||||
str r1, [sp]
|
||||
ldr r1, [r0, #4]
|
||||
str r1, [sp, #4]
|
||||
|
||||
mov r1, sp
|
||||
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
|
||||
add r1, #4
|
||||
mcr p15, 0, r1, c7, c11, 1 ;//dc cmvau
|
||||
dsb
|
||||
isb
|
||||
mcr p15, 0, r0, c7, c5, 0 ;//iciallu
|
||||
dsb
|
||||
isb
|
||||
|
||||
mov r5, sp ;//if func is 0
|
||||
mov lr, sp
|
||||
|
||||
add r0, sp, #8 /* lwp_sigreturn */
|
||||
cps #Mode_SVC
|
||||
mov r1, lr
|
||||
mrs r2, spsr
|
||||
bl lwp_signal_backup
|
||||
/* r0 is signal */
|
||||
mov r4, r0
|
||||
bl lwp_sighandler_get
|
||||
mov lr, r0
|
||||
cmp lr, #0
|
||||
moveq lr, r5
|
||||
mov r0, r4
|
||||
movs pc, lr
|
||||
|
||||
lwp_debugreturn:
|
||||
mov r7, #0xf000
|
||||
svc #0
|
||||
|
||||
lwp_sigreturn:
|
||||
mov r7, #0xe000
|
||||
svc #0
|
||||
|
||||
lwp_thread_return:
|
||||
mov r0, #0
|
||||
mov r7, #0x01
|
||||
svc #0
|
||||
#endif
|
||||
|
||||
.global check_vfp
|
||||
check_vfp:
|
||||
#ifdef RT_USING_FPU
|
||||
vmrs r0, fpexc
|
||||
ubfx r0, r0, #30, #1
|
||||
#else
|
||||
mov r0, #0
|
||||
#endif
|
||||
mov pc, lr
|
||||
|
||||
.global get_vfp
|
||||
get_vfp:
|
||||
#ifdef RT_USING_FPU
|
||||
vstmia r0!, {d0-d15}
|
||||
vstmia r0!, {d16-d31}
|
||||
vmrs r1, fpscr
|
||||
str r1, [r0]
|
||||
#endif
|
||||
mov pc, lr
|
||||
|
||||
.globl arch_get_tidr
|
||||
arch_get_tidr:
|
||||
mrc p15, 0, r0, c13, c0, 3
|
||||
bx lr
|
||||
|
||||
.global arch_set_thread_area
|
||||
arch_set_thread_area:
|
||||
.globl arch_set_tidr
|
||||
arch_set_tidr:
|
||||
mcr p15, 0, r0, c13, c0, 3
|
||||
bx lr
|
||||
|
||||
/* kuser suppurt */
|
||||
.macro kuser_pad, sym, size
|
||||
.if (. - \sym) & 3
|
||||
.rept 4 - (. - \sym) & 3
|
||||
.byte 0
|
||||
.endr
|
||||
.endif
|
||||
.rept (\size - (. - \sym)) / 4
|
||||
.word 0xe7fddef1
|
||||
.endr
|
||||
.endm
|
||||
|
||||
.align 5
|
||||
.globl __kuser_helper_start
|
||||
__kuser_helper_start:
|
||||
__kuser_cmpxchg64: @ 0xffff0f60
|
||||
stmfd sp!, {r4, r5, r6, lr}
|
||||
ldmia r0, {r4, r5} @ load old val
|
||||
ldmia r1, {r6, lr} @ load new val
|
||||
1: ldmia r2, {r0, r1} @ load current val
|
||||
eors r3, r0, r4 @ compare with oldval (1)
|
||||
eorseq r3, r1, r5 @ compare with oldval (2)
|
||||
2: stmiaeq r2, {r6, lr} @ store newval if eq
|
||||
rsbs r0, r3, #0 @ set return val and C flag
|
||||
ldmfd sp!, {r4, r5, r6, pc}
|
||||
|
||||
kuser_pad __kuser_cmpxchg64, 64
|
||||
|
||||
__kuser_memory_barrier: @ 0xffff0fa0
|
||||
dmb
|
||||
mov pc, lr
|
||||
|
||||
kuser_pad __kuser_memory_barrier, 32
|
||||
|
||||
__kuser_cmpxchg: @ 0xffff0fc0
|
||||
1: ldr r3, [r2] @ load current val
|
||||
subs r3, r3, r0 @ compare with oldval
|
||||
2: streq r1, [r2] @ store newval if eq
|
||||
rsbs r0, r3, #0 @ set return val and C flag
|
||||
mov pc, lr
|
||||
|
||||
kuser_pad __kuser_cmpxchg, 32
|
||||
|
||||
__kuser_get_tls: @ 0xffff0fe0
|
||||
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
|
||||
mov pc, lr
|
||||
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
|
||||
|
||||
kuser_pad __kuser_get_tls, 16
|
||||
|
||||
.rep 3
|
||||
.word 0 @ 0xffff0ff0 software TLS value, then
|
||||
.endr @ pad up to __kuser_helper_version
|
||||
|
||||
__kuser_helper_version: @ 0xffff0ffc
|
||||
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
|
||||
|
||||
.globl __kuser_helper_end
|
||||
__kuser_helper_end:
|
11
components/lwp/arch/risc-v/rv64/SConscript
Normal file
11
components/lwp/arch/risc-v/rv64/SConscript
Normal file
|
@ -0,0 +1,11 @@
|
|||
# RT-Thread building script for component

from building import *

# component root: pick up every C and assembly source in this directory
cwd = GetCurrentDir()
src = Glob('*.c') + Glob('*.S')
CPPPATH = [cwd]

# register as build group 'lwp-riscv'; compiled only when RT_USING_SMART is enabled
group = DefineGroup('lwp-riscv', src, depend = ['RT_USING_SMART'], CPPPATH = CPPPATH)

Return('group')
|
261
components/lwp/arch/risc-v/rv64/lwp_arch.c
Normal file
261
components/lwp/arch/risc-v/rv64/lwp_arch.c
Normal file
|
@ -0,0 +1,261 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2020-11-18 Jesven first version
|
||||
* 2021-02-03 lizhirui port to riscv64
|
||||
* 2021-02-06 lizhirui add thread filter
|
||||
* 2021-02-19 lizhirui port to new version of rt-smart
|
||||
* 2021-03-02 lizhirui add a auxillary function for interrupt
|
||||
* 2021-03-04 lizhirui delete thread filter
|
||||
* 2021-03-04 lizhirui modify for new version of rt-smart
|
||||
* 2021-11-22 JasonHu add lwp_set_thread_context
|
||||
* 2021-11-30 JasonHu add clone/fork support
|
||||
*/
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
|
||||
#define DBG_TAG "lwp.arch"
|
||||
#define DBG_LVL DBG_INFO
|
||||
#include <rtdbg.h>
|
||||
|
||||
#include <lwp.h>
|
||||
#include <lwp_arch.h>
|
||||
#include <lwp_user_mm.h>
|
||||
#include <page.h>
|
||||
|
||||
#include <cpuport.h>
|
||||
#include <encoding.h>
|
||||
#include <stack.h>
|
||||
|
||||
extern rt_ubase_t MMUTable[];
|
||||
|
||||
void *lwp_copy_return_code_to_user_stack()
|
||||
{
|
||||
void lwp_thread_return();
|
||||
void lwp_thread_return_end();
|
||||
rt_thread_t tid = rt_thread_self();
|
||||
|
||||
if (tid->user_stack != RT_NULL)
|
||||
{
|
||||
rt_size_t size = (rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return;
|
||||
rt_size_t userstack = (rt_size_t)tid->user_stack + tid->user_stack_size - size;
|
||||
rt_memcpy((void *)userstack, lwp_thread_return, size);
|
||||
return (void *)userstack;
|
||||
}
|
||||
|
||||
return RT_NULL;
|
||||
}
|
||||
|
||||
/**
 * Lower @cursp past the lwp_thread_return stub that
 * lwp_copy_return_code_to_user_stack() placed at the stack top, yielding
 * the first usable stack address. A zero sp passes through unchanged.
 */
rt_ubase_t lwp_fix_sp(rt_ubase_t cursp)
{
    void lwp_thread_return();
    void lwp_thread_return_end();

    rt_size_t stub_size = (rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return;

    return cursp ? (cursp - stub_size) : 0;
}
|
||||
|
||||
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
|
||||
{
|
||||
return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
|
||||
}
|
||||
|
||||
/** Return one-past-the-end of @thread's kernel stack (its initial top). */
void *get_thread_kernel_stack_top(rt_thread_t thread)
{
    rt_uint8_t *stack_bottom = (rt_uint8_t *)thread->stack_addr;

    return stack_bottom + thread->stack_size;
}
|
||||
|
||||
/**
 * Fetch the user-mode stack pointer of the current thread.
 *
 * The trap entry stores a full rt_hw_stack_frame at the very top of the
 * thread's kernel stack; read the saved user sp back out of that frame.
 * NOTE(review): relies on the frame sitting exactly at stack_addr +
 * stack_size - sizeof(frame) -- confirm against the trap entry code.
 */
void *arch_get_user_sp(void)
{
    /* user sp saved in interrupt context */
    rt_thread_t self = rt_thread_self();
    rt_uint8_t *stack_top = (rt_uint8_t *)self->stack_addr + self->stack_size;
    struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)(stack_top - sizeof(struct rt_hw_stack_frame));

    return (void *)frame->user_sp_exc_stack;
}
|
||||
|
||||
int arch_user_space_init(struct rt_lwp *lwp)
|
||||
{
|
||||
rt_ubase_t *mmu_table;
|
||||
|
||||
mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
|
||||
if (!mmu_table)
|
||||
{
|
||||
return -RT_ENOMEM;
|
||||
}
|
||||
|
||||
lwp->end_heap = USER_HEAP_VADDR;
|
||||
|
||||
rt_memcpy(mmu_table, rt_kernel_space.page_table, ARCH_PAGE_SIZE);
|
||||
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
|
||||
|
||||
lwp->aspace = rt_aspace_create(
|
||||
(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
|
||||
if (!lwp->aspace)
|
||||
{
|
||||
return -RT_ERROR;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *arch_kernel_mmu_table_get(void)
|
||||
{
|
||||
return (void *)((char *)MMUTable);
|
||||
}
|
||||
|
||||
void arch_user_space_free(struct rt_lwp *lwp)
|
||||
{
|
||||
if (lwp)
|
||||
{
|
||||
RT_ASSERT(lwp->aspace);
|
||||
|
||||
void *pgtbl = lwp->aspace->page_table;
|
||||
rt_aspace_delete(lwp->aspace);
|
||||
|
||||
/* must be freed after aspace delete, pgtbl is required for unmap */
|
||||
rt_pages_free(pgtbl, 0);
|
||||
lwp->aspace = RT_NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_W("%s: NULL lwp as parameter", __func__);
|
||||
RT_ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
/* Thin C entry points for the process-creation syscalls; the actual work
 * lives in the arch-independent _sys_* implementations. */

long _sys_clone(void *arg[]);
long sys_clone(void *arg[])
{
    return _sys_clone(arg);
}

long _sys_fork(void);
long sys_fork(void)
{
    return _sys_fork();
}

long _sys_vfork(void);
long sys_vfork(void)
{
    /* NOTE(review): vfork is serviced with plain fork semantics here; the
     * declared _sys_vfork is unused -- presumably intentional, confirm */
    return _sys_fork();
}
|
||||
|
||||
/**
|
||||
* set exec context for fork/clone.
|
||||
*/
|
||||
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
|
||||
void *user_stack, void **thread_sp)
|
||||
{
|
||||
RT_ASSERT(exit != RT_NULL);
|
||||
RT_ASSERT(user_stack != RT_NULL);
|
||||
RT_ASSERT(new_thread_stack != RT_NULL);
|
||||
RT_ASSERT(thread_sp != RT_NULL);
|
||||
struct rt_hw_stack_frame *syscall_frame;
|
||||
struct rt_hw_stack_frame *thread_frame;
|
||||
|
||||
rt_uint8_t *stk;
|
||||
rt_uint8_t *syscall_stk;
|
||||
|
||||
stk = (rt_uint8_t *)new_thread_stack;
|
||||
/* reserve syscall context, all the registers are copyed from parent */
|
||||
stk -= CTX_REG_NR * REGBYTES;
|
||||
syscall_stk = stk;
|
||||
|
||||
syscall_frame = (struct rt_hw_stack_frame *)stk;
|
||||
|
||||
/* modify user sp */
|
||||
syscall_frame->user_sp_exc_stack = (rt_ubase_t)user_stack;
|
||||
|
||||
/* skip ecall */
|
||||
syscall_frame->epc += 4;
|
||||
|
||||
/* child return value is 0 */
|
||||
syscall_frame->a0 = 0;
|
||||
syscall_frame->a1 = 0;
|
||||
|
||||
/* reset thread area */
|
||||
rt_thread_t thread = rt_container_of((unsigned long)thread_sp, struct rt_thread, sp);
|
||||
syscall_frame->tp = (rt_ubase_t)thread->thread_idr;
|
||||
|
||||
#ifdef ARCH_USING_NEW_CTX_SWITCH
|
||||
extern void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus);
|
||||
rt_ubase_t sstatus = read_csr(sstatus) | SSTATUS_SPP;
|
||||
sstatus &= ~SSTATUS_SIE;
|
||||
|
||||
/* compatible to RESTORE_CONTEXT */
|
||||
stk = (void *)_rt_hw_stack_init((rt_ubase_t *)stk, (rt_ubase_t)exit, sstatus);
|
||||
#else
|
||||
/* build temp thread context */
|
||||
stk -= sizeof(struct rt_hw_stack_frame);
|
||||
|
||||
thread_frame = (struct rt_hw_stack_frame *)stk;
|
||||
|
||||
int i;
|
||||
for (i = 0; i < sizeof(struct rt_hw_stack_frame) / sizeof(rt_ubase_t); i++)
|
||||
{
|
||||
((rt_ubase_t *)thread_frame)[i] = 0xdeadbeaf;
|
||||
}
|
||||
|
||||
/* set pc for thread */
|
||||
thread_frame->epc = (rt_ubase_t)exit;
|
||||
|
||||
/* set old exception mode as supervisor, because in kernel */
|
||||
thread_frame->sstatus = read_csr(sstatus) | SSTATUS_SPP;
|
||||
thread_frame->sstatus &= ~SSTATUS_SIE; /* must disable interrupt */
|
||||
|
||||
/* set stack as syscall stack */
|
||||
thread_frame->user_sp_exc_stack = (rt_ubase_t)syscall_stk;
|
||||
|
||||
#endif /* ARCH_USING_NEW_CTX_SWITCH */
|
||||
/* save new stack top */
|
||||
*thread_sp = (void *)stk;
|
||||
|
||||
/**
|
||||
* The stack for child thread:
|
||||
*
|
||||
* +------------------------+ --> kernel stack top
|
||||
* | syscall stack |
|
||||
* | |
|
||||
* | @sp | --> `user_stack`
|
||||
* | @epc | --> user ecall addr + 4 (skip ecall)
|
||||
* | @a0&a1 | --> 0 (for child return 0)
|
||||
* | |
|
||||
* +------------------------+ --> temp thread stack top
|
||||
* | temp thread stack | ^
|
||||
* | | |
|
||||
* | @sp | ---------/
|
||||
* | @epc | --> `exit` (arch_clone_exit/arch_fork_exit)
|
||||
* | |
|
||||
* +------------------------+ --> thread sp
|
||||
*/
|
||||
}
|
||||
|
||||
/**
 * void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
 *
 * Drop into user mode at @user_entry with the user sp preset to the top of
 * the user stack window; @kernel_stack becomes the trap (kernel) stack.
 * Does not return.
 */
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
{
    arch_start_umode(args, user_entry, (void *)USER_STACK_VEND, kernel_stack);
}
|
||||
|
||||
/** Extract the saved user-mode stack pointer from a user context. */
void *arch_get_usp_from_uctx(struct rt_user_context *uctx)
{
    return uctx->sp;
}
|
||||
|
||||
#endif /* ARCH_MM_MMU */
|
68
components/lwp/arch/risc-v/rv64/lwp_arch.h
Normal file
68
components/lwp/arch/risc-v/rv64/lwp_arch.h
Normal file
|
@ -0,0 +1,68 @@
|
|||
/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 */

#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__

#include <rthw.h>
#include <lwp.h>
#include <lwp_arch_comm.h>

#ifdef ARCH_MM_MMU

/* user-mode virtual address space layout */
#ifdef ARCH_MM_MMU_32BIT_LIMIT
/* layout constrained to a 32-bit address range */
#define USER_HEAP_VADDR   0xF0000000UL
#define USER_HEAP_VEND    0xFE000000UL
#define USER_STACK_VSTART 0xE0000000UL
#define USER_STACK_VEND   USER_HEAP_VADDR
#define USER_VADDR_START  0xC0000000UL
#define USER_VADDR_TOP    0xFF000000UL
#define USER_LOAD_VADDR   0xD0000000UL
#define LDSO_LOAD_VADDR   USER_LOAD_VADDR
#else
/* full 64-bit layout */
#define USER_HEAP_VADDR   0x300000000UL
#define USER_HEAP_VEND    0xffffffffffff0000UL
#define USER_STACK_VSTART 0x270000000UL
#define USER_STACK_VEND   USER_HEAP_VADDR
#define USER_VADDR_START  0x200000000UL
#define USER_VADDR_TOP    0xfffffffffffff000UL
#define USER_LOAD_VADDR   0x200000000
#define LDSO_LOAD_VADDR   0x200000000
#endif

/* this attribution is cpu specified, and it should be defined in riscv_mmu.h */
#ifndef MMU_MAP_U_RWCB
#define MMU_MAP_U_RWCB 0
#endif

#ifndef MMU_MAP_U_RW
#define MMU_MAP_U_RW 0
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Find the index of the first zero bit in @x.
 * NOTE(review): underflows to (unsigned long)-1 when x has no zero bit
 * (__builtin_ffsl(0) == 0) -- callers must not pass ~0UL.
 */
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
    return __builtin_ffsl(~x) - 1;
}

/* Invalidate the entire instruction cache on the local hart. */
rt_inline void icache_invalid_all(void)
{
    rt_hw_cpu_icache_invalidate_all();
}

#ifdef __cplusplus
}
#endif

#endif

#endif /*LWP_ARCH_H__*/
|
348
components/lwp/arch/risc-v/rv64/lwp_gcc.S
Normal file
348
components/lwp/arch/risc-v/rv64/lwp_gcc.S
Normal file
|
@ -0,0 +1,348 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2018-12-10 Jesven first version
|
||||
* 2021-02-03 lizhirui port to riscv64
|
||||
* 2021-02-19 lizhirui port to new version of rt-smart
|
||||
* 2022-11-08 Wangxiaoyao Cleanup codes;
|
||||
* Support new context switch
|
||||
*/
|
||||
|
||||
#include "rtconfig.h"
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#define __ASSEMBLY__
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#include "cpuport.h"
|
||||
#include "encoding.h"
|
||||
#include "stackframe.h"
|
||||
#include "asm-generic.h"
|
||||
|
||||
.section .text.lwp
|
||||
|
||||
/*
|
||||
* void arch_start_umode(args, text, ustack, kstack);
|
||||
*/
|
||||
.global arch_start_umode
|
||||
.type arch_start_umode, % function
|
||||
arch_start_umode:
|
||||
// load kstack for user process
|
||||
csrw sscratch, a3
|
||||
li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
|
||||
csrc sstatus, t0
|
||||
li t0, SSTATUS_SPIE // enable interrupt when return to user mode
|
||||
csrs sstatus, t0
|
||||
|
||||
csrw sepc, a1
|
||||
mv a3, a2
|
||||
sret//enter user mode
|
||||
|
||||
/*
|
||||
* void arch_crt_start_umode(args, text, ustack, kstack);
|
||||
*/
|
||||
.global arch_crt_start_umode
|
||||
.type arch_crt_start_umode, % function
|
||||
arch_crt_start_umode:
|
||||
li t0, SSTATUS_SPP | SSTATUS_SIE // set as user mode, close interrupt
|
||||
csrc sstatus, t0
|
||||
li t0, SSTATUS_SPIE // enable interrupt when return to user mode
|
||||
csrs sstatus, t0
|
||||
|
||||
csrw sepc, a1
|
||||
mv s0, a0
|
||||
mv s1, a1
|
||||
mv s2, a2
|
||||
mv s3, a3
|
||||
mv a0, s2
|
||||
call lwp_copy_return_code_to_user_stack
|
||||
mv a0, s2
|
||||
call lwp_fix_sp
|
||||
mv sp, a0//user_sp
|
||||
mv ra, a0//return address
|
||||
mv a0, s0//args
|
||||
|
||||
csrw sscratch, s3
|
||||
sret//enter user mode
|
||||
|
||||
/**
|
||||
* Unify exit point from kernel mode to enter user space
|
||||
* we handle following things here:
|
||||
* 1. restoring user mode debug state (not support yet)
|
||||
* 2. handling thread's exit request
|
||||
* 3. handling POSIX signal
|
||||
* 4. restoring user context
|
||||
* 5. jump to user mode
|
||||
*/
|
||||
.global arch_ret_to_user
|
||||
arch_ret_to_user:
|
||||
// TODO: we don't support kernel gdb server in risc-v yet
|
||||
// so we don't check debug state here and handle debugging bussiness
|
||||
|
||||
call lwp_check_exit_request
|
||||
beqz a0, 1f
|
||||
mv a0, x0
|
||||
call sys_exit
|
||||
|
||||
1:
|
||||
call lwp_signal_check
|
||||
beqz a0, ret_to_user_exit
|
||||
J user_do_signal
|
||||
|
||||
ret_to_user_exit:
|
||||
RESTORE_ALL
|
||||
// `RESTORE_ALL` also reset sp to user sp, and setup sscratch
|
||||
sret
|
||||
|
||||
/**
|
||||
* Restore user context from exception frame stroraged in ustack
|
||||
* And handle pending signals;
|
||||
*/
|
||||
arch_signal_quit:
|
||||
call lwp_signal_restore
|
||||
call arch_get_usp_from_uctx
|
||||
// return value is user sp
|
||||
mv sp, a0
|
||||
|
||||
// restore user sp before enter trap
|
||||
addi a0, sp, CTX_REG_NR * REGBYTES
|
||||
csrw sscratch, a0
|
||||
|
||||
RESTORE_ALL
|
||||
SAVE_ALL
|
||||
j arch_ret_to_user
|
||||
|
||||
/**
|
||||
* Prepare and enter user signal handler
|
||||
* Move user exception frame and setup signal return
|
||||
* routine in user stack
|
||||
*/
|
||||
user_do_signal:
|
||||
/* prefetch ustack to avoid corrupted status in RESTORE/STORE pair below */
|
||||
LOAD t0, FRAME_OFF_SP(sp)
|
||||
addi t1, t0, -CTX_REG_NR * REGBYTES
|
||||
LOAD t2, (t0)
|
||||
li t3, -0x1000
|
||||
1:
|
||||
add t0, t0, t3
|
||||
LOAD t2, (t0)
|
||||
bgt t0, t1, 1b
|
||||
|
||||
/** restore and backup kernel sp carefully to avoid leaking */
|
||||
addi t0, sp, CTX_REG_NR * REGBYTES
|
||||
csrw sscratch, t0
|
||||
|
||||
RESTORE_ALL
|
||||
SAVE_ALL
|
||||
|
||||
/**
|
||||
* save lwp_sigreturn in user memory
|
||||
*/
|
||||
mv s0, sp
|
||||
la t0, lwp_sigreturn
|
||||
la t1, lwp_sigreturn_end
|
||||
// t1 <- size
|
||||
sub t1, t1, t0
|
||||
// s0 <- dst
|
||||
sub s0, s0, t1
|
||||
mv s2, t1
|
||||
lwp_sigreturn_copy_loop:
|
||||
addi t2, t1, -1
|
||||
add t3, t0, t2
|
||||
add t4, s0, t2
|
||||
lb t5, 0(t3)
|
||||
sb t5, 0(t4)
|
||||
mv t1, t2
|
||||
bnez t1, lwp_sigreturn_copy_loop
|
||||
|
||||
/**
|
||||
* 1. clear sscratch & restore kernel sp to
|
||||
* enter kernel mode routine
|
||||
* 2. storage exp frame address to restore context,
|
||||
* by calling to lwp_signal_backup
|
||||
* 3. storage lwp_sigreturn entry address
|
||||
* 4. get signal id as param for signal handler
|
||||
*/
|
||||
mv s1, sp
|
||||
csrrw sp, sscratch, x0
|
||||
|
||||
/**
|
||||
* synchronize dcache & icache if target is
|
||||
* a Harvard Architecture machine, otherwise
|
||||
* do nothing
|
||||
*/
|
||||
mv a0, s0
|
||||
mv a1, s2
|
||||
call rt_hw_sync_cache_local
|
||||
|
||||
/**
|
||||
* backup user sp (point to saved exception frame, skip sigreturn routine)
|
||||
* And get signal id
|
||||
|
||||
* a0: user sp
|
||||
* a1: user_pc (not used, marked as 0 to avoid abuse)
|
||||
* a2: user_flag (not used, marked as 0 to avoid abuse)
|
||||
*/
|
||||
mv a0, s1
|
||||
mv a1, zero
|
||||
mv a2, zero
|
||||
call lwp_signal_backup
|
||||
|
||||
/**
|
||||
* backup signal id in s2,
|
||||
* and get sighandler by signal id
|
||||
*/
|
||||
mv s2, a0
|
||||
call lwp_sighandler_get
|
||||
|
||||
/**
|
||||
* set regiter RA to user signal handler
|
||||
* set sp to user sp & save kernel sp in sscratch
|
||||
*/
|
||||
mv ra, s0
|
||||
csrw sscratch, sp
|
||||
mv sp, s0
|
||||
|
||||
/**
|
||||
* a0 is signal_handler,
|
||||
* s1 = s0 == NULL ? lwp_sigreturn : s0;
|
||||
*/
|
||||
mv s1, s0
|
||||
beqz a0, skip_user_signal_handler
|
||||
mv s1, a0
|
||||
|
||||
skip_user_signal_handler:
|
||||
// enter user mode and enable interrupt when return to user mode
|
||||
li t0, SSTATUS_SPP
|
||||
csrc sstatus, t0
|
||||
li t0, SSTATUS_SPIE
|
||||
csrs sstatus, t0
|
||||
|
||||
// sepc <- signal_handler
|
||||
csrw sepc, s1
|
||||
// a0 <- signal id
|
||||
mv a0, s2
|
||||
sret
|
||||
|
||||
.align 3
|
||||
lwp_debugreturn:
|
||||
li a7, 0xff
|
||||
ecall
|
||||
|
||||
.align 3
|
||||
lwp_sigreturn:
|
||||
li a7, 0xfe
|
||||
ecall
|
||||
|
||||
.align 3
|
||||
lwp_sigreturn_end:
|
||||
|
||||
.align 3
|
||||
.global lwp_thread_return
|
||||
lwp_thread_return:
|
||||
li a0, 0
|
||||
li a7, 1
|
||||
ecall
|
||||
|
||||
.align 3
|
||||
.global lwp_thread_return_end
|
||||
lwp_thread_return_end:
|
||||
|
||||
.globl arch_get_tidr
|
||||
arch_get_tidr:
|
||||
mv a0, tp
|
||||
ret
|
||||
|
||||
.global arch_set_thread_area
|
||||
arch_set_thread_area:
|
||||
.globl arch_set_tidr
|
||||
arch_set_tidr:
|
||||
mv tp, a0
|
||||
ret
|
||||
|
||||
.global arch_clone_exit
|
||||
.global arch_fork_exit
|
||||
arch_fork_exit:
|
||||
arch_clone_exit:
|
||||
j arch_syscall_exit
|
||||
|
||||
START_POINT(syscall_entry)
|
||||
#ifndef ARCH_USING_NEW_CTX_SWITCH
|
||||
//swap to thread kernel stack
|
||||
csrr t0, sstatus
|
||||
andi t0, t0, 0x100
|
||||
beqz t0, __restore_sp_from_tcb
|
||||
|
||||
__restore_sp_from_sscratch: // from kernel
|
||||
csrr t0, sscratch
|
||||
j __move_stack_context
|
||||
|
||||
__restore_sp_from_tcb: // from user
|
||||
la a0, rt_current_thread
|
||||
LOAD a0, 0(a0)
|
||||
jal get_thread_kernel_stack_top
|
||||
mv t0, a0
|
||||
|
||||
__move_stack_context:
|
||||
mv t1, sp//src
|
||||
mv sp, t0//switch stack
|
||||
addi sp, sp, -CTX_REG_NR * REGBYTES
|
||||
//copy context
|
||||
li s0, CTX_REG_NR//cnt
|
||||
mv t2, sp//dst
|
||||
|
||||
copy_context_loop:
|
||||
LOAD t0, 0(t1)
|
||||
STORE t0, 0(t2)
|
||||
addi s0, s0, -1
|
||||
addi t1, t1, 8
|
||||
addi t2, t2, 8
|
||||
bnez s0, copy_context_loop
|
||||
#endif /* ARCH_USING_NEW_CTX_SWITCH */
|
||||
|
||||
/* fetch SYSCALL ID */
|
||||
LOAD a7, 17 * REGBYTES(sp)
|
||||
addi a7, a7, -0xfe
|
||||
beqz a7, arch_signal_quit
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
/* save setting when syscall enter */
|
||||
call rt_thread_self
|
||||
call lwp_user_setting_save
|
||||
#endif
|
||||
|
||||
mv a0, sp
|
||||
OPEN_INTERRUPT
|
||||
call syscall_handler
|
||||
j arch_syscall_exit
|
||||
START_POINT_END(syscall_entry)
|
||||
|
||||
/*
 * Common syscall exit path. Decides from the saved sstatus.SPP bit
 * whether to return to user mode (arch_ret_to_user) or to finish a
 * kernel-to-kernel return here.
 */
.global arch_syscall_exit
arch_syscall_exit:
    CLOSE_INTERRUPT

#if defined(ARCH_MM_MMU)
    /* saved sstatus is frame slot 2; SPP (0x100) set means the trap
     * came from kernel mode, so skip the user-return path */
    LOAD s0, 2 * REGBYTES(sp)
    andi s0, s0, 0x100
    bnez s0, dont_ret_to_user
    j arch_ret_to_user
#endif

dont_ret_to_user:

#ifdef ARCH_MM_MMU
    /* restore setting when syscall exit */
    call rt_thread_self
    call lwp_user_setting_restore

    /* after restore the reg `tp`, need modify context:
     * write the (possibly updated) tp back into frame slot 4 so that
     * RESTORE_ALL below reloads the new value */
    STORE tp, 4 * REGBYTES(sp)
#endif

    //restore context
    RESTORE_ALL
    csrw sscratch, zero
    sret
|
109
components/lwp/arch/risc-v/rv64/reloc.c
Normal file
109
components/lwp/arch/risc-v/rv64/reloc.c
Normal file
|
@ -0,0 +1,109 @@
|
|||
#include "mm_aspace.h"
|
||||
#include <rtthread.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <elf.h>
|
||||
#ifdef ARCH_MM_MMU
|
||||
#include <mmu.h>
|
||||
#include <page.h>
|
||||
#endif
|
||||
|
||||
/* Minimal local mirror of an ELF64 symbol-table entry, declared here so
 * the relocator does not pull in the full elf.h symbol definitions.
 * Field order matches the standard Elf64_Sym layout. */
typedef struct
{
    Elf64_Word st_name;     /* index into the string table */
    Elf64_Addr st_value;    /* symbol value (address/offset) */
    Elf64_Word st_size;     /* object size in bytes */
    unsigned char st_info;  /* type and binding */
    unsigned char st_other; /* visibility */
    Elf64_Half st_shndx;    /* defining section index */
} Elf64_sym;
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
/*
 * Apply dynamic relocations to a freshly loaded user ELF image.
 *
 * aspace        : target address space; patch targets are translated to
 *                 kernel-accessible addresses via rt_hw_mmu_v2p first
 * text_start    : actual load base of the image
 * rel_dyn_start : start of the relocation table inside the image
 * rel_dyn_size  : size of that table, in bytes
 * got_start     : start of the GOT (every slot is rebased wholesale)
 * got_size      : size of the GOT, in bytes
 * dynsym        : dynamic symbol table; required when rel_dyn_size != 0
 *
 * NOTE(review): this appears ported verbatim from the 32-bit ARM lwp
 * relocator -- it walks 8-byte Elf32_Rel-style entries with 32-bit
 * fields and matches R_ARM_RELATIVE / R_ARM_ABS32, even though this is
 * the rv64 port (Elf64_Rela entries are 24 bytes and RISC-V defines
 * R_RISCV_* types). Confirm against how user images are produced
 * before relying on it.
 */
void arch_elf_reloc(rt_aspace_t aspace, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
{
    size_t rel_off;
    void* addr;

    /* a relocation table without a symbol table cannot be processed */
    if (rel_dyn_size && !dynsym)
    {
        return;
    }
    for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
    {
        uint32_t v1, v2; /* v1 = r_offset, v2 = r_info */

        addr = rt_hw_mmu_v2p(aspace, (void *)(((rt_size_t)rel_dyn_start) + rel_off));
        memcpy(&v1, addr, 4);
        addr = rt_hw_mmu_v2p(aspace, (void *)(((rt_size_t)rel_dyn_start) + rel_off + 4));
        memcpy(&v2, addr, 4);

        /* translate the patch target before writing through it */
        addr = rt_hw_mmu_v2p(aspace, (void *)((rt_size_t)text_start + v1));
        if ((v2 & 0xff) == R_ARM_RELATIVE)
        {
            /* base-relative: slide the stored value by the load base */
            *(rt_size_t*)addr += (rt_size_t)text_start;
        }
        else if ((v2 & 0xff) == R_ARM_ABS32)
        {
            uint32_t t;
            t = (v2 >> 8); /* symbol index */
            if (t) /* 0 is UDF */
            {
                *(rt_size_t*)addr = (((rt_size_t)text_start) + dynsym[t].st_value);
            }
        }
    }
    /* modify got: rebase every GOT slot by the load address */
    if (got_size)
    {
        uint32_t *got_item = (uint32_t*)got_start;

        for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
        {
            addr = rt_hw_mmu_v2p(aspace, got_item);
            *(rt_size_t *)addr += (rt_size_t)text_start;
        }
    }
}
|
||||
#else
|
||||
|
||||
/*
 * Non-MMU variant: the image is identity-mapped, so relocation targets
 * are patched directly without address translation.
 *
 * NOTE(review): like the MMU variant, this walks 8-byte Elf32_Rel-style
 * entries, matches R_ARM_* relocation types, and patches through
 * uint32_t -- all suspect in an rv64 port; confirm the user image
 * format before relying on it.
 */
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
{
    size_t rel_off;

    /* a relocation table without a symbol table cannot be processed */
    if (rel_dyn_size && !dynsym)
    {
        return;
    }
    for (rel_off = 0; rel_off < rel_dyn_size; rel_off += 8)
    {
        uint32_t v1, v2; /* v1 = r_offset, v2 = r_info */

        memcpy(&v1, ((rt_uint8_t *)rel_dyn_start) + rel_off, 4);
        memcpy(&v2, ((rt_uint8_t *)rel_dyn_start) + rel_off + 4, 4);

        if ((v2 & 0xff) == R_ARM_RELATIVE)
        {
            /* base-relative: slide the stored value by the load base */
            *(uint32_t*)(((rt_size_t)text_start) + v1) += (uint32_t)text_start;
        }
        else if ((v2 & 0xff) == R_ARM_ABS32)
        {
            uint32_t t;
            t = (v2 >> 8); /* symbol index */
            if (t) /* 0 is UDF */
            {
                *(uint32_t*)(((rt_size_t)text_start) + v1) = (uint32_t)(((rt_size_t)text_start) + dynsym[t].st_value);
            }
        }
    }
    /* modify got: rebase every GOT slot by the load address */
    if (got_size)
    {
        uint32_t *got_item = (uint32_t*)got_start;

        for (rel_off = 0; rel_off < got_size; rel_off += 4, got_item++)
        {
            *got_item += (uint32_t)text_start;
        }
    }
}
|
||||
#endif
|
11
components/lwp/arch/x86/i386/SConscript
Normal file
11
components/lwp/arch/x86/i386/SConscript
Normal file
|
@ -0,0 +1,11 @@
|
|||
# RT-Thread building script for component
# Registers every C and assembly source in this directory under the
# 'lwp-x86-i386' group; built only when RT_USING_SMART is enabled.

from building import *

cwd = GetCurrentDir()
# All C sources and GAS assembly files in this directory.
src = Glob('*.c') + Glob('*.S')
# Export this directory on the include path for dependents.
CPPPATH = [cwd]

group = DefineGroup('lwp-x86-i386', src, depend = ['RT_USING_SMART'], CPPPATH = CPPPATH)

Return('group')
|
371
components/lwp/arch/x86/i386/lwp_arch.c
Normal file
371
components/lwp/arch/x86/i386/lwp_arch.c
Normal file
|
@ -0,0 +1,371 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-7-14 JasonHu first version
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <stddef.h>
|
||||
#include <rtconfig.h>
|
||||
#include <rtdbg.h>
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
|
||||
#include <stackframe.h>
|
||||
#include <interrupt.h>
|
||||
#include <segment.h>
|
||||
|
||||
#include <mmu.h>
|
||||
#include <page.h>
|
||||
#include <lwp_mm_area.h>
|
||||
#include <lwp_user_mm.h>
|
||||
#include <lwp_arch.h>
|
||||
|
||||
#ifdef RT_USING_SIGNALS
|
||||
#include <lwp_signal.h>
|
||||
#endif /* RT_USING_SIGNALS */
|
||||
|
||||
extern size_t g_mmu_table[];
|
||||
|
||||
/*
 * Demand-grow the user stack after a page fault at `addr`.
 *
 * Returns 1 when the fault is handled -- either a page was mapped, or a
 * SIGSEGV was delivered so the fault path should simply return and let
 * the thread exit. Returns 0 only when the fault is unhandled and
 * RT_USING_SIGNALS is disabled.
 */
int arch_expand_user_stack(void *addr)
{
    int ret = 0;
    size_t stack_addr = (size_t)addr;

    /* round the faulting address down to its page base */
    stack_addr &= ~PAGE_OFFSET_MASK;
    if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
    {
        void *map = lwp_map_user(lwp_self(), (void *)stack_addr, PAGE_SIZE, RT_FALSE);

        /* a racing fault may already have mapped the page, so "already
         * accessible" also counts as success */
        if (map || lwp_user_accessable(addr, 1))
        {
            ret = 1; /* map success */
        }
        else /* map failed, send signal SIGSEGV */
        {
#ifdef RT_USING_SIGNALS
            dbg_log(DBG_ERROR, "[fault] thread %s mapped addr %p failed!\n", rt_thread_self()->parent.name, addr);
            lwp_thread_kill(rt_thread_self(), SIGSEGV);
            ret = 1; /* return 1, will return back to intr, then check exit */
#endif
        }
    }
    else /* not stack, send signal SIGSEGV */
    {
#ifdef RT_USING_SIGNALS
        dbg_log(DBG_ERROR, "[fault] thread %s access unmapped addr %p!\n", rt_thread_self()->parent.name, addr);
        lwp_thread_kill(rt_thread_self(), SIGSEGV);
        ret = 1; /* return 1, will return back to intr, then check exit */
#endif
    }
    return ret;
}
|
||||
|
||||
/* On x86 the trap frame lives at a fixed place (tss.esp0), so this
 * helper used by other ports is an intentional stub here. */
void *get_thread_kernel_stack_top(rt_thread_t thread)
{
    return RT_NULL;
}

/**
 * don't support this in i386, it's ok!
 * (intentional stub; callers must tolerate RT_NULL)
 */
void *arch_get_user_sp()
{
    return RT_NULL;
}
|
||||
|
||||
int arch_user_space_init(struct rt_lwp *lwp)
|
||||
{
|
||||
rt_size_t *mmu_table;
|
||||
|
||||
mmu_table = (rt_size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
|
||||
if (!mmu_table)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
rt_memset(mmu_table, 0, ARCH_PAGE_SIZE);
|
||||
|
||||
lwp->end_heap = USER_HEAP_VADDR;
|
||||
memcpy(mmu_table, g_mmu_table, ARCH_PAGE_SIZE / 4);
|
||||
memset((rt_uint8_t *)mmu_table + ARCH_PAGE_SIZE / 4, 0, ARCH_PAGE_SIZE / 4 * 3);
|
||||
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
|
||||
if (rt_hw_mmu_map_init(&lwp->mmu_info, (void*)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table, PV_OFFSET) < 0)
|
||||
{
|
||||
rt_pages_free(mmu_table, 0);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *arch_kernel_mmu_table_get(void)
|
||||
{
|
||||
return (void *)((char *)g_mmu_table);
|
||||
}
|
||||
|
||||
void arch_user_space_vtable_free(struct rt_lwp *lwp)
|
||||
{
|
||||
if (lwp && lwp->mmu_info.vtable)
|
||||
{
|
||||
rt_pages_free(lwp->mmu_info.vtable, 0);
|
||||
lwp->mmu_info.vtable = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set the user TLS area: program the TLS segment register and cache the
 * pointer in the thread control block. */
void arch_set_thread_area(void *p)
{
    rt_hw_seg_tls_set((rt_ubase_t) p);
    rt_thread_t cur = rt_thread_self();
    cur->thread_idr = p; /* update thread idr after first set */
}
|
||||
|
||||
/* Read the thread-id register (user TLS pointer) from the TLS segment.
 * Pure kernel threads have no user TLS, so they get NULL. */
void *arch_get_tidr(void)
{
    rt_thread_t cur = rt_thread_self();
    if (!cur->lwp) /* no lwp, don't get thread idr from tls seg */
        return NULL;
    return (void *)rt_hw_seg_tls_get(); /* get thread idr from tls seg */
}

/* Write the thread-id register; a no-op for pure kernel threads. */
void arch_set_tidr(void *p)
{
    rt_thread_t cur = rt_thread_self();
    if (!cur->lwp) /* no lwp, don't set thread idr to tls seg */
        return;
    rt_hw_seg_tls_set((rt_ubase_t) p); /* set tls seg addr as thread idr */
}
|
||||
|
||||
/* Fill a trap frame with the baseline user-mode register state: user
 * segment selectors, zeroed general registers, and EFLAGS with
 * interrupts enabled. */
static void lwp_user_stack_init(rt_hw_stack_frame_t *frame)
{
    frame->ds = frame->es = USER_DATA_SEL;
    frame->cs = USER_CODE_SEL;
    frame->ss = USER_STACK_SEL;
    frame->gs = USER_TLS_SEL; /* gs carries the TLS segment */
    frame->fs = 0; /* unused */

    frame->edi = frame->esi = \
    frame->ebp = frame->esp_dummy = 0;
    frame->eax = frame->ebx = \
    frame->ecx = frame->edx = 0;

    frame->error_code = 0;
    frame->vec_no = 0;

    /* MBS = mandatory bit set, IF_1 = interrupts enabled,
     * IOPL_3 = user-level I/O privilege */
    frame->eflags = (EFLAGS_MBS | EFLAGS_IF_1 | EFLAGS_IOPL_3);
}
|
||||
|
||||
extern void lwp_switch_to_user(void *frame);
/**
 * user entry, set frame.
 * at the end of execute, we need enter user mode,
 * in x86, we can set stack, arg, text entry in a stack frame,
 * then pop then into register, final use iret to switch kernel mode to user mode.
 *
 * args    : argument block for the program (delivered in ebx)
 * text    : user-mode entry point (loaded into eip)
 * ustack  : top of the user stack (esp starts 32 bytes below it)
 * k_stack : top of the kernel stack where the frame is built
 */
void arch_start_umode(void *args, const void *text, void *ustack, void *k_stack)
{
    rt_uint8_t *stk = k_stack;
    /* carve an interrupt-style frame out of the kernel stack top */
    stk -= sizeof(struct rt_hw_stack_frame);
    struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)stk;

    lwp_user_stack_init(frame);
    /* leave a small scratch area above the initial user esp */
    frame->esp = (rt_uint32_t)ustack - 32;
    frame->ebx = (rt_uint32_t)args;
    frame->eip = (rt_uint32_t)text;
    lwp_switch_to_user(frame);
    /* should never return */
}
|
||||
|
||||
/* Enter a freshly exec'ed program: same as arch_start_umode, but the
 * user stack is always the top of the fixed user stack region. */
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
{
    arch_start_umode(args, (const void *)user_entry, (void *)USER_STACK_VEND, kernel_stack);
}
|
||||
|
||||
/* trampoline bounds, defined in lwp_gcc.S */
extern void lwp_thread_return();
extern void lwp_thread_return_end();

/* Copy the thread-exit trampoline onto the top of the user stack and
 * return its new (user-space) address; it is later installed as the
 * thread entry function's return address. */
static void *lwp_copy_return_code_to_user_stack(void *ustack)
{
    size_t size = (size_t)lwp_thread_return_end - (size_t)lwp_thread_return;
    void *retcode = (void *)((size_t)ustack - size);
    memcpy(retcode, (void *)lwp_thread_return, size);
    return retcode;
}
|
||||
|
||||
/**
 * when called sys_thread_create, need create a thread, after thread stared, will come here,
 * like arch_start_umode, will enter user mode, but we must set thread exit function. it looks like:
 * void func(void *arg)
 * {
 *    ...
 * }
 * when thread func return, we must call exit code to exit thread, or not the program runs away.
 * so we need copy exit code to user and call exit code when func return.
 */
void arch_crt_start_umode(void *args, const void *text, void *ustack, void *k_stack)
{
    RT_ASSERT(ustack != NULL);

    /* build an interrupt-style frame near the top of the kernel stack */
    rt_uint8_t *stk;
    stk = (rt_uint8_t *)((rt_uint8_t *)k_stack + sizeof(rt_ubase_t));
    stk = (rt_uint8_t *)RT_ALIGN_DOWN(((rt_ubase_t)stk), sizeof(rt_ubase_t));
    stk -= sizeof(struct rt_hw_stack_frame);
    struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)stk;

    lwp_user_stack_init(frame);

    /* make user thread stack */
    unsigned long *retcode = lwp_copy_return_code_to_user_stack(ustack); /* copy ret code */
    unsigned long *retstack = (unsigned long *)RT_ALIGN_DOWN(((rt_ubase_t)retcode), sizeof(rt_ubase_t));

    /**
     * x86 call stack
     *
     *  retcode here
     *
     *  arg n
     *  arg n - 1
     *  ...
     *  arg 2
     *  arg 1
     *  arg 0
     *  eip (caller return addr, point to retcode)
     *  esp
     */
    *(--retstack) = (unsigned long) args;    /* arg */
    *(--retstack) = (unsigned long) retcode; /* ret eip: falls into the exit trampoline */

    frame->esp = (rt_uint32_t)retstack;
    frame->eip = (rt_uint32_t)text;
    lwp_switch_to_user(frame);
    /* should never return */
}
|
||||
|
||||
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
|
||||
{
|
||||
return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
|
||||
}
|
||||
|
||||
/**
 * set exec context for fork/clone.
 * user_stack(unused)
 *
 * NOTE(review): lwp_arch_comm.h declares this as
 *   int arch_set_thread_context(void (*exit)(void), ...)
 * while this definition returns void with a `void *exit_addr` first
 * parameter -- the prototypes should be reconciled.
 */
void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp)
{
    /**
     * thread kernel stack was set to tss.esp0, when intrrupt/syscall occur,
     * the stack frame will store in kernel stack top, so we can get the stack
     * frame by kernel stack top.
     */
    rt_hw_stack_frame_t *frame = (rt_hw_stack_frame_t *)((rt_ubase_t)new_thread_stack - sizeof(rt_hw_stack_frame_t));

    frame->eax = 0; /* child return 0 */

    /* the scheduler switch context sits directly below the trap frame */
    rt_hw_context_t *context = (rt_hw_context_t *) (((rt_uint32_t *)frame) - HW_CONTEXT_MEMBER_NR);
    context->eip = (void *)exit_addr; /* when thread started, jump to intr exit for enter user mode */
    context->ebp = context->ebx = context->esi = context->edi = 0;

    /**
     * set sp as the address of first member of rt_hw_context,
     * when scheduler call switch, pop stack from context stack.
     */
    *thread_sp = (void *)&context->ebp;

    /**
     * after set context, the stack like this:
     *
     *      -----------
     * stack frame | eax = 0
     *      -----------
     * context (only HW_CONTEXT_MEMBER_NR) | eip = rt_hw_intr_exit
     *      -----------
     * thread sp | to  <- rt_hw_context_switch(from, to)
     *      -----------
     */
}
|
||||
|
||||
#ifdef RT_USING_SIGNALS
|
||||
|
||||
/* room reserved for the signal-return trampoline copied to user space */
#define SIGNAL_RET_CODE_SIZE 16

/* Frame pushed onto the USER stack while a signal handler runs; holds
 * the interrupted context plus the return trampoline. */
struct rt_signal_frame
{
    char *ret_addr; /* return addr when handler return */
    int signo;      /* signal for user handler arg */
    rt_hw_stack_frame_t frame; /* save kernel signal stack */
    char ret_code[SIGNAL_RET_CODE_SIZE]; /* save return code */
};
typedef struct rt_signal_frame rt_signal_frame_t;
|
||||
|
||||
/* signal-return trampoline bounds, defined in lwp_gcc.S */
extern void lwp_signal_return();
extern void lwp_signal_return_end();

/*
 * Deliver a pending signal (if any) to the current thread by rewriting
 * the saved trap frame so the return-to-user lands in the user handler.
 * Called on the kernel exit path with `frame` = the saved user context.
 */
void lwp_try_do_signal(rt_hw_stack_frame_t *frame)
{
    if (!lwp_signal_check())
        return;

    /* 1. backup signal mask */
    int signal = lwp_signal_backup((void *) frame->esp, (void *) frame->eip, (void *) frame->eflags);

    /* 2. get signal handler */
    lwp_sighandler_t handler = lwp_sighandler_get(signal);
    if (handler == RT_NULL) /* no handler, ignore */
    {
        lwp_signal_restore();
        return;
    }

    rt_base_t level = rt_hw_interrupt_disable();
    /* 3. backup frame: carve an 8-byte-aligned signal frame out of the
     * user stack and stash the interrupted context inside it */
    rt_signal_frame_t *sig_frame = (rt_signal_frame_t *)((frame->esp - sizeof(rt_signal_frame_t)) & -8UL);
    memcpy(&sig_frame->frame, frame, sizeof(rt_hw_stack_frame_t));
    sig_frame->signo = signal;

    /**
     * 4. copy user return code into user stack
     *
     * save current frame on user stack. the user stack like:
     *
     * ----------
     * user code stack
     * ----------+ -> esp before enter kernel
     * signal frame
     * ----------+ -> esp when handle signal handler
     * signal handler stack
     * ----------
     */
    size_t ret_code_size = (size_t)lwp_signal_return_end - (size_t)lwp_signal_return;
    memcpy(sig_frame->ret_code, (void *)lwp_signal_return, ret_code_size);
    sig_frame->ret_addr = sig_frame->ret_code;

    /* 5. jmp to user execute handler, update frame register info */
    lwp_user_stack_init(frame);
    frame->eip = (rt_uint32_t) handler;
    frame->esp = (rt_uint32_t) sig_frame;

    rt_hw_interrupt_enable(level);
}
|
||||
|
||||
/*
 * Called when the signal-return trampoline traps back into the kernel:
 * restores the interrupted user context from the signal frame that
 * lwp_try_do_signal() saved on the user stack.
 */
void lwp_signal_do_return(rt_hw_stack_frame_t *frame)
{
    /**
     * ASSUME: in x86, each stack push and pop element is 4 byte. so STACK_ELEM_SIZE = sizeof(int) => 4.
     * when signal handler return, the stack move to the buttom of signal frame.
     * but return will pop eip from esp, then {esp += STACK_ELEM_SIZE}, thus {esp = (signal frame) + STACK_ELEM_SIZE}.
     * so {(signal frame) = esp - STACK_ELEM_SIZE}
     */
    rt_signal_frame_t *sig_frame = (rt_signal_frame_t *)(frame->esp - sizeof(rt_uint32_t));
    memcpy(frame, &sig_frame->frame, sizeof(rt_hw_stack_frame_t));

    /**
     * restore signal info, but don't use rt_user_context,
     * we use sig_frame to restore stack frame
     */
    lwp_signal_restore();
}
|
||||
#endif /* RT_USING_SIGNALS */
|
||||
|
||||
#endif /* ARCH_MM_MMU */
|
49
components/lwp/arch/x86/i386/lwp_arch.h
Normal file
49
components/lwp/arch/x86/i386/lwp_arch.h
Normal file
|
@ -0,0 +1,49 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-07-18 JasonHu first version
|
||||
*/
|
||||
|
||||
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__

#include <lwp.h>
#include <lwp_arch_comm.h>
#include <stackframe.h>

#ifdef ARCH_MM_MMU
/* i386 user address-space layout (virtual addresses) */
#define USER_VADDR_TOP    0xFFFFF000UL   /* exclusive top of user space */
#define USER_HEAP_VEND    0xE0000000UL   /* heap may grow up to here */
#define USER_HEAP_VADDR   0x90000000UL   /* heap start */
#define USER_STACK_VSTART 0x80000000UL   /* stack region bottom */
#define USER_STACK_VEND   USER_HEAP_VADDR /* stack region top */
#define LDSO_LOAD_VADDR   0x70000000UL   /* dynamic linker load base */
#define USER_VADDR_START  0x40000000UL
#define USER_LOAD_VADDR   USER_VADDR_START

/* special syscall id used by the signal-return trampoline; must match
 * the immediate in lwp_gcc.S (movl $0xe000, %eax) */
#define SIGNAL_RETURN_SYSCAL_ID 0xe000

#ifdef __cplusplus
extern "C" {
#endif

rt_thread_t rt_thread_sp_to_thread(void *spmember_addr);

void lwp_signal_do_return(rt_hw_stack_frame_t *frame);

/* Find the index of the first zero bit in x.
 * NOTE(review): undefined for x == ~0UL -- __builtin_ffsl(0) is 0, so
 * this returns (unsigned long)-1; callers must guarantee a zero bit. */
rt_inline unsigned long rt_hw_ffz(unsigned long x)
{
    return __builtin_ffsl(~x) - 1;
}

#ifdef __cplusplus
}
#endif

#endif /* ARCH_MM_MMU */

#endif /*LWP_ARCH_H__*/
|
73
components/lwp/arch/x86/i386/lwp_gcc.S
Normal file
73
components/lwp/arch/x86/i386/lwp_gcc.S
Normal file
|
@ -0,0 +1,73 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-7-14 JasonHu first version
|
||||
*/
|
||||
|
||||
#include "rtconfig.h"
|
||||
|
||||
.section .text.lwp
|
||||
|
||||
/*
 * void lwp_switch_to_user(frame);
 * First entry into user mode: point esp at the prepared
 * rt_hw_stack_frame and unwind it exactly like an interrupt return.
 */
.global lwp_switch_to_user
lwp_switch_to_user:
    movl 0x4(%esp), %esp    // esp = frame argument
    addl $4,%esp            // skip intr no
    popal                   // restore general-purpose registers
    popl %gs
    popl %fs
    popl %es
    popl %ds
    addl $4, %esp           // skip error_code
    iret                    // enter to user mode
|
||||
|
||||
.extern arch_syscall_exit

/*
 * fork/vfork/clone entry stubs: the real work happens in _sys_fork /
 * _sys_clone; the child resumes at arch_fork_exit / arch_clone_exit
 * and leaves through the common syscall exit path.
 */
.global sys_fork
.global sys_vfork
.global arch_fork_exit
sys_fork:
sys_vfork:
    jmp _sys_fork
arch_fork_exit:
    jmp arch_syscall_exit

.global sys_clone
.global arch_clone_exit
sys_clone:
    jmp _sys_clone
arch_clone_exit:
    jmp arch_syscall_exit
|
||||
|
||||
/**
 * rt thread return code
 * Copied onto the user stack as a trampoline: when a user thread's
 * entry function returns it falls into this code, which issues
 * sys_exit(0) through int 0x80 (eax = 1 selects sys_exit).
 */
.align 4
.global lwp_thread_return
lwp_thread_return:
    movl $1, %eax      // eax = 1, sys_exit
    movl $0, %ebx      // exit status 0
    int $0x80
.align 4
.global lwp_thread_return_end
lwp_thread_return_end:

#ifdef RT_USING_SIGNALS
/**
 * signal return code
 * Copied onto the user stack as the signal handler's return address;
 * traps back into the kernel with the special id 0xe000, which must
 * match SIGNAL_RETURN_SYSCAL_ID in lwp_arch.h.
 */
.align 4
.global lwp_signal_return
lwp_signal_return:
    movl $0xe000, %eax // special syscall id for return code
    int $0x80
.align 4
.global lwp_signal_return_end
lwp_signal_return_end:

#endif /* RT_USING_SIGNALS */
|
41
components/lwp/arch/x86/i386/reloc.c
Normal file
41
components/lwp/arch/x86/i386/reloc.c
Normal file
|
@ -0,0 +1,41 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-07-28 JasonHu first version
|
||||
*/
|
||||
|
||||
#include <rtthread.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <elf.h>
|
||||
#ifdef ARCH_MM_MMU
|
||||
#include <mmu.h>
|
||||
#include <page.h>
|
||||
#endif
|
||||
|
||||
/* Minimal local mirror of an ELF32 symbol-table entry; field order
 * matches the standard Elf32_Sym layout. */
typedef struct
{
    Elf32_Word st_name;     /* index into the string table */
    Elf32_Addr st_value;    /* symbol value (address/offset) */
    Elf32_Word st_size;     /* object size in bytes */
    unsigned char st_info;  /* type and binding */
    unsigned char st_other; /* visibility */
    Elf32_Half st_shndx;    /* defining section index */
} Elf32_sym;
|
||||
|
||||
#ifdef ARCH_MM_MMU
/*
 * ELF relocation is intentionally not implemented on i386: user images
 * are expected to be linked at a fixed load address, so there is
 * nothing to patch.
 * NOTE(review): this signature takes rt_mmu_info* while the rv64 port
 * and lwp_arch_comm.h use rt_aspace_t -- confirm which signature the
 * loader actually calls with on this architecture.
 */
void arch_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
{

}
#else

/* Non-MMU variant: also intentionally empty on i386. */
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
{

}
#endif
|
1398
components/lwp/lwp.c
Normal file
1398
components/lwp/lwp.c
Normal file
File diff suppressed because it is too large
Load diff
308
components/lwp/lwp.h
Normal file
308
components/lwp/lwp.h
Normal file
|
@ -0,0 +1,308 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2018-06-29 heyuanjie first version
|
||||
* 2019-10-12 Jesven Add MMU and userspace support
|
||||
* 2020-10-08 Bernard Architecture and code cleanup
|
||||
* 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
|
||||
*/
|
||||
|
||||
/*
|
||||
* RT-Thread light-weight process
|
||||
*/
|
||||
#ifndef __LWP_H__
|
||||
#define __LWP_H__
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
#include <dfs.h>
|
||||
|
||||
#include "lwp_pid.h"
|
||||
#include "lwp_ipc.h"
|
||||
#include "lwp_signal.h"
|
||||
#include "lwp_syscall.h"
|
||||
#include "lwp_avl.h"
|
||||
#include "mm_aspace.h"
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
#include "lwp_shm.h"
|
||||
|
||||
#include "mmu.h"
|
||||
#include "page.h"
|
||||
#else
|
||||
#include "lwp_mpu.h"
|
||||
#endif
|
||||
#include "lwp_arch.h"
|
||||
|
||||
#ifdef RT_USING_MUSL
|
||||
#include <locale.h>
|
||||
#endif
|
||||
#ifdef RT_USING_TTY
|
||||
struct tty_struct;
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define LWP_MAGIC           0x5A   /* sanity marker for lwp objects */

/* how the process image was linked/loaded */
#define LWP_TYPE_FIX_ADDR   0x01   /* linked at a fixed address */
#define LWP_TYPE_DYN_ADDR   0x02   /* position independent */

#define LWP_ARG_MAX         8      /* max argv entries passed at spawn */

/* Per-process memory-object bookkeeping: `source` is the address space
 * the mem_obj's pages belong to. */
struct rt_lwp_objs
{
    rt_aspace_t source;
    struct rt_mem_obj mem_obj;
};
|
||||
|
||||
/* A light-weight process: address space, thread group, fd table,
 * signal state, and session/terminal bookkeeping. */
struct rt_lwp
{
#ifdef ARCH_MM_MMU
    size_t end_heap;             /* current program break */
    rt_aspace_t aspace;          /* user address space */
    struct rt_lwp_objs *lwp_obj; /* backing memory objects */
#else
#ifdef ARCH_MM_MPU
    struct rt_mpu_info mpu_info; /* MPU regions when there is no MMU */
#endif /* ARCH_MM_MPU */
#endif

#ifdef RT_USING_SMP
    int bind_cpu;                /* CPU binding -- semantics of the unbound value TODO confirm */
#endif

    uint8_t lwp_type;            /* LWP_TYPE_FIX_ADDR / LWP_TYPE_DYN_ADDR */
    uint8_t reserv[3];           /* padding */

    /* process tree links */
    struct rt_lwp *parent;
    struct rt_lwp *first_child;
    struct rt_lwp *sibling;

    rt_list_t wait_list;         /* waiters blocked on this process */
    int32_t finish;              /* non-zero once the process has exited */
    int lwp_ret;                 /* exit status */

    /* loaded image */
    void *text_entry;
    uint32_t text_size;
    void *data_entry;
    uint32_t data_size;

    int ref;                     /* reference count */
    void *args;                  /* packed argv/envp block */
    uint32_t args_length;
    pid_t pid;
    pid_t __pgrp;                /*Accessed via process_group()*/
    pid_t tty_old_pgrp;
    pid_t session;
    rt_list_t t_grp;             /* threads belonging to this process */

    int leader;                  /*boolean value for session group_leader*/
    struct dfs_fdtable fdt;      /* per-process file descriptor table */
    char cmd[RT_NAME_MAX];       /* command name */

    /* signal state */
    int sa_flags;
    lwp_sigset_t signal;         /* pending signals */
    lwp_sigset_t signal_mask;
    int signal_mask_bak;
    rt_uint32_t signal_in_process;
    lwp_sighandler_t signal_handler[_LWP_NSIG];

    /* kernel objects owned by / addressed in this process */
    struct lwp_avl_struct *object_root;
    struct rt_mutex object_mutex;
    struct rt_user_context user_ctx;

    struct rt_wqueue wait_queue; /*for console */
    struct tty_struct *tty;      /* NULL if no tty */

    struct lwp_avl_struct *address_search_head; /* for addressed object fast rearch */
    char working_directory[DFS_PATH_MAX];
    int debug;                   /* non-zero while being debugged */
    int background;
    uint32_t bak_first_ins;      /* saved instruction -- presumably a debugger breakpoint backup; TODO confirm */

#ifdef LWP_ENABLE_ASID
    uint64_t generation;
    unsigned int asid;           /* address-space id for TLB tagging */
#endif
};
|
||||
|
||||
/* current process (NULL for pure kernel threads -- TODO confirm) */
struct rt_lwp *lwp_self(void);

/* state machine for asking a process to exit */
enum lwp_exit_request_type
{
    LWP_EXIT_REQUEST_NONE = 0,
    LWP_EXIT_REQUEST_TRIGGERED,
    LWP_EXIT_REQUEST_IN_PROCESS,
};
struct termios *get_old_termios(void);
void lwp_setcwd(char *buf);
char *lwp_getcwd(void);
void lwp_request_thread_exit(rt_thread_t thread_to_exit);
int lwp_check_exit_request(void);
void lwp_terminate(struct rt_lwp *lwp);
void lwp_wait_subthread_exit(void);

/* thread-id allocator (bounded by LWP_TID_MAX_NR) */
int lwp_tid_get(void);
void lwp_tid_put(int tid);
rt_thread_t lwp_tid_get_thread(int tid);
void lwp_tid_set_thread(int tid, rt_thread_t thread);

/* strlen on a user-space pointer; *err reports access faults */
size_t lwp_user_strlen(const char *s, int *err);
int lwp_execve(char *filename, int debug, int argc, char **argv, char **envp);

/*create by lwp_setsid.c*/
int setsid(void);
#ifdef ARCH_MM_MMU
void lwp_aspace_switch(struct rt_thread *thread);
#endif
/* save/restore per-thread user state around syscalls */
void lwp_user_setting_save(rt_thread_t thread);
void lwp_user_setting_restore(rt_thread_t thread);
int lwp_setaffinity(pid_t pid, int cpu);
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
struct __pthread {
|
||||
/* Part 1 -- these fields may be external or
|
||||
* * internal (accessed via asm) ABI. Do not change. */
|
||||
struct pthread *self;
|
||||
uintptr_t *dtv;
|
||||
struct pthread *prev, *next; /* non-ABI */
|
||||
uintptr_t sysinfo;
|
||||
uintptr_t canary, canary2;
|
||||
|
||||
/* Part 2 -- implementation details, non-ABI. */
|
||||
int tid;
|
||||
int errno_val;
|
||||
volatile int detach_state;
|
||||
volatile int cancel;
|
||||
volatile unsigned char canceldisable, cancelasync;
|
||||
unsigned char tsd_used:1;
|
||||
unsigned char dlerror_flag:1;
|
||||
unsigned char *map_base;
|
||||
size_t map_size;
|
||||
void *stack;
|
||||
size_t stack_size;
|
||||
size_t guard_size;
|
||||
void *result;
|
||||
struct __ptcb *cancelbuf;
|
||||
void **tsd;
|
||||
struct {
|
||||
volatile void *volatile head;
|
||||
long off;
|
||||
volatile void *volatile pending;
|
||||
} robust_list;
|
||||
volatile int timer_id;
|
||||
locale_t locale;
|
||||
volatile int killlock[1];
|
||||
char *dlerror_buf;
|
||||
void *stdio_locks;
|
||||
|
||||
/* Part 3 -- the positions of these fields relative to
|
||||
* * the end of the structure is external and internal ABI. */
|
||||
uintptr_t canary_at_end;
|
||||
uintptr_t *dtv_copy;
|
||||
};
|
||||
#endif
|
||||
|
||||
/* for futex op */
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1

/* for pmutex op */
#define PMUTEX_INIT 0
#define PMUTEX_LOCK 1
#define PMUTEX_UNLOCK 2
#define PMUTEX_DESTROY 3

#ifdef __cplusplus
}
#endif

/* size of the ELF auxiliary vector passed to user programs */
#define AUX_ARRAY_ITEMS_NR 6

/* aux key (standard ELF auxv AT_* identifiers) */
#define AT_NULL 0
#define AT_IGNORE 1
#define AT_EXECFD 2
#define AT_PHDR 3
#define AT_PHENT 4
#define AT_PHNUM 5
#define AT_PAGESZ 6
#define AT_BASE 7
#define AT_FLAGS 8
#define AT_ENTRY 9
#define AT_NOTELF 10
#define AT_UID 11
#define AT_EUID 12
#define AT_GID 13
#define AT_EGID 14
#define AT_CLKTCK 17
#define AT_PLATFORM 15
#define AT_HWCAP 16
#define AT_FPUCW 18
#define AT_DCACHEBSIZE 19
#define AT_ICACHEBSIZE 20
#define AT_UCACHEBSIZE 21
#define AT_IGNOREPPC 22
#define AT_SECURE 23
#define AT_BASE_PLATFORM 24
#define AT_RANDOM 25
#define AT_HWCAP2 26
#define AT_EXECFN 31

/* one auxv key/value pair */
struct process_aux_item
{
    size_t key;
    size_t value;
};

/* the auxv block placed on a new process's stack */
struct process_aux
{
    struct process_aux_item item[AUX_ARRAY_ITEMS_NR];
};

/* argv/envp bookkeeping while building a new process image */
struct lwp_args_info
{
    char **argv;
    char **envp;
    int argc;
    int envc;
    int size;   /* total bytes consumed by the packed block */
};

/* hook table installed by the optional gdb-stub/debugger component */
struct dbg_ops_t
{
    int (*dbg)(int argc, char **argv);
    uint32_t (*arch_get_ins)(void);
    void (*arch_activate_step)(void);
    void (*arch_deactivate_step)(void);
    int (*check_debug_event)(struct rt_hw_exp_stack *regs, unsigned long esr);
    rt_channel_t (*gdb_get_server_channel)(void);
    int (*gdb_get_step_type)(void);
    void (*lwp_check_debug_attach_req)(void *pc);
    int (*lwp_check_debug_suspend)(void);
};
extern struct dbg_ops_t *rt_dbg_ops;

/* thin wrappers that dispatch through rt_dbg_ops when registered */
int dbg_thread_in_debug(void);
void dbg_register(struct dbg_ops_t *dbg_ops);

uint32_t dbg_get_ins(void);
void dbg_activate_step(void);
void dbg_deactivate_step(void);
int dbg_check_event(struct rt_hw_exp_stack *regs, unsigned long arg);
rt_channel_t gdb_server_channel(void);
int dbg_step_type(void);
void dbg_attach_req(void *pc);
int dbg_check_suspend(void);
void rt_hw_set_process_id(int pid);

#endif
|
57
components/lwp/lwp_arch_comm.h
Normal file
57
components/lwp/lwp_arch_comm.h
Normal file
|
@ -0,0 +1,57 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
*/
|
||||
|
||||
#ifndef __LWP_ARCH_COMM__
#define __LWP_ARCH_COMM__

#include <mm_aspace.h>
#include <rtthread.h>
#include <mmu.h>

/**
 * APIs that must port to all architectures
 */

/* syscall handlers (child-side return paths; defined in lwp_gcc.S) */
void arch_clone_exit(void);
void arch_fork_exit(void);
void arch_syscall_exit();
void arch_ret_to_user();

/* ELF relocation */
#ifdef ARCH_MM_MMU

struct rt_lwp;
void arch_elf_reloc(rt_aspace_t aspace, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, void *dynsym);
#else
void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, void *dynsym);
#endif

/* User entry. enter user program code for the first time */
void arch_crt_start_umode(void *args, const void *text, void *ustack, void *user_stack);
void arch_start_umode(void *args, const void *text, void *ustack, void *k_stack);

/* lwp create and setup */
/* NOTE(review): declared here as returning int with a function-pointer
 * first parameter, but the x86/i386 port defines it as
 * `void arch_set_thread_context(void *exit_addr, ...)` -- the
 * prototypes should be reconciled. */
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
        void *user_stack, void **thread_sp);
void *arch_get_user_sp(void);

/* user space setup and control */
int arch_user_space_init(struct rt_lwp *lwp);
void arch_user_space_free(struct rt_lwp *lwp);
void *arch_kernel_mmu_table_get(void);
void arch_kuser_init(rt_aspace_t aspace, void *vectors);
int arch_expand_user_stack(void *addr);

/* thread id register (user TLS pointer) */
void arch_set_thread_area(void *p);
void* arch_get_tidr(void);
void arch_set_tidr(void *p);

#endif /* __LWP_ARCH_COMM__ */
|
227
components/lwp/lwp_avl.c
Normal file
227
components/lwp/lwp_avl.c
Normal file
|
@ -0,0 +1,227 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-10-12 Jesven first version
|
||||
*/
|
||||
#include <rtthread.h>
|
||||
#include <lwp_avl.h>
|
||||
|
||||
/**
 * Rebalance an AVL tree bottom-up after an insert or remove.
 *
 * @param nodeplaces_ptr pointer just PAST the last entry of a stack of
 *        "node slots" (addresses of parent child-links) recorded on the way
 *        down the tree; entries are popped deepest-node-first.
 * @param count number of entries on that stack.
 */
static void lwp_avl_rebalance(struct lwp_avl_struct ***nodeplaces_ptr, int count)
{
    for (; count > 0; count--)
    {
        /* pop the deepest remaining slot and examine the node stored there */
        struct lwp_avl_struct **nodeplace = *--nodeplaces_ptr;
        struct lwp_avl_struct *node = *nodeplace;
        struct lwp_avl_struct *nodeleft = node->avl_left;
        struct lwp_avl_struct *noderight = node->avl_right;
        int heightleft = heightof(nodeleft);
        int heightright = heightof(noderight);
        if (heightright + 1 < heightleft)
        {
            /* left subtree is two higher than the right one */
            struct lwp_avl_struct *nodeleftleft = nodeleft->avl_left;
            struct lwp_avl_struct *nodeleftright = nodeleft->avl_right;
            int heightleftright = heightof(nodeleftright);
            if (heightof(nodeleftleft) >= heightleftright)
            {
                /* single right rotation: nodeleft becomes the subtree root */
                node->avl_left = nodeleftright;
                nodeleft->avl_right = node;
                nodeleft->avl_height = 1 + (node->avl_height = 1 + heightleftright);
                *nodeplace = nodeleft;
            }
            else
            {
                /* double (left-right) rotation: nodeleftright becomes root */
                nodeleft->avl_right = nodeleftright->avl_left;
                node->avl_left = nodeleftright->avl_right;
                nodeleftright->avl_left = nodeleft;
                nodeleftright->avl_right = node;
                nodeleft->avl_height = node->avl_height = heightleftright;
                nodeleftright->avl_height = heightleft;
                *nodeplace = nodeleftright;
            }
        }
        else if (heightleft + 1 < heightright)
        {
            /* mirror case: right subtree is two higher than the left one */
            struct lwp_avl_struct *noderightright = noderight->avl_right;
            struct lwp_avl_struct *noderightleft = noderight->avl_left;
            int heightrightleft = heightof(noderightleft);
            if (heightof(noderightright) >= heightrightleft)
            {
                /* single left rotation: noderight becomes the subtree root */
                node->avl_right = noderightleft;
                noderight->avl_left = node;
                noderight->avl_height = 1 + (node->avl_height = 1 + heightrightleft);
                *nodeplace = noderight;
            }
            else
            {
                /* double (right-left) rotation: noderightleft becomes root */
                noderight->avl_left = noderightleft->avl_right;
                node->avl_right = noderightleft->avl_left;
                noderightleft->avl_right = noderight;
                noderightleft->avl_left = node;
                noderight->avl_height = node->avl_height = heightrightleft;
                noderightleft->avl_height = heightright;
                *nodeplace = noderightleft;
            }
        }
        else
        {
            /* already balanced: just refresh the cached height; if it did
             * not change, no ancestor's height can change either, so the
             * walk can stop early */
            int height = (heightleft < heightright ? heightright : heightleft) + 1;
            if (height == node->avl_height)
                break;
            node->avl_height = height;
        }
    }
}
|
||||
|
||||
/**
 * Remove @node_to_delete from the AVL tree rooted at *@ptree and rebalance.
 *
 * The node is located by its avl_key; if the key is not present the call is
 * a no-op.  When the node has a left subtree, its in-order predecessor (the
 * rightmost node of the left subtree) is spliced into its place.
 *
 * NOTE(review): lookup is by key only — if multiple nodes could share a key,
 * the first match found is unlinked; confirm keys are unique per tree.
 */
void lwp_avl_remove(struct lwp_avl_struct *node_to_delete, struct lwp_avl_struct **ptree)
{
    avl_key_t key = node_to_delete->avl_key;
    struct lwp_avl_struct **nodeplace = ptree;
    /* stack of parent child-link addresses visited on the way down,
     * consumed later by lwp_avl_rebalance() */
    struct lwp_avl_struct **stack[avl_maxheight];
    uint32_t stack_count = 0;
    struct lwp_avl_struct ***stack_ptr = &stack[0]; /* = &stack[stackcount] */
    struct lwp_avl_struct **nodeplace_to_delete;
    /* standard binary search for the key, recording the path */
    for (;;)
    {
        struct lwp_avl_struct *node = *nodeplace;
        if (node == AVL_EMPTY)
        {
            return; /* key not in tree: nothing to do */
        }

        *stack_ptr++ = nodeplace;
        stack_count++;
        if (key == node->avl_key)
            break;
        if (key < node->avl_key)
            nodeplace = &node->avl_left;
        else
            nodeplace = &node->avl_right;
    }
    nodeplace_to_delete = nodeplace;
    if (node_to_delete->avl_left == AVL_EMPTY)
    {
        /* no left child: splice the right child directly into the slot and
         * drop the deleted node's own entry from the rebalance stack */
        *nodeplace_to_delete = node_to_delete->avl_right;
        stack_ptr--;
        stack_count--;
    }
    else
    {
        /* find the in-order predecessor (rightmost node of left subtree),
         * continuing to record the path for rebalancing */
        struct lwp_avl_struct ***stack_ptr_to_delete = stack_ptr;
        struct lwp_avl_struct **nodeplace = &node_to_delete->avl_left;
        struct lwp_avl_struct *node;
        for (;;)
        {
            node = *nodeplace;
            if (node->avl_right == AVL_EMPTY)
                break;
            *stack_ptr++ = nodeplace;
            stack_count++;
            nodeplace = &node->avl_right;
        }
        /* detach the predecessor, then let it take over the deleted node's
         * children, height, and position in the tree */
        *nodeplace = node->avl_left;
        node->avl_left = node_to_delete->avl_left;
        node->avl_right = node_to_delete->avl_right;
        node->avl_height = node_to_delete->avl_height;
        *nodeplace_to_delete = node;
        /* the stack entry for the deleted node now refers to the
         * replacement's left link */
        *stack_ptr_to_delete = &node->avl_left;
    }
    lwp_avl_rebalance(stack_ptr, stack_count);
}
|
||||
|
||||
void lwp_avl_insert(struct lwp_avl_struct *new_node, struct lwp_avl_struct **ptree)
|
||||
{
|
||||
avl_key_t key = new_node->avl_key;
|
||||
struct lwp_avl_struct **nodeplace = ptree;
|
||||
struct lwp_avl_struct **stack[avl_maxheight];
|
||||
int stack_count = 0;
|
||||
struct lwp_avl_struct ***stack_ptr = &stack[0]; /* = &stack[stackcount] */
|
||||
for (;;)
|
||||
{
|
||||
struct lwp_avl_struct *node = *nodeplace;
|
||||
if (node == AVL_EMPTY)
|
||||
break;
|
||||
*stack_ptr++ = nodeplace;
|
||||
stack_count++;
|
||||
if (key < node->avl_key)
|
||||
nodeplace = &node->avl_left;
|
||||
else
|
||||
nodeplace = &node->avl_right;
|
||||
}
|
||||
new_node->avl_left = AVL_EMPTY;
|
||||
new_node->avl_right = AVL_EMPTY;
|
||||
new_node->avl_height = 1;
|
||||
*nodeplace = new_node;
|
||||
lwp_avl_rebalance(stack_ptr, stack_count);
|
||||
}
|
||||
|
||||
/**
 * Look up @key in the AVL tree rooted at @ptree.
 *
 * @return the matching node, or NULL if the key is not present.
 */
struct lwp_avl_struct *lwp_avl_find(avl_key_t key, struct lwp_avl_struct *ptree)
{
    /* iterative binary search down the tree */
    while (ptree != AVL_EMPTY)
    {
        if (key == ptree->avl_key)
        {
            return ptree;
        }
        ptree = (key < ptree->avl_key) ? ptree->avl_left : ptree->avl_right;
    }
    return (struct lwp_avl_struct *)0;
}
|
||||
|
||||
int lwp_avl_traversal(struct lwp_avl_struct *ptree, int (*fun)(struct lwp_avl_struct *, void *), void *arg)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!ptree)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
if (ptree->avl_left)
|
||||
{
|
||||
ret = lwp_avl_traversal(ptree->avl_left, fun, arg);
|
||||
if (ret != 0)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
ret = (*fun)(ptree, arg);
|
||||
if (ret != 0)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
if (ptree->avl_right)
|
||||
{
|
||||
ret = lwp_avl_traversal(ptree->avl_right, fun, arg);
|
||||
if (ret != 0)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
rt_weak struct lwp_avl_struct* lwp_map_find_first(struct lwp_avl_struct* ptree)
|
||||
{
|
||||
if (ptree == AVL_EMPTY)
|
||||
{
|
||||
return (struct lwp_avl_struct *)0;
|
||||
}
|
||||
while (1)
|
||||
{
|
||||
if (!ptree->avl_left)
|
||||
{
|
||||
break;
|
||||
}
|
||||
ptree = ptree->avl_left;
|
||||
}
|
||||
return ptree;
|
||||
}
|
||||
|
46
components/lwp/lwp_avl.h
Normal file
46
components/lwp/lwp_avl.h
Normal file
|
@ -0,0 +1,46 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-10-12 Jesven first version
|
||||
*/
|
||||
#ifndef LWP_AVL_H__
|
||||
#define LWP_AVL_H__
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
#include <string.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Keys are pointer-sized integers (user-space addresses are used as keys). */
#define avl_key_t size_t
/* Sentinel for "no child" / empty tree. */
#define AVL_EMPTY (struct lwp_avl_struct *)0
/* Maximum descent-path depth recorded during insert/remove. */
#define avl_maxheight 32
/* Cached height of a subtree; an empty subtree has height 0. */
#define heightof(tree) ((tree) == AVL_EMPTY ? 0 : (tree)->avl_height)

/* One node of an intrusive AVL tree; embed it in the owning object and use
 * rt_container_of() to get back to the owner. */
struct lwp_avl_struct
{
    struct lwp_avl_struct *avl_left;
    struct lwp_avl_struct *avl_right;
    int avl_height;     /* cached subtree height, leaf == 1 */
    avl_key_t avl_key;  /* search key; must be set before insert */
    void *data;         /* user payload, not touched by the AVL code */
};

void lwp_avl_remove(struct lwp_avl_struct * node_to_delete, struct lwp_avl_struct ** ptree);
void lwp_avl_insert (struct lwp_avl_struct * new_node, struct lwp_avl_struct ** ptree);
struct lwp_avl_struct* lwp_avl_find(avl_key_t key, struct lwp_avl_struct* ptree);
int lwp_avl_traversal(struct lwp_avl_struct* ptree, int (*fun)(struct lwp_avl_struct*, void *), void *arg);
struct lwp_avl_struct* lwp_map_find_first(struct lwp_avl_struct* ptree);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* LWP_AVL_H__ */
|
119
components/lwp/lwp_dbg.c
Normal file
119
components/lwp/lwp_dbg.c
Normal file
|
@ -0,0 +1,119 @@
|
|||
#include <rtthread.h>
|
||||
#include <rthw.h>
|
||||
#include <lwp.h>
|
||||
|
||||
int dbg_thread_in_debug(void)
|
||||
{
|
||||
int ret = 0;
|
||||
struct rt_lwp *lwp = lwp_self();
|
||||
|
||||
if (lwp && lwp->debug)
|
||||
{
|
||||
ret = 1;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct dbg_ops_t *rt_dbg_ops = RT_NULL;
|
||||
RTM_EXPORT(rt_dbg_ops);
|
||||
|
||||
/**
 * Install the debugger backend operations table consulted by all the
 * dbg_* wrapper functions in this file.
 *
 * @param dbg_ops backend implementation table
 */
void dbg_register(struct dbg_ops_t *dbg_ops)
{
    rt_dbg_ops = dbg_ops;
}
|
||||
RTM_EXPORT(dbg_register);
|
||||
|
||||
static int dbg(int argc, char **argv)
|
||||
{
|
||||
int ret = -1;
|
||||
|
||||
if (rt_dbg_ops)
|
||||
{
|
||||
ret = rt_dbg_ops->dbg(argc, argv);
|
||||
}
|
||||
else
|
||||
{
|
||||
rt_kprintf("Error: DBG command is not enabled!\n");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
MSH_CMD_EXPORT(dbg, dbg);
|
||||
|
||||
uint32_t dbg_get_ins(void)
|
||||
{
|
||||
uint32_t ins = 0;
|
||||
|
||||
if (rt_dbg_ops)
|
||||
{
|
||||
ins = rt_dbg_ops->arch_get_ins();
|
||||
}
|
||||
return ins;
|
||||
}
|
||||
|
||||
void dbg_activate_step(void)
|
||||
{
|
||||
if (rt_dbg_ops)
|
||||
{
|
||||
rt_dbg_ops->arch_activate_step();
|
||||
}
|
||||
}
|
||||
|
||||
void dbg_deactivate_step(void)
|
||||
{
|
||||
if (rt_dbg_ops)
|
||||
{
|
||||
rt_dbg_ops->arch_deactivate_step();
|
||||
}
|
||||
}
|
||||
|
||||
int dbg_check_event(struct rt_hw_exp_stack *regs, unsigned long esr)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (rt_dbg_ops)
|
||||
{
|
||||
ret = rt_dbg_ops->check_debug_event(regs, esr);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
rt_channel_t gdb_server_channel(void)
|
||||
{
|
||||
rt_channel_t ret = RT_NULL;
|
||||
|
||||
if (rt_dbg_ops)
|
||||
{
|
||||
ret = rt_dbg_ops->gdb_get_server_channel();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int dbg_step_type(void)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (rt_dbg_ops)
|
||||
{
|
||||
ret = rt_dbg_ops->gdb_get_step_type();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void dbg_attach_req(void *pc)
|
||||
{
|
||||
if (rt_dbg_ops)
|
||||
{
|
||||
rt_dbg_ops->lwp_check_debug_attach_req(pc);
|
||||
}
|
||||
}
|
||||
|
||||
int dbg_check_suspend(void)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (rt_dbg_ops)
|
||||
{
|
||||
ret = rt_dbg_ops->lwp_check_debug_suspend();
|
||||
}
|
||||
return ret;
|
||||
}
|
3520
components/lwp/lwp_elf.h
Normal file
3520
components/lwp/lwp_elf.h
Normal file
File diff suppressed because it is too large
Load diff
254
components/lwp/lwp_futex.c
Normal file
254
components/lwp/lwp_futex.c
Normal file
|
@ -0,0 +1,254 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021/01/02 bernard the first version
|
||||
*/
|
||||
|
||||
#include <rtthread.h>
|
||||
#include <lwp.h>
|
||||
#ifdef ARCH_MM_MMU
|
||||
#include <lwp_user_mm.h>
|
||||
#endif
|
||||
#include "sys/time.h"
|
||||
|
||||
struct rt_futex
|
||||
{
|
||||
int *uaddr;
|
||||
rt_list_t waiting_thread;
|
||||
struct lwp_avl_struct node;
|
||||
struct rt_object *custom_obj;
|
||||
};
|
||||
|
||||
static struct rt_mutex _futex_lock;
|
||||
|
||||
/* Initialize the single global lock that serializes all futex bookkeeping
 * (creation, lookup, wait/wake hand-off).  Registered as an early initcall
 * via INIT_PREV_EXPORT below. */
static int futex_system_init(void)
{
    rt_mutex_init(&_futex_lock, "futexList", RT_IPC_FLAG_FIFO);
    return 0;
}
|
||||
INIT_PREV_EXPORT(futex_system_init);
|
||||
|
||||
/**
 * Custom-object destructor for a futex: unlink it from its owner's
 * address-keyed AVL tree and free the memory.
 *
 * Note: node.data stores the address of the tree root inside the owning
 * lwp (set in futex_create), which is why it can be cast back to the
 * **root pointer here.
 *
 * @param data the struct rt_futex being destroyed (custom-object payload)
 * @return 0 on success, -1 when @data is NULL
 */
rt_err_t futex_destory(void *data)
{
    rt_err_t ret = -1;
    rt_base_t level;
    struct rt_futex *futex = (struct rt_futex *)data;

    if (futex)
    {
        level = rt_hw_interrupt_disable();
        /* remove futex from futext avl */
        lwp_avl_remove(&futex->node, (struct lwp_avl_struct **)futex->node.data);
        rt_hw_interrupt_enable(level);

        /* release object */
        rt_free(futex);
        ret = 0;
    }
    return ret;
}
|
||||
|
||||
struct rt_futex *futex_create(int *uaddr, struct rt_lwp *lwp)
|
||||
{
|
||||
struct rt_futex *futex = RT_NULL;
|
||||
struct rt_object *obj = RT_NULL;
|
||||
|
||||
if (!lwp)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
futex = (struct rt_futex *)rt_malloc(sizeof(struct rt_futex));
|
||||
if (!futex)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
obj = rt_custom_object_create("futex", (void *)futex, futex_destory);
|
||||
if (!obj)
|
||||
{
|
||||
rt_free(futex);
|
||||
return RT_NULL;
|
||||
}
|
||||
|
||||
futex->uaddr = uaddr;
|
||||
futex->node.avl_key = (avl_key_t)uaddr;
|
||||
futex->node.data = &lwp->address_search_head;
|
||||
futex->custom_obj = obj;
|
||||
rt_list_init(&(futex->waiting_thread));
|
||||
|
||||
/* insert into futex head */
|
||||
lwp_avl_insert(&futex->node, &lwp->address_search_head);
|
||||
return futex;
|
||||
}
|
||||
|
||||
static struct rt_futex *futex_get(void *uaddr, struct rt_lwp *lwp)
|
||||
{
|
||||
struct rt_futex *futex = RT_NULL;
|
||||
struct lwp_avl_struct *node = RT_NULL;
|
||||
|
||||
node = lwp_avl_find((avl_key_t)uaddr, lwp->address_search_head);
|
||||
if (!node)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
futex = rt_container_of(node, struct rt_futex, node);
|
||||
return futex;
|
||||
}
|
||||
|
||||
/**
 * FUTEX_WAIT: block the calling thread on @futex if the futex word still
 * equals @value, otherwise fail immediately.
 *
 * Locking contract: must be entered with _futex_lock held (taken by
 * sys_futex); this function releases it on every path before returning
 * or sleeping.
 *
 * @param futex   kernel futex object for the user address
 * @param value   expected current value of *futex->uaddr
 * @param timeout optional relative timeout; NULL waits forever
 * @return thread->error after wake-up (RT_EOK when woken by futex_wake),
 *         or a negative error when the suspend failed.
 *         NOTE(review): on a value mismatch, errno is set to EAGAIN but the
 *         function returns its initial -RT_EINTR — confirm callers use
 *         errno rather than the return value in that case.
 */
int futex_wait(struct rt_futex *futex, int value, const struct timespec *timeout)
{
    rt_base_t level = 0;
    rt_err_t ret = -RT_EINTR;

    if (*(futex->uaddr) == value)
    {
        rt_thread_t thread = rt_thread_self();

        level = rt_hw_interrupt_disable();
        /* suspend interruptibly so a signal can abort the wait */
        ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);

        if (ret < 0)
        {
            rt_mutex_release(&_futex_lock);
            rt_hw_interrupt_enable(level);
            rt_set_errno(EINTR);
            return ret;
        }

        /* add into waiting thread list */
        rt_list_insert_before(&(futex->waiting_thread), &(thread->tlist));

        /* with timeout */
        if (timeout)
        {
            rt_int32_t time = rt_timespec_to_tick(timeout);

            /* start the timer of thread */
            rt_timer_control(&(thread->thread_timer),
                             RT_TIMER_CTRL_SET_TIME,
                             &time);
            rt_timer_start(&(thread->thread_timer));
        }
        rt_mutex_release(&_futex_lock);
        rt_hw_interrupt_enable(level);

        /* do schedule */
        rt_schedule();

        /* we only run again after futex_wake/timeout/signal; the wake
         * reason was recorded in thread->error */
        ret = thread->error;
        /* check errno */
    }
    else
    {
        /* value changed before we could sleep: EAGAIN per futex semantics */
        rt_mutex_release(&_futex_lock);
        rt_set_errno(EAGAIN);
    }

    return ret;
}
|
||||
|
||||
/**
 * FUTEX_WAKE: wake up to @number threads blocked on @futex.
 *
 * Locking contract: must be entered with _futex_lock held (taken by
 * sys_futex); the lock is released here before rescheduling.
 */
void futex_wake(struct rt_futex *futex, int number)
{
    rt_base_t level = rt_hw_interrupt_disable();
    while (!rt_list_isempty(&(futex->waiting_thread)) && number)
    {
        rt_thread_t thread;

        thread = rt_list_entry(futex->waiting_thread.next, struct rt_thread, tlist);
        /* remove from waiting list */
        rt_list_remove(&(thread->tlist));

        /* tell the sleeper in futex_wait() this was a genuine wake-up */
        thread->error = RT_EOK;
        /* resume the suspended thread */
        rt_thread_resume(thread);

        number--;
    }
    rt_mutex_release(&_futex_lock);
    rt_hw_interrupt_enable(level);

    /* do schedule */
    rt_schedule();
}
|
||||
|
||||
#include <syscall_generic.h>
|
||||
|
||||
/**
 * futex(2) system-call entry.
 *
 * Supported ops are FUTEX_WAIT and FUTEX_WAKE; anything else fails with
 * ENOSYS.  A kernel futex object is created on demand for @uaddr and is
 * owned by the calling process via the lwp user-object mechanism (so it is
 * reclaimed automatically on process exit).
 *
 * Locking: _futex_lock is taken here; for WAIT/WAKE ownership is handed to
 * futex_wait()/futex_wake(), which release it; the default branch releases
 * it itself.
 *
 * @param uaddr   user address of the futex word (validated for access)
 * @param op      futex operation code
 * @param val     compare value (WAIT) or max threads to wake (WAKE)
 * @param timeout optional timeout; validated only when op requires it
 * @param uaddr2  unused by this implementation
 * @param val3    unused by this implementation
 */
sysret_t sys_futex(int *uaddr, int op, int val, const struct timespec *timeout,
                   int *uaddr2, int val3)
{
    struct rt_lwp *lwp = RT_NULL;
    struct rt_futex *futex = RT_NULL;
    int ret = 0;
    rt_err_t lock_ret = 0;

    if (!lwp_user_accessable(uaddr, sizeof(int)))
    {
        rt_set_errno(EINVAL);
        return -RT_EINVAL;
    }

    /**
     * if (op & (FUTEX_WAKE|FUTEX_FD|FUTEX_WAKE_BITSET|FUTEX_TRYLOCK_PI|FUTEX_UNLOCK_PI)) was TRUE
     * `timeout` should be ignored by implementation, according to POSIX futex(2) manual.
     * since only FUTEX_WAKE is implemented in rt-smart, only FUTEX_WAKE was omitted currently
     */
    if (timeout && !(op & (FUTEX_WAKE)))
    {
        if (!lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
        {
            rt_set_errno(EINVAL);
            return -RT_EINVAL;
        }
    }
    lock_ret = rt_mutex_take_interruptible(&_futex_lock, RT_WAITING_FOREVER);
    if (lock_ret != RT_EOK)
    {
        rt_set_errno(EAGAIN);
        return -RT_EINTR;
    }

    lwp = lwp_self();
    futex = futex_get(uaddr, lwp);
    if (futex == RT_NULL)
    {
        /* create a futex according to this uaddr */
        futex = futex_create(uaddr, lwp);
        if (futex == RT_NULL)
        {
            rt_mutex_release(&_futex_lock);
            rt_set_errno(ENOMEM);
            return -RT_ENOMEM;
        }
        if (lwp_user_object_add(lwp, futex->custom_obj) != 0)
        {
            /* destroying the custom object also unlinks and frees the futex */
            rt_custom_object_destroy(futex->custom_obj);
            rt_mutex_release(&_futex_lock);
            rt_set_errno(ENOMEM);
            return -RT_ENOMEM;
        }
    }

    switch (op)
    {
    case FUTEX_WAIT:
        ret = futex_wait(futex, val, timeout);
        /* _futex_lock is released by futex_wait */
        break;

    case FUTEX_WAKE:
        futex_wake(futex, val);
        /* _futex_lock is released by futex_wake */
        break;

    default:
        rt_mutex_release(&_futex_lock);
        rt_set_errno(ENOSYS);
        ret = -ENOSYS;
        break;
    }

    return ret;
}
|
1213
components/lwp/lwp_ipc.c
Normal file
1213
components/lwp/lwp_ipc.c
Normal file
File diff suppressed because it is too large
Load diff
70
components/lwp/lwp_ipc.h
Normal file
70
components/lwp/lwp_ipc.h
Normal file
|
@ -0,0 +1,70 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-10-12 Jesven first version
|
||||
*/
|
||||
|
||||
#ifndef LWP_IPC_H__
|
||||
#define LWP_IPC_H__
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Payload kinds carried by a channel message (stored in rt_channel_msg.type). */
enum
{
    RT_CHANNEL_RAW,     /* u.d is used: a single pointer-sized value */
    RT_CHANNEL_BUFFER,  /* u.b is used: pointer + length */
    RT_CHANNEL_FD       /* u.fd is used: file object + descriptor */
};

/* One message exchanged over an lwp IPC channel. */
struct rt_channel_msg
{
    void *sender;   /* sending entity — presumably filled in by the kernel
                     * on receive; verify against lwp_ipc.c */
    int type;       /* one of the RT_CHANNEL_* kinds above */
    union
    {
        struct chbuf
        {
            void *buf;
            size_t length;
        } b;
        struct chfd
        {
            void *file;
            int fd;
        } fd;
        void* d;
    } u;
};
typedef struct rt_channel_msg *rt_channel_msg_t;
|
||||
|
||||
int rt_channel_open(const char *name, int flags);
|
||||
rt_err_t rt_channel_close(int fd);
|
||||
rt_err_t rt_channel_send(int fd, rt_channel_msg_t data);
|
||||
rt_err_t rt_channel_send_recv(int fd, rt_channel_msg_t data, rt_channel_msg_t data_ret);
|
||||
rt_err_t rt_channel_send_recv_timeout(int fd, rt_channel_msg_t data, rt_channel_msg_t data_ret, rt_int32_t time);
|
||||
rt_err_t rt_channel_reply(int fd, rt_channel_msg_t data);
|
||||
rt_err_t rt_channel_recv(int fd, rt_channel_msg_t data);
|
||||
rt_err_t rt_channel_recv_timeout(int fd, rt_channel_msg_t data, rt_int32_t time);
|
||||
rt_err_t rt_channel_peek(int fd, rt_channel_msg_t data);
|
||||
|
||||
rt_channel_t rt_raw_channel_open(const char *name, int flags);
|
||||
rt_err_t rt_raw_channel_close(rt_channel_t ch);
|
||||
rt_err_t rt_raw_channel_send(rt_channel_t ch, rt_channel_msg_t data);
|
||||
rt_err_t rt_raw_channel_send_recv(rt_channel_t ch, rt_channel_msg_t data, rt_channel_msg_t data_ret);
|
||||
rt_err_t rt_raw_channel_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, rt_channel_msg_t data_ret, rt_int32_t time);
|
||||
rt_err_t rt_raw_channel_reply(rt_channel_t ch, rt_channel_msg_t data);
|
||||
rt_err_t rt_raw_channel_recv(rt_channel_t ch, rt_channel_msg_t data);
|
||||
rt_err_t rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, rt_int32_t time);
|
||||
rt_err_t rt_raw_channel_peek(rt_channel_t ch, rt_channel_msg_t data);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
38
components/lwp/lwp_ipc_internal.h
Normal file
38
components/lwp/lwp_ipc_internal.h
Normal file
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-10-16 Jesven first version
|
||||
*/
|
||||
#ifndef LWP_IPC_INTERNAL_H__
|
||||
#define LWP_IPC_INTERNAL_H__
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
#include <lwp.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
enum
|
||||
{
|
||||
FDT_TYPE_LWP,
|
||||
FDT_TYPE_KERNEL
|
||||
};
|
||||
|
||||
int lwp_channel_open(int fdt_type, const char *name, int flags);
|
||||
rt_err_t lwp_channel_close(int fdt_type, int fd);
|
||||
rt_err_t lwp_channel_send(int fdt_type, int fd, rt_channel_msg_t data);
|
||||
rt_err_t lwp_channel_send_recv_timeout(int fdt_type, int fd, rt_channel_msg_t data, rt_channel_msg_t data_ret, rt_int32_t time);
|
||||
rt_err_t lwp_channel_reply(int fdt_type, int fd, rt_channel_msg_t data);
|
||||
rt_err_t lwp_channel_recv_timeout(int fdt_type, int fd, rt_channel_msg_t data, rt_int32_t time);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* LWP_IPC_INTERNAL_H__*/
|
40
components/lwp/lwp_mm.c
Normal file
40
components/lwp/lwp_mm.c
Normal file
|
@ -0,0 +1,40 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2018, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
#include "lwp_mm.h"
|
||||
|
||||
static rt_mutex_t mm_lock;
|
||||
|
||||
/**
 * Take the global memory-management lock.
 *
 * The mutex is created lazily on first use.  Calls made when there is no
 * current thread (rt_thread_self() == NULL, e.g. very early boot) are
 * silently ignored.
 *
 * NOTE(review): the lazy creation is not guarded — two threads racing
 * through the `!mm_lock` check could each create a mutex, leaking one and
 * locking different objects.  Confirm first use is single-threaded, or
 * protect the creation.
 */
void rt_mm_lock(void)
{
    if (rt_thread_self())
    {
        if (!mm_lock)
        {
            mm_lock = rt_mutex_create("mm_lock", RT_IPC_FLAG_FIFO);
        }
        if (mm_lock)
        {
            rt_mutex_take(mm_lock, RT_WAITING_FOREVER);
        }
    }
}
|
||||
|
||||
void rt_mm_unlock(void)
|
||||
{
|
||||
if (rt_thread_self())
|
||||
{
|
||||
if (mm_lock)
|
||||
{
|
||||
rt_mutex_release(mm_lock);
|
||||
}
|
||||
}
|
||||
}
|
16
components/lwp/lwp_mm.h
Normal file
16
components/lwp/lwp_mm.h
Normal file
|
@ -0,0 +1,16 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
*/
|
||||
|
||||
#ifndef __LWP_MM_H__
|
||||
#define __LWP_MM_H__
|
||||
|
||||
void rt_mm_lock(void);
|
||||
void rt_mm_unlock(void);
|
||||
|
||||
#endif /*__LWP_MM_H__*/
|
1124
components/lwp/lwp_pid.c
Normal file
1124
components/lwp/lwp_pid.c
Normal file
File diff suppressed because it is too large
Load diff
52
components/lwp/lwp_pid.h
Normal file
52
components/lwp/lwp_pid.h
Normal file
|
@ -0,0 +1,52 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2020-02-23 Jesven first version.
|
||||
*/
|
||||
|
||||
#ifndef LWP_PID_H__
|
||||
#define LWP_PID_H__
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct rt_lwp;
|
||||
|
||||
struct lwp_avl_struct *lwp_get_pid_ary(void);
|
||||
|
||||
struct rt_lwp* lwp_new(void);
|
||||
void lwp_free(struct rt_lwp* lwp);
|
||||
|
||||
int lwp_ref_inc(struct rt_lwp *lwp);
|
||||
int lwp_ref_dec(struct rt_lwp *lwp);
|
||||
|
||||
struct rt_lwp* lwp_from_pid(pid_t pid);
|
||||
pid_t lwp_to_pid(struct rt_lwp* lwp);
|
||||
|
||||
pid_t lwp_name2pid(const char* name);
|
||||
char* lwp_pid2name(int32_t pid);
|
||||
|
||||
int lwp_getpid(void);
|
||||
|
||||
pid_t waitpid(pid_t pid, int *status, int options);
|
||||
long list_process(void);
|
||||
|
||||
void lwp_user_object_lock_init(struct rt_lwp *lwp);
|
||||
void lwp_user_object_lock_destroy(struct rt_lwp *lwp);
|
||||
void lwp_user_object_lock(struct rt_lwp *lwp);
|
||||
void lwp_user_object_unlock(struct rt_lwp *lwp);
|
||||
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object);
|
||||
rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object);
|
||||
void lwp_user_object_clear(struct rt_lwp *lwp);
|
||||
void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
455
components/lwp/lwp_pmutex.c
Normal file
455
components/lwp/lwp_pmutex.c
Normal file
|
@ -0,0 +1,455 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021/01/02 bernard the first version
|
||||
* 2022/12/18 bernard fix the _m_lock to tid in user land.
|
||||
*/
|
||||
|
||||
#include <rtthread.h>
|
||||
#include <lwp.h>
|
||||
#ifdef ARCH_MM_MMU
|
||||
#include <lwp_user_mm.h>
|
||||
#endif
|
||||
#include <sys/time.h>
|
||||
|
||||
/* Mutex kinds mirroring the musl/pthread mutex types (low 2 bits of the
 * userspace mutex word). */
#define PMUTEX_NORMAL 0 /* Unable to recursion */
#define PMUTEX_RECURSIVE 1 /* Can be recursion */
#define PMUTEX_ERRORCHECK 2 /* This type of mutex provides error checking */

/* Kernel-side object backing one userspace pthread mutex, keyed in the
 * owning lwp's AVL tree by the mutex's user address. */
struct rt_pmutex
{
    union
    {
        rt_mutex_t kmutex;
        rt_sem_t ksem; /* use sem to emulate the mutex without recursive */
    } lock;

    struct lwp_avl_struct node;     /* avl_key = user address of the mutex */
    struct rt_object *custom_obj;   /* ties lifetime to the process */
    rt_uint8_t type; /* pmutex type */
};

/*
 * userspace mutex definitions in musl
 */
struct rt_umutex
{
    union
    {
        int __i[6];
        volatile int __vi[6];
        volatile void *volatile __p[6];
    } __u;
};
/* Field accessors matching musl's pthread_mutex_t layout. */
#define _m_type __u.__i[0]
#define _m_lock __u.__vi[1]
#define _m_waiters __u.__vi[2]
#define _m_prev __u.__p[3]
#define _m_next __u.__p[4]
#define _m_count __u.__i[5]
|
||||
|
||||
static struct rt_mutex _pmutex_lock;
|
||||
|
||||
/* Initialize the global lock serializing pmutex creation and lookup.
 * Registered as an early initcall via INIT_PREV_EXPORT below. */
static int pmutex_system_init(void)
{
    rt_mutex_init(&_pmutex_lock, "pmtxLock", RT_IPC_FLAG_FIFO);
    return 0;
}
|
||||
INIT_PREV_EXPORT(pmutex_system_init);
|
||||
|
||||
/**
 * Custom-object destructor for a pmutex: unlink it from the owning lwp's
 * AVL tree, delete the backing kernel semaphore/mutex, and free the memory.
 *
 * node.data stores the address of the tree root inside the owning lwp
 * (set in pmutex_create), hence the cast back to the **root pointer.
 *
 * @param data the struct rt_pmutex being destroyed (custom-object payload)
 * @return 0 on success, -1 when @data is NULL
 */
static rt_err_t pmutex_destory(void *data)
{
    rt_err_t ret = -1;
    rt_base_t level = 0;
    struct rt_pmutex *pmutex = (struct rt_pmutex *)data;

    if (pmutex)
    {
        level = rt_hw_interrupt_disable();
        /* remove pmutex from pmutext avl */
        lwp_avl_remove(&pmutex->node, (struct lwp_avl_struct **)pmutex->node.data);
        rt_hw_interrupt_enable(level);

        /* PMUTEX_NORMAL is backed by a semaphore, the other types by a
         * kernel mutex — delete whichever member of the union is live */
        if (pmutex->type == PMUTEX_NORMAL)
        {
            rt_sem_delete(pmutex->lock.ksem);
        }
        else
        {
            rt_mutex_delete(pmutex->lock.kmutex);
        }

        /* release object */
        rt_free(pmutex);
        ret = 0;
    }
    return ret;
}
|
||||
|
||||
static struct rt_pmutex* pmutex_create(void *umutex, struct rt_lwp *lwp)
|
||||
{
|
||||
struct rt_pmutex *pmutex = RT_NULL;
|
||||
struct rt_object *obj = RT_NULL;
|
||||
rt_ubase_t type;
|
||||
|
||||
if (!lwp)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
|
||||
long *p = (long *)umutex;
|
||||
/* umutex[0] bit[0-1] saved mutex type */
|
||||
type = *p & 3;
|
||||
if (type != PMUTEX_NORMAL && type != PMUTEX_RECURSIVE && type != PMUTEX_ERRORCHECK)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
|
||||
pmutex = (struct rt_pmutex *)rt_malloc(sizeof(struct rt_pmutex));
|
||||
if (!pmutex)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
|
||||
if (type == PMUTEX_NORMAL)
|
||||
{
|
||||
pmutex->lock.ksem = rt_sem_create("pmutex", 1, RT_IPC_FLAG_PRIO);
|
||||
if (!pmutex->lock.ksem)
|
||||
{
|
||||
rt_free(pmutex);
|
||||
return RT_NULL;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
pmutex->lock.kmutex = rt_mutex_create("pmutex", RT_IPC_FLAG_PRIO);
|
||||
if (!pmutex->lock.kmutex)
|
||||
{
|
||||
rt_free(pmutex);
|
||||
return RT_NULL;
|
||||
}
|
||||
}
|
||||
|
||||
obj = rt_custom_object_create("pmutex", (void *)pmutex, pmutex_destory);
|
||||
if (!obj)
|
||||
{
|
||||
if (pmutex->type == PMUTEX_NORMAL)
|
||||
{
|
||||
rt_sem_delete(pmutex->lock.ksem);
|
||||
}
|
||||
else
|
||||
{
|
||||
rt_mutex_delete(pmutex->lock.kmutex);
|
||||
}
|
||||
rt_free(pmutex);
|
||||
return RT_NULL;
|
||||
}
|
||||
pmutex->node.avl_key = (avl_key_t)umutex;
|
||||
pmutex->node.data = &lwp->address_search_head;
|
||||
pmutex->custom_obj = obj;
|
||||
pmutex->type = type;
|
||||
|
||||
/* insert into pmutex head */
|
||||
lwp_avl_insert(&pmutex->node, &lwp->address_search_head);
|
||||
return pmutex;
|
||||
}
|
||||
|
||||
static struct rt_pmutex* pmutex_get(void *umutex, struct rt_lwp *lwp)
|
||||
{
|
||||
struct rt_pmutex *pmutex = RT_NULL;
|
||||
struct lwp_avl_struct *node = RT_NULL;
|
||||
|
||||
node = lwp_avl_find((avl_key_t)umutex, lwp->address_search_head);
|
||||
if (!node)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
pmutex = rt_container_of(node, struct rt_pmutex, node);
|
||||
return pmutex;
|
||||
}
|
||||
|
||||
static int _pthread_mutex_init(void *umutex)
|
||||
{
|
||||
struct rt_lwp *lwp = RT_NULL;
|
||||
struct rt_pmutex *pmutex = RT_NULL;
|
||||
rt_err_t lock_ret = 0;
|
||||
|
||||
/* umutex union is 6 x (void *) */
|
||||
if (!lwp_user_accessable(umutex, sizeof(void *) * 6))
|
||||
{
|
||||
rt_set_errno(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
|
||||
if (lock_ret != RT_EOK)
|
||||
{
|
||||
rt_set_errno(EAGAIN);
|
||||
return -EINTR;
|
||||
}
|
||||
|
||||
lwp = lwp_self();
|
||||
pmutex = pmutex_get(umutex, lwp);
|
||||
if (pmutex == RT_NULL)
|
||||
{
|
||||
/* create a pmutex according to this umutex */
|
||||
pmutex = pmutex_create(umutex, lwp);
|
||||
if (pmutex == RT_NULL)
|
||||
{
|
||||
rt_mutex_release(&_pmutex_lock);
|
||||
rt_set_errno(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (lwp_user_object_add(lwp, pmutex->custom_obj) != 0)
|
||||
{
|
||||
rt_custom_object_destroy(pmutex->custom_obj);
|
||||
rt_set_errno(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
rt_base_t level = rt_hw_interrupt_disable();
|
||||
|
||||
if (pmutex->type == PMUTEX_NORMAL)
|
||||
{
|
||||
pmutex->lock.ksem->value = 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
pmutex->lock.kmutex->owner = RT_NULL;
|
||||
pmutex->lock.kmutex->priority = 0xFF;
|
||||
pmutex->lock.kmutex->hold = 0;
|
||||
pmutex->lock.kmutex->ceiling_priority = 0xFF;
|
||||
}
|
||||
rt_hw_interrupt_enable(level);
|
||||
}
|
||||
|
||||
rt_mutex_release(&_pmutex_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _pthread_mutex_lock_timeout(void *umutex, struct timespec *timeout)
|
||||
{
|
||||
struct rt_lwp *lwp = RT_NULL;
|
||||
struct rt_pmutex *pmutex = RT_NULL;
|
||||
struct rt_umutex *umutex_p = (struct rt_umutex*)umutex;
|
||||
rt_err_t lock_ret = 0;
|
||||
rt_int32_t time = RT_WAITING_FOREVER;
|
||||
register rt_base_t temp;
|
||||
|
||||
if (!lwp_user_accessable((void *)umutex, sizeof(struct rt_umutex)))
|
||||
{
|
||||
rt_set_errno(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (timeout)
|
||||
{
|
||||
if (!lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
|
||||
{
|
||||
rt_set_errno(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
time = rt_timespec_to_tick(timeout);
|
||||
}
|
||||
|
||||
lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
|
||||
if (lock_ret != RT_EOK)
|
||||
{
|
||||
rt_set_errno(EAGAIN);
|
||||
return -EINTR;
|
||||
}
|
||||
|
||||
lwp = lwp_self();
|
||||
pmutex = pmutex_get(umutex, lwp);
|
||||
if (pmutex == RT_NULL)
|
||||
{
|
||||
rt_mutex_release(&_pmutex_lock);
|
||||
rt_set_errno(EINVAL);
|
||||
return -ENOMEM; /* umutex not recored in kernel */
|
||||
}
|
||||
|
||||
rt_mutex_release(&_pmutex_lock);
|
||||
|
||||
switch (pmutex->type)
|
||||
{
|
||||
case PMUTEX_NORMAL:
|
||||
lock_ret = rt_sem_take_interruptible(pmutex->lock.ksem, time);
|
||||
break;
|
||||
case PMUTEX_RECURSIVE:
|
||||
lock_ret = rt_mutex_take_interruptible(pmutex->lock.kmutex, time);
|
||||
if (lock_ret == RT_EOK)
|
||||
{
|
||||
umutex_p->_m_lock = rt_thread_self()->tid;
|
||||
}
|
||||
break;
|
||||
case PMUTEX_ERRORCHECK:
|
||||
temp = rt_hw_interrupt_disable();
|
||||
if (pmutex->lock.kmutex->owner == rt_thread_self())
|
||||
{
|
||||
/* enable interrupt */
|
||||
rt_hw_interrupt_enable(temp);
|
||||
return -EDEADLK;
|
||||
}
|
||||
lock_ret = rt_mutex_take_interruptible(pmutex->lock.kmutex, time);
|
||||
if (lock_ret == RT_EOK)
|
||||
{
|
||||
umutex_p->_m_lock = rt_thread_self()->tid;
|
||||
}
|
||||
rt_hw_interrupt_enable(temp);
|
||||
break;
|
||||
default: /* unknown type */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (lock_ret != RT_EOK)
|
||||
{
|
||||
if (lock_ret == -RT_ETIMEOUT)
|
||||
{
|
||||
if (time == 0) /* timeout is 0, means try lock failed */
|
||||
{
|
||||
rt_set_errno(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
else
|
||||
{
|
||||
rt_set_errno(ETIMEDOUT);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
rt_set_errno(EAGAIN);
|
||||
return -EAGAIN;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _pthread_mutex_unlock(void *umutex)
|
||||
{
|
||||
rt_err_t lock_ret = 0;
|
||||
struct rt_lwp *lwp = RT_NULL;
|
||||
struct rt_pmutex *pmutex = RT_NULL;
|
||||
struct rt_umutex *umutex_p = (struct rt_umutex*)umutex;
|
||||
|
||||
lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
|
||||
if (lock_ret != RT_EOK)
|
||||
{
|
||||
rt_set_errno(EAGAIN);
|
||||
return -EINTR;
|
||||
}
|
||||
|
||||
lwp = lwp_self();
|
||||
pmutex = pmutex_get(umutex, lwp);
|
||||
if (pmutex == RT_NULL)
|
||||
{
|
||||
rt_mutex_release(&_pmutex_lock);
|
||||
rt_set_errno(EPERM);
|
||||
return -EPERM;//unlock static mutex of unlock state
|
||||
}
|
||||
|
||||
rt_mutex_release(&_pmutex_lock);
|
||||
|
||||
switch (pmutex->type)
|
||||
{
|
||||
case PMUTEX_NORMAL:
|
||||
if(pmutex->lock.ksem->value >=1)
|
||||
{
|
||||
rt_set_errno(EPERM);
|
||||
return -EPERM;//unlock dynamic mutex of unlock state
|
||||
}
|
||||
else
|
||||
{
|
||||
lock_ret = rt_sem_release(pmutex->lock.ksem);
|
||||
}
|
||||
break;
|
||||
case PMUTEX_RECURSIVE:
|
||||
case PMUTEX_ERRORCHECK:
|
||||
lock_ret = rt_mutex_release(pmutex->lock.kmutex);
|
||||
if ((lock_ret == RT_EOK) && pmutex->lock.kmutex->owner == NULL)
|
||||
{
|
||||
umutex_p->_m_lock = 0;
|
||||
}
|
||||
break;
|
||||
default: /* unknown type */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (lock_ret != RT_EOK)
|
||||
{
|
||||
rt_set_errno(EPERM);
|
||||
return -EAGAIN;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Destroy the kernel pmutex bound to 'umutex'.
 * Removes it from the process's user-object list; presumably the custom
 * object's destructor frees the underlying kernel semaphore/mutex --
 * confirm against lwp_user_object_delete().
 * Returns 0 on success, a negative errno (errno also set) on failure. */
static int _pthread_mutex_destroy(void *umutex)
{
    struct rt_lwp *lwp = RT_NULL;
    struct rt_pmutex *pmutex = RT_NULL;
    rt_err_t lock_ret = 0;

    /* serialize against concurrent init/lock/destroy on the same umutex */
    lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
    if (lock_ret != RT_EOK)
    {
        rt_set_errno(EAGAIN);
        return -EINTR;
    }

    lwp = lwp_self();
    pmutex = pmutex_get(umutex, lwp);
    if (pmutex == RT_NULL)
    {
        /* never initialized (or already destroyed) */
        rt_mutex_release(&_pmutex_lock);
        rt_set_errno(EINVAL);
        return -EINVAL;
    }

    lwp_user_object_delete(lwp, pmutex->custom_obj);
    rt_mutex_release(&_pmutex_lock);

    return 0;
}
|
||||
|
||||
#include <syscall_generic.h>
|
||||
|
||||
sysret_t sys_pmutex(void *umutex, int op, void *arg)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
||||
switch (op)
|
||||
{
|
||||
case PMUTEX_INIT:
|
||||
ret = _pthread_mutex_init(umutex);
|
||||
break;
|
||||
case PMUTEX_LOCK:
|
||||
ret = _pthread_mutex_lock_timeout(umutex, (struct timespec*)arg);
|
||||
if (ret == -ENOMEM)
|
||||
{
|
||||
/* lock not init, try init it and lock again. */
|
||||
ret = _pthread_mutex_init(umutex);
|
||||
if (ret == 0)
|
||||
{
|
||||
ret = _pthread_mutex_lock_timeout(umutex, (struct timespec*)arg);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case PMUTEX_UNLOCK:
|
||||
ret = _pthread_mutex_unlock(umutex);
|
||||
break;
|
||||
case PMUTEX_DESTROY:
|
||||
ret = _pthread_mutex_destroy(umutex);
|
||||
break;
|
||||
default:
|
||||
rt_set_errno(EINVAL);
|
||||
break;
|
||||
}
|
||||
return ret;
|
||||
}
|
27
components/lwp/lwp_setsid.c
Normal file
27
components/lwp/lwp_setsid.c
Normal file
|
@ -0,0 +1,27 @@
|
|||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#include "lwp.h"
|
||||
//#include "lwp_tid.h"
|
||||
#include "lwp_pid.h"
|
||||
|
||||
/* Create a new session: the calling thread's tid becomes both the session
 * id and the process-group id, the process becomes session leader, and the
 * controlling terminal is dropped.
 * Returns the new session id, or -EPERM when the caller already leads
 * this session.
 * NOTE(review): current_lwp is dereferenced without a NULL check --
 * presumably this syscall is only reachable from user threads, which
 * always carry an lwp; confirm at the syscall entry path. */
int setsid(void)
{
    int err = -EPERM;
    struct rt_thread *current_thread = rt_thread_self();
    struct rt_lwp *current_lwp = (struct rt_lwp *)rt_thread_self()->lwp;

    /* already the leader of this session? */
    if (current_lwp->session == current_thread->tid)
    {
        return err;
    }

    current_lwp->session = current_thread->tid;
    current_lwp->__pgrp = current_thread->tid;
    current_lwp->leader = 1;
    current_lwp->tty = RT_NULL;       /* detach from the controlling tty */
    current_lwp->tty_old_pgrp = 0;

    err = current_lwp->session;
    return err;
}
|
466
components/lwp/lwp_shm.c
Normal file
466
components/lwp/lwp_shm.c
Normal file
|
@ -0,0 +1,466 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-10-12 Jesven first version
|
||||
* 2023-02-20 wangxiaoyao adapt to mm
|
||||
*/
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
#include <lwp.h>
|
||||
#include <lwp_shm.h>
|
||||
#include <lwp_mm.h>
|
||||
|
||||
#include <lwp_user_mm.h>
|
||||
#include <mmu.h>
|
||||
|
||||
/* the kernel structure to represent a share-memory */
struct lwp_shm_struct
{
    struct rt_mem_obj mem_obj;  /* mapping callbacks (page fault / open / close) */
    size_t addr; /* point to the next item in the free list when not used */
    size_t size;                /* size in bytes, rounded up to a power-of-two page count */
    int ref;                    /* number of vareas currently mapping this segment */
    size_t key;                 /* user-supplied lookup key */
};

/* segments indexed by user key */
static struct lwp_avl_struct *shm_tree_key;
/* segments indexed by start address (see NOTE in _lwp_shminfo about
 * whether p->addr is virtual or physical) */
static struct lwp_avl_struct *shm_tree_pa;

static int shm_free_list = -1; /* the single-direct list of freed items */
static int shm_id_used = 0; /* the latest allocated item in the array */
static struct lwp_shm_struct _shm_ary[RT_LWP_SHM_MAX_NR];
|
||||
|
||||
/* rt_mem_obj callback: name reported for shm vareas in memory listings. */
static const char *get_shm_name(rt_varea_t varea)
{
    return "user.shm";
}

/* rt_mem_obj callback: a varea starts using this segment -> take a reference. */
static void on_shm_varea_open(struct rt_varea *varea)
{
    struct lwp_shm_struct *shm;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);
    shm->ref += 1;
}

/* rt_mem_obj callback: a varea stops using this segment -> drop a reference. */
static void on_shm_varea_close(struct rt_varea *varea)
{
    struct lwp_shm_struct *shm;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);
    shm->ref -= 1;
}
|
||||
|
||||
/* rt_mem_obj fault handler: on the first access, map the entire backing
 * region into the faulting varea in one shot (pre-fault everything rather
 * than page-by-page). */
static void on_shm_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    struct lwp_shm_struct *shm;
    int err;
    shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);

    /* map all share page frames to user space in a time */
    void *page = (void *)shm->addr;
    void *pg_paddr = (char *)page + PV_OFFSET;  /* kernel vaddr -> physical */
    err = rt_varea_map_range(varea, varea->start, pg_paddr, shm->size);

    if (err == RT_EOK)
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
        msg->response.size = shm->size;
        msg->response.vaddr = page;
    }
    /* NOTE(review): on mapping failure the response status is left
     * untouched -- presumably the caller pre-initializes it to a failing
     * value; confirm against the aspace fault dispatcher. */

    return ;
}
|
||||
|
||||
/*
|
||||
* Try to allocate an structure 'lwp_shm_struct' from the freed list or the
|
||||
* static array.
|
||||
*/
|
||||
static int _shm_id_alloc(void)
{
    int id = -1;    /* -1 means: no slot available */

    if (shm_free_list != -1) /* first try the freed list */
    {
        id = shm_free_list;
        /* 'addr' doubles as the next-free link while a slot is unused */
        shm_free_list = (int)_shm_ary[shm_free_list].addr; /* single-direction */
    }
    else if (shm_id_used < RT_LWP_SHM_MAX_NR) /* then try the array */
    {
        id = shm_id_used;
        shm_id_used++;
    }
    return id;
}

/* Release the item in the static array to the freed list. */
static void shm_id_free(int id)
{
    /* link the freed item to the single-direction list */
    _shm_ary[id].addr = (size_t)shm_free_list;
    shm_free_list = id;
}
|
||||
|
||||
/* Locate the shared memory through 'key' or create a new one. */
|
||||
/* Look up the segment registered under 'key'; when absent and 'create' is
 * non-zero, allocate pages and register a new one.
 * Returns the segment id (index into _shm_ary), or -1 on failure.
 * Caller must hold the mm lock (see lwp_shmget). */
static int _lwp_shmget(size_t key, size_t size, int create)
{
    int id = -1;
    struct lwp_avl_struct *node_key = 0;
    struct lwp_avl_struct *node_pa = 0;
    void *page_addr = 0;
    uint32_t bit = 0;

    /* try to locate the item with the key in the binary tree */
    node_key = lwp_avl_find(key, shm_tree_key);
    if (node_key)
    {
        /* pointer difference against the array base yields the id */
        return (struct lwp_shm_struct *)node_key->data - _shm_ary; /* the index */
    }

    /* If there doesn't exist such an item and we're allowed to create one ... */
    if (create)
    {
        struct lwp_shm_struct* p;

        if (!size)
        {
            goto err;
        }

        id = _shm_id_alloc();
        if (id == -1)
        {
            goto err;
        }

        /* allocate pages up to 2's exponent to cover the required size */
        bit = rt_page_bits(size);
        page_addr = rt_pages_alloc_ext(bit, PAGE_ANY_AVAILABLE); /* virtual address */
        if (!page_addr)
        {
            goto err;
        }

        /* initialize the shared memory structure */
        p = _shm_ary + id;
        p->addr = (size_t)page_addr;
        p->size = (1UL << (bit + ARCH_PAGE_SHIFT)); /* actual (rounded-up) size */
        p->ref = 0;
        p->key = key;
        p->mem_obj.get_name = get_shm_name;
        p->mem_obj.on_page_fault = on_shm_page_fault;
        p->mem_obj.on_varea_open = on_shm_varea_open;
        p->mem_obj.on_varea_close = on_shm_varea_close;
        p->mem_obj.hint_free = NULL;
        p->mem_obj.on_page_offload = NULL;

        /* then insert it into the balancing binary tree */
        /* one allocation backs BOTH tree nodes: node_key and node_key + 1 */
        node_key = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct) * 2);
        if (!node_key)
        {
            goto err;
        }
        node_key->avl_key = p->key;
        node_key->data = (void *)p;
        lwp_avl_insert(node_key, &shm_tree_key);
        node_pa = node_key + 1;
        node_pa->avl_key = p->addr;
        node_pa->data = (void *)p;
        lwp_avl_insert(node_pa, &shm_tree_pa);
    }
    return id;

err:
    /* roll back whatever was acquired before the failure */
    if (id != -1)
    {
        shm_id_free(id);
    }
    if (page_addr)
    {
        rt_pages_free(page_addr, bit);
    }
    if (node_key)
    {
        rt_free(node_key);
    }
    return -1;
}
|
||||
|
||||
/* A wrapping function, get the shared memory with interrupts disabled. */
|
||||
int lwp_shmget(size_t key, size_t size, int create)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
rt_mm_lock();
|
||||
ret = _lwp_shmget(key, size, create);
|
||||
rt_mm_unlock();
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Locate the binary tree node_key corresponding to the shared-memory id. */
|
||||
static struct lwp_avl_struct *shm_id_to_node(int id)
|
||||
{
|
||||
struct lwp_avl_struct *node_key = 0;
|
||||
struct lwp_shm_struct *p = RT_NULL;
|
||||
|
||||
/* check id */
|
||||
if (id < 0 || id >= RT_LWP_SHM_MAX_NR)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
|
||||
p = _shm_ary + id; /* the address of the shared-memory structure */
|
||||
node_key = lwp_avl_find(p->key, shm_tree_key);
|
||||
if (!node_key)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
if (node_key->data != (void *)p)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
return node_key;
|
||||
}
|
||||
|
||||
/* Free the shared pages, the shared-memory structure and its binary tree node_key. */
static int _lwp_shmrm(int id)
{
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_avl_struct *node_pa = RT_NULL;
    struct lwp_shm_struct* p = RT_NULL;
    uint32_t bit = 0;

    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return -1;
    }
    p = (struct lwp_shm_struct *)node_key->data;
    if (p->ref)
    {
        /* still mapped somewhere: report success but keep the segment */
        return 0;
    }
    bit = rt_page_bits(p->size);
    rt_pages_free((void *)p->addr, bit);
    lwp_avl_remove(node_key, &shm_tree_key);
    /* node_pa shares node_key's allocation (node_key + 1), so removing it
     * from its tree and freeing node_key releases both */
    node_pa = node_key + 1;
    lwp_avl_remove(node_pa, &shm_tree_pa);
    rt_free(node_key);
    shm_id_free(id);
    return 0;
}
|
||||
|
||||
/* Public wrapper: free the shared memory with the mm lock held.
 * Returns 0 on success (also when the segment is still referenced),
 * -1 when the id is invalid. */
int lwp_shmrm(int id)
{
    int ret = 0;

    /* BUGFIX: take the mm lock like the other lwp_shm* wrappers do --
     * _lwp_shmrm() walks and modifies the shared AVL trees that
     * lwp_shmget() updates under this same lock */
    rt_mm_lock();
    ret = _lwp_shmrm(id);
    rt_mm_unlock();

    return ret;
}
|
||||
|
||||
/* Map the shared memory specified by 'id' to the specified virtual address. */
static void *_lwp_shmat(int id, void *shm_vaddr)
{
    int err;
    struct rt_lwp *lwp = RT_NULL;
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_shm_struct *p = RT_NULL;
    void *va = shm_vaddr;   /* NULL lets rt_aspace_map pick an address */

    /* The id is used to locate the node_key in the binary tree, and then get the
     * shared-memory structure linked to the node_key. We don't use the id to refer
     * to the shared-memory structure directly, because the binary tree is used
     * to verify the structure is really in use.
     */
    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return RT_NULL;
    }
    p = (struct lwp_shm_struct *)node_key->data; /* p = _shm_ary[id]; */

    /* map the shared memory into the address space of the current thread */
    lwp = lwp_self();
    if (!lwp)
    {
        return RT_NULL;
    }

    /* MMF_PREFETCH: the fault handler maps the whole range on first touch */
    err = rt_aspace_map(lwp->aspace, &va, p->size, MMU_MAP_U_RWCB, MMF_PREFETCH,
                        &p->mem_obj, 0);
    if (err != RT_EOK)
    {
        va = RT_NULL;
    }
    return va;   /* user virtual address of the mapping, or RT_NULL */
}
|
||||
|
||||
/* A wrapping function: attach the shared memory to the specified address. */
void *lwp_shmat(int id, void *shm_vaddr)
{
    void *ret = RT_NULL;

    /* a non-NULL address hint must be page aligned */
    if (((size_t)shm_vaddr & ARCH_PAGE_MASK) != 0)
    {
        return RT_NULL;
    }

    /* NOTE(review): unlike the other lwp_shm* wrappers this one does not
     * take rt_mm_lock(); presumably rt_aspace_map() does its own locking,
     * but the unlocked shm_id_to_node() tree lookup may race with
     * lwp_shmget() -- confirm. */
    ret = _lwp_shmat(id, shm_vaddr);

    return ret;
}
|
||||
|
||||
static struct lwp_shm_struct *_lwp_shm_struct_get(struct rt_lwp *lwp, void *shm_vaddr)
|
||||
{
|
||||
void *pa = RT_NULL;
|
||||
struct lwp_avl_struct *node_pa = RT_NULL;
|
||||
|
||||
if (!lwp)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
pa = lwp_v2p(lwp, shm_vaddr); /* physical memory */
|
||||
|
||||
node_pa = lwp_avl_find((size_t)pa, shm_tree_pa);
|
||||
if (!node_pa)
|
||||
{
|
||||
return RT_NULL;
|
||||
}
|
||||
return (struct lwp_shm_struct *)node_pa->data;
|
||||
}
|
||||
|
||||
/* Bump the reference count of the segment mapped at 'shm_vaddr'.
 * Returns the new count, or -1 when the address is not a shm mapping. */
static int _lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr)
{
    struct lwp_shm_struct* p = _lwp_shm_struct_get(lwp, shm_vaddr);

    if (p)
    {
        p->ref++;
        return p->ref;
    }
    return -1;
}

/* Locked wrapper of _lwp_shm_ref_inc(). */
int lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr)
{
    int ret = 0;

    rt_mm_lock();
    ret = _lwp_shm_ref_inc(lwp, shm_vaddr);
    rt_mm_unlock();

    return ret;
}
|
||||
|
||||
/* Drop one reference of the segment mapped at 'shm_vaddr'.
 * Returns the new count, or -1 when the address is not a shm mapping or
 * the count is already zero. */
static int _lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr)
{
    struct lwp_shm_struct* p = _lwp_shm_struct_get(lwp, shm_vaddr);

    if (p && (p->ref > 0))
    {
        p->ref--;
        return p->ref;
    }
    return -1;
}

/* Locked wrapper of _lwp_shm_ref_dec(). */
int lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr)
{
    int ret = 0;

    rt_mm_lock();
    ret = _lwp_shm_ref_dec(lwp, shm_vaddr);
    rt_mm_unlock();

    return ret;
}
|
||||
|
||||
/* Unmap the shared memory from the address space of the current thread. */
int _lwp_shmdt(void *shm_vaddr)
{
    struct rt_lwp *lwp = RT_NULL;
    int ret = 0;

    lwp = lwp_self();
    if (!lwp)
    {
        return -1;
    }

    /* unmapping closes the varea, which presumably drops the segment's
     * reference via on_shm_varea_close -- confirm */
    ret = rt_aspace_unmap(lwp->aspace, shm_vaddr);
    if (ret != RT_EOK)
    {
        ret = -1;   /* normalize to the 0/-1 convention of this API */
    }
    return ret;
}

/* A wrapping function: detach the mapped shared memory. */
int lwp_shmdt(void *shm_vaddr)
{
    int ret = 0;

    rt_mm_lock();
    ret = _lwp_shmdt(shm_vaddr);
    rt_mm_unlock();

    return ret;
}
|
||||
|
||||
/* Get the virtual address of a shared memory in kernel. */
void *_lwp_shminfo(int id)
{
    struct lwp_avl_struct *node_key = RT_NULL;
    struct lwp_shm_struct *p = RT_NULL;

    /* the share memory is in use only if it exists in the binary tree */
    node_key = shm_id_to_node(id);
    if (!node_key)
    {
        return RT_NULL;
    }
    p = (struct lwp_shm_struct *)node_key->data; /* p = _shm_ary[id]; */

    /* NOTE(review): p->addr is the rt_pages_alloc_ext() result, commented
     * as a *virtual* address in _lwp_shmget(), yet PV_OFFSET is subtracted
     * here as if it were physical -- confirm which address space p->addr
     * actually lives in. */
    return (void *)((char *)p->addr - PV_OFFSET); /* get the virtual address */
}

/* A wrapping function: get the virtual address of a shared memory. */
void *lwp_shminfo(int id)
{
    void *vaddr = RT_NULL;

    rt_mm_lock();
    vaddr = _lwp_shminfo(id);
    rt_mm_unlock();
    return vaddr;
}
|
||||
|
||||
#ifdef RT_USING_FINSH
|
||||
/* AVL traversal callback: print one segment (key, start address, size, id).
 * 'data' is unused. */
static int _shm_info(struct lwp_avl_struct* node_key, void *data)
{
    int id = 0;
    struct lwp_shm_struct* p = (struct lwp_shm_struct *)node_key->data;

    id = p - _shm_ary;   /* slot index within the static array */
    /* NOTE(review): %08x with size_t arguments truncates on 64-bit
     * targets; confirm rt_kprintf's format handling before relying on
     * the printed addresses. */
    rt_kprintf("0x%08x 0x%08x 0x%08x %8d\n", p->key, p->addr, p->size, id);
    return 0;
}

/* Shell command: list all registered shared-memory segments. */
void list_shm(void)
{
    rt_kprintf(" key paddr size id\n");
    rt_kprintf("---------- ---------- ---------- --------\n");
    rt_mm_lock();
    lwp_avl_traversal(shm_tree_key, _shm_info, NULL);
    rt_mm_unlock();
}
|
||||
MSH_CMD_EXPORT(list_shm, show share memory info);
|
||||
#endif
|
||||
|
||||
#endif
|
31
components/lwp/lwp_shm.h
Normal file
31
components/lwp/lwp_shm.h
Normal file
|
@ -0,0 +1,31 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-10-12 Jesven first version
|
||||
*/
|
||||
#ifndef __LWP_SHM_H__
#define __LWP_SHM_H__

#include <lwp_avl.h>

#ifdef __cplusplus
extern "C" {
#endif

/* System-V-style shared memory for light-weight processes.
 * NOTE(review): `struct rt_lwp` is referenced below without a visible
 * declaration in this header -- presumably an earlier include provides
 * it; confirm to avoid a prototype-scoped struct declaration. */

/* Find (or, when 'create' != 0, create) the segment for 'key'; returns its id or -1. */
int lwp_shmget(size_t key, size_t size, int create);
/* Free segment 'id' when unreferenced; returns 0 on success, -1 on bad id. */
int lwp_shmrm(int id);
/* Map segment 'id' at page-aligned 'shm_vaddr' (NULL lets the kernel choose); NULL on failure. */
void* lwp_shmat(int id, void* shm_vaddr);
/* Unmap a previously attached segment; returns 0 or -1. */
int lwp_shmdt(void* shm_vaddr);
/* Kernel-side address of segment 'id'; NULL when invalid. */
void *lwp_shminfo(int id);
/* Adjust the mapping reference count; return the new count or -1. */
int lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr);
int lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr);

#ifdef __cplusplus
}
#endif

#endif /*__LWP_SHM_H__*/
|
606
components/lwp/lwp_signal.c
Normal file
606
components/lwp/lwp_signal.c
Normal file
|
@ -0,0 +1,606 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-11-12 Jesven first version
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#include "lwp.h"
|
||||
#include "lwp_arch.h"
|
||||
#include "sys/signal.h"
|
||||
|
||||
/* Add signal '_sig' (1-based) to 'set'.  Single-word sets take the fast
 * path; wider sets index the word containing the bit. */
rt_inline void lwp_sigaddset(lwp_sigset_t *set, int _sig)
{
    unsigned long sig = _sig - 1;   /* 0-based bit position */

    if (_LWP_NSIG_WORDS == 1)
    {
        set->sig[0] |= 1UL << sig;
    }
    else
    {
        set->sig[sig / _LWP_NSIG_BPW] |= 1UL << (sig % _LWP_NSIG_BPW);
    }
}

/* Remove signal '_sig' (1-based) from 'set'. */
rt_inline void lwp_sigdelset(lwp_sigset_t *set, int _sig)
{
    unsigned long sig = _sig - 1;   /* 0-based bit position */

    if (_LWP_NSIG_WORDS == 1)
    {
        set->sig[0] &= ~(1UL << sig);
    }
    else
    {
        set->sig[sig / _LWP_NSIG_BPW] &= ~(1UL << (sig % _LWP_NSIG_BPW));
    }
}
|
||||
|
||||
/* Return non-zero when 'set' contains no signals.  The switch unrolls the
 * OR over the word count; unexpected word counts report "empty". */
rt_inline int lwp_sigisemptyset(lwp_sigset_t *set)
{
    switch (_LWP_NSIG_WORDS)
    {
    case 4:
        return (set->sig[3] | set->sig[2] |
                set->sig[1] | set->sig[0]) == 0;
    case 2:
        return (set->sig[1] | set->sig[0]) == 0;
    case 1:
        return set->sig[0] == 0;
    default:
        return 1;
    }
}

/* Return 1 when signal '_sig' (1-based) is a member of 'set', else 0. */
rt_inline int lwp_sigismember(lwp_sigset_t *set, int _sig)
{
    unsigned long sig = _sig - 1;   /* 0-based bit position */

    if (_LWP_NSIG_WORDS == 1)
    {
        return 1 & (set->sig[0] >> sig);
    }
    else
    {
        return 1 & (set->sig[sig / _LWP_NSIG_BPW] >> (sig % _LWP_NSIG_BPW));
    }
}
|
||||
|
||||
/* Return the lowest-numbered signal in 'pending' that is not blocked by
 * 'mask', or 0 when none is deliverable.  rt_hw_ffz(~x) finds the first
 * set bit of x = pending & ~mask. */
rt_inline int next_signal(lwp_sigset_t *pending, lwp_sigset_t *mask)
{
    unsigned long i, *s, *m, x;
    int sig = 0;

    s = pending->sig;
    m = mask->sig;

    /* word 0 first: covers signals 1.._LWP_NSIG_BPW */
    x = *s & ~*m;
    if (x)
    {
        sig = rt_hw_ffz(~x) + 1;
        return sig;
    }

    switch (_LWP_NSIG_WORDS)
    {
    default:
        /* generic scan over the remaining words */
        for (i = 1; i < _LWP_NSIG_WORDS; ++i)
        {
            x = *++s &~ *++m;
            if (!x)
                continue;
            sig = rt_hw_ffz(~x) + i*_LWP_NSIG_BPW + 1;
            break;
        }
        break;

    case 2:
        x = s[1] &~ m[1];
        if (!x)
            break;
        sig = rt_hw_ffz(~x) + _LWP_NSIG_BPW + 1;
        break;

    case 1:
        /* Nothing to do */
        break;
    }

    return sig;
}
|
||||
|
||||
/* Decide whether 'thread' may be suspended given 'suspend_flag'.
 * Returns 1 when suspension is allowed, 0 when a pending signal forbids
 * it: any pending thread/process signal for RT_INTERRUPTIBLE, a pending
 * SIGKILL for RT_KILLABLE; RT_UNINTERRUPTIBLE always suspends. */
int lwp_suspend_sigcheck(rt_thread_t thread, int suspend_flag)
{
    struct rt_lwp *lwp = (struct rt_lwp*)thread->lwp;
    int ret = 0;

    switch (suspend_flag)
    {
    case RT_INTERRUPTIBLE:
        if (!lwp_sigisemptyset(&thread->signal))
        {
            break;
        }
        if (thread->lwp && !lwp_sigisemptyset(&lwp->signal))
        {
            break;
        }
        ret = 1;
        break;
    case RT_KILLABLE:
        if (lwp_sigismember(&thread->signal, SIGKILL))
        {
            break;
        }
        if (thread->lwp && lwp_sigismember(&lwp->signal, SIGKILL))
        {
            break;
        }
        ret = 1;
        break;
    case RT_UNINTERRUPTIBLE:
        ret = 1;
        break;
    default:
        RT_ASSERT(0);
        break;
    }
    return ret;
}
|
||||
|
||||
/* Check whether the current thread has a deliverable pending signal and,
 * if so, mark signal handling as in progress (thread-level first, then
 * process-level).  Returns non-zero when a signal should be handled.
 * NOTE(review): thread->lwp is dereferenced without a NULL check --
 * presumably only user threads reach this path; confirm. */
int lwp_signal_check(void)
{
    rt_base_t level;
    struct rt_thread *thread;
    struct rt_lwp *lwp;
    uint32_t have_signal = 0;

    level = rt_hw_interrupt_disable();

    thread = rt_thread_self();

    /* already inside a thread-level handler: do not nest */
    if (thread->signal_in_process)
    {
        goto out;
    }

    lwp = (struct rt_lwp*)thread->lwp;

    /* already inside a process-level handler: do not nest */
    if (lwp->signal_in_process)
    {
        goto out;
    }

    have_signal = !lwp_sigisemptyset(&thread->signal);
    if (have_signal)
    {
        thread->signal_in_process = 1;
        goto out;
    }
    have_signal = !lwp_sigisemptyset(&lwp->signal);
    if (have_signal)
    {
        lwp->signal_in_process = 1;
    }
out:
    rt_hw_interrupt_enable(level);
    return have_signal;
}
|
||||
|
||||
/* Save the interrupted user context (sp/pc/flag) before entering a signal
 * handler, pick the next deliverable signal, temporarily block it, and
 * clear it from the pending set.  The thread-level context is used while
 * a thread signal is in process, otherwise the process-level context.
 * Returns the signal number being delivered. */
int lwp_signal_backup(void *user_sp, void *user_pc, void* user_flag)
{
    rt_base_t level;
    struct rt_thread *thread;
    struct rt_lwp *lwp;
    int signal;

    level = rt_hw_interrupt_disable();
    thread = rt_thread_self();
    if (thread->signal_in_process)
    {
        /* thread-directed signal: stash context on the thread */
        thread->user_ctx.sp = user_sp;
        thread->user_ctx.pc = user_pc;
        thread->user_ctx.flag = user_flag;

        signal = next_signal(&thread->signal, &thread->signal_mask);
        RT_ASSERT(signal != 0);
        /* block it for the duration of the handler; remember which one
         * to unblock in lwp_signal_restore() */
        lwp_sigaddset(&thread->signal_mask, signal);
        thread->signal_mask_bak = signal;
        lwp_sigdelset(&thread->signal, signal);
    }
    else
    {
        /* process-directed signal: stash context on the lwp */
        lwp = (struct rt_lwp*)thread->lwp;
        lwp->user_ctx.sp = user_sp;
        lwp->user_ctx.pc = user_pc;
        lwp->user_ctx.flag = user_flag;

        signal = next_signal(&lwp->signal, &lwp->signal_mask);
        RT_ASSERT(signal != 0);
        lwp_sigaddset(&lwp->signal_mask, signal);
        lwp->signal_mask_bak = signal;
        lwp_sigdelset(&lwp->signal, signal);
    }
    rt_hw_interrupt_enable(level);
    return signal;
}
|
||||
|
||||
/* Counterpart of lwp_signal_backup(): leave signal-handling mode, unblock
 * the signal that was delivered, and return the saved user context to
 * resume from. */
struct rt_user_context *lwp_signal_restore(void)
{
    rt_base_t level;
    struct rt_thread *thread;
    struct rt_lwp *lwp;
    struct rt_user_context *ctx;

    level = rt_hw_interrupt_disable();
    thread = rt_thread_self();
    if (thread->signal_in_process)
    {
        /* thread-level handler finished */
        ctx = &thread->user_ctx;
        thread->signal_in_process = 0;

        lwp_sigdelset(&thread->signal_mask, thread->signal_mask_bak);
        thread->signal_mask_bak = 0;
    }
    else
    {
        /* process-level handler finished */
        lwp = (struct rt_lwp*)thread->lwp;
        ctx = &lwp->user_ctx;
        RT_ASSERT(lwp->signal_in_process != 0);
        lwp->signal_in_process = 0;

        lwp_sigdelset(&lwp->signal_mask, lwp->signal_mask_bak);
        lwp->signal_mask_bak = 0;
    }
    rt_hw_interrupt_enable(level);
    return ctx;
}
|
||||
|
||||
rt_inline int _lwp_check_ignore(int sig)
|
||||
{
|
||||
if (sig == SIGCHLD || sig == SIGCONT)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void sys_exit(int value);
|
||||
lwp_sighandler_t lwp_sighandler_get(int sig)
|
||||
{
|
||||
lwp_sighandler_t func = RT_NULL;
|
||||
struct rt_lwp *lwp;
|
||||
rt_thread_t thread;
|
||||
rt_base_t level;
|
||||
|
||||
if (sig == 0 || sig > _LWP_NSIG)
|
||||
{
|
||||
return func;
|
||||
}
|
||||
level = rt_hw_interrupt_disable();
|
||||
thread = rt_thread_self();
|
||||
#ifndef ARCH_MM_MMU
|
||||
if (thread->signal_in_process)
|
||||
{
|
||||
func = thread->signal_handler[sig - 1];
|
||||
goto out;
|
||||
}
|
||||
#endif
|
||||
lwp = (struct rt_lwp*)thread->lwp;
|
||||
|
||||
func = lwp->signal_handler[sig - 1];
|
||||
if (!func)
|
||||
{
|
||||
if (_lwp_check_ignore(sig))
|
||||
{
|
||||
goto out;
|
||||
}
|
||||
if (lwp->signal_in_process)
|
||||
{
|
||||
lwp_terminate(lwp);
|
||||
}
|
||||
sys_exit(0);
|
||||
}
|
||||
out:
|
||||
rt_hw_interrupt_enable(level);
|
||||
|
||||
if (func == (lwp_sighandler_t)SIG_IGN)
|
||||
{
|
||||
func = RT_NULL;
|
||||
}
|
||||
return func;
|
||||
}
|
||||
|
||||
/* Install 'func' as the process-level handler for 'sig'.
 * Invalid signal numbers and SIGKILL/SIGSTOP (which cannot be caught)
 * are silently ignored. */
void lwp_sighandler_set(int sig, lwp_sighandler_t func)
{
    rt_base_t level;

    /* BUGFIX: also reject negative 'sig' -- 'sig - 1' would otherwise
     * write outside the signal_handler[] bounds */
    if (sig <= 0 || sig > _LWP_NSIG)
        return;
    if (sig == SIGKILL || sig == SIGSTOP)
        return;
    level = rt_hw_interrupt_disable();
    ((struct rt_lwp*)rt_thread_self()->lwp)->signal_handler[sig - 1] = func;
    rt_hw_interrupt_enable(level);
}
|
||||
|
||||
#ifndef ARCH_MM_MMU
|
||||
/* Install 'func' as the per-thread handler for 'sig' (no-MMU targets).
 * Invalid signal numbers are silently ignored. */
void lwp_thread_sighandler_set(int sig, lwp_sighandler_t func)
{
    rt_base_t level;

    /* BUGFIX: also reject negative 'sig' -- 'sig - 1' would otherwise
     * write outside the signal_handler[] bounds */
    if (sig <= 0 || sig > _LWP_NSIG)
        return;
    level = rt_hw_interrupt_disable();
    rt_thread_self()->signal_handler[sig - 1] = func;
    rt_hw_interrupt_enable(level);
}
|
||||
#endif
|
||||
|
||||
/* Kernel side of sigaction(2): report the current action for 'sig' into
 * 'oact' (if given) and/or install the new action from 'act' (if given).
 * Returns 0 on success, -RT_EINVAL on bad arguments. */
int lwp_sigaction(int sig, const struct lwp_sigaction *act,
        struct lwp_sigaction *oact, size_t sigsetsize)
{
    rt_base_t level;
    struct rt_lwp *lwp;
    int ret = -RT_EINVAL;
    lwp_sigset_t newset;

    level = rt_hw_interrupt_disable();
    lwp = (struct rt_lwp*)rt_thread_self()->lwp;
    if (!lwp)
    {
        goto out;
    }
    /* BUGFIX: validate the signal number before signal_handler[sig - 1]
     * is indexed below (sig == 0 previously read signal_handler[-1]) */
    if (sig <= 0 || sig > _LWP_NSIG)
    {
        goto out;
    }
    if (sigsetsize != sizeof(lwp_sigset_t))
    {
        goto out;
    }
    if (!act && !oact)
    {
        goto out;
    }
    if (oact)
    {
        oact->sa_flags = lwp->sa_flags;
        oact->sa_mask = lwp->signal_mask;
        oact->sa_restorer = RT_NULL;
        oact->__sa_handler._sa_handler = lwp->signal_handler[sig - 1];
    }
    if (act)
    {
        lwp->sa_flags = act->sa_flags;
        newset = act->sa_mask;
        /* SIGKILL and SIGSTOP can never be masked */
        lwp_sigdelset(&newset, SIGKILL);
        lwp_sigdelset(&newset, SIGSTOP);
        lwp->signal_mask = newset;
        lwp_sighandler_set(sig, act->__sa_handler._sa_handler);
    }
    ret = 0;
out:
    rt_hw_interrupt_enable(level);
    return ret;
}
|
||||
|
||||
/* dset = set0 | set1, unrolled by word count; the cases deliberately fall
 * through so each higher case also handles the lower words. */
rt_inline void sigorsets(lwp_sigset_t *dset, const lwp_sigset_t *set0, const lwp_sigset_t *set1)
{
    switch (_LWP_NSIG_WORDS)
    {
    case 4:
        dset->sig[3] = set0->sig[3] | set1->sig[3];
        dset->sig[2] = set0->sig[2] | set1->sig[2];
        /* fallthrough */
    case 2:
        dset->sig[1] = set0->sig[1] | set1->sig[1];
        /* fallthrough */
    case 1:
        dset->sig[0] = set0->sig[0] | set1->sig[0];
        /* fallthrough */
    default:
        return;
    }
}

/* dset = set0 & set1, same deliberate fallthrough structure as sigorsets. */
rt_inline void sigandsets(lwp_sigset_t *dset, const lwp_sigset_t *set0, const lwp_sigset_t *set1)
{
    switch (_LWP_NSIG_WORDS)
    {
    case 4:
        dset->sig[3] = set0->sig[3] & set1->sig[3];
        dset->sig[2] = set0->sig[2] & set1->sig[2];
        /* fallthrough */
    case 2:
        dset->sig[1] = set0->sig[1] & set1->sig[1];
        /* fallthrough */
    case 1:
        dset->sig[0] = set0->sig[0] & set1->sig[0];
        /* fallthrough */
    default:
        return;
    }
}
|
||||
|
||||
/* Kernel side of sigprocmask(2) for the process-level mask.
 * 'how' is SIG_BLOCK / SIG_UNBLOCK / SIG_SETMASK; the old mask is copied
 * to 'oset' when given.  SIGKILL/SIGSTOP can never be blocked.
 * Returns 0 on success, -1 when no lwp, -RT_EINVAL for a bad 'how'.
 * NOTE(review): SIG_UNBLOCK computes mask & sigset rather than the POSIX
 * mask & ~sigset -- presumably callers pre-invert the set; confirm. */
int lwp_sigprocmask(int how, const lwp_sigset_t *sigset, lwp_sigset_t *oset)
{
    int ret = -1;
    rt_base_t level;
    struct rt_lwp *lwp;
    struct rt_thread *thread;
    lwp_sigset_t newset;

    level = rt_hw_interrupt_disable();

    thread = rt_thread_self();
    lwp = (struct rt_lwp*)thread->lwp;
    if (!lwp)
    {
        goto out;
    }
    if (oset)
    {
        /* hand back the current mask before any modification */
        rt_memcpy(oset, &lwp->signal_mask, sizeof(lwp_sigset_t));
    }

    if (sigset)
    {
        switch (how)
        {
        case SIG_BLOCK:
            sigorsets(&newset, &lwp->signal_mask, sigset);
            break;
        case SIG_UNBLOCK:
            sigandsets(&newset, &lwp->signal_mask, sigset);
            break;
        case SIG_SETMASK:
            newset = *sigset;
            break;
        default:
            ret = -RT_EINVAL;
            goto out;
        }

        /* SIGKILL and SIGSTOP can never be blocked */
        lwp_sigdelset(&newset, SIGKILL);
        lwp_sigdelset(&newset, SIGSTOP);

        lwp->signal_mask = newset;
    }
    ret = 0;
out:
    rt_hw_interrupt_enable(level);
    return ret;
}
|
||||
|
||||
/* Per-thread variant of lwp_sigprocmask(): manipulates the calling
 * thread's signal mask instead of the process mask.
 * NOTE(review): unlike lwp_sigprocmask(), this always returns 0 -- even
 * for an invalid 'how'; confirm whether callers rely on that. */
int lwp_thread_sigprocmask(int how, const lwp_sigset_t *sigset, lwp_sigset_t *oset)
{
    rt_base_t level;
    struct rt_thread *thread;
    lwp_sigset_t newset;

    level = rt_hw_interrupt_disable();
    thread = rt_thread_self();

    if (oset)
    {
        /* hand back the current mask before any modification */
        rt_memcpy(oset, &thread->signal_mask, sizeof(lwp_sigset_t));
    }

    if (sigset)
    {
        switch (how)
        {
        case SIG_BLOCK:
            sigorsets(&newset, &thread->signal_mask, sigset);
            break;
        case SIG_UNBLOCK:
            sigandsets(&newset, &thread->signal_mask, sigset);
            break;
        case SIG_SETMASK:
            newset = *sigset;
            break;
        default:
            goto out;
        }

        /* SIGKILL and SIGSTOP can never be blocked */
        lwp_sigdelset(&newset, SIGKILL);
        lwp_sigdelset(&newset, SIGSTOP);

        thread->signal_mask = newset;
    }
out:
    rt_hw_interrupt_enable(level);
    return 0;
}
|
||||
|
||||
/* Wake a suspended thread so it can process a newly-delivered signal.
 * A thread marked with the common-wakeup mask is only woken by SIGKILL
 * (and then only if the kill-wakeup mask is not also set); other
 * suspended threads are woken unconditionally. A reschedule is requested
 * whenever a wakeup actually happened. */
static void _do_signal_wakeup(rt_thread_t thread, int sig)
{
    int need_schedule;

    if ((thread->stat & RT_THREAD_SUSPEND_MASK) != RT_THREAD_SUSPEND_MASK)
    {
        return; /* not suspended: nothing to wake */
    }

    need_schedule = 1;
    if ((thread->stat & RT_SIGNAL_COMMON_WAKEUP_MASK) != RT_SIGNAL_COMMON_WAKEUP_MASK)
    {
        rt_thread_wakeup(thread);
    }
    else if ((sig == SIGKILL) && ((thread->stat & RT_SIGNAL_KILL_WAKEUP_MASK) != RT_SIGNAL_KILL_WAKEUP_MASK))
    {
        rt_thread_wakeup(thread);
    }
    else
    {
        need_schedule = 0;
    }

    /* do schedule */
    if (need_schedule)
    {
        rt_schedule();
    }
}
|
||||
|
||||
/**
 * Send signal @sig to the process identified by @pid.
 *
 * @param pid target process id
 * @param sig signal number in [0, _LWP_NSIG); 0 only probes that the
 *            process exists (kill(pid, 0) convention), delivering nothing
 *
 * @return 0 on success; -1 with errno set to EINVAL (bad signal) or
 *         ESRCH (no such process / already finished).
 */
int lwp_kill(pid_t pid, int sig)
{
    struct rt_lwp *target;
    rt_thread_t main_thread;
    rt_base_t level;
    int result = -1;

    if ((sig < 0) || (sig >= _LWP_NSIG))
    {
        rt_set_errno(EINVAL);
        return result;
    }

    level = rt_hw_interrupt_disable();

    target = lwp_from_pid(pid);
    if ((target == RT_NULL) || target->finish)
    {
        rt_set_errno(ESRCH);
    }
    else
    {
        if (sig != 0)
        {
            /* the last node of t_grp is the process's main thread */
            main_thread = rt_list_entry(target->t_grp.prev, struct rt_thread, sibling);
            if (!lwp_sigismember(&target->signal_mask, sig)) /* if signal masked */
            {
                lwp_sigaddset(&target->signal, sig);
                _do_signal_wakeup(main_thread, sig);
            }
        }
        result = 0;
    }

    rt_hw_interrupt_enable(level);
    return result;
}
|
||||
|
||||
/**
 * Send signal @sig directly to a specific thread.
 *
 * @param thread target thread; must belong to a user process (lwp)
 * @param sig    signal number in [0, _LWP_NSIG)
 *
 * @return 0 on success; -RT_EINVAL with errno set to ESRCH (NULL thread),
 *         EINVAL (bad signal) or EPERM (kernel thread, no lwp).
 */
int lwp_thread_kill(rt_thread_t thread, int sig)
{
    rt_base_t level;
    int ret = -RT_EINVAL;

    if (thread == RT_NULL)
    {
        rt_set_errno(ESRCH);
        return ret;
    }
    if ((sig < 0) || (sig >= _LWP_NSIG))
    {
        rt_set_errno(EINVAL);
        return ret;
    }

    level = rt_hw_interrupt_disable();
    if (thread->lwp == RT_NULL)
    {
        /* only user-mode (lwp) threads can take lwp signals */
        rt_set_errno(EPERM);
    }
    else
    {
        if (!lwp_sigismember(&thread->signal_mask, sig)) /* if signal masked */
        {
            lwp_sigaddset(&thread->signal, sig);
            _do_signal_wakeup(thread, sig);
        }
        ret = 0;
    }
    rt_hw_interrupt_enable(level);
    return ret;
}
|
39
components/lwp/lwp_signal.h
Normal file
39
components/lwp/lwp_signal.h
Normal file
|
@ -0,0 +1,39 @@
|
|||
/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-02-23     Jesven       first version.
 */

#ifndef LWP_SIGNAL_H__
#define LWP_SIGNAL_H__

#include <rtthread.h>

#ifdef __cplusplus
extern "C" {
#endif

/* signal delivery check and user-context save/restore around handlers */
int lwp_signal_check(void);
int lwp_signal_backup(void *user_sp, void *user_pc, void* user_flag);
struct rt_user_context *lwp_signal_restore(void);

/* per-process signal-handler table accessors */
lwp_sighandler_t lwp_sighandler_get(int sig);
void lwp_sighandler_set(int sig, lwp_sighandler_t func);
#ifndef ARCH_MM_MMU
/* per-thread handler table only exists on MMU-less configurations */
void lwp_thread_sighandler_set(int sig, lwp_sighandler_t func);
#endif

/* POSIX-like mask/action management at process and thread scope */
int lwp_sigprocmask(int how, const lwp_sigset_t *sigset, lwp_sigset_t *oset);
int lwp_sigaction(int sig, const struct lwp_sigaction *act, struct lwp_sigaction * oact, size_t sigsetsize);
int lwp_thread_sigprocmask(int how, const lwp_sigset_t *sigset, lwp_sigset_t *oset);

/* signal generation (process / single thread) */
int lwp_kill(pid_t pid, int sig);
int lwp_thread_kill(rt_thread_t thread, int sig);

#ifdef __cplusplus
}
#endif

#endif
|
97
components/lwp/lwp_sys_socket.h
Normal file
97
components/lwp/lwp_sys_socket.h
Normal file
|
@ -0,0 +1,97 @@
|
|||
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-03-13     RT-Thread    Export as header
 */
#ifndef __LWP_SYS_SOCKET_H__
#define __LWP_SYS_SOCKET_H__

/*
 * Socket option-number translation tables.
 * INTF_* are the values the user-space ABI presents (Linux-compatible
 * numbering); IMPL_* are the corresponding values of the underlying
 * network stack.
 * NOTE(review): IMPL_* numbering appears to match lwIP — verify against
 * the configured stack before relying on it.
 */

/* socket levels */
#define INTF_SOL_SOCKET 1
#define IMPL_SOL_SOCKET 0xFFF

#define INTF_IPPROTO_IP 0
#define IMPL_IPPROTO_IP 0

#define INTF_IPPROTO_TCP 6
#define IMPL_IPPROTO_TCP 6

#define INTF_IPPROTO_IPV6 41
#define IMPL_IPPROTO_IPV6 41

/* SOL_SOCKET option names */
#define INTF_SO_BROADCAST 6
#define INTF_SO_KEEPALIVE 9
#define INTF_SO_REUSEADDR 2
#define INTF_SO_TYPE 3
#define INTF_SO_ERROR 4
#define INTF_SO_SNDTIMEO 21
#define INTF_SO_RCVTIMEO 20
#define INTF_SO_RCVBUF 8
#define INTF_SO_LINGER 13
#define INTF_SO_NO_CHECK 11
#define INTF_SO_ACCEPTCONN 30
#define INTF_SO_DONTROUTE 5
#define INTF_SO_OOBINLINE 10
#define INTF_SO_REUSEPORT 15
#define INTF_SO_SNDBUF 7
#define INTF_SO_SNDLOWAT 19
#define INTF_SO_RCVLOWAT 18

#define IMPL_SO_BROADCAST 0x0020
#define IMPL_SO_KEEPALIVE 0x0008
#define IMPL_SO_REUSEADDR 0x0004
#define IMPL_SO_TYPE 0x1008
#define IMPL_SO_ERROR 0x1007
#define IMPL_SO_SNDTIMEO 0x1005
#define IMPL_SO_RCVTIMEO 0x1006
#define IMPL_SO_RCVBUF 0x1002
#define IMPL_SO_LINGER 0x0080
#define IMPL_SO_NO_CHECK 0x100a
#define IMPL_SO_ACCEPTCONN 0x0002
#define IMPL_SO_DONTROUTE 0x0010
#define IMPL_SO_OOBINLINE 0x0100
#define IMPL_SO_REUSEPORT 0x0200
#define IMPL_SO_SNDBUF 0x1001
#define IMPL_SO_SNDLOWAT 0x1003
#define IMPL_SO_RCVLOWAT 0x1004

/* IPPROTO_IP option names */
#define INTF_IP_TTL 2
#define INTF_IP_TOS 1
#define INTF_IP_MULTICAST_TTL 33
#define INTF_IP_MULTICAST_IF 32
#define INTF_IP_MULTICAST_LOOP 34
#define INTF_IP_ADD_MEMBERSHIP 35
#define INTF_IP_DROP_MEMBERSHIP 36

#define IMPL_IP_TTL 2
#define IMPL_IP_TOS 1
#define IMPL_IP_MULTICAST_TTL 5
#define IMPL_IP_MULTICAST_IF 6
#define IMPL_IP_MULTICAST_LOOP 7
#define IMPL_IP_ADD_MEMBERSHIP 3
#define IMPL_IP_DROP_MEMBERSHIP 4

/* IPPROTO_TCP option names */
#define INTF_TCP_NODELAY 1
#define INTF_TCP_KEEPALIVE 9
#define INTF_TCP_KEEPIDLE 4
#define INTF_TCP_KEEPINTVL 5
#define INTF_TCP_KEEPCNT 6

#define IMPL_TCP_NODELAY 0x01
#define IMPL_TCP_KEEPALIVE 0x02
#define IMPL_TCP_KEEPIDLE 0x03
#define IMPL_TCP_KEEPINTVL 0x04
#define IMPL_TCP_KEEPCNT 0x05

/* IPPROTO_IPV6 option names */
#define INTF_IPV6_V6ONLY 26
#define IMPL_IPV6_V6ONLY 27

#endif /* __LWP_SYS_SOCKET_H__ */
|
5163
components/lwp/lwp_syscall.c
Normal file
5163
components/lwp/lwp_syscall.c
Normal file
File diff suppressed because it is too large
Load diff
114
components/lwp/lwp_syscall.h
Normal file
114
components/lwp/lwp_syscall.h
Normal file
|
@ -0,0 +1,114 @@
|
|||
/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-11-12     Jesven       the first version
 */

#ifndef __LWP_SYSCALL_H__
#define __LWP_SYSCALL_H__

#include <syscall_generic.h>

#include <stdint.h>
#include <rtthread.h>
#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */
#include <poll.h>
#include <sys/time.h>
#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#endif

typedef long suseconds_t;       /* microseconds (signed) */
typedef uint32_t id_t;          /* may contain pid, uid or gid */

/*
 * Process priority specifications to get/setpriority.
 */
#define PRIO_MIN (-20)
#define PRIO_MAX 20

#define PRIO_PROCESS 0 /* only support lwp process */
#define PRIO_PGRP 1
#define PRIO_USER 2

/* syscall-table introspection */
const char *lwp_get_syscall_name(rt_uint32_t number);
const void *lwp_get_sys_api(rt_uint32_t number);

/* process lifecycle and file I/O */
void sys_exit(int value);
ssize_t sys_read(int fd, void *buf, size_t nbyte);
ssize_t sys_write(int fd, const void *buf, size_t nbyte);
off_t sys_lseek(int fd, off_t offset, int whence);
sysret_t sys_open(const char *name, int mode, ...);
sysret_t sys_close(int fd);
sysret_t sys_ioctl(int fd, unsigned long cmd, void* data);
sysret_t sys_fstat(int file, struct stat *buf);
sysret_t sys_poll(struct pollfd *fds, nfds_t nfds, int timeout);
sysret_t sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
sysret_t sys_gettimeofday(struct timeval *tp, struct timezone *tzp);
sysret_t sys_settimeofday(const struct timeval *tv, const struct timezone *tzp);
sysret_t sys_exec(char *filename, int argc, char **argv, char **envp);
sysret_t sys_kill(int pid, int sig);
sysret_t sys_getpid(void);
sysret_t sys_getpriority(int which, id_t who);
sysret_t sys_setpriority(int which, id_t who, int prio);
/* kernel IPC objects exported to user space */
rt_sem_t sys_sem_create(const char *name, rt_uint32_t value, rt_uint8_t flag);
sysret_t sys_sem_delete(rt_sem_t sem);
sysret_t sys_sem_take(rt_sem_t sem, rt_int32_t time);
sysret_t sys_sem_release(rt_sem_t sem);
rt_mutex_t sys_mutex_create(const char *name, rt_uint8_t flag);
sysret_t sys_mutex_delete(rt_mutex_t mutex);
sysret_t sys_mutex_take(rt_mutex_t mutex, rt_int32_t time);
sysret_t sys_mutex_release(rt_mutex_t mutex);
rt_event_t sys_event_create(const char *name, rt_uint8_t flag);
sysret_t sys_event_delete(rt_event_t event);
sysret_t sys_event_send(rt_event_t event, rt_uint32_t set);
sysret_t sys_event_recv(rt_event_t event, rt_uint32_t set, rt_uint8_t opt, rt_int32_t timeout, rt_uint32_t *recved);
rt_mailbox_t sys_mb_create(const char *name, rt_size_t size, rt_uint8_t flag);
sysret_t sys_mb_delete(rt_mailbox_t mb);
sysret_t sys_mb_send(rt_mailbox_t mb, rt_ubase_t value);
sysret_t sys_mb_send_wait(rt_mailbox_t mb, rt_ubase_t value, rt_int32_t timeout);
sysret_t sys_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout);
rt_mq_t sys_mq_create(const char *name, rt_size_t msg_size, rt_size_t max_msgs, rt_uint8_t flag);
sysret_t sys_mq_delete(rt_mq_t mq);
sysret_t sys_mq_send(rt_mq_t mq, void *buffer, rt_size_t size);
sysret_t sys_mq_urgent(rt_mq_t mq, void *buffer, rt_size_t size);
sysret_t sys_mq_recv(rt_mq_t mq, void *buffer, rt_size_t size, rt_int32_t timeout);
/* thread management */
rt_thread_t sys_thread_create(void *arg[]);
sysret_t sys_thread_delete(rt_thread_t thread);
sysret_t sys_thread_startup(rt_thread_t thread);
rt_thread_t sys_thread_self(void);
/* IPC channels */
sysret_t sys_channel_open(const char *name, int flags);
sysret_t sys_channel_close(int fd);
sysret_t sys_channel_send(int fd, rt_channel_msg_t data);
sysret_t sys_channel_send_recv(int fd, rt_channel_msg_t data, rt_channel_msg_t data_ret);
sysret_t sys_channel_reply(int fd, rt_channel_msg_t data);
sysret_t sys_channel_recv(int fd, rt_channel_msg_t data);
void sys_enter_critical(void);
void sys_exit_critical(void);

sysret_t sys_dup(int oldfd);
sysret_t sys_dup2(int oldfd, int new);

sysret_t sys_log(const char* log, int size);

#ifdef ARCH_MM_MMU
/* MMU-only services: futex, process-shared mutex, cache maintenance */
sysret_t sys_futex(int *uaddr, int op, int val, const struct timespec *timeout, int *uaddr2, int val3);
sysret_t sys_pmutex(void *umutex, int op, void *arg);
sysret_t sys_cacheflush(void *addr, int len, int cache);
#endif /* ARCH_MM_MMU */

#ifdef __cplusplus
}
#endif

#endif
|
132
components/lwp/lwp_tid.c
Normal file
132
components/lwp/lwp_tid.c
Normal file
|
@ -0,0 +1,132 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2021-01-15 shaojinchun first version
|
||||
*/
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#include "lwp.h"
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
#include "lwp_user_mm.h"
|
||||
#endif
|
||||
|
||||
#define DBG_TAG "LWP_TID"
|
||||
#define DBG_LVL DBG_INFO
|
||||
#include <rtdbg.h>
|
||||
|
||||
/* upper bound (exclusive) on thread id values */
#define TID_MAX 10000

/* Compile-time assertion: the array size 2*(x)-1 is positive only when
 * x is true, so a false condition fails to compile. */
#define TID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

TID_CT_ASSERT(tid_min_nr, LWP_TID_MAX_NR > 1);
TID_CT_ASSERT(tid_max_nr, LWP_TID_MAX_NR < TID_MAX);

/* static pool of AVL nodes, one per allocatable tid */
static struct lwp_avl_struct lwp_tid_ary[LWP_TID_MAX_NR];
/* free list of recycled nodes, chained through avl_right */
static struct lwp_avl_struct *lwp_tid_free_head = RT_NULL;
/* number of nodes handed out from lwp_tid_ary so far */
static int lwp_tid_ary_alloced = 0;
/* AVL tree of tids currently in use, keyed by tid */
static struct lwp_avl_struct *lwp_tid_root = RT_NULL;
/* last tid allocated; the next search starts just above it */
static int current_tid = 0;
|
||||
|
||||
/* Allocate an unused thread id.
 *
 * An AVL node is taken from the free list if available, otherwise carved
 * from the static pool. The tid value is searched starting just above the
 * last allocated tid, wrapping around to 1 when the upper range is
 * exhausted; in-use tids are tracked in lwp_tid_root.
 *
 * Returns the new tid (>= 1), or 0 when all LWP_TID_MAX_NR nodes are in
 * use. A free tid always exists once a node was obtained, because at most
 * LWP_TID_MAX_NR (< TID_MAX, compile-time asserted) tids can be live.
 */
int lwp_tid_get(void)
{
    rt_base_t level;
    struct lwp_avl_struct *p;
    int tid = 0;

    level = rt_hw_interrupt_disable();
    p = lwp_tid_free_head;
    if (p)
    {
        /* pop a recycled node (free list chained through avl_right) */
        lwp_tid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_tid_ary_alloced < LWP_TID_MAX_NR)
    {
        /* take a fresh node from the static pool */
        p = lwp_tid_ary + lwp_tid_ary_alloced;
        lwp_tid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        /* search upward from the last allocated tid */
        for (tid = current_tid + 1; tid < TID_MAX; tid++)
        {
            if (!lwp_avl_find(tid, lwp_tid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            /* wrap around: search the low range [1, current_tid] */
            for (tid = 1; tid <= current_tid; tid++)
            {
                if (!lwp_avl_find(tid, lwp_tid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = tid;
        lwp_avl_insert(p, &lwp_tid_root);
        current_tid = tid;
    }
    rt_hw_interrupt_enable(level);
    return tid;
}
|
||||
|
||||
void lwp_tid_put(int tid)
|
||||
{
|
||||
rt_base_t level;
|
||||
struct lwp_avl_struct *p;
|
||||
|
||||
level = rt_hw_interrupt_disable();
|
||||
p = lwp_avl_find(tid, lwp_tid_root);
|
||||
if (p)
|
||||
{
|
||||
p->data = RT_NULL;
|
||||
lwp_avl_remove(p, &lwp_tid_root);
|
||||
p->avl_right = lwp_tid_free_head;
|
||||
lwp_tid_free_head = p;
|
||||
}
|
||||
rt_hw_interrupt_enable(level);
|
||||
}
|
||||
|
||||
rt_thread_t lwp_tid_get_thread(int tid)
|
||||
{
|
||||
rt_base_t level;
|
||||
struct lwp_avl_struct *p;
|
||||
rt_thread_t thread = RT_NULL;
|
||||
|
||||
level = rt_hw_interrupt_disable();
|
||||
p = lwp_avl_find(tid, lwp_tid_root);
|
||||
if (p)
|
||||
{
|
||||
thread = (rt_thread_t)p->data;
|
||||
}
|
||||
rt_hw_interrupt_enable(level);
|
||||
return thread;
|
||||
}
|
||||
|
||||
/* Associate @thread with an already-allocated @tid.
 * Does nothing when the tid is not present in the in-use tree. */
void lwp_tid_set_thread(int tid, rt_thread_t thread)
{
    struct lwp_avl_struct *node;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    node = lwp_avl_find(tid, lwp_tid_root);
    if (node != RT_NULL)
    {
        node->data = thread;
    }
    rt_hw_interrupt_enable(level);
}
|
763
components/lwp/lwp_user_mm.c
Normal file
763
components/lwp/lwp_user_mm.c
Normal file
|
@ -0,0 +1,763 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2021, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-10-28 Jesven first version
|
||||
* 2021-02-06 lizhirui fixed fixed vtable size problem
|
||||
* 2021-02-12 lizhirui add 64-bit support for lwp_brk
|
||||
* 2021-02-19 lizhirui add riscv64 support for lwp_user_accessable and lwp_get_from_user
|
||||
* 2021-06-07 lizhirui modify user space bound check
|
||||
* 2022-12-25 wangxiaoyao adapt to new mm
|
||||
*/
|
||||
|
||||
#include <rtthread.h>
|
||||
#include <rthw.h>
|
||||
#include <string.h>
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
|
||||
#include <lwp.h>
|
||||
#include <lwp_arch.h>
|
||||
#include <lwp_mm.h>
|
||||
#include <lwp_user_mm.h>
|
||||
|
||||
#include <mm_aspace.h>
|
||||
#include <mm_fault.h>
|
||||
#include <mm_flag.h>
|
||||
#include <mm_page.h>
|
||||
#include <mmu.h>
|
||||
#include <page.h>
|
||||
|
||||
#define DBG_TAG "LwP"
|
||||
#define DBG_LVL DBG_LOG
|
||||
#include <rtdbg.h>
|
||||
|
||||
static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace);
|
||||
|
||||
/* Create the user address-space bookkeeping for a new process.
 *
 * Allocates the process's memory-object descriptor, performs the
 * architecture-specific address-space setup, and — for a fresh process
 * only — pre-maps the user stack region. A forked child (@is_fork) skips
 * the stack mapping; presumably the duplication path (lwp_dup_user)
 * recreates it from the parent instead.
 *
 * Returns RT_EOK on success, -RT_ENOMEM or an arch/aspace error otherwise.
 */
int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
{
    int err = -RT_ENOMEM;

    lwp->lwp_obj = rt_malloc(sizeof(struct rt_lwp_objs));
    if (lwp->lwp_obj)
    {
        _init_lwp_objs(lwp->lwp_obj, lwp->aspace);

        err = arch_user_space_init(lwp);
        if (!is_fork && err == RT_EOK)
        {
            /* map the whole user stack window, backed by the lwp mem obj */
            void *addr = (void *)USER_STACK_VSTART;
            err = rt_aspace_map(lwp->aspace, &addr,
                                USER_STACK_VEND - USER_STACK_VSTART,
                                MMU_MAP_U_RWCB, 0, &lwp->lwp_obj->mem_obj, 0);
        }
    }

    return err;
}
|
||||
|
||||
void lwp_aspace_switch(struct rt_thread *thread)
|
||||
{
|
||||
struct rt_lwp *lwp = RT_NULL;
|
||||
rt_aspace_t aspace;
|
||||
void *from_tbl;
|
||||
|
||||
if (thread->lwp)
|
||||
{
|
||||
lwp = (struct rt_lwp *)thread->lwp;
|
||||
aspace = lwp->aspace;
|
||||
}
|
||||
else
|
||||
aspace = &rt_kernel_space;
|
||||
|
||||
from_tbl = rt_hw_mmu_tbl_get();
|
||||
if (aspace->page_table != from_tbl)
|
||||
{
|
||||
rt_hw_aspace_switch(aspace);
|
||||
}
|
||||
}
|
||||
|
||||
/* Tear down the process's user address space: release the arch-level
 * mappings first, then free the memory-object descriptor allocated in
 * lwp_user_space_init(). */
void lwp_unmap_user_space(struct rt_lwp *lwp)
{
    arch_user_space_free(lwp);
    rt_free(lwp->lwp_obj);
}
|
||||
|
||||
/* Classify a user memory area by flags and position in the user layout,
 * returning a human-readable region name. */
static const char *user_get_name(rt_varea_t varea)
{
    if (varea->flag & MMF_TEXT)
    {
        return "user.text";
    }
    if (varea->start == (void *)USER_STACK_VSTART)
    {
        return "user.stack";
    }
    if (varea->start >= (void *)USER_HEAP_VADDR &&
        varea->start < (void *)USER_HEAP_VEND)
    {
        return "user.heap";
    }
    return "user.data";
}
|
||||
|
||||
/* When this bit is set in varea->data, page faults in the area must NOT
 * be served by the anonymous-page fallback (see _user_do_page_fault). */
#define NO_AUTO_FETCH 0x1
#define VAREA_CAN_AUTO_FETCH(varea) (!((rt_ubase_t)((varea)->data) & NO_AUTO_FETCH))
|
||||
|
||||
/* Page-fault handler for user-space vareas.
 *
 * When the lwp mem obj carries a source aspace (set up during process
 * duplication), the faulting page is resolved there first:
 *  - data pages present in the source are copied into a fresh frame
 *    (copy-on-fault);
 *  - text pages are shared by taking an extra reference on the source's
 *    existing frame.
 * Pages absent from the source — and all faults when there is no source —
 * fall back to the generic anonymous mapper, unless auto-fetch has been
 * disabled for the varea (NO_AUTO_FETCH). On failure the fault message's
 * response is left untouched (i.e. not MM_FAULT_STATUS_OK).
 */
static void _user_do_page_fault(struct rt_varea *varea,
                                struct rt_aspace_fault_msg *msg)
{
    struct rt_lwp_objs *lwp_objs;
    lwp_objs = rt_container_of(varea->mem_obj, struct rt_lwp_objs, mem_obj);

    if (lwp_objs->source)
    {
        /* translate the fault address in the SOURCE address space */
        char *paddr = rt_hw_mmu_v2p(lwp_objs->source, msg->fault_vaddr);
        if (paddr != ARCH_MAP_FAILED)
        {
            void *vaddr;
            /* kernel-visible (direct-mapped) address of the source frame */
            vaddr = paddr - PV_OFFSET;

            if (!(varea->flag & MMF_TEXT))
            {
                /* data page: copy into a private frame */
                void *cp = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
                if (cp)
                {
                    memcpy(cp, vaddr, ARCH_PAGE_SIZE);
                    rt_varea_pgmgr_insert(varea, cp);
                    msg->response.status = MM_FAULT_STATUS_OK;
                    msg->response.vaddr = cp;
                    msg->response.size = ARCH_PAGE_SIZE;
                }
                else
                {
                    LOG_W("%s: page alloc failed at %p", __func__,
                          varea->start);
                }
            }
            else
            {
                /* text page: share the source frame, bump its refcount */
                rt_page_t page = rt_page_addr2page(vaddr);
                page->ref_cnt += 1;
                rt_varea_pgmgr_insert(varea, vaddr);
                msg->response.status = MM_FAULT_STATUS_OK;
                msg->response.vaddr = vaddr;
                msg->response.size = ARCH_PAGE_SIZE;
            }
        }
        else if (!(varea->flag & MMF_TEXT))
        {
            /* if data segment not exist in source do a fallback */
            rt_mm_dummy_mapper.on_page_fault(varea, msg);
        }
    }
    else if (VAREA_CAN_AUTO_FETCH(varea))
    {
        /* if (!lwp_objs->source), no aspace as source data */
        rt_mm_dummy_mapper.on_page_fault(varea, msg);
    }
}
|
||||
|
||||
/* Initialize the per-process memory object: no source aspace yet, the
 * custom fault handler installed, and the remaining mapper callbacks
 * delegated to the generic dummy mapper. (@aspace is currently unused.) */
static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace)
{
    if (lwp_objs)
    {
        /**
         * @brief one lwp_obj represent an base layout of page based memory in user space
         * This is useful on duplication. Where we only have a (lwp_objs and offset) to
         * provide identical memory. This is implemented by lwp_objs->source.
         */
        lwp_objs->source = NULL;
        lwp_objs->mem_obj.get_name = user_get_name;
        lwp_objs->mem_obj.hint_free = NULL;
        lwp_objs->mem_obj.on_page_fault = _user_do_page_fault;
        lwp_objs->mem_obj.on_page_offload = rt_mm_dummy_mapper.on_page_offload;
        lwp_objs->mem_obj.on_varea_open = rt_mm_dummy_mapper.on_varea_open;
        lwp_objs->mem_obj.on_varea_close = rt_mm_dummy_mapper.on_varea_close;
    }
}
|
||||
|
||||
static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
|
||||
int text)
|
||||
{
|
||||
void *va = map_va;
|
||||
int ret = 0;
|
||||
size_t flags = MMF_PREFETCH;
|
||||
if (text)
|
||||
flags |= MMF_TEXT;
|
||||
|
||||
rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
|
||||
|
||||
ret = rt_aspace_map(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB, flags,
|
||||
mem_obj, 0);
|
||||
if (ret != RT_EOK)
|
||||
{
|
||||
va = RT_NULL;
|
||||
LOG_I("lwp_map_user: failed to map %lx with size %lx with errno %d", map_va,
|
||||
map_size, ret);
|
||||
}
|
||||
|
||||
return va;
|
||||
}
|
||||
|
||||
int lwp_unmap_user(struct rt_lwp *lwp, void *va)
|
||||
{
|
||||
int err = rt_aspace_unmap(lwp->aspace, va);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Eagerly populate the child's copy of @varea by loading into @dst every
 * page that is present in the parent (@src_lwp), so the child's fault
 * handler copies/shares them now rather than at first access.
 *
 * Non-stack areas are walked upward from the start, skipping absent
 * pages. The stack window is walked downward from the top and stops at
 * the first absent page — presumably because a stack is populated
 * contiguously from its top end, so only that tail needs loading
 * (TODO confirm).
 */
static void _dup_varea(rt_varea_t varea, struct rt_lwp *src_lwp,
                       rt_aspace_t dst)
{
    char *vaddr = varea->start;
    char *vend = vaddr + varea->size;
    if (vaddr < (char *)USER_STACK_VSTART || vaddr >= (char *)USER_STACK_VEND)
    {
        while (vaddr != vend)
        {
            void *paddr;
            paddr = lwp_v2p(src_lwp, vaddr);
            if (paddr != ARCH_MAP_FAILED)
            {
                /* present in the parent: force-load into the child */
                rt_aspace_load_page(dst, vaddr, 1);
            }
            vaddr += ARCH_PAGE_SIZE;
        }
    }
    else
    {
        while (vaddr != vend)
        {
            vend -= ARCH_PAGE_SIZE;
            void *paddr;
            paddr = lwp_v2p(src_lwp, vend);
            if (paddr != ARCH_MAP_FAILED)
            {
                rt_aspace_load_page(dst, vend, 1);
            }
            else
            {
                /* first hole from the top ends the populated stack tail */
                break;
            }
        }
    }
}
|
||||
|
||||
/* Duplicate one memory area of the current process into the child lwp
 * passed via @arg (presumably invoked per-varea during fork — verify
 * against the caller).
 *
 * Areas without a mem_obj are direct physical mappings and are re-mapped
 * onto the same physical frames at the same virtual address. Backed areas
 * are re-created against the child's own mem obj; for areas that were not
 * prefetched, pages already present in the parent are loaded eagerly via
 * _dup_varea().
 *
 * Returns 0 on success, -1 when the area could not be placed at its
 * original virtual address in the child.
 */
int lwp_dup_user(rt_varea_t varea, void *arg)
{
    int err;
    struct rt_lwp *self_lwp = lwp_self();
    struct rt_lwp *new_lwp = (struct rt_lwp *)arg;

    void *pa = RT_NULL;
    void *va = RT_NULL;
    rt_mem_obj_t mem_obj = varea->mem_obj;

    if (!mem_obj)
    {
        /* duplicate a physical mapping */
        pa = lwp_v2p(self_lwp, (void *)varea->start);
        RT_ASSERT(pa != ARCH_MAP_FAILED);
        /* fixed placement at the original address, inside the child space */
        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_range_size = new_lwp->aspace->size,
                                     .limit_start = new_lwp->aspace->start,
                                     .prefer = varea->start,
                                     .map_size = varea->size};
        err = rt_aspace_map_phy(new_lwp->aspace, &hint, varea->attr,
                                MM_PA_TO_OFF(pa), &va);
        if (err != RT_EOK)
        {
            LOG_W("%s: aspace map failed at %p with size %p", __func__,
                  varea->start, varea->size);
        }
    }
    else
    {
        /* duplicate a mem_obj backing mapping */
        va = varea->start;
        err = rt_aspace_map(new_lwp->aspace, &va, varea->size, varea->attr,
                            varea->flag, &new_lwp->lwp_obj->mem_obj,
                            varea->offset);
        if (err != RT_EOK)
        {
            LOG_W("%s: aspace map failed at %p with size %p", __func__,
                  varea->start, varea->size);
        }
        else
        {
            /* loading page frames for !MMF_PREFETCH varea */
            if (!(varea->flag & MMF_PREFETCH))
            {
                _dup_varea(varea, self_lwp, new_lwp->aspace);
            }
        }
    }

    if (va != (void *)varea->start)
    {
        return -1;
    }
    return 0;
}
|
||||
|
||||
/* Counterpart of lwp_map_user_phy(); currently identical to
 * lwp_unmap_user() since the aspace layer unmaps both kinds of area. */
int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}
|
||||
|
||||
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
|
||||
{
|
||||
void *ret = RT_NULL;
|
||||
size_t offset = 0;
|
||||
|
||||
if (!map_size)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
offset = (size_t)map_va & ARCH_PAGE_MASK;
|
||||
map_size += (offset + ARCH_PAGE_SIZE - 1);
|
||||
map_size &= ~ARCH_PAGE_MASK;
|
||||
map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);
|
||||
|
||||
ret = _lwp_map_user(lwp, map_va, map_size, text);
|
||||
|
||||
if (ret)
|
||||
{
|
||||
ret = (void *)((char *)ret + offset);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline size_t _flags_to_attr(size_t flags)
|
||||
{
|
||||
size_t attr;
|
||||
|
||||
if (flags & LWP_MAP_FLAG_NOCACHE)
|
||||
{
|
||||
attr = MMU_MAP_U_RW;
|
||||
}
|
||||
else
|
||||
{
|
||||
attr = MMU_MAP_U_RWCB;
|
||||
}
|
||||
|
||||
return attr;
|
||||
}
|
||||
|
||||
/* Translate LWP_MAP_FLAG_* bits into rt_aspace mapping flags.
 * Placeholder: no lwp flag currently maps to an aspace flag, so this
 * always returns 0. (@flags intentionally unused.) */
static inline mm_flag_t _flags_to_aspace_flag(size_t flags)
{
    mm_flag_t mm_flag = 0;

    return mm_flag;
}
|
||||
|
||||
static rt_varea_t _lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
|
||||
{
|
||||
void *va = map_va;
|
||||
int ret = 0;
|
||||
rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
|
||||
rt_varea_t varea;
|
||||
mm_flag_t mm_flags;
|
||||
size_t attr;
|
||||
|
||||
varea = rt_malloc(sizeof(*varea));
|
||||
if (varea)
|
||||
{
|
||||
attr = _flags_to_attr(flags);
|
||||
mm_flags = _flags_to_aspace_flag(flags);
|
||||
ret = rt_aspace_map_static(lwp->aspace, varea, &va, map_size,
|
||||
attr, mm_flags, mem_obj, 0);
|
||||
/* let aspace handle the free of varea */
|
||||
varea->flag &= ~MMF_STATIC_ALLOC;
|
||||
/* don't apply auto fetch on this */
|
||||
varea->data = (void *)NO_AUTO_FETCH;
|
||||
}
|
||||
else
|
||||
{
|
||||
ret = -RT_ENOMEM;
|
||||
}
|
||||
|
||||
if (ret != RT_EOK)
|
||||
{
|
||||
LOG_I("lwp_map_user: failed to map %lx with size %lx with errno %d", map_va,
|
||||
map_size, ret);
|
||||
}
|
||||
|
||||
return varea;
|
||||
}
|
||||
|
||||
static rt_varea_t _map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
|
||||
{
|
||||
rt_varea_t varea = RT_NULL;
|
||||
size_t offset = 0;
|
||||
|
||||
if (!map_size)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
offset = (size_t)map_va & ARCH_PAGE_MASK;
|
||||
map_size += (offset + ARCH_PAGE_SIZE - 1);
|
||||
map_size &= ~ARCH_PAGE_MASK;
|
||||
map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);
|
||||
|
||||
varea = _lwp_map_user_varea(lwp, map_va, map_size, flags);
|
||||
|
||||
return varea;
|
||||
}
|
||||
|
||||
/* Public wrapper: map user memory with explicit LWP_MAP_FLAG_* flags,
 * returning the created varea (or RT_NULL on failure). */
rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    return _map_user_varea_ext(lwp, map_va, map_size, flags);
}
|
||||
|
||||
/* Public wrapper: map user memory with default flags (cached mapping). */
rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size)
{
    return _map_user_varea_ext(lwp, map_va, map_size, LWP_MAP_FLAG_NONE);
}
|
||||
|
||||
/* Map the physical range starting at @map_pa into user space at @map_va.
 *
 * When @map_va is non-NULL it must share the same in-page offset as
 * @map_pa. The request is expanded to whole pages; the returned pointer
 * carries the original offset. @cached selects write-back cached vs
 * uncached attributes.
 *
 * Returns the user virtual address, or RT_NULL/0 on failure or bad input.
 */
void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa,
                       size_t map_size, int cached)
{
    int err;
    char *va;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    if (map_va)
    {
        /* virtual and physical addresses must agree on the in-page offset */
        if (((size_t)map_va & ARCH_PAGE_MASK) !=
            ((size_t)map_pa & ARCH_PAGE_MASK))
        {
            return 0;
        }
    }
    offset = (size_t)map_pa & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);

    /* fixed placement at map_va, constrained to the lwp's address space */
    struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                 .limit_range_size = lwp->aspace->size,
                                 .limit_start = lwp->aspace->start,
                                 .prefer = map_va,
                                 .map_size = map_size};
    rt_size_t attr = cached ? MMU_MAP_U_RWCB : MMU_MAP_U_RW;

    err =
        rt_aspace_map_phy(lwp->aspace, &hint, attr, MM_PA_TO_OFF(map_pa), (void **)&va);
    if (err != RT_EOK)
    {
        va = RT_NULL;
        LOG_W("%s", __func__);
    }
    else
    {
        /* restore the caller's in-page offset */
        va += offset;
    }

    return va;
}
|
||||
|
||||
/* Move the process program break up to @addr.
 *
 * Requests at or below the current break are ignored (shrinking is not
 * supported) and the current break is returned. Otherwise the gap is
 * rounded up to whole pages, mapped at the current break — provided
 * @addr stays within the heap window — and the break is advanced.
 *
 * Returns the new (or unchanged) break, or -1 on failure.
 */
rt_base_t lwp_brk(void *addr)
{
    rt_base_t ret = -1;
    struct rt_lwp *lwp = RT_NULL;

    rt_mm_lock();
    lwp = rt_thread_self()->lwp;

    if ((size_t)addr <= lwp->end_heap)
    {
        /* no shrink: report the existing break */
        ret = (rt_base_t)lwp->end_heap;
    }
    else
    {
        size_t size = 0;
        void *va = RT_NULL;

        if ((size_t)addr <= USER_HEAP_VEND)
        {
            /* page-round the growth and map it at the current break */
            size = (((size_t)addr - lwp->end_heap) + ARCH_PAGE_SIZE - 1) &
                   ~ARCH_PAGE_MASK;
            va = lwp_map_user(lwp, (void *)lwp->end_heap, size, 0);
        }
        if (va)
        {
            lwp->end_heap += size;
            ret = lwp->end_heap;
        }
    }
    rt_mm_unlock();
    return ret;
}
|
||||
|
||||
/* Linux MAP_ANONYMOUS flag value; the only bit of `flags` examined here */
#define MAP_ANONYMOUS 0x20

/* mmap2() backend for user processes.
 *
 * fd == -1: anonymous mapping through lwp_map_user(); zero-filled when
 * MAP_ANONYMOUS is set. Otherwise the request is forwarded to the device
 * backing the descriptor via dfs_file_mmap2() (only FT_DEVICE vnodes are
 * supported).
 *
 * Returns the mapped address, or (void *)-1 on failure (mmap convention).
 * NOTE(review): `prot` and `pgoffset` are ignored on the anonymous path.
 */
void *lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd,
                off_t pgoffset)
{
    void *ret = (void *)-1;

    if (fd == -1)
    {

        ret = lwp_map_user(lwp_self(), addr, length, 0);

        if (ret)
        {
            if ((flags & MAP_ANONYMOUS) != 0)
            {
                /* anonymous memory must read as zeros */
                rt_memset(ret, 0, length);
            }
        }
        else
        {
            ret = (void *)-1;
        }
    }
    else
    {
        struct dfs_file *d;

        d = fd_get(fd);
        if (d && d->vnode->type == FT_DEVICE)
        {
            struct dfs_mmap2_args mmap2;

            mmap2.addr = addr;
            mmap2.length = length;
            mmap2.prot = prot;
            mmap2.flags = flags;
            mmap2.pgoffset = pgoffset;
            mmap2.ret = (void *)-1;

            if (dfs_file_mmap2(d, &mmap2) == 0)
            {
                ret = mmap2.ret;
            }
        }
    }

    return ret;
}
|
||||
|
||||
/* Unmap @addr from the calling process, serialized by the mm lock.
 * Returns the unmap result. */
int lwp_munmap(void *addr)
{
    int ret;

    rt_mm_lock();
    ret = lwp_unmap_user(lwp_self(), addr);
    rt_mm_unlock();

    return ret;
}
|
||||
|
||||
size_t lwp_get_from_user(void *dst, void *src, size_t size)
|
||||
{
|
||||
struct rt_lwp *lwp = RT_NULL;
|
||||
|
||||
/* check src */
|
||||
|
||||
if (src < (void *)USER_VADDR_START)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
if (src >= (void *)USER_VADDR_TOP)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
if ((void *)((char *)src + size) > (void *)USER_VADDR_TOP)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
lwp = lwp_self();
|
||||
if (!lwp)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
return lwp_data_get(lwp, dst, src, size);
|
||||
}
|
||||
|
||||
/**
 * Copy data from a kernel buffer into the current lwp's user space.
 *
 * @param dst  user-space destination address (validated against the user range)
 * @param src  kernel source buffer
 * @param size number of bytes requested
 *
 * @return the number of bytes actually copied; 0 when the destination range
 *         is not entirely inside user space or no lwp is current.
 */
size_t lwp_put_to_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check dst: the whole [dst, dst + size) range must lie in user space */
    if (dst < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (dst >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    /* compare against the remaining room instead of computing (dst + size):
     * a huge `size` could wrap the pointer arithmetic around (undefined
     * behavior) and slip past the upper-bound check */
    if (size > (size_t)((char *)USER_VADDR_TOP - (char *)dst))
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_put(lwp, dst, src, size);
}
|
||||
|
||||
/**
 * Check whether the user range [addr, addr + size) is accessible by the
 * current lwp, i.e. every page in the range has (or can get) a physical
 * mapping.
 *
 * @return 1 when the whole range is accessible, 0 otherwise.
 */
int lwp_user_accessable(void *addr, size_t size)
{
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_addr = RT_NULL;
    struct rt_lwp *lwp = lwp_self();

    if (!lwp)
    {
        return 0;
    }
    if (!size || !addr)
    {
        return 0;
    }
    addr_start = addr;
    addr_end = (void *)((char *)addr + size);

#ifdef ARCH_RISCV64
    /* on riscv64 only the lower bound is range-checked here */
    if (addr_start < (void *)USER_VADDR_START)
    {
        return 0;
    }
#else
    if (addr_start >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if (addr_end > (void *)USER_VADDR_TOP)
    {
        return 0;
    }
#endif

    /* round up to the next page boundary; the loop below walks the range
     * one page at a time */
    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        /* bytes from addr_start to the next page boundary, clamped to the
         * remaining size */
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_addr = lwp_v2p(lwp, addr_start);
        if (tmp_addr == ARCH_MAP_FAILED)
        {
            /* not mapped yet: addresses inside the user stack window are
             * touched directly — presumably to let the fault handler
             * demand-commit the stack page (NOTE(review): confirm this is
             * the intended mechanism) — anything else is inaccessible */
            if ((rt_ubase_t)addr_start >= USER_STACK_VSTART && (rt_ubase_t)addr_start < USER_STACK_VEND)
                tmp_addr = *(void **)addr_start;
            else
                return 0;
        }
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
    } while (addr_start < addr_end);
    return 1;
}
|
||||
|
||||
/* src is a virtual address inside `lwp`'s address space; dst is a buffer in
 * the current (kernel) context.  Copies page by page, translating each user
 * page through lwp_v2p, and stops early at the first unmapped page. */
size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_dst = dst;
    addr_start = src;
    addr_end = (void *)((char *)src + size);
    /* first page boundary after addr_start; the loop copies at most one
     * page per iteration so each chunk stays within a single mapping */
    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_src = lwp_v2p(lwp, addr_start);
        if (tmp_src == ARCH_MAP_FAILED)
        {
            /* unmapped page: return the bytes copied so far */
            break;
        }
        /* convert the physical address to a kernel-accessible virtual
         * address (PV_OFFSET is the physical-to-virtual offset — see the
         * architecture's mmu support) */
        tmp_src = (void *)((char *)tmp_src - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_dst = (void *)((char *)tmp_dst + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);
    return copy_len;
}
|
||||
|
||||
/* dst is a virtual address inside `lwp`'s address space (it is translated
 * through lwp_v2p below); src is a buffer in the current (kernel) context.
 * Copies page by page and stops early at the first unmapped page. */
size_t lwp_data_put(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_src = src;
    addr_start = dst;
    addr_end = (void *)((char *)dst + size);
    /* first page boundary after addr_start; the loop writes at most one
     * page per iteration so each chunk stays within a single mapping */
    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_dst = lwp_v2p(lwp, addr_start);
        if (tmp_dst == ARCH_MAP_FAILED)
        {
            /* unmapped page: return the bytes copied so far */
            break;
        }
        /* convert the physical address to a kernel-accessible virtual
         * address (PV_OFFSET is the physical-to-virtual offset) */
        tmp_dst = (void *)((char *)tmp_dst - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_src = (void *)((char *)tmp_src + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);
    return copy_len;
}
|
||||
|
||||
#endif
|
75
components/lwp/lwp_user_mm.h
Normal file
75
components/lwp/lwp_user_mm.h
Normal file
|
@ -0,0 +1,75 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2020, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-10-28 Jesven first version
|
||||
* 2021-02-12 lizhirui add 64-bit support for lwp_brk
|
||||
*/
|
||||
#ifndef __LWP_USER_MM_H__
|
||||
#define __LWP_USER_MM_H__
|
||||
|
||||
#include <rthw.h>
|
||||
#include <rtthread.h>
|
||||
|
||||
#ifdef ARCH_MM_MMU
|
||||
#include <lwp.h>
|
||||
#include <mmu.h>
|
||||
#include <mm_aspace.h>
|
||||
#include <mm_fault.h>
|
||||
#include <mm_page.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* mapping flags for lwp_map_user_varea_ext() */
#define LWP_MAP_FLAG_NONE 0x0000
#define LWP_MAP_FLAG_NOCACHE 0x0001

/* create / tear down a process's whole user address space */
int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork);
void lwp_unmap_user_space(struct rt_lwp *lwp);

/* map / unmap anonymous user memory inside an lwp */
int lwp_unmap_user(struct rt_lwp *lwp, void *va);
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, rt_bool_t text);

rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size);
/* check LWP_MAP_FLAG_* */
rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags);

/* map a physical region into user space (e.g. device memory) */
void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, rt_bool_t cached);
int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va);

/* brk / mmap2 / munmap service routines for the current lwp */
rt_base_t lwp_brk(void *addr);
void* lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd, off_t pgoffset);
int lwp_munmap(void *addr);

/* copy between kernel buffers and the *current* lwp's user space;
 * return the number of bytes copied (0 on invalid range) */
size_t lwp_get_from_user(void *dst, void *src, size_t size);
size_t lwp_put_to_user(void *dst, void *src, size_t size);
int lwp_user_accessable(void *addr, size_t size);

/* same as above but against an explicit lwp's address space */
size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size);
size_t lwp_data_put(struct rt_lwp *lwp, void *dst, void *src, size_t size);
void lwp_data_cache_flush(struct rt_lwp *lwp, void *vaddr, size_t size);
|
||||
|
||||
/* translate a user virtual address to physical via the lwp's aspace;
 * caller must hold the aspace read lock (see lwp_v2p below) */
static inline void *_lwp_v2p(struct rt_lwp *lwp, void *vaddr)
{
    return rt_hw_mmu_v2p(lwp->aspace, vaddr);
}
|
||||
|
||||
/* locked virtual-to-physical translation for an lwp address; returns
 * ARCH_MAP_FAILED when the address is not mapped (per rt_hw_mmu_v2p —
 * see callers such as lwp_data_get) */
static inline void *lwp_v2p(struct rt_lwp *lwp, void *vaddr)
{
    RD_LOCK(lwp->aspace);
    void *paddr = _lwp_v2p(lwp, vaddr);
    RD_UNLOCK(lwp->aspace);
    return paddr;
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#endif /*__LWP_USER_MM_H__*/
|
16
components/lwp/page.h
Normal file
16
components/lwp/page.h
Normal file
|
@ -0,0 +1,16 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2019, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2019-11-01 Jesven The first version
|
||||
*/
|
||||
|
||||
#ifndef __PAGE_H__
|
||||
#define __PAGE_H__
|
||||
|
||||
#include <mm_page.h>
|
||||
#endif /*__PAGE_H__*/
|
||||
|
43
components/lwp/syscall_generic.h
Normal file
43
components/lwp/syscall_generic.h
Normal file
|
@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Copyright (c) 2006-2022, RT-Thread Development Team
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Change Logs:
|
||||
* Date Author Notes
|
||||
* 2022-11-10 RT-Thread The first version
|
||||
* 2023-03-13 WangXiaoyao syscall metadata as structure
|
||||
*/
|
||||
#ifndef __SYSCALL_DATA_H__
|
||||
#define __SYSCALL_DATA_H__
|
||||
|
||||
#include <rtthread.h>
|
||||
|
||||
/* return type shared by all syscall service routines */
typedef long sysret_t;

/* per-syscall metadata placed next to each entry in the syscall table */
struct rt_syscall_def
{
    void *func; /* syscall service routine */
    char *name; /* syscall name, without its implementation prefix */
};
|
||||
|
||||
/**
 * @brief signature for syscall, used to locate syscall metadata.
 *
 * We don't allocate an exclusive section in ELF like Linux do
 * to avoid initializing necessary data by iterating that section,
 * which increases system booting time. We signature a pointer
 * just below each syscall entry in syscall table to make it
 * easy to locate every syscall's metadata by using syscall id.
 */
/* initializer for struct rt_syscall_def; &RT_STRINGIFY(func)[4] skips the
 * first 4 characters of the symbol name — presumably a "sys_" prefix,
 * TODO confirm against the syscall table */
#define SYSCALL_SIGN(func) { \
    (void *)(func), \
    &RT_STRINGIFY(func)[4], \
}

/* store a (negated) error number in the thread's errno slot */
#define SET_ERRNO(no) rt_set_errno(-(no))
/* fetch errno normalized to a non-positive value (GCC statement
 * expression) */
#define GET_ERRNO() ({int _errno = rt_get_errno(); _errno > 0 ? -_errno : _errno;})

/* wrap a call that returns negative on failure, mapping failures to the
 * normalized errno value */
#define _SYS_WRAP(func) ({int _ret = func; _ret < 0 ? GET_ERRNO() : _ret;})
|
||||
|
||||
#endif /* __SYSCALL_DATA_H__ */
|
Loading…
Add table
Add a link
Reference in a new issue