import RT-Thread@9217865c without bsp, libcpu and components/net

This commit is contained in:
Zihao Yu 2023-05-20 16:23:33 +08:00
commit e2376a3709
1414 changed files with 390370 additions and 0 deletions

496
src/Kconfig Normal file

@ -0,0 +1,496 @@
#include "rtconfig.h"
menu "RT-Thread Kernel"
config RT_NAME_MAX
int "The maximal size of kernel object name"
range 1 64
default 8
help
Each kernel object, such as a thread, timer, or semaphore, has a name;
RT_NAME_MAX is the maximum size of this object name.
config RT_USING_ARCH_DATA_TYPE
bool "Use the data types defined in ARCH_CPU"
default n
help
For basic data types such as `rt_uint8_t/rt_int8_t`, `rt_uint16_t/rt_int16_t` and `rt_uint32_t/rt_int32_t`,
the BSP can define them at the ARCH_CPU level.
Please re-define these data types in the rtconfig_project.h file.
config RT_USING_SMART
bool "Enable RT-Thread Smart (microkernel on kernel/userland)"
default n
select RT_USING_LWP
select RT_USING_DFS
select RT_USING_LIBC
select RT_USING_POSIX_CLOCKTIME
select RT_USING_TTY
select RT_USING_NULL
select RT_USING_ZERO
select RT_USING_RANDOM
select RT_USING_RTC
select RT_USING_POSIX_TIMER
select RT_USING_POSIX_CLOCK
select RT_USING_POSIX_FS
select RT_USING_POSIX_TERMIOS
depends on ARCH_ARM_CORTEX_M || ARCH_ARM_ARM9 || ARCH_ARM_CORTEX_A || ARCH_ARMV8 || ARCH_RISCV64
help
RT-Thread Smart is a microkernel-based operating system built on RT-Thread.
config RT_USING_SMP
bool "Enable SMP(Symmetric multiprocessing)"
default n
help
This option should be selected by machines which have an SMP-
capable CPU.
The only effect of this option is to make the SMP-related
options available to the user for configuration.
config RT_CPUS_NR
int "Number of CPUs"
default 2
depends on RT_USING_SMP
help
Number of CPUs in the system
config RT_ALIGN_SIZE
int "Alignment size for CPU architecture data access"
default 8
help
Alignment size for CPU architecture data access
choice
prompt "The maximal level value of priority of thread"
default RT_THREAD_PRIORITY_32
config RT_THREAD_PRIORITY_8
bool "8"
config RT_THREAD_PRIORITY_32
bool "32"
config RT_THREAD_PRIORITY_256
bool "256"
endchoice
config RT_THREAD_PRIORITY_MAX
int
default 8 if RT_THREAD_PRIORITY_8
default 32 if RT_THREAD_PRIORITY_32
default 256 if RT_THREAD_PRIORITY_256
config RT_TICK_PER_SECOND
int "Tick frequency, Hz"
range 10 1000
default 1000
help
System's tick frequency, Hz.
config RT_USING_OVERFLOW_CHECK
bool "Using stack overflow checking"
default y
help
Enable thread stack overflow checking. The stack overflow is checked on
each thread switch.
config RT_USING_HOOK
bool "Enable system hook"
default y
select RT_USING_IDLE_HOOK
help
Enable hook functions while the system is running, such as the idle thread hook,
the thread context switch hook, etc.
if RT_USING_HOOK
config RT_HOOK_USING_FUNC_PTR
bool "Using function pointers as system hook"
default y
endif
config RT_USING_IDLE_HOOK
bool "Enable IDLE Task hook"
default y if RT_USING_HOOK
if RT_USING_IDLE_HOOK
config RT_IDLE_HOOK_LIST_SIZE
int "The max size of idle hook list"
default 4
range 1 16
help
The system has a hook list. This is the hook list size.
endif
config IDLE_THREAD_STACK_SIZE
int "The stack size of idle thread"
default 1024 if ARCH_CPU_64BIT
default 256
config SYSTEM_THREAD_STACK_SIZE
int "The stack size of system thread (for defunct etc.)"
depends on RT_USING_SMP
default IDLE_THREAD_STACK_SIZE
config RT_USING_TIMER_SOFT
bool "Enable software timer with a timer thread"
default y
help
The timeout functions of software timers run in the context of a high-priority
timer thread.
if RT_USING_TIMER_SOFT
config RT_TIMER_THREAD_PRIO
int "The priority level value of timer thread"
default 4
config RT_TIMER_THREAD_STACK_SIZE
int "The stack size of timer thread"
default 2048 if ARCH_CPU_64BIT
default 512
endif
menu "kservice optimization"
config RT_KSERVICE_USING_STDLIB
bool "Enable kservice to use standard C library"
default y
if RT_KSERVICE_USING_STDLIB
config RT_KSERVICE_USING_STDLIB_MEMORY
bool "Use stdlib memory functions to replace (faster, but not safe)"
default n
help
e.g. use memcpy to replace rt_memcpy
endif
config RT_KSERVICE_USING_TINY_SIZE
bool "Enable kservice to use tiny size"
default n
config RT_USING_TINY_FFS
bool "Enable kservice to use a tiny find-first-bit-set method"
default n
config RT_KPRINTF_USING_LONGLONG
bool "Enable rt_printf-family functions to support long-long format"
default y if ARCH_CPU_64BIT
default n
help
Enable rt_printf()/rt_snprintf()/rt_sprintf()/rt_vsnprintf()/rt_vsprintf()
functions to support long-long format
endmenu
menuconfig RT_DEBUG
bool "Enable debugging features"
default y
if RT_DEBUG
config RT_DEBUG_COLOR
bool "Enable color debugging log"
default n
config RT_DEBUG_INIT_CONFIG
bool "Enable debugging of components initialization"
default n
config RT_DEBUG_INIT
int
default 1 if RT_DEBUG_INIT_CONFIG
config RT_DEBUG_THREAD_CONFIG
bool "Enable debugging of Thread State Changes"
default n
config RT_DEBUG_THREAD
int
default 1 if RT_DEBUG_THREAD_CONFIG
config RT_DEBUG_SCHEDULER_CONFIG
bool "Enable debugging of Scheduler"
default n
config RT_DEBUG_SCHEDULER
int
default 1 if RT_DEBUG_SCHEDULER_CONFIG
config RT_DEBUG_IPC_CONFIG
bool "Enable debugging of IPC"
default n
config RT_DEBUG_IPC
int
default 1 if RT_DEBUG_IPC_CONFIG
config RT_DEBUG_TIMER_CONFIG
bool "Enable debugging of Timer"
default n
config RT_DEBUG_TIMER
int
default 1 if RT_DEBUG_TIMER_CONFIG
config RT_DEBUG_IRQ_CONFIG
bool "Enable debugging of IRQ(Interrupt Request)"
default n
config RT_DEBUG_IRQ
int
default 1 if RT_DEBUG_IRQ_CONFIG
config RT_DEBUG_MEM_CONFIG
bool "Enable debugging of Small Memory Algorithm"
default n
config RT_DEBUG_MEM
int
default 1 if RT_DEBUG_MEM_CONFIG
config RT_DEBUG_SLAB_CONFIG
bool "Enable debugging of SLAB Memory Algorithm"
default n
config RT_DEBUG_SLAB
int
default 1 if RT_DEBUG_SLAB_CONFIG
config RT_DEBUG_MEMHEAP_CONFIG
bool "Enable debugging of Memory Heap Algorithm"
default n
config RT_DEBUG_MEMHEAP
int
default 1 if RT_DEBUG_MEMHEAP_CONFIG
if ARCH_MM_MMU
config RT_DEBUG_PAGE_LEAK
bool "Enable page leaking tracer"
default n
endif
config RT_DEBUG_MODULE_CONFIG
bool "Enable debugging of Application Module"
default n
config RT_DEBUG_MODULE
int
default 1 if RT_DEBUG_MODULE_CONFIG
endif
menu "Inter-Thread communication"
config RT_USING_SEMAPHORE
bool "Enable semaphore"
default y
config RT_USING_MUTEX
bool "Enable mutex"
default y
config RT_USING_EVENT
bool "Enable event flag"
default y
config RT_USING_MAILBOX
bool "Enable mailbox"
default y
config RT_USING_MESSAGEQUEUE
bool "Enable message queue"
default y
config RT_USING_SIGNALS
bool "Enable signals"
select RT_USING_MEMPOOL
default n
help
A signal is an asynchronous notification sent to a specific thread
in order to notify it of an event that occurred.
endmenu
menu "Memory Management"
if ARCH_MM_MMU
config RT_PAGE_MAX_ORDER
int "Max order of pages allocatable by page allocator"
default 11
help
For example, a value of 11 means the maximum chunk of contiguous memory
allocatable by the page system is 2^(11 + ARCH_PAGE_BITS - 1) bytes.
Large memory requirements can consume all system resources; consider using
reserved memory instead to enhance system endurance.
The max order should at least satisfy the usage of huge pages.
endif
config RT_USING_MEMPOOL
bool "Using memory pool"
default y
help
Using static memory fixed partition
config RT_USING_SMALL_MEM
bool "Using Small Memory Algorithm"
default n
help
Using Small Memory Algorithm
config RT_USING_SLAB
bool "Using SLAB Memory Algorithm"
default n
help
The slab allocator of RT-Thread is a memory allocation algorithm
optimized for embedded systems, based on the slab allocator
implemented by Matthew Dillon, founder of DragonFly BSD.
The original slab algorithm is an efficient kernel memory
allocation algorithm introduced by Jeff Bonwick for
the Solaris operating system.
menuconfig RT_USING_MEMHEAP
bool "Using memheap Memory Algorithm"
default n
if RT_USING_MEMHEAP
choice
prompt "Memheap memory allocation mode"
default RT_MEMHEAP_FAST_MODE
config RT_MEMHEAP_FAST_MODE
bool "fast mode"
help
Speed priority mode.
As long as the memory block size meets the requirements, the search ends immediately.
config RT_MEMHEAP_BEST_MODE
bool "best mode"
help
Best size first.
The search does not end until a memory block of the most appropriate size is found.
endchoice
endif
choice
prompt "System Heap Memory Management"
default RT_USING_SMALL_MEM_AS_HEAP
config RT_USING_SMALL_MEM_AS_HEAP
bool "Small Memory Algorithm"
select RT_USING_SMALL_MEM
config RT_USING_MEMHEAP_AS_HEAP
bool "Use memheap objects as heap"
select RT_USING_MEMHEAP
if RT_USING_MEMHEAP_AS_HEAP
config RT_USING_MEMHEAP_AUTO_BINDING
bool "Use all of memheap objects as heap"
default y
endif
config RT_USING_SLAB_AS_HEAP
bool "SLAB Algorithm for large memory"
select RT_USING_SLAB
config RT_USING_USERHEAP
bool "Use user heap"
help
If this option is selected, please implement these functions:
rt_malloc(), rt_malloc_sethook()
rt_free(), rt_free_sethook()
rt_calloc(), rt_realloc()
rt_memory_info()
rt_system_heap_init()
config RT_USING_NOHEAP
bool "Disable Heap"
endchoice
config RT_USING_MEMTRACE
bool "Enable memory trace"
default n
help
When RT_USING_MEMTRACE is enabled together with the shell, developers can run:
1. memtrace
to dump memory block information.
2. memcheck
to check memory blocks and detect memory overwrites.
Developers can also call memcheck() on each scheduling to check memory
blocks and find which thread has wrongly modified memory.
config RT_USING_HEAP_ISR
bool "Using heap in ISR"
default n
help
When this option is enabled, the critical section will be protected by disabling interrupts.
config RT_USING_HEAP
bool
default n if RT_USING_NOHEAP
default y if RT_USING_SMALL_MEM
default y if RT_USING_SLAB
default y if RT_USING_MEMHEAP_AS_HEAP
default y if RT_USING_USERHEAP
endmenu
menu "Kernel Device Object"
config RT_USING_DEVICE
bool "Using device object"
default y
config RT_USING_DEVICE_OPS
bool "Using ops for each device object"
default n
config RT_USING_DM
bool "Enable device driver model with device tree"
default n
help
Enable the device driver model with device tree (FDT) support. It will use more
memory to parse and support the device tree feature.
config RT_USING_DM_FDT
bool "Enable builtin libfdt"
depends on RT_USING_DM
default y
help
libfdt - Flat Device Tree manipulation. If your code already contains
libfdt, disable this built-in copy to avoid link issues.
config RT_USING_INTERRUPT_INFO
bool "Enable additional interrupt trace information"
default n
help
Add name and counter information for interrupt trace.
config RT_USING_CONSOLE
bool "Using console for rt_kprintf"
default y
if RT_USING_CONSOLE
config RT_CONSOLEBUF_SIZE
int "the buffer size for console log printf"
default 128
config RT_CONSOLE_DEVICE_NAME
string "the device name for console"
default "uart"
endif
endmenu
config RT_VER_NUM
hex
default 0x50001
help
RT-Thread version number
config RT_USING_STDC_ATOMIC
bool "Use atomic implemented in stdatomic.h"
default n
endmenu
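
The options above are rendered by the build system into the rtconfig.h header referenced at the top of this Kconfig. As a rough illustration only (the real file is generated per BSP by menuconfig/scons, and the exact symbol set depends on the selections made), a fragment produced from the defaults shown here might look like:

/* Illustrative rtconfig.h fragment -- generated per BSP; values follow the defaults above */
#define RT_NAME_MAX 8
#define RT_ALIGN_SIZE 8
#define RT_THREAD_PRIORITY_MAX 32
#define RT_TICK_PER_SECOND 1000
#define RT_USING_OVERFLOW_CHECK
#define RT_USING_HOOK
#define RT_USING_IDLE_HOOK
#define RT_IDLE_HOOK_LIST_SIZE 4
#define IDLE_THREAD_STACK_SIZE 256
#define RT_USING_TIMER_SOFT
#define RT_TIMER_THREAD_PRIO 4
#define RT_TIMER_THREAD_STACK_SIZE 512
#define RT_USING_SEMAPHORE
#define RT_USING_MUTEX
#define RT_USING_SMALL_MEM
#define RT_USING_SMALL_MEM_AS_HEAP
#define RT_USING_HEAP
#define RT_USING_DEVICE
#define RT_USING_CONSOLE
#define RT_CONSOLEBUF_SIZE 128
#define RT_CONSOLE_DEVICE_NAME "uart"
#define RT_VER_NUM 0x50001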

38
src/SConscript Normal file

@ -0,0 +1,38 @@
from building import *
import os
src = Glob('*.c')
cwd = GetCurrentDir()
inc = [os.path.join(cwd, '..', 'include')]
if GetDepend('RT_USING_SMALL_MEM') == False:
    SrcRemove(src, ['mem.c'])
if GetDepend('RT_USING_SLAB') == False:
    SrcRemove(src, ['slab.c'])
if GetDepend('RT_USING_MEMPOOL') == False:
    SrcRemove(src, ['mempool.c'])
if GetDepend('RT_USING_MEMHEAP') == False:
    SrcRemove(src, ['memheap.c'])
if GetDepend('RT_USING_SIGNALS') == False:
    SrcRemove(src, ['signal.c'])
if GetDepend('RT_USING_DEVICE') == False:
    SrcRemove(src, ['device.c'])
if GetDepend('RT_USING_SMP') == False:
    SrcRemove(src, ['cpu.c', 'scheduler_mp.c'])
if GetDepend('RT_USING_SMP') == True:
    SrcRemove(src, ['scheduler_up.c'])
if GetDepend('RT_USING_DM') == False:
    SrcRemove(src, ['driver.c'])
group = DefineGroup('Kernel', src, depend = [''], CPPPATH = inc, CPPDEFINES = ['__RTTHREAD__'])
Return('group')

178
src/clock.c Normal file

@ -0,0 +1,178 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-12 Bernard first version
* 2006-05-27 Bernard add support for same priority thread schedule
* 2006-08-10 Bernard remove the last rt_schedule in rt_tick_increase
* 2010-03-08 Bernard remove rt_passed_second
* 2010-05-20 Bernard fix the tick exceeds the maximum limits
* 2010-07-13 Bernard fix rt_tick_from_millisecond issue found by kuronca
* 2011-06-26 Bernard add rt_tick_set function.
* 2018-11-22 Jesven add per cpu tick
* 2020-12-29 Meco Man implement rt_tick_get_millisecond()
* 2021-06-01 Meco Man add critical section protection for rt_tick_increase()
*/
#include <rthw.h>
#include <rtthread.h>
#ifdef RT_USING_SMP
#define rt_tick rt_cpu_index(0)->tick
#else
static volatile rt_tick_t rt_tick = 0;
#endif /* RT_USING_SMP */
#ifndef __on_rt_tick_hook
#define __on_rt_tick_hook() __ON_HOOK_ARGS(rt_tick_hook, ())
#endif
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
static void (*rt_tick_hook)(void);
/**
* @addtogroup Hook
*/
/**@{*/
/**
* @brief This function will set a hook function, which will be invoked when the tick increases
*
*
* @param hook the hook function
*/
void rt_tick_sethook(void (*hook)(void))
{
rt_tick_hook = hook;
}
/**@}*/
#endif /* RT_USING_HOOK */
/**
* @addtogroup Clock
*/
/**@{*/
/**
* @brief This function will return current tick from operating system startup.
*
* @return Return current tick.
*/
rt_tick_t rt_tick_get(void)
{
/* return the global tick */
return rt_tick;
}
RTM_EXPORT(rt_tick_get);
/**
* @brief This function will set current tick.
*
* @param tick is the value that you will set.
*/
void rt_tick_set(rt_tick_t tick)
{
rt_base_t level;
level = rt_hw_interrupt_disable();
rt_tick = tick;
rt_hw_interrupt_enable(level);
}
/**
* @brief This function will notify the kernel that one tick has passed.
* Normally, this function is invoked by the clock ISR.
*/
void rt_tick_increase(void)
{
struct rt_thread *thread;
rt_base_t level;
RT_OBJECT_HOOK_CALL(rt_tick_hook, ());
level = rt_hw_interrupt_disable();
/* increase the global tick */
#ifdef RT_USING_SMP
rt_cpu_self()->tick ++;
#else
++ rt_tick;
#endif /* RT_USING_SMP */
/* check time slice */
thread = rt_thread_self();
-- thread->remaining_tick;
if (thread->remaining_tick == 0)
{
/* change to initialized tick */
thread->remaining_tick = thread->init_tick;
thread->stat |= RT_THREAD_STAT_YIELD;
rt_hw_interrupt_enable(level);
rt_schedule();
}
else
{
rt_hw_interrupt_enable(level);
}
/* check timer */
rt_timer_check();
}
/**
* @brief This function will calculate the tick from millisecond.
*
* @param ms is the specified millisecond.
* - Negative Number wait forever
* - Zero not wait
* - Max 0x7fffffff
*
* @return Return the calculated tick.
*/
rt_tick_t rt_tick_from_millisecond(rt_int32_t ms)
{
rt_tick_t tick;
if (ms < 0)
{
tick = (rt_tick_t)RT_WAITING_FOREVER;
}
else
{
tick = RT_TICK_PER_SECOND * (ms / 1000);
tick += (RT_TICK_PER_SECOND * (ms % 1000) + 999) / 1000;
}
/* return the calculated tick */
return tick;
}
RTM_EXPORT(rt_tick_from_millisecond);
/**
* @brief This function will return the number of milliseconds elapsed since boot.
*
* @note if the value of RT_TICK_PER_SECOND is lower than 1000 or
* is not an integral multiple of 1000, this function will not
* provide the correct 1ms-based tick.
*
* @return Return passed millisecond from boot.
*/
rt_weak rt_tick_t rt_tick_get_millisecond(void)
{
#if 1000 % RT_TICK_PER_SECOND == 0u
return rt_tick_get() * (1000u / RT_TICK_PER_SECOND);
#else
#warning "rt-thread cannot provide a correct 1ms-based tick any longer,\
please redefine this function in another file by using a high-precision hard-timer."
return 0;
#endif /* 1000 % RT_TICK_PER_SECOND == 0u */
}
/**@}*/
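
For reference, a minimal usage sketch of the tick API above (assuming the kernel is running, rt_thread_delay() from the thread module is available, and the console is configured; the function name is illustrative):

#include <rtthread.h>

static void tick_demo(void)
{
    rt_tick_t start = rt_tick_get();                /* ticks since startup */

    /* convert 50 ms to ticks and sleep for that long */
    rt_thread_delay(rt_tick_from_millisecond(50));

    rt_kprintf("elapsed: %d ticks\n", (int)(rt_tick_get() - start));
}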

278
src/components.c Normal file

@ -0,0 +1,278 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-09-20 Bernard Change the name to components.c
* And all components related header files.
* 2012-12-23 Bernard fix the pthread initialization issue.
* 2013-06-23 Bernard Add the init_call for components initialization.
* 2013-07-05 Bernard Remove initialization feature for MS VC++ compiler
* 2015-02-06 Bernard Remove the MS VC++ support and move to the kernel
* 2015-05-04 Bernard Rename it to components.c because compiling issue
* in some IDEs.
* 2015-07-29 Arda.Fu Add support to use RT_USING_USER_MAIN with IAR
* 2018-11-22 Jesven Add secondary cpu boot up
*/
#include <rthw.h>
#include <rtthread.h>
#ifdef RT_USING_USER_MAIN
#ifndef RT_MAIN_THREAD_STACK_SIZE
#define RT_MAIN_THREAD_STACK_SIZE 2048
#endif /* RT_MAIN_THREAD_STACK_SIZE */
#ifndef RT_MAIN_THREAD_PRIORITY
#define RT_MAIN_THREAD_PRIORITY (RT_THREAD_PRIORITY_MAX / 3)
#endif /* RT_MAIN_THREAD_PRIORITY */
#endif /* RT_USING_USER_MAIN */
#ifdef RT_USING_COMPONENTS_INIT
/*
* Components initialization will initialize drivers and components in the following
* order:
* rti_start --> 0
* BOARD_EXPORT --> 1
* rti_board_end --> 1.end
*
* DEVICE_EXPORT --> 2
* COMPONENT_EXPORT --> 3
* FS_EXPORT --> 4
* ENV_EXPORT --> 5
* APP_EXPORT --> 6
*
* rti_end --> 6.end
*
* For these automatic initializations, the driver or component initialization function
* must be defined with:
* INIT_BOARD_EXPORT(fn);
* INIT_DEVICE_EXPORT(fn);
* ...
* INIT_APP_EXPORT(fn);
* etc.
*/
static int rti_start(void)
{
return 0;
}
INIT_EXPORT(rti_start, "0");
static int rti_board_start(void)
{
return 0;
}
INIT_EXPORT(rti_board_start, "0.end");
static int rti_board_end(void)
{
return 0;
}
INIT_EXPORT(rti_board_end, "1.end");
static int rti_end(void)
{
return 0;
}
INIT_EXPORT(rti_end, "6.end");
/**
* @brief Onboard components initialization. In this function, the board-level
* initialization function will be called to complete the initialization
* of the on-board peripherals.
*/
void rt_components_board_init(void)
{
#if RT_DEBUG_INIT
int result;
const struct rt_init_desc *desc;
for (desc = &__rt_init_desc_rti_board_start; desc < &__rt_init_desc_rti_board_end; desc ++)
{
rt_kprintf("initialize %s", desc->fn_name);
result = desc->fn();
rt_kprintf(":%d done\n", result);
}
#else
volatile const init_fn_t *fn_ptr;
for (fn_ptr = &__rt_init_rti_board_start; fn_ptr < &__rt_init_rti_board_end; fn_ptr++)
{
(*fn_ptr)();
}
#endif /* RT_DEBUG_INIT */
}
/**
* @brief RT-Thread Components Initialization.
*/
void rt_components_init(void)
{
#if RT_DEBUG_INIT
int result;
const struct rt_init_desc *desc;
rt_kprintf("do components initialization.\n");
for (desc = &__rt_init_desc_rti_board_end; desc < &__rt_init_desc_rti_end; desc ++)
{
rt_kprintf("initialize %s", desc->fn_name);
result = desc->fn();
rt_kprintf(":%d done\n", result);
}
#else
volatile const init_fn_t *fn_ptr;
for (fn_ptr = &__rt_init_rti_board_end; fn_ptr < &__rt_init_rti_end; fn_ptr ++)
{
(*fn_ptr)();
}
#endif /* RT_DEBUG_INIT */
}
#endif /* RT_USING_COMPONENTS_INIT */
#ifdef RT_USING_USER_MAIN
void rt_application_init(void);
void rt_hw_board_init(void);
int rtthread_startup(void);
#ifdef __ARMCC_VERSION
extern int $Super$$main(void);
/* re-define main function */
int $Sub$$main(void)
{
rtthread_startup();
return 0;
}
#elif defined(__ICCARM__)
/* __low_level_init will be called automatically by the IAR cstartup code */
extern void __iar_data_init3(void);
int __low_level_init(void)
{
// call IAR table copy function.
__iar_data_init3();
rtthread_startup();
return 0;
}
#elif defined(__GNUC__)
/* Add -eentry to arm-none-eabi-gcc argument */
int entry(void)
{
rtthread_startup();
return 0;
}
#endif
#ifndef RT_USING_HEAP
/* if the heap is not enabled, use a static thread and stack. */
rt_align(RT_ALIGN_SIZE)
static rt_uint8_t main_thread_stack[RT_MAIN_THREAD_STACK_SIZE];
struct rt_thread main_thread;
#endif /* RT_USING_HEAP */
/**
* @brief The system main thread. This thread calls rt_components_init() to
* initialize the RT-Thread components and then calls the user's programming
* entry main().
*
* @param parameter is the arg of the thread.
*/
void main_thread_entry(void *parameter)
{
extern int main(void);
#ifdef RT_USING_COMPONENTS_INIT
/* RT-Thread components initialization */
rt_components_init();
#endif /* RT_USING_COMPONENTS_INIT */
#ifdef RT_USING_SMP
rt_hw_secondary_cpu_up();
#endif /* RT_USING_SMP */
/* invoke system main function */
#ifdef __ARMCC_VERSION
{
extern int $Super$$main(void);
$Super$$main(); /* for ARMCC. */
}
#elif defined(__ICCARM__) || defined(__GNUC__) || defined(__TASKING__) || defined(__TI_COMPILER_VERSION__)
main();
#endif
}
/**
* @brief This function will create and start the main thread, but this thread
* will not run until the scheduler starts.
*/
void rt_application_init(void)
{
rt_thread_t tid;
#ifdef RT_USING_HEAP
tid = rt_thread_create("main", main_thread_entry, RT_NULL,
RT_MAIN_THREAD_STACK_SIZE, RT_MAIN_THREAD_PRIORITY, 20);
RT_ASSERT(tid != RT_NULL);
#else
rt_err_t result;
tid = &main_thread;
result = rt_thread_init(tid, "main", main_thread_entry, RT_NULL,
main_thread_stack, sizeof(main_thread_stack), RT_MAIN_THREAD_PRIORITY, 20);
RT_ASSERT(result == RT_EOK);
/* if RT_USING_HEAP is not defined, this silences the unused-variable warning */
(void)result;
#endif /* RT_USING_HEAP */
rt_thread_startup(tid);
}
/**
* @brief This function will call all levels of initialization functions to complete
* the initialization of the system, and finally start the scheduler.
*
* @return Normally never returns. If 0 is returned, the scheduler failed.
*/
int rtthread_startup(void)
{
rt_hw_interrupt_disable();
/* board level initialization
* NOTE: please initialize heap inside board initialization.
*/
rt_hw_board_init();
/* show RT-Thread version */
rt_show_version();
/* timer system initialization */
rt_system_timer_init();
/* scheduler system initialization */
rt_system_scheduler_init();
#ifdef RT_USING_SIGNALS
/* signal system initialization */
rt_system_signal_init();
#endif /* RT_USING_SIGNALS */
/* create init_thread */
rt_application_init();
/* timer thread initialization */
rt_system_timer_thread_init();
/* idle thread initialization */
rt_thread_idle_init();
#ifdef RT_USING_SMP
rt_hw_spin_lock(&_cpus_lock);
#endif /* RT_USING_SMP */
/* start scheduler */
rt_system_scheduler_start();
/* never reach here */
return 0;
}
#endif /* RT_USING_USER_MAIN */
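
The INIT_*_EXPORT() levels described in the comment block above are used as follows; a minimal sketch with purely illustrative function names, assuming RT_USING_COMPONENTS_INIT is enabled:

#include <rtthread.h>

/* runs from rt_components_board_init(), i.e. at board level "1" */
static int board_pin_setup(void)
{
    /* board-level clock/pin configuration would go here */
    return 0;
}
INIT_BOARD_EXPORT(board_pin_setup);

/* runs from rt_components_init() in the main thread, at application level "6" */
static int app_banner(void)
{
    rt_kprintf("components initialized\n");
    return 0;
}
INIT_APP_EXPORT(app_banner);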

263
src/cpu.c Normal file

@ -0,0 +1,263 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-30 Bernard The first version
*/
#include <rthw.h>
#include <rtthread.h>
#ifdef RT_USING_SMART
#include <lwp.h>
#endif
#ifdef RT_USING_SMP
static struct rt_cpu _cpus[RT_CPUS_NR];
rt_hw_spinlock_t _cpus_lock;
/*
* @brief disable scheduler
*/
static void _cpu_preempt_disable(void)
{
rt_base_t level;
struct rt_thread *current_thread;
/* disable interrupt */
level = rt_hw_local_irq_disable();
current_thread = rt_thread_self();
if (!current_thread)
{
rt_hw_local_irq_enable(level);
return;
}
/* lock scheduler for local cpu */
current_thread->scheduler_lock_nest ++;
/* enable interrupt */
rt_hw_local_irq_enable(level);
}
/*
* @brief enable scheduler
*/
static void _cpu_preempt_enable(void)
{
rt_base_t level;
struct rt_thread *current_thread;
/* disable interrupt */
level = rt_hw_local_irq_disable();
current_thread = rt_thread_self();
if (!current_thread)
{
rt_hw_local_irq_enable(level);
return;
}
/* unlock scheduler for local cpu */
current_thread->scheduler_lock_nest --;
rt_schedule();
/* enable interrupt */
rt_hw_local_irq_enable(level);
}
#endif /* RT_USING_SMP */
/**
* @brief Initialize a static spinlock object.
*
* @param lock is a pointer to the spinlock to initialize.
*/
void rt_spin_lock_init(struct rt_spinlock *lock)
{
#ifdef RT_USING_SMP
rt_hw_spin_lock_init(&lock->lock);
#endif
}
RTM_EXPORT(rt_spin_lock_init)
/**
* @brief This function will lock the spinlock.
*
* @note If the spinlock is locked, the current CPU will keep polling the spinlock state
* until the spinlock is unlocked.
*
* @param lock is a pointer to the spinlock.
*/
void rt_spin_lock(struct rt_spinlock *lock)
{
#ifdef RT_USING_SMP
_cpu_preempt_disable();
rt_hw_spin_lock(&lock->lock);
#else
rt_enter_critical();
#endif
}
RTM_EXPORT(rt_spin_lock)
/**
* @brief This function will unlock the spinlock.
*
* @param lock is a pointer to the spinlock.
*/
void rt_spin_unlock(struct rt_spinlock *lock)
{
#ifdef RT_USING_SMP
rt_hw_spin_unlock(&lock->lock);
_cpu_preempt_enable();
#else
rt_exit_critical();
#endif
}
RTM_EXPORT(rt_spin_unlock)
/**
* @brief This function will disable the local interrupt and then lock the spinlock.
*
* @note If the spinlock is locked, the current CPU will keep polling the spinlock state
* until the spinlock is unlocked.
*
* @param lock is a pointer to the spinlock.
*
* @return Return current cpu interrupt status.
*/
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
#ifdef RT_USING_SMP
unsigned long level;
_cpu_preempt_disable();
level = rt_hw_local_irq_disable();
rt_hw_spin_lock(&lock->lock);
return level;
#else
return rt_hw_interrupt_disable();
#endif
}
RTM_EXPORT(rt_spin_lock_irqsave)
/**
* @brief This function will unlock the spinlock and then restore current cpu interrupt status.
*
* @param lock is a pointer to the spinlock.
*
* @param level is interrupt status returned by rt_spin_lock_irqsave().
*/
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
#ifdef RT_USING_SMP
rt_hw_spin_unlock(&lock->lock);
rt_hw_local_irq_enable(level);
_cpu_preempt_enable();
#else
rt_hw_interrupt_enable(level);
#endif
}
RTM_EXPORT(rt_spin_unlock_irqrestore)
/**
* @brief This function will return the current cpu object.
*
* @return Return a pointer to the current cpu object.
*/
struct rt_cpu *rt_cpu_self(void)
{
return &_cpus[rt_hw_cpu_id()];
}
/**
* @brief This function will return the cpu object corresponding to the index.
*
* @param index is the index of target cpu object.
*
* @return Return a pointer to the cpu object corresponding to index.
*/
struct rt_cpu *rt_cpu_index(int index)
{
return &_cpus[index];
}
/**
* @brief This function will lock all cpus' schedulers and disable the local irq.
*
* @return Return current cpu interrupt status.
*/
rt_base_t rt_cpus_lock(void)
{
rt_base_t level;
struct rt_cpu* pcpu;
level = rt_hw_local_irq_disable();
pcpu = rt_cpu_self();
if (pcpu->current_thread != RT_NULL)
{
register rt_ubase_t lock_nest = pcpu->current_thread->cpus_lock_nest;
pcpu->current_thread->cpus_lock_nest++;
if (lock_nest == 0)
{
pcpu->current_thread->scheduler_lock_nest++;
rt_hw_spin_lock(&_cpus_lock);
}
}
return level;
}
RTM_EXPORT(rt_cpus_lock);
/**
* @brief This function will unlock all cpus' schedulers and restore the local irq.
*
* @param level is interrupt status returned by rt_cpus_lock().
*/
void rt_cpus_unlock(rt_base_t level)
{
struct rt_cpu* pcpu = rt_cpu_self();
if (pcpu->current_thread != RT_NULL)
{
RT_ASSERT(pcpu->current_thread->cpus_lock_nest > 0);
pcpu->current_thread->cpus_lock_nest--;
if (pcpu->current_thread->cpus_lock_nest == 0)
{
pcpu->current_thread->scheduler_lock_nest--;
rt_hw_spin_unlock(&_cpus_lock);
}
}
rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_cpus_unlock);
/**
* This function is invoked by the scheduler.
* It will restore the lock state to whatever the thread's counter expects.
* If the target thread has not locked the cpus, the cpus lock is released.
*
* @param thread is a pointer to the target thread.
*/
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
struct rt_cpu* pcpu = rt_cpu_self();
#if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
lwp_aspace_switch(thread);
#endif
pcpu->current_thread = thread;
if (!thread->cpus_lock_nest)
{
rt_hw_spin_unlock(&_cpus_lock);
}
}
RTM_EXPORT(rt_cpus_lock_status_restore);
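
A minimal sketch of how the spinlock API above is typically used to protect shared data (the lock and counter names are illustrative):

#include <rtthread.h>

static struct rt_spinlock demo_lock;
static volatile rt_uint32_t shared_counter;

static void demo_init(void)
{
    rt_spin_lock_init(&demo_lock);
}

static void demo_increment(void)
{
    /* masks local interrupts (and, on SMP, takes the hardware spinlock) */
    rt_base_t level = rt_spin_lock_irqsave(&demo_lock);
    shared_counter++;
    rt_spin_unlock_irqrestore(&demo_lock, level);
}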

547
src/device.c Normal file

@ -0,0 +1,547 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2007-01-21 Bernard the first version
* 2010-05-04 Bernard add rt_device_init implementation
* 2012-10-20 Bernard add device check in register function,
* provided by Rob <rdent@iinet.net.au>
* 2012-12-25 Bernard return RT_EOK if the device interface not exist.
* 2013-07-09 Grissiom add ref_count support
* 2016-04-02 Bernard fix the open_flag initialization issue.
* 2021-03-19 Meco Man remove rt_device_init_all()
*/
#include <rtthread.h>
#ifdef RT_USING_POSIX_DEVIO
#include <rtdevice.h> /* for wqueue_init */
#endif /* RT_USING_POSIX_DEVIO */
#ifdef RT_USING_DEVICE
#ifdef RT_USING_DEVICE_OPS
#define device_init (dev->ops->init)
#define device_open (dev->ops->open)
#define device_close (dev->ops->close)
#define device_read (dev->ops->read)
#define device_write (dev->ops->write)
#define device_control (dev->ops->control)
#else
#define device_init (dev->init)
#define device_open (dev->open)
#define device_close (dev->close)
#define device_read (dev->read)
#define device_write (dev->write)
#define device_control (dev->control)
#endif /* RT_USING_DEVICE_OPS */
/**
* @brief This function registers a device driver with a specified name.
*
* @param dev is the pointer of device driver structure.
*
* @param name is the device driver's name.
*
* @param flags is the capabilities flag of device.
*
* @return the error code, RT_EOK on successful registration.
*/
rt_err_t rt_device_register(rt_device_t dev,
const char *name,
rt_uint16_t flags)
{
if (dev == RT_NULL)
return -RT_ERROR;
if (rt_device_find(name) != RT_NULL)
return -RT_ERROR;
rt_object_init(&(dev->parent), RT_Object_Class_Device, name);
dev->flag = flags;
dev->ref_count = 0;
dev->open_flag = 0;
#ifdef RT_USING_POSIX_DEVIO
dev->fops = RT_NULL;
rt_wqueue_init(&(dev->wait_queue));
#endif /* RT_USING_POSIX_DEVIO */
return RT_EOK;
}
RTM_EXPORT(rt_device_register);
/**
* @brief This function removes a previously registered device driver.
*
* @param dev is the pointer of device driver structure.
*
* @return the error code, RT_EOK on success.
*/
rt_err_t rt_device_unregister(rt_device_t dev)
{
/* parameter check */
RT_ASSERT(dev != RT_NULL);
RT_ASSERT(rt_object_get_type(&dev->parent) == RT_Object_Class_Device);
RT_ASSERT(rt_object_is_systemobject(&dev->parent));
rt_object_detach(&(dev->parent));
return RT_EOK;
}
RTM_EXPORT(rt_device_unregister);
/**
* @brief This function finds a device driver by specified name.
*
* @param name is the device driver's name.
*
* @return the registered device driver on success, or RT_NULL on failure.
*/
rt_device_t rt_device_find(const char *name)
{
return (rt_device_t)rt_object_find(name, RT_Object_Class_Device);
}
RTM_EXPORT(rt_device_find);
#ifdef RT_USING_HEAP
/**
* @brief This function creates a device object with user data size.
*
* @param type is the type of the device object.
*
* @param attach_size is the size of user data.
*
* @return the allocated device object, or RT_NULL when failed.
*/
rt_device_t rt_device_create(int type, int attach_size)
{
int size;
rt_device_t device;
size = RT_ALIGN(sizeof(struct rt_device), RT_ALIGN_SIZE);
attach_size = RT_ALIGN(attach_size, RT_ALIGN_SIZE);
/* use the total size */
size += attach_size;
device = (rt_device_t)rt_malloc(size);
if (device)
{
rt_memset(device, 0x0, sizeof(struct rt_device));
device->type = (enum rt_device_class_type)type;
}
return device;
}
RTM_EXPORT(rt_device_create);
/**
* @brief This function destroys the specified device object.
*
* @param dev is a specific device object.
*/
void rt_device_destroy(rt_device_t dev)
{
/* parameter check */
RT_ASSERT(dev != RT_NULL);
RT_ASSERT(rt_object_get_type(&dev->parent) == RT_Object_Class_Device);
RT_ASSERT(rt_object_is_systemobject(&dev->parent) == RT_FALSE);
rt_object_detach(&(dev->parent));
/* release this device object */
rt_free(dev);
}
RTM_EXPORT(rt_device_destroy);
#endif /* RT_USING_HEAP */
/**
* @brief This function will initialize the specified device.
*
* @param dev is the pointer of device driver structure.
*
* @return the result, RT_EOK on success.
*/
rt_err_t rt_device_init(rt_device_t dev)
{
rt_err_t result = RT_EOK;
RT_ASSERT(dev != RT_NULL);
/* get device_init handler */
if (device_init != RT_NULL)
{
if (!(dev->flag & RT_DEVICE_FLAG_ACTIVATED))
{
result = device_init(dev);
if (result != RT_EOK)
{
RT_DEBUG_LOG(RT_DEBUG_DEVICE, ("To initialize device:%s failed. The error code is %d\n",
dev->parent.name, result));
}
else
{
dev->flag |= RT_DEVICE_FLAG_ACTIVATED;
}
}
}
return result;
}
/**
* @brief This function will open a device.
*
* @param dev is the pointer of device driver structure.
*
* @param oflag is the flags for device open.
*
* @return the result, RT_EOK on success.
*/
rt_err_t rt_device_open(rt_device_t dev, rt_uint16_t oflag)
{
rt_err_t result = RT_EOK;
/* parameter check */
RT_ASSERT(dev != RT_NULL);
RT_ASSERT(rt_object_get_type(&dev->parent) == RT_Object_Class_Device);
/* if device is not initialized, initialize it. */
if (!(dev->flag & RT_DEVICE_FLAG_ACTIVATED))
{
if (device_init != RT_NULL)
{
result = device_init(dev);
if (result != RT_EOK)
{
RT_DEBUG_LOG(RT_DEBUG_DEVICE, ("To initialize device:%s failed. The error code is %d\n",
dev->parent.name, result));
return result;
}
}
dev->flag |= RT_DEVICE_FLAG_ACTIVATED;
}
/* device is a stand alone device and opened */
if ((dev->flag & RT_DEVICE_FLAG_STANDALONE) &&
(dev->open_flag & RT_DEVICE_OFLAG_OPEN))
{
return -RT_EBUSY;
}
/* device is not opened or opened by other oflag, call device_open interface */
if (!(dev->open_flag & RT_DEVICE_OFLAG_OPEN) ||
((dev->open_flag & RT_DEVICE_OFLAG_MASK) != (oflag & RT_DEVICE_OFLAG_MASK)))
{
if (device_open != RT_NULL)
{
result = device_open(dev, oflag);
}
else
{
/* set open flag */
dev->open_flag = (oflag & RT_DEVICE_OFLAG_MASK);
}
}
/* set open flag */
if (result == RT_EOK || result == -RT_ENOSYS)
{
dev->open_flag |= RT_DEVICE_OFLAG_OPEN;
dev->ref_count++;
/* don't let bad things happen silently. If you are bitten by this assert,
* please set the ref_count to a bigger type. */
RT_ASSERT(dev->ref_count != 0);
}
return result;
}
RTM_EXPORT(rt_device_open);
/**
* @brief This function will close a device.
*
* @param dev is the pointer of device driver structure.
*
* @return the result, RT_EOK on success.
*/
rt_err_t rt_device_close(rt_device_t dev)
{
rt_err_t result = RT_EOK;
/* parameter check */
RT_ASSERT(dev != RT_NULL);
RT_ASSERT(rt_object_get_type(&dev->parent) == RT_Object_Class_Device);
if (dev->ref_count == 0)
return -RT_ERROR;
dev->ref_count--;
if (dev->ref_count != 0)
return RT_EOK;
/* call device_close interface */
if (device_close != RT_NULL)
{
result = device_close(dev);
}
/* set open flag */
if (result == RT_EOK || result == -RT_ENOSYS)
dev->open_flag = RT_DEVICE_OFLAG_CLOSE;
return result;
}
RTM_EXPORT(rt_device_close);
/**
* @brief This function will read some data from a device.
*
* @param dev is the pointer of device driver structure.
*
* @param pos is the position when reading.
*
* @param buffer is a data buffer to save the read data.
*
* @param size is the size of buffer.
*
* @return the actual size read on success; otherwise 0 will be returned.
*
* @note the unit of size/pos is a block for block device.
*/
rt_ssize_t rt_device_read(rt_device_t dev,
rt_off_t pos,
void *buffer,
rt_size_t size)
{
/* parameter check */
RT_ASSERT(dev != RT_NULL);
RT_ASSERT(rt_object_get_type(&dev->parent) == RT_Object_Class_Device);
if (dev->ref_count == 0)
{
rt_set_errno(-RT_ERROR);
return 0;
}
/* call device_read interface */
if (device_read != RT_NULL)
{
return device_read(dev, pos, buffer, size);
}
/* set error code */
rt_set_errno(-RT_ENOSYS);
return 0;
}
RTM_EXPORT(rt_device_read);
/**
* @brief This function will write some data to a device.
*
* @param dev is the pointer of device driver structure.
*
* @param pos is the position when writing.
*
* @param buffer is the data buffer to be written to device.
*
* @param size is the size of buffer.
*
* @return the actual size written on success; otherwise 0 will be returned.
*
* @note the unit of size/pos is a block for block device.
*/
rt_ssize_t rt_device_write(rt_device_t dev,
rt_off_t pos,
const void *buffer,
rt_size_t size)
{
/* parameter check */
RT_ASSERT(dev != RT_NULL);
RT_ASSERT(rt_object_get_type(&dev->parent) == RT_Object_Class_Device);
if (dev->ref_count == 0)
{
rt_set_errno(-RT_ERROR);
return 0;
}
/* call device_write interface */
if (device_write != RT_NULL)
{
return device_write(dev, pos, buffer, size);
}
/* set error code */
rt_set_errno(-RT_ENOSYS);
return 0;
}
RTM_EXPORT(rt_device_write);
/**
* @brief This function will perform a variety of control functions on devices.
*
* @param dev is the pointer of device driver structure.
*
* @param cmd is the command sent to device.
*
* @param arg is the argument of command.
*
* @return the result, -RT_ENOSYS for failed.
*/
rt_err_t rt_device_control(rt_device_t dev, int cmd, void *arg)
{
/* parameter check */
RT_ASSERT(dev != RT_NULL);
RT_ASSERT(rt_object_get_type(&dev->parent) == RT_Object_Class_Device);
/* call device_control interface */
if (device_control != RT_NULL)
{
return device_control(dev, cmd, arg);
}
return -RT_ENOSYS;
}
RTM_EXPORT(rt_device_control);
/**
* @brief This function will set the reception indication callback function. This callback function
* is invoked when this device receives data.
*
* @param dev is the pointer of device driver structure.
*
* @param rx_ind is the indication callback function.
*
* @return RT_EOK
*/
rt_err_t rt_device_set_rx_indicate(rt_device_t dev,
rt_err_t (*rx_ind)(rt_device_t dev,
rt_size_t size))
{
/* parameter check */
RT_ASSERT(dev != RT_NULL);
RT_ASSERT(rt_object_get_type(&dev->parent) == RT_Object_Class_Device);
dev->rx_indicate = rx_ind;
return RT_EOK;
}
RTM_EXPORT(rt_device_set_rx_indicate);
/**
* @brief This function will set a callback function. The callback function
* will be called when device has written data to physical hardware.
*
* @param dev is the pointer of device driver structure.
*
* @param tx_done is the indication callback function.
*
* @return RT_EOK
*/
rt_err_t rt_device_set_tx_complete(rt_device_t dev,
rt_err_t (*tx_done)(rt_device_t dev,
void *buffer))
{
/* parameter check */
RT_ASSERT(dev != RT_NULL);
RT_ASSERT(rt_object_get_type(&dev->parent) == RT_Object_Class_Device);
dev->tx_complete = tx_done;
return RT_EOK;
}
RTM_EXPORT(rt_device_set_tx_complete);
#ifdef RT_USING_DM
/**
* This function binds a driver to a device.
*
* @param device the pointer of device structure
* @param driver the pointer of driver structure
* @param node the pointer of fdt node structure
*
* @return the error code, RT_EOK on success.
*/
rt_err_t rt_device_bind_driver(rt_device_t device, rt_driver_t driver, void *node)
{
if((!driver) || (!device))
{
return -RT_EINVAL;
}
device->drv = driver;
#ifdef RT_USING_DEVICE_OPS
device->ops = driver->dev_ops;
#endif
device->dtb_node = node;
return RT_EOK;
}
RTM_EXPORT(rt_device_bind_driver);
/**
* This function creates an rt_device according to the driver information.
*
* @param drv the pointer of driver structure
* @param device_id specify the ID of the rt_device
*
* @return the created rt_device on success, or RT_NULL on failure.
*/
rt_device_t rt_device_create_since_driver(rt_driver_t drv,int device_id)
{
rt_device_t device;
if (!drv)
{
return RT_NULL;
}
device = (rt_device_t)rt_calloc(1,drv->device_size);
if(device == RT_NULL)
{
return RT_NULL;
}
device->device_id = device_id;
rt_snprintf(device->parent.name, sizeof(device->parent.name), "%s%d", drv->name, device_id);
return device;
}
RTM_EXPORT(rt_device_create_since_driver);
/**
* This function probes and initializes the rt_device.
*
* @param device the pointer of rt_device structure
* @return the error code, RT_EOK on success.
*/
rt_err_t rt_device_probe_and_init(rt_device_t device)
{
int ret = -RT_ERROR;
if (!device)
{
return -RT_EINVAL;
}
if(!device->drv)
{
return -RT_ERROR;
}
if(device->drv->probe)
{
ret = device->drv->probe((rt_device_t)device);
}
if(device->drv->probe_init)
{
ret = device->drv->probe_init((rt_device_t)device);
}
return ret;
}
RTM_EXPORT(rt_device_probe_and_init);
#endif /* RT_USING_DM */
#endif /* RT_USING_DEVICE */
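
A minimal sketch of the find/open/read/write/close flow built on the functions above, assuming a character device has been registered under the default console name from the Kconfig ("uart"); the wrapper function name is illustrative:

#include <rtthread.h>

static void uart_echo_once(void)
{
    char buf[16];
    rt_ssize_t len;
    rt_device_t dev = rt_device_find("uart");        /* look the device up by name */

    if (dev == RT_NULL)
        return;
    if (rt_device_open(dev, RT_DEVICE_OFLAG_RDWR) != RT_EOK)
        return;

    len = rt_device_read(dev, 0, buf, sizeof(buf));  /* pos is 0 for character devices */
    if (len > 0)
        rt_device_write(dev, 0, buf, len);

    rt_device_close(dev);
}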

113
src/driver.c Normal file

@ -0,0 +1,113 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <rtthread.h>
#ifdef RT_USING_FDT
#include <dtb_node.h>
#endif
#if defined(RT_USING_POSIX_DEVIO)
#include <rtdevice.h> /* for wqueue_init */
#endif
/**
* This function matches a driver with a device by id, then creates and initializes the device.
*
* @param drv the pointer of driver structure
* @param device_id the id of the device
*
* @return the error code, RT_EOK on success.
*/
rt_err_t rt_driver_match_with_id(const rt_driver_t drv,int device_id)
{
rt_device_t device;
int ret;
if (!drv)
{
return -RT_EINVAL;
}
device = rt_device_create_since_driver(drv,device_id);
if(!device)
{
return -RT_ERROR;
}
ret = rt_device_bind_driver(device,drv,RT_NULL);
if(ret != 0)
{
return -RT_ERROR;
}
ret = rt_device_probe_and_init(device);
if(ret != 0)
{
return -RT_ERROR;
}
return ret;
}
RTM_EXPORT(rt_driver_match_with_id);
#ifdef RT_USING_FDT
/**
* This function matches a driver with the dtb nodes compatible with it, then
* creates and initializes a device for each matching node.
*
* @param drv the pointer of driver structure
* @param from_node dtb node entry
* @param max_dev_num the maximum number of devices supported
*
* @return the error code, RT_EOK on success.
*/
rt_err_t rt_driver_match_with_dtb(const rt_driver_t drv,void *from_node,int max_dev_num)
{
struct dtb_node** node_list;
rt_device_t device;
int ret,i;
int total_dev_num = 0;
if ((!drv)||(!drv->dev_match)||(!drv->dev_match->compatible)||(!from_node)||(!drv->device_size))
{
return -RT_EINVAL;
}
node_list = rt_calloc(max_dev_num,sizeof(void *));
if(!node_list)
{
return -RT_ERROR;
}
ret = dtb_node_find_all_compatible_node(from_node,drv->dev_match->compatible,node_list,max_dev_num,&total_dev_num);
if((ret != 0) || (!total_dev_num))
{
return -RT_ERROR;
}
for(i = 0; i < total_dev_num; i ++)
{
if (!dtb_node_device_is_available(node_list[i]))
{
continue;
}
device = rt_device_create_since_driver(drv,i);
if(!device)
{
continue;
}
ret = rt_device_bind_driver(device,drv,node_list[i]);
if(ret != 0)
{
continue;
}
ret = rt_device_probe_and_init(device);
if(ret != 0)
{
continue;
}
}
rt_free(node_list);
return ret;
}
RTM_EXPORT(rt_driver_match_with_dtb);
#endif

374
src/idle.c Normal file

@ -0,0 +1,374 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-23 Bernard the first version
* 2010-11-10 Bernard add cleanup callback function in thread exit.
* 2012-12-29 Bernard fix compiling warning.
* 2013-12-21 Grissiom let rt_thread_idle_excute loop until there is no
* dead thread.
* 2016-08-09 ArdaFu add method to get the handler of the idle thread.
* 2018-02-07 Bernard lock scheduler to protect tid->cleanup.
* 2018-07-14 armink add idle hook list
* 2018-11-22 Jesven add per cpu idle task
* combine the code of primary and secondary cpu
* 2021-11-15 THEWON Remove duplicate work between idle and _thread_exit
*/
#include <rthw.h>
#include <rtthread.h>
#ifdef RT_USING_MODULE
#include <dlmodule.h>
#endif /* RT_USING_MODULE */
#ifdef RT_USING_HOOK
#ifndef RT_USING_IDLE_HOOK
#define RT_USING_IDLE_HOOK
#endif /* RT_USING_IDLE_HOOK */
#endif /* RT_USING_HOOK */
#ifndef IDLE_THREAD_STACK_SIZE
#if defined (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP)
#define IDLE_THREAD_STACK_SIZE 256
#else
#define IDLE_THREAD_STACK_SIZE 128
#endif /* (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP) */
#endif /* IDLE_THREAD_STACK_SIZE */
#ifdef RT_USING_SMP
#define _CPUS_NR RT_CPUS_NR
#else
#define _CPUS_NR 1
#endif /* RT_USING_SMP */
static rt_list_t _rt_thread_defunct = RT_LIST_OBJECT_INIT(_rt_thread_defunct);
static struct rt_thread idle_thread[_CPUS_NR];
rt_align(RT_ALIGN_SIZE)
static rt_uint8_t idle_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE];
#ifdef RT_USING_SMP
#ifndef SYSTEM_THREAD_STACK_SIZE
#define SYSTEM_THREAD_STACK_SIZE IDLE_THREAD_STACK_SIZE
#endif
static struct rt_thread rt_system_thread;
rt_align(RT_ALIGN_SIZE)
static rt_uint8_t rt_system_stack[SYSTEM_THREAD_STACK_SIZE];
static struct rt_semaphore system_sem;
#endif
#ifdef RT_USING_IDLE_HOOK
#ifndef RT_IDLE_HOOK_LIST_SIZE
#define RT_IDLE_HOOK_LIST_SIZE 4
#endif /* RT_IDLE_HOOK_LIST_SIZE */
static void (*idle_hook_list[RT_IDLE_HOOK_LIST_SIZE])(void);
/**
* @brief This function sets a hook function for the idle thread loop. When the system runs
* the idle loop, this hook function will be invoked.
*
* @param hook the specified hook function.
*
* @return RT_EOK: set OK.
* -RT_EFULL: hook list is full.
*
* @note the hook function must be simple and must never block or suspend.
*/
rt_err_t rt_thread_idle_sethook(void (*hook)(void))
{
rt_size_t i;
rt_base_t level;
rt_err_t ret = -RT_EFULL;
/* disable interrupt */
level = rt_hw_interrupt_disable();
for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
{
if (idle_hook_list[i] == RT_NULL)
{
idle_hook_list[i] = hook;
ret = RT_EOK;
break;
}
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
return ret;
}
/**
* @brief delete the idle hook on hook list.
*
* @param hook the specified hook function.
*
* @return RT_EOK: delete OK.
* -RT_ENOSYS: hook was not found.
*/
rt_err_t rt_thread_idle_delhook(void (*hook)(void))
{
rt_size_t i;
rt_base_t level;
rt_err_t ret = -RT_ENOSYS;
/* disable interrupt */
level = rt_hw_interrupt_disable();
for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
{
if (idle_hook_list[i] == hook)
{
idle_hook_list[i] = RT_NULL;
ret = RT_EOK;
break;
}
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
return ret;
}
#endif /* RT_USING_IDLE_HOOK */
/**
* @brief Enqueue a thread to defunct queue.
*
* @param thread the thread to be enqueued.
*
* @note It must be called between rt_hw_interrupt_disable and rt_hw_interrupt_enable
*/
void rt_thread_defunct_enqueue(rt_thread_t thread)
{
rt_list_insert_after(&_rt_thread_defunct, &thread->tlist);
#ifdef RT_USING_SMP
rt_sem_release(&system_sem);
#endif
}
/**
* @brief Dequeue a thread from defunct queue.
*/
rt_thread_t rt_thread_defunct_dequeue(void)
{
rt_base_t level;
rt_thread_t thread = RT_NULL;
rt_list_t *l = &_rt_thread_defunct;
#ifdef RT_USING_SMP
/* disable interrupt */
level = rt_hw_interrupt_disable();
if (l->next != l)
{
thread = rt_list_entry(l->next,
struct rt_thread,
tlist);
rt_list_remove(&(thread->tlist));
}
rt_hw_interrupt_enable(level);
#else
if (l->next != l)
{
thread = rt_list_entry(l->next,
struct rt_thread,
tlist);
level = rt_hw_interrupt_disable();
rt_list_remove(&(thread->tlist));
rt_hw_interrupt_enable(level);
}
#endif
return thread;
}
/**
* @brief This function will perform system background job when system idle.
*/
static void rt_defunct_execute(void)
{
/* Loop until there is no dead thread. So one call to rt_defunct_execute
* will do all the cleanups. */
while (1)
{
rt_thread_t thread;
rt_bool_t object_is_systemobject;
void (*cleanup)(struct rt_thread *tid);
#ifdef RT_USING_MODULE
struct rt_dlmodule *module = RT_NULL;
#endif
/* get defunct thread */
thread = rt_thread_defunct_dequeue();
if (thread == RT_NULL)
{
break;
}
#ifdef RT_USING_MODULE
module = (struct rt_dlmodule*)thread->parent.module_id;
if (module)
{
dlmodule_destroy(module);
}
#endif
#ifdef RT_USING_SIGNALS
rt_thread_free_sig(thread);
#endif
/* save the pointer to thread->cleanup so it is not lost after the object is detached */
cleanup = thread->cleanup;
/* if it's a system object, do not delete it */
object_is_systemobject = rt_object_is_systemobject((rt_object_t)thread);
if (object_is_systemobject == RT_TRUE)
{
/* detach this object */
rt_object_detach((rt_object_t)thread);
}
/* invoke thread cleanup */
if (cleanup != RT_NULL)
{
cleanup(thread);
}
#ifdef RT_USING_HEAP
/* if need free, delete it */
if (object_is_systemobject == RT_FALSE)
{
/* release thread's stack */
RT_KERNEL_FREE(thread->stack_addr);
/* delete thread object */
rt_object_delete((rt_object_t)thread);
}
#endif
}
}
static void idle_thread_entry(void *parameter)
{
#ifdef RT_USING_SMP
if (rt_hw_cpu_id() != 0)
{
while (1)
{
rt_hw_secondary_cpu_idle_exec();
}
}
#endif /* RT_USING_SMP */
while (1)
{
#ifdef RT_USING_IDLE_HOOK
rt_size_t i;
void (*idle_hook)(void);
for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
{
idle_hook = idle_hook_list[i];
if (idle_hook != RT_NULL)
{
idle_hook();
}
}
#endif /* RT_USING_IDLE_HOOK */
#ifndef RT_USING_SMP
rt_defunct_execute();
#endif /* RT_USING_SMP */
#ifdef RT_USING_PM
void rt_system_power_manager(void);
rt_system_power_manager();
#endif /* RT_USING_PM */
}
}
#ifdef RT_USING_SMP
static void rt_thread_system_entry(void *parameter)
{
while (1)
{
int ret= rt_sem_take(&system_sem, RT_WAITING_FOREVER);
if (ret != RT_EOK)
{
RT_ASSERT(0);
}
rt_defunct_execute();
}
}
#endif
/**
* @brief This function will initialize idle thread, then start it.
*
* @note this function must be invoked during system initialization.
*/
void rt_thread_idle_init(void)
{
rt_ubase_t i;
#if RT_NAME_MAX > 0
char idle_thread_name[RT_NAME_MAX];
#endif /* RT_NAME_MAX > 0 */
for (i = 0; i < _CPUS_NR; i++)
{
#if RT_NAME_MAX > 0
rt_snprintf(idle_thread_name, RT_NAME_MAX, "tidle%d", i);
#endif /* RT_NAME_MAX > 0 */
rt_thread_init(&idle_thread[i],
#if RT_NAME_MAX > 0
idle_thread_name,
#else
"tidle",
#endif /* RT_NAME_MAX > 0 */
idle_thread_entry,
RT_NULL,
&idle_thread_stack[i][0],
sizeof(idle_thread_stack[i]),
RT_THREAD_PRIORITY_MAX - 1,
32);
#ifdef RT_USING_SMP
rt_thread_control(&idle_thread[i], RT_THREAD_CTRL_BIND_CPU, (void*)i);
#endif /* RT_USING_SMP */
/* startup */
rt_thread_startup(&idle_thread[i]);
}
#ifdef RT_USING_SMP
RT_ASSERT(RT_THREAD_PRIORITY_MAX > 2);
rt_sem_init(&system_sem, "defunct", 1, RT_IPC_FLAG_FIFO);
/* create defunct thread */
rt_thread_init(&rt_system_thread,
"tsystem",
rt_thread_system_entry,
RT_NULL,
rt_system_stack,
sizeof(rt_system_stack),
RT_THREAD_PRIORITY_MAX - 2,
32);
/* startup */
rt_thread_startup(&rt_system_thread);
#endif
}
/**
* @brief This function will get the handler of the idle thread.
*/
rt_thread_t rt_thread_idle_gethandler(void)
{
#ifdef RT_USING_SMP
int id = rt_hw_cpu_id();
#else
int id = 0;
#endif /* RT_USING_SMP */
return (rt_thread_t)(&idle_thread[id]);
}
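
A minimal sketch of installing an idle hook with the API above (assuming RT_USING_IDLE_HOOK and the components-init mechanism are enabled; names are illustrative). As the comments note, the hook must be short and must never block:

#include <rtthread.h>

static volatile rt_uint32_t idle_loops;

/* called from the idle thread loop on every pass */
static void idle_counter_hook(void)
{
    idle_loops++;
}

static int idle_hook_install(void)
{
    return rt_thread_idle_sethook(idle_counter_hook);
}
INIT_APP_EXPORT(idle_hook_install);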

3793
src/ipc.c Normal file

File diff suppressed because it is too large

142
src/irq.c Normal file

@ -0,0 +1,142 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-02-24 Bernard first version
* 2006-05-03 Bernard add IRQ_DEBUG
* 2016-08-09 ArdaFu add interrupt enter and leave hook.
* 2018-11-22 Jesven rt_interrupt_get_nest function add disable irq
* 2021-08-15 Supperthomas fix the comment
* 2022-01-07 Gabriel Moving __on_rt_xxxxx_hook to irq.c
* 2022-07-04 Yunjie fix RT_DEBUG_LOG
*/
#include <rthw.h>
#include <rtthread.h>
#ifndef __on_rt_interrupt_enter_hook
#define __on_rt_interrupt_enter_hook() __ON_HOOK_ARGS(rt_interrupt_enter_hook, ())
#endif
#ifndef __on_rt_interrupt_leave_hook
#define __on_rt_interrupt_leave_hook() __ON_HOOK_ARGS(rt_interrupt_leave_hook, ())
#endif
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
static void (*rt_interrupt_enter_hook)(void);
static void (*rt_interrupt_leave_hook)(void);
/**
* @ingroup Hook
*
* @brief This function sets a hook function to be called when the system enters an interrupt
*
* @note The hook function must be simple and must never block or suspend.
*
* @param hook the function pointer to be called
*/
void rt_interrupt_enter_sethook(void (*hook)(void))
{
rt_interrupt_enter_hook = hook;
}
/**
* @ingroup Hook
*
* @brief This function sets a hook function to be called when the system exits an interrupt.
*
* @note The hook function must be simple and must never block or suspend.
*
* @param hook the function pointer to be called
*/
void rt_interrupt_leave_sethook(void (*hook)(void))
{
rt_interrupt_leave_hook = hook;
}
#endif /* RT_USING_HOOK */
/**
* @addtogroup Kernel
*/
/**@{*/
#ifdef RT_USING_SMP
#define rt_interrupt_nest rt_cpu_self()->irq_nest
#else
volatile rt_uint8_t rt_interrupt_nest = 0;
#endif /* RT_USING_SMP */
/**
* @brief This function will be invoked by the BSP when entering an interrupt service routine
*
* @note Please don't invoke this routine in applications
*
* @see rt_interrupt_leave
*/
rt_weak void rt_interrupt_enter(void)
{
rt_base_t level;
level = rt_hw_interrupt_disable();
rt_interrupt_nest ++;
RT_OBJECT_HOOK_CALL(rt_interrupt_enter_hook,());
rt_hw_interrupt_enable(level);
RT_DEBUG_LOG(RT_DEBUG_IRQ, ("irq has come..., irq current nest:%d\n",
(rt_int32_t)rt_interrupt_nest));
}
RTM_EXPORT(rt_interrupt_enter);
/**
* @brief This function will be invoked by the BSP when leaving an interrupt service routine
*
* @note Please don't invoke this routine in applications
*
* @see rt_interrupt_enter
*/
rt_weak void rt_interrupt_leave(void)
{
rt_base_t level;
RT_DEBUG_LOG(RT_DEBUG_IRQ, ("irq is going to leave, irq current nest:%d\n",
(rt_int32_t)rt_interrupt_nest));
level = rt_hw_interrupt_disable();
RT_OBJECT_HOOK_CALL(rt_interrupt_leave_hook,());
rt_interrupt_nest --;
rt_hw_interrupt_enable(level);
}
RTM_EXPORT(rt_interrupt_leave);
/**
* @brief This function will return the interrupt nesting level.
*
* User applications can invoke this function to determine whether the current
* context is an interrupt context.
*
* @return the number of nested interrupts.
*/
rt_weak rt_uint8_t rt_interrupt_get_nest(void)
{
rt_uint8_t ret;
rt_base_t level;
level = rt_hw_interrupt_disable();
ret = rt_interrupt_nest;
rt_hw_interrupt_enable(level);
return ret;
}
RTM_EXPORT(rt_interrupt_get_nest);
RTM_EXPORT(rt_hw_interrupt_disable);
RTM_EXPORT(rt_hw_interrupt_enable);
/**@}*/
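
A minimal sketch of how a BSP-level interrupt handler wraps its body with the functions above so the kernel can track interrupt nesting (the handler name and hardware access are illustrative):

#include <rthw.h>
#include <rtthread.h>

void demo_device_isr(void)
{
    rt_interrupt_enter();      /* inform the kernel: entering interrupt context */

    /* acknowledge the hardware and do the minimal ISR work here */

    rt_interrupt_leave();      /* inform the kernel: leaving interrupt context */
}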

2028
src/kservice.c Normal file

File diff suppressed because it is too large

673
src/mem.c Normal file

@ -0,0 +1,673 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2008-7-12 Bernard the first version
* 2010-06-09 Bernard fix the end stub of heap
* fix memory check in rt_realloc function
* 2010-07-13 Bernard fix RT_ALIGN issue found by kuronca
* 2010-10-14 Bernard fix rt_realloc issue when realloc a NULL pointer.
* 2017-07-14 armink fix rt_realloc issue when new size is 0
* 2018-10-02 Bernard Add 64bit support
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
* Simon Goldschmidt
*
*/
#include <rthw.h>
#include <rtthread.h>
#if defined (RT_USING_SMALL_MEM)
/**
* memory item on the small mem
*/
struct rt_small_mem_item
{
rt_ubase_t pool_ptr; /**< small memory object addr */
#ifdef ARCH_CPU_64BIT
rt_uint32_t resv;
#endif /* ARCH_CPU_64BIT */
rt_size_t next; /**< next free item */
rt_size_t prev; /**< prev free item */
#ifdef RT_USING_MEMTRACE
#ifdef ARCH_CPU_64BIT
rt_uint8_t thread[8]; /**< thread name */
#else
rt_uint8_t thread[4]; /**< thread name */
#endif /* ARCH_CPU_64BIT */
#endif /* RT_USING_MEMTRACE */
};
/**
* Base structure of small memory object
*/
struct rt_small_mem
{
struct rt_memory parent; /**< inherit from rt_memory */
rt_uint8_t *heap_ptr; /**< pointer to the heap */
struct rt_small_mem_item *heap_end;
struct rt_small_mem_item *lfree;
rt_size_t mem_size_aligned; /**< aligned memory size */
};
#define HEAP_MAGIC 0x1ea0
#ifdef ARCH_CPU_64BIT
#define MIN_SIZE 24
#else
#define MIN_SIZE 12
#endif /* ARCH_CPU_64BIT */
#define MEM_MASK ((~(rt_size_t)0) - 1)
#define MEM_USED() ((((rt_base_t)(small_mem)) & MEM_MASK) | 0x1)
#define MEM_FREED() ((((rt_base_t)(small_mem)) & MEM_MASK) | 0x0)
#define MEM_ISUSED(_mem) \
(((rt_base_t)(((struct rt_small_mem_item *)(_mem))->pool_ptr)) & (~MEM_MASK))
#define MEM_POOL(_mem) \
((struct rt_small_mem *)(((rt_base_t)(((struct rt_small_mem_item *)(_mem))->pool_ptr)) & (MEM_MASK)))
#define MEM_SIZE(_heap, _mem) \
(((struct rt_small_mem_item *)(_mem))->next - ((rt_ubase_t)(_mem) - \
(rt_ubase_t)((_heap)->heap_ptr)) - RT_ALIGN(sizeof(struct rt_small_mem_item), RT_ALIGN_SIZE))
#define MIN_SIZE_ALIGNED RT_ALIGN(MIN_SIZE, RT_ALIGN_SIZE)
#define SIZEOF_STRUCT_MEM RT_ALIGN(sizeof(struct rt_small_mem_item), RT_ALIGN_SIZE)
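/*
* Note on the pool_ptr tagging scheme used by the macros above: each item
* stores the address of its owning rt_small_mem object in pool_ptr and uses
* the lowest bit as the used/free flag. MEM_MASK clears that bit, so
* MEM_POOL() recovers the owning pool while MEM_ISUSED() reads the flag.
*/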
#ifdef RT_USING_MEMTRACE
rt_inline void rt_smem_setname(struct rt_small_mem_item *mem, const char *name)
{
int index;
for (index = 0; index < sizeof(mem->thread); index ++)
{
if (name[index] == '\0') break;
mem->thread[index] = name[index];
}
for (; index < sizeof(mem->thread); index ++)
{
mem->thread[index] = ' ';
}
}
#endif /* RT_USING_MEMTRACE */
static void plug_holes(struct rt_small_mem *m, struct rt_small_mem_item *mem)
{
struct rt_small_mem_item *nmem;
struct rt_small_mem_item *pmem;
RT_ASSERT((rt_uint8_t *)mem >= m->heap_ptr);
RT_ASSERT((rt_uint8_t *)mem < (rt_uint8_t *)m->heap_end);
/* plug hole forward */
nmem = (struct rt_small_mem_item *)&m->heap_ptr[mem->next];
if (mem != nmem && !MEM_ISUSED(nmem) &&
(rt_uint8_t *)nmem != (rt_uint8_t *)m->heap_end)
{
/* if mem->next is unused and not end of m->heap_ptr,
* combine mem and mem->next
*/
if (m->lfree == nmem)
{
m->lfree = mem;
}
nmem->pool_ptr = 0;
mem->next = nmem->next;
((struct rt_small_mem_item *)&m->heap_ptr[nmem->next])->prev = (rt_uint8_t *)mem - m->heap_ptr;
}
/* plug hole backward */
pmem = (struct rt_small_mem_item *)&m->heap_ptr[mem->prev];
if (pmem != mem && !MEM_ISUSED(pmem))
{
/* if mem->prev is unused, combine mem and mem->prev */
if (m->lfree == mem)
{
m->lfree = pmem;
}
mem->pool_ptr = 0;
pmem->next = mem->next;
((struct rt_small_mem_item *)&m->heap_ptr[mem->next])->prev = (rt_uint8_t *)pmem - m->heap_ptr;
}
}
/**
* @brief This function will initialize small memory management algorithm.
*
* @param name is the name of the small memory management object.
*
* @param begin_addr the beginning address of memory.
*
* @param size is the size of the memory.
*
* @return Return a pointer to the memory object. When the return value is RT_NULL, it means the init failed.
*/
rt_smem_t rt_smem_init(const char *name,
void *begin_addr,
rt_size_t size)
{
struct rt_small_mem_item *mem;
struct rt_small_mem *small_mem;
rt_ubase_t start_addr, begin_align, end_align, mem_size;
small_mem = (struct rt_small_mem *)RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
start_addr = (rt_ubase_t)small_mem + sizeof(*small_mem);
begin_align = RT_ALIGN((rt_ubase_t)start_addr, RT_ALIGN_SIZE);
end_align = RT_ALIGN_DOWN((rt_ubase_t)begin_addr + size, RT_ALIGN_SIZE);
/* alignment addr */
if ((end_align > (2 * SIZEOF_STRUCT_MEM)) &&
((end_align - 2 * SIZEOF_STRUCT_MEM) >= start_addr))
{
/* calculate the aligned memory size */
mem_size = end_align - begin_align - 2 * SIZEOF_STRUCT_MEM;
}
else
{
rt_kprintf("mem init, error begin address 0x%x, and end address 0x%x\n",
(rt_ubase_t)begin_addr, (rt_ubase_t)begin_addr + size);
return RT_NULL;
}
rt_memset(small_mem, 0, sizeof(*small_mem));
/* initialize small memory object */
rt_object_init(&(small_mem->parent.parent), RT_Object_Class_Memory, name);
small_mem->parent.algorithm = "small";
small_mem->parent.address = begin_align;
small_mem->parent.total = mem_size;
small_mem->mem_size_aligned = mem_size;
/* point to begin address of heap */
small_mem->heap_ptr = (rt_uint8_t *)begin_align;
RT_DEBUG_LOG(RT_DEBUG_MEM, ("mem init, heap begin address 0x%x, size %d\n",
(rt_ubase_t)small_mem->heap_ptr, small_mem->mem_size_aligned));
/* initialize the start of the heap */
mem = (struct rt_small_mem_item *)small_mem->heap_ptr;
mem->pool_ptr = MEM_FREED();
mem->next = small_mem->mem_size_aligned + SIZEOF_STRUCT_MEM;
mem->prev = 0;
#ifdef RT_USING_MEMTRACE
rt_smem_setname(mem, "INIT");
#endif /* RT_USING_MEMTRACE */
/* initialize the end of the heap */
small_mem->heap_end = (struct rt_small_mem_item *)&small_mem->heap_ptr[mem->next];
small_mem->heap_end->pool_ptr = MEM_USED();
small_mem->heap_end->next = small_mem->mem_size_aligned + SIZEOF_STRUCT_MEM;
small_mem->heap_end->prev = small_mem->mem_size_aligned + SIZEOF_STRUCT_MEM;
#ifdef RT_USING_MEMTRACE
rt_smem_setname(small_mem->heap_end, "INIT");
#endif /* RT_USING_MEMTRACE */
/* initialize the lowest-free pointer to the start of the heap */
small_mem->lfree = (struct rt_small_mem_item *)small_mem->heap_ptr;
return &small_mem->parent;
}
RTM_EXPORT(rt_smem_init);
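/*
* Minimal usage sketch for the small memory allocator. Illustrative only:
* the buffer name and size below are hypothetical, not part of this file.
*
*     static rt_uint8_t heap_buf[4096];
*     rt_smem_t heap = rt_smem_init("tinymem", heap_buf, sizeof(heap_buf));
*     if (heap != RT_NULL)
*     {
*         void *p = rt_smem_alloc(heap, 128);
*         rt_smem_free(p);
*         rt_smem_detach(heap);
*     }
*/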
/**
* @brief This function will remove a small mem from the system.
*
* @param m the small memory management object.
*
* @return RT_EOK
*/
rt_err_t rt_smem_detach(rt_smem_t m)
{
RT_ASSERT(m != RT_NULL);
RT_ASSERT(rt_object_get_type(&m->parent) == RT_Object_Class_Memory);
RT_ASSERT(rt_object_is_systemobject(&m->parent));
rt_object_detach(&(m->parent));
return RT_EOK;
}
RTM_EXPORT(rt_smem_detach);
/**
* @addtogroup MM
*/
/**@{*/
/**
* @brief Allocate a block of memory with a minimum of 'size' bytes.
*
* @param m the small memory management object.
*
* @param size is the minimum size of the requested block in bytes.
*
* @return the pointer to allocated memory or NULL if no free memory was found.
*/
void *rt_smem_alloc(rt_smem_t m, rt_size_t size)
{
rt_size_t ptr, ptr2;
struct rt_small_mem_item *mem, *mem2;
struct rt_small_mem *small_mem;
if (size == 0)
return RT_NULL;
RT_ASSERT(m != RT_NULL);
RT_ASSERT(rt_object_get_type(&m->parent) == RT_Object_Class_Memory);
RT_ASSERT(rt_object_is_systemobject(&m->parent));
if (size != RT_ALIGN(size, RT_ALIGN_SIZE))
{
RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d, but align to %d\n",
size, RT_ALIGN(size, RT_ALIGN_SIZE)));
}
else
{
RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d\n", size));
}
small_mem = (struct rt_small_mem *)m;
/* alignment size */
size = RT_ALIGN(size, RT_ALIGN_SIZE);
/* every data block must be at least MIN_SIZE_ALIGNED long */
if (size < MIN_SIZE_ALIGNED)
size = MIN_SIZE_ALIGNED;
if (size > small_mem->mem_size_aligned)
{
RT_DEBUG_LOG(RT_DEBUG_MEM, ("no memory\n"));
return RT_NULL;
}
for (ptr = (rt_uint8_t *)small_mem->lfree - small_mem->heap_ptr;
ptr <= small_mem->mem_size_aligned - size;
ptr = ((struct rt_small_mem_item *)&small_mem->heap_ptr[ptr])->next)
{
mem = (struct rt_small_mem_item *)&small_mem->heap_ptr[ptr];
if ((!MEM_ISUSED(mem)) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size)
{
/* mem is not used and at least perfect fit is possible:
* mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >=
(size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED))
{
/* (in addition to the above, we test if another struct rt_small_mem_item (SIZEOF_STRUCT_MEM) containing
* at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
* -> split large block, create empty remainder,
* remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
* mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
* struct rt_small_mem_item would fit in but no data between mem2 and mem2->next
* @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
* region that couldn't hold data, but when mem->next gets freed,
* the 2 regions would be combined, resulting in more free memory
*/
ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
/* create mem2 struct */
mem2 = (struct rt_small_mem_item *)&small_mem->heap_ptr[ptr2];
mem2->pool_ptr = MEM_FREED();
mem2->next = mem->next;
mem2->prev = ptr;
#ifdef RT_USING_MEMTRACE
rt_smem_setname(mem2, " ");
#endif /* RT_USING_MEMTRACE */
/* and insert it between mem and mem->next */
mem->next = ptr2;
if (mem2->next != small_mem->mem_size_aligned + SIZEOF_STRUCT_MEM)
{
((struct rt_small_mem_item *)&small_mem->heap_ptr[mem2->next])->prev = ptr2;
}
small_mem->parent.used += (size + SIZEOF_STRUCT_MEM);
if (small_mem->parent.max < small_mem->parent.used)
small_mem->parent.max = small_mem->parent.used;
}
else
{
* (a mem2 struct does not fit into the user data space of mem and mem->next will always
* be used at this point: if not, we have 2 unused structs in a row, plug_holes should have
* taken care of this).
* -> near fit or exact fit: do not split, no mem2 creation
* also can't move mem->next directly behind mem, since mem->next
* will always be used at this point!
*/
small_mem->parent.used += mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr);
if (small_mem->parent.max < small_mem->parent.used)
small_mem->parent.max = small_mem->parent.used;
}
/* set small memory object */
mem->pool_ptr = MEM_USED();
#ifdef RT_USING_MEMTRACE
if (rt_thread_self())
rt_smem_setname(mem, rt_thread_self()->parent.name);
else
rt_smem_setname(mem, "NONE");
#endif /* RT_USING_MEMTRACE */
if (mem == small_mem->lfree)
{
/* Find next free block after mem and update lowest free pointer */
while (MEM_ISUSED(small_mem->lfree) && small_mem->lfree != small_mem->heap_end)
small_mem->lfree = (struct rt_small_mem_item *)&small_mem->heap_ptr[small_mem->lfree->next];
RT_ASSERT(((small_mem->lfree == small_mem->heap_end) || (!MEM_ISUSED(small_mem->lfree))));
}
RT_ASSERT((rt_ubase_t)mem + SIZEOF_STRUCT_MEM + size <= (rt_ubase_t)small_mem->heap_end);
RT_ASSERT((rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM) % RT_ALIGN_SIZE == 0);
RT_ASSERT((((rt_ubase_t)mem) & (RT_ALIGN_SIZE - 1)) == 0);
RT_DEBUG_LOG(RT_DEBUG_MEM,
("allocate memory at 0x%x, size: %d\n",
(rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM),
(rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr))));
/* return the memory data except mem struct */
return (rt_uint8_t *)mem + SIZEOF_STRUCT_MEM;
}
}
return RT_NULL;
}
RTM_EXPORT(rt_smem_alloc);
/**
* @brief This function will change the size of previously allocated memory block.
*
* @param m the small memory management object.
*
* @param rmem is the pointer to memory allocated by rt_smem_alloc.
*
* @param newsize is the required new size.
*
* @return the changed memory block address.
*/
void *rt_smem_realloc(rt_smem_t m, void *rmem, rt_size_t newsize)
{
rt_size_t size;
rt_size_t ptr, ptr2;
struct rt_small_mem_item *mem, *mem2;
struct rt_small_mem *small_mem;
void *nmem;
RT_ASSERT(m != RT_NULL);
RT_ASSERT(rt_object_get_type(&m->parent) == RT_Object_Class_Memory);
RT_ASSERT(rt_object_is_systemobject(&m->parent));
small_mem = (struct rt_small_mem *)m;
/* alignment size */
newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
if (newsize > small_mem->mem_size_aligned)
{
RT_DEBUG_LOG(RT_DEBUG_MEM, ("realloc: out of memory\n"));
return RT_NULL;
}
else if (newsize == 0)
{
rt_smem_free(rmem);
return RT_NULL;
}
/* allocate a new memory block */
if (rmem == RT_NULL)
return rt_smem_alloc(&small_mem->parent, newsize);
RT_ASSERT((((rt_ubase_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)small_mem->heap_ptr);
RT_ASSERT((rt_uint8_t *)rmem < (rt_uint8_t *)small_mem->heap_end);
mem = (struct rt_small_mem_item *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);
/* current memory block size */
ptr = (rt_uint8_t *)mem - small_mem->heap_ptr;
size = mem->next - ptr - SIZEOF_STRUCT_MEM;
if (size == newsize)
{
/* the size is the same as before */
return rmem;
}
if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE < size)
{
/* split memory block */
small_mem->parent.used -= (size - newsize);
ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
mem2 = (struct rt_small_mem_item *)&small_mem->heap_ptr[ptr2];
mem2->pool_ptr = MEM_FREED();
mem2->next = mem->next;
mem2->prev = ptr;
#ifdef RT_USING_MEMTRACE
rt_smem_setname(mem2, " ");
#endif /* RT_USING_MEMTRACE */
mem->next = ptr2;
if (mem2->next != small_mem->mem_size_aligned + SIZEOF_STRUCT_MEM)
{
((struct rt_small_mem_item *)&small_mem->heap_ptr[mem2->next])->prev = ptr2;
}
if (mem2 < small_mem->lfree)
{
/* the split struct is now the lowest */
small_mem->lfree = mem2;
}
plug_holes(small_mem, mem2);
return rmem;
}
/* expand memory */
nmem = rt_smem_alloc(&small_mem->parent, newsize);
if (nmem != RT_NULL) /* check memory */
{
rt_memcpy(nmem, rmem, size < newsize ? size : newsize);
rt_smem_free(rmem);
}
return nmem;
}
RTM_EXPORT(rt_smem_realloc);
/**
* @brief This function will release the previously allocated memory block by
* rt_smem_alloc. The released memory block is taken back to the system heap.
*
* @param rmem the address of memory which will be released.
*/
void rt_smem_free(void *rmem)
{
struct rt_small_mem_item *mem;
struct rt_small_mem *small_mem;
if (rmem == RT_NULL)
return;
RT_ASSERT((((rt_ubase_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
/* Get the corresponding struct rt_small_mem_item ... */
mem = (struct rt_small_mem_item *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);
/* ... which has to be in a used state ... */
small_mem = MEM_POOL(mem);
RT_ASSERT(small_mem != RT_NULL);
RT_ASSERT(MEM_ISUSED(mem));
RT_ASSERT(rt_object_get_type(&small_mem->parent.parent) == RT_Object_Class_Memory);
RT_ASSERT(rt_object_is_systemobject(&small_mem->parent.parent));
RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)small_mem->heap_ptr &&
(rt_uint8_t *)rmem < (rt_uint8_t *)small_mem->heap_end);
RT_ASSERT(MEM_POOL(&small_mem->heap_ptr[mem->next]) == small_mem);
RT_DEBUG_LOG(RT_DEBUG_MEM,
("release memory 0x%x, size: %d\n",
(rt_ubase_t)rmem,
(rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr))));
/* ... and is now unused. */
mem->pool_ptr = MEM_FREED();
#ifdef RT_USING_MEMTRACE
rt_smem_setname(mem, " ");
#endif /* RT_USING_MEMTRACE */
if (mem < small_mem->lfree)
{
/* the newly freed struct is now the lowest */
small_mem->lfree = mem;
}
small_mem->parent.used -= (mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr));
/* finally, see if prev or next are free also */
plug_holes(small_mem, mem);
}
RTM_EXPORT(rt_smem_free);
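/*
* Illustrative realloc sketch. The heap handle below is assumed to come from
* an earlier rt_smem_init call; all names are hypothetical.
*
*     void *p = rt_smem_alloc(heap, 64);
*     p = rt_smem_realloc(heap, p, 256);   // grow: may move, data is copied
*     p = rt_smem_realloc(heap, p, 32);    // shrink: usually split in place
*     rt_smem_free(p);
*/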
#ifdef RT_USING_FINSH
#include <finsh.h>
#ifdef RT_USING_MEMTRACE
int memcheck(int argc, char *argv[])
{
int position;
rt_base_t level;
struct rt_small_mem_item *mem;
struct rt_small_mem *m;
struct rt_object_information *information;
struct rt_list_node *node;
struct rt_object *object;
char *name;
name = argc > 1 ? argv[1] : RT_NULL;
level = rt_hw_interrupt_disable();
/* get mem object */
information = rt_object_get_information(RT_Object_Class_Memory);
for (node = information->object_list.next;
node != &(information->object_list);
node = node->next)
{
object = rt_list_entry(node, struct rt_object, list);
/* find the specified object */
if (name != RT_NULL && rt_strncmp(name, object->name, RT_NAME_MAX) != 0)
continue;
/* mem object */
m = (struct rt_small_mem *)object;
/* check mem */
for (mem = (struct rt_small_mem_item *)m->heap_ptr; mem != m->heap_end; mem = (struct rt_small_mem_item *)&m->heap_ptr[mem->next])
{
position = (rt_ubase_t)mem - (rt_ubase_t)m->heap_ptr;
if (position < 0) goto __exit;
if (position > (int)m->mem_size_aligned) goto __exit;
if (MEM_POOL(mem) != m) goto __exit;
}
}
rt_hw_interrupt_enable(level);
return 0;
__exit:
rt_kprintf("Memory block wrong:\n");
rt_kprintf(" name: %s\n", m->parent.parent.name);
rt_kprintf("address: 0x%08x\n", mem);
rt_kprintf(" pool: 0x%04x\n", mem->pool_ptr);
rt_kprintf(" size: %d\n", mem->next - position - SIZEOF_STRUCT_MEM);
rt_hw_interrupt_enable(level);
return 0;
}
MSH_CMD_EXPORT(memcheck, check memory data);
int memtrace(int argc, char **argv)
{
struct rt_small_mem_item *mem;
struct rt_small_mem *m;
struct rt_object_information *information;
struct rt_list_node *node;
struct rt_object *object;
char *name;
name = argc > 1 ? argv[1] : RT_NULL;
/* get mem object */
information = rt_object_get_information(RT_Object_Class_Memory);
for (node = information->object_list.next;
node != &(information->object_list);
node = node->next)
{
object = rt_list_entry(node, struct rt_object, list);
/* find the specified object */
if (name != RT_NULL && rt_strncmp(name, object->name, RT_NAME_MAX) != 0)
continue;
/* mem object */
m = (struct rt_small_mem *)object;
/* show memory information */
rt_kprintf("\nmemory heap address:\n");
rt_kprintf("name : %s\n", m->parent.parent.name);
rt_kprintf("total : 0x%d\n", m->parent.total);
rt_kprintf("used : 0x%d\n", m->parent.used);
rt_kprintf("max_used: 0x%d\n", m->parent.max);
rt_kprintf("heap_ptr: 0x%08x\n", m->heap_ptr);
rt_kprintf("lfree : 0x%08x\n", m->lfree);
rt_kprintf("heap_end: 0x%08x\n", m->heap_end);
rt_kprintf("\n--memory item information --\n");
for (mem = (struct rt_small_mem_item *)m->heap_ptr; mem != m->heap_end; mem = (struct rt_small_mem_item *)&m->heap_ptr[mem->next])
{
int size = MEM_SIZE(m, mem);
rt_kprintf("[0x%08x - ", mem);
if (size < 1024)
rt_kprintf("%5d", size);
else if (size < 1024 * 1024)
rt_kprintf("%4dK", size / 1024);
else
rt_kprintf("%4dM", size / (1024 * 1024));
rt_kprintf("] %c%c%c%c", mem->thread[0], mem->thread[1], mem->thread[2], mem->thread[3]);
if (MEM_POOL(mem) != m)
rt_kprintf(": ***\n");
else
rt_kprintf("\n");
}
}
return 0;
}
MSH_CMD_EXPORT(memtrace, dump memory trace information);
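/*
* Both commands are available from the msh console when RT_USING_FINSH and
* RT_USING_MEMTRACE are enabled; the heap name argument is optional and the
* name "tinymem" below is only an example:
*
*     msh />memcheck
*     msh />memtrace tinymem
*/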
#endif /* RT_USING_MEMTRACE */
#endif /* RT_USING_FINSH */
#endif /* defined (RT_USING_SMALL_MEM) */
/**@}*/

1004
src/memheap.c Normal file

File diff suppressed because it is too large

470
src/mempool.c Normal file

@ -0,0 +1,470 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-05-27 Bernard implement memory pool
* 2006-06-03 Bernard fix the thread timer init bug
* 2006-06-30 Bernard fix the allocate/free block bug
* 2006-08-04 Bernard add hook support
* 2006-08-10 Bernard fix interrupt bug in rt_mp_alloc
* 2010-07-13 Bernard fix RT_ALIGN issue found by kuronca
* 2010-10-26 yi.qiu add module support in rt_mp_delete
* 2011-01-24 Bernard add object allocation check.
* 2012-03-22 Bernard fix align issue in rt_mp_init and rt_mp_create.
* 2022-01-07 Gabriel Moving __on_rt_xxxxx_hook to mempool.c
*/
#include <rthw.h>
#include <rtthread.h>
#ifdef RT_USING_MEMPOOL
#ifndef __on_rt_mp_alloc_hook
#define __on_rt_mp_alloc_hook(mp, block) __ON_HOOK_ARGS(rt_mp_alloc_hook, (mp, block))
#endif
#ifndef __on_rt_mp_free_hook
#define __on_rt_mp_free_hook(mp, block) __ON_HOOK_ARGS(rt_mp_free_hook, (mp, block))
#endif
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
static void (*rt_mp_alloc_hook)(struct rt_mempool *mp, void *block);
static void (*rt_mp_free_hook)(struct rt_mempool *mp, void *block);
/**
* @addtogroup Hook
*/
/**@{*/
/**
* @brief This function will set a hook function, which will be invoked when a memory
* block is allocated from the memory pool.
*
* @param hook the hook function
*/
void rt_mp_alloc_sethook(void (*hook)(struct rt_mempool *mp, void *block))
{
rt_mp_alloc_hook = hook;
}
/**
* @brief This function will set a hook function, which will be invoked when a memory
* block is released to the memory pool.
*
* @param hook the hook function
*/
void rt_mp_free_sethook(void (*hook)(struct rt_mempool *mp, void *block))
{
rt_mp_free_hook = hook;
}
/**@}*/
#endif /* RT_USING_HOOK */
/**
* @addtogroup MM
*/
/**@{*/
/**
* @brief This function will initialize a memory pool object, normally which is used
* for static object.
*
* @param mp is the memory pool object.
*
* @param name is the name of the memory pool.
*
* @param start is the start address of the memory pool.
*
* @param size is the total size of the memory pool.
*
* @param block_size is the size for each block.
*
* @return RT_EOK
*/
rt_err_t rt_mp_init(struct rt_mempool *mp,
const char *name,
void *start,
rt_size_t size,
rt_size_t block_size)
{
rt_uint8_t *block_ptr;
rt_size_t offset;
/* parameter check */
RT_ASSERT(mp != RT_NULL);
RT_ASSERT(name != RT_NULL);
RT_ASSERT(start != RT_NULL);
RT_ASSERT(size > 0 && block_size > 0);
/* initialize object */
rt_object_init(&(mp->parent), RT_Object_Class_MemPool, name);
/* initialize memory pool */
mp->start_address = start;
mp->size = RT_ALIGN_DOWN(size, RT_ALIGN_SIZE);
/* align the block size */
block_size = RT_ALIGN(block_size, RT_ALIGN_SIZE);
mp->block_size = block_size;
/* align to align size byte */
mp->block_total_count = mp->size / (mp->block_size + sizeof(rt_uint8_t *));
mp->block_free_count = mp->block_total_count;
/* initialize suspended thread list */
rt_list_init(&(mp->suspend_thread));
/* initialize free block list */
block_ptr = (rt_uint8_t *)mp->start_address;
for (offset = 0; offset < mp->block_total_count; offset ++)
{
*(rt_uint8_t **)(block_ptr + offset * (block_size + sizeof(rt_uint8_t *))) =
(rt_uint8_t *)(block_ptr + (offset + 1) * (block_size + sizeof(rt_uint8_t *)));
}
*(rt_uint8_t **)(block_ptr + (offset - 1) * (block_size + sizeof(rt_uint8_t *))) =
RT_NULL;
mp->block_list = block_ptr;
return RT_EOK;
}
RTM_EXPORT(rt_mp_init);
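/*
* Minimal static-pool sketch; buffer name and sizes below are hypothetical.
* Note that every block also stores one pointer of bookkeeping, so the buffer
* must hold block_count * (block_size + sizeof(rt_uint8_t *)) bytes.
*
*     static struct rt_mempool mp;
*     static rt_uint8_t pool_buf[16 * (32 + sizeof(rt_uint8_t *))];
*     rt_mp_init(&mp, "mp16", pool_buf, sizeof(pool_buf), 32);
*/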
/**
* @brief This function will detach a memory pool from system object management.
*
* @param mp is the memory pool object.
*
* @return RT_EOK
*/
rt_err_t rt_mp_detach(struct rt_mempool *mp)
{
struct rt_thread *thread;
rt_base_t level;
/* parameter check */
RT_ASSERT(mp != RT_NULL);
RT_ASSERT(rt_object_get_type(&mp->parent) == RT_Object_Class_MemPool);
RT_ASSERT(rt_object_is_systemobject(&mp->parent));
/* wake up all suspended threads */
while (!rt_list_isempty(&(mp->suspend_thread)))
{
/* disable interrupt */
level = rt_hw_interrupt_disable();
/* get next suspend thread */
thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
/* set error code to -RT_ERROR */
thread->error = -RT_ERROR;
/*
* resume thread
* the rt_thread_resume function will remove this thread from
* the suspend list
*/
rt_thread_resume(thread);
/* enable interrupt */
rt_hw_interrupt_enable(level);
}
/* detach object */
rt_object_detach(&(mp->parent));
return RT_EOK;
}
RTM_EXPORT(rt_mp_detach);
#ifdef RT_USING_HEAP
/**
* @brief This function will create a mempool object and allocate the memory pool from
* heap.
*
* @param name is the name of memory pool.
*
* @param block_count is the count of blocks in memory pool.
*
* @param block_size is the size for each block.
*
* @return the created mempool object
*/
rt_mp_t rt_mp_create(const char *name,
rt_size_t block_count,
rt_size_t block_size)
{
rt_uint8_t *block_ptr;
struct rt_mempool *mp;
rt_size_t offset;
RT_DEBUG_NOT_IN_INTERRUPT;
/* parameter check */
RT_ASSERT(name != RT_NULL);
RT_ASSERT(block_count > 0 && block_size > 0);
/* allocate object */
mp = (struct rt_mempool *)rt_object_allocate(RT_Object_Class_MemPool, name);
/* allocate object failed */
if (mp == RT_NULL)
return RT_NULL;
/* initialize memory pool */
block_size = RT_ALIGN(block_size, RT_ALIGN_SIZE);
mp->block_size = block_size;
mp->size = (block_size + sizeof(rt_uint8_t *)) * block_count;
/* allocate memory */
mp->start_address = rt_malloc((block_size + sizeof(rt_uint8_t *)) *
block_count);
if (mp->start_address == RT_NULL)
{
/* no memory, delete memory pool object */
rt_object_delete(&(mp->parent));
return RT_NULL;
}
mp->block_total_count = block_count;
mp->block_free_count = mp->block_total_count;
/* initialize suspended thread list */
rt_list_init(&(mp->suspend_thread));
/* initialize free block list */
block_ptr = (rt_uint8_t *)mp->start_address;
for (offset = 0; offset < mp->block_total_count; offset ++)
{
*(rt_uint8_t **)(block_ptr + offset * (block_size + sizeof(rt_uint8_t *)))
= block_ptr + (offset + 1) * (block_size + sizeof(rt_uint8_t *));
}
*(rt_uint8_t **)(block_ptr + (offset - 1) * (block_size + sizeof(rt_uint8_t *)))
= RT_NULL;
mp->block_list = block_ptr;
return mp;
}
RTM_EXPORT(rt_mp_create);
/**
* @brief This function will delete a memory pool and release the object memory.
*
* @param mp is the memory pool object.
*
* @return RT_EOK
*/
rt_err_t rt_mp_delete(rt_mp_t mp)
{
struct rt_thread *thread;
rt_base_t level;
RT_DEBUG_NOT_IN_INTERRUPT;
/* parameter check */
RT_ASSERT(mp != RT_NULL);
RT_ASSERT(rt_object_get_type(&mp->parent) == RT_Object_Class_MemPool);
RT_ASSERT(rt_object_is_systemobject(&mp->parent) == RT_FALSE);
/* wake up all suspended threads */
while (!rt_list_isempty(&(mp->suspend_thread)))
{
/* disable interrupt */
level = rt_hw_interrupt_disable();
/* get next suspend thread */
thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
/* set error code to -RT_ERROR */
thread->error = -RT_ERROR;
/*
* resume thread
* the rt_thread_resume function will remove this thread from
* the suspend list
*/
rt_thread_resume(thread);
/* enable interrupt */
rt_hw_interrupt_enable(level);
}
/* release allocated room */
rt_free(mp->start_address);
/* detach object */
rt_object_delete(&(mp->parent));
return RT_EOK;
}
RTM_EXPORT(rt_mp_delete);
#endif /* RT_USING_HEAP */
/**
* @brief This function will allocate a block from memory pool.
*
* @param mp is the memory pool object.
*
* @param time is the maximum waiting time for allocating memory.
* - 0 for no waiting: return immediately (RT_NULL if no block is free).
*
* @return the allocated memory block or RT_NULL if the allocation failed.
*/
void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time)
{
rt_uint8_t *block_ptr;
rt_base_t level;
struct rt_thread *thread;
rt_uint32_t before_sleep = 0;
/* parameter check */
RT_ASSERT(mp != RT_NULL);
/* get current thread */
thread = rt_thread_self();
/* disable interrupt */
level = rt_hw_interrupt_disable();
while (mp->block_free_count == 0)
{
/* memory block is unavailable. */
if (time == 0)
{
/* enable interrupt */
rt_hw_interrupt_enable(level);
rt_set_errno(-RT_ETIMEOUT);
return RT_NULL;
}
RT_DEBUG_NOT_IN_INTERRUPT;
thread->error = RT_EOK;
/* need suspend thread */
rt_thread_suspend(thread);
rt_list_insert_after(&(mp->suspend_thread), &(thread->tlist));
if (time > 0)
{
/* get the start tick of timer */
before_sleep = rt_tick_get();
/* init thread timer and start it */
rt_timer_control(&(thread->thread_timer),
RT_TIMER_CTRL_SET_TIME,
&time);
rt_timer_start(&(thread->thread_timer));
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
/* do a schedule */
rt_schedule();
if (thread->error != RT_EOK)
return RT_NULL;
if (time > 0)
{
time -= rt_tick_get() - before_sleep;
if (time < 0)
time = 0;
}
/* disable interrupt */
level = rt_hw_interrupt_disable();
}
/* memory block is available. decrease the free block counter */
mp->block_free_count--;
/* get block from block list */
block_ptr = mp->block_list;
RT_ASSERT(block_ptr != RT_NULL);
/* Setup the next free node. */
mp->block_list = *(rt_uint8_t **)block_ptr;
/* point to memory pool */
*(rt_uint8_t **)block_ptr = (rt_uint8_t *)mp;
/* enable interrupt */
rt_hw_interrupt_enable(level);
RT_OBJECT_HOOK_CALL(rt_mp_alloc_hook,
(mp, (rt_uint8_t *)(block_ptr + sizeof(rt_uint8_t *))));
return (rt_uint8_t *)(block_ptr + sizeof(rt_uint8_t *));
}
RTM_EXPORT(rt_mp_alloc);
/**
* @brief This function will release a memory block.
*
* @param block the address of memory block to be released.
*/
void rt_mp_free(void *block)
{
rt_uint8_t **block_ptr;
struct rt_mempool *mp;
struct rt_thread *thread;
rt_base_t level;
/* parameter check */
if (block == RT_NULL) return;
/* get the control block of pool which the block belongs to */
block_ptr = (rt_uint8_t **)((rt_uint8_t *)block - sizeof(rt_uint8_t *));
mp = (struct rt_mempool *)*block_ptr;
RT_OBJECT_HOOK_CALL(rt_mp_free_hook, (mp, block));
/* disable interrupt */
level = rt_hw_interrupt_disable();
/* increase the free block count */
mp->block_free_count ++;
/* link the block into the block list */
*block_ptr = mp->block_list;
mp->block_list = (rt_uint8_t *)block_ptr;
if (!rt_list_isempty(&(mp->suspend_thread)))
{
/* get the suspended thread */
thread = rt_list_entry(mp->suspend_thread.next,
struct rt_thread,
tlist);
/* set error */
thread->error = RT_EOK;
/* resume thread */
rt_thread_resume(thread);
/* enable interrupt */
rt_hw_interrupt_enable(level);
/* do a schedule */
rt_schedule();
return;
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
}
RTM_EXPORT(rt_mp_free);
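/*
* Illustrative dynamic-pool sketch; the pool name and sizes are hypothetical.
* rt_mp_alloc with time == 0 returns RT_NULL immediately when the pool is
* exhausted instead of blocking.
*
*     rt_mp_t mp = rt_mp_create("msgblk", 8, 32);
*     if (mp != RT_NULL)
*     {
*         void *blk = rt_mp_alloc(mp, RT_WAITING_FOREVER);
*         if (blk != RT_NULL)
*             rt_mp_free(blk);
*         rt_mp_delete(mp);
*     }
*/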
/**@}*/
#endif /* RT_USING_MEMPOOL */

717
src/object.c Normal file

@ -0,0 +1,717 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-14 Bernard the first version
* 2006-04-21 Bernard change the scheduler lock to interrupt lock
* 2006-05-18 Bernard fix the object init bug
* 2006-08-03 Bernard add hook support
* 2007-01-28 Bernard rename RT_OBJECT_Class_Static to RT_Object_Class_Static
* 2010-10-26 yi.qiu add module support in rt_object_allocate and rt_object_free
* 2017-12-10 Bernard Add object_info enum.
* 2018-01-25 Bernard Fix the object find issue when enable MODULE.
* 2022-01-07 Gabriel Moving __on_rt_xxxxx_hook to object.c
*/
#include <rtthread.h>
#include <rthw.h>
#ifdef RT_USING_MODULE
#include <dlmodule.h>
#endif /* RT_USING_MODULE */
#ifdef RT_USING_SMART
#include <lwp.h>
#endif
struct rt_custom_object
{
struct rt_object parent;
rt_err_t (*destroy)(void *);
void *data;
};
/*
* define object_info for the number of _object_container items.
*/
enum rt_object_info_type
{
RT_Object_Info_Thread = 0, /**< The object is a thread. */
#ifdef RT_USING_SEMAPHORE
RT_Object_Info_Semaphore, /**< The object is a semaphore. */
#endif
#ifdef RT_USING_MUTEX
RT_Object_Info_Mutex, /**< The object is a mutex. */
#endif
#ifdef RT_USING_EVENT
RT_Object_Info_Event, /**< The object is an event. */
#endif
#ifdef RT_USING_MAILBOX
RT_Object_Info_MailBox, /**< The object is a mail box. */
#endif
#ifdef RT_USING_MESSAGEQUEUE
RT_Object_Info_MessageQueue, /**< The object is a message queue. */
#endif
#ifdef RT_USING_MEMHEAP
RT_Object_Info_MemHeap, /**< The object is a memory heap */
#endif
#ifdef RT_USING_MEMPOOL
RT_Object_Info_MemPool, /**< The object is a memory pool. */
#endif
#ifdef RT_USING_DEVICE
RT_Object_Info_Device, /**< The object is a device */
#endif
RT_Object_Info_Timer, /**< The object is a timer. */
#ifdef RT_USING_MODULE
RT_Object_Info_Module, /**< The object is a module. */
#endif
#ifdef RT_USING_HEAP
RT_Object_Info_Memory, /**< The object is a memory. */
#endif
#ifdef RT_USING_SMART
RT_Object_Info_Channel, /**< The object is an IPC channel */
#endif
#ifdef RT_USING_HEAP
RT_Object_Info_Custom, /**< The object is a custom object */
#endif
RT_Object_Info_Unknown, /**< The object is unknown. */
};
#define _OBJ_CONTAINER_LIST_INIT(c) \
{&(_object_container[c].object_list), &(_object_container[c].object_list)}
static struct rt_object_information _object_container[RT_Object_Info_Unknown] =
{
/* initialize object container - thread */
{RT_Object_Class_Thread, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Thread), sizeof(struct rt_thread)},
#ifdef RT_USING_SEMAPHORE
/* initialize object container - semaphore */
{RT_Object_Class_Semaphore, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Semaphore), sizeof(struct rt_semaphore)},
#endif
#ifdef RT_USING_MUTEX
/* initialize object container - mutex */
{RT_Object_Class_Mutex, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Mutex), sizeof(struct rt_mutex)},
#endif
#ifdef RT_USING_EVENT
/* initialize object container - event */
{RT_Object_Class_Event, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Event), sizeof(struct rt_event)},
#endif
#ifdef RT_USING_MAILBOX
/* initialize object container - mailbox */
{RT_Object_Class_MailBox, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MailBox), sizeof(struct rt_mailbox)},
#endif
#ifdef RT_USING_MESSAGEQUEUE
/* initialize object container - message queue */
{RT_Object_Class_MessageQueue, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MessageQueue), sizeof(struct rt_messagequeue)},
#endif
#ifdef RT_USING_MEMHEAP
/* initialize object container - memory heap */
{RT_Object_Class_MemHeap, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MemHeap), sizeof(struct rt_memheap)},
#endif
#ifdef RT_USING_MEMPOOL
/* initialize object container - memory pool */
{RT_Object_Class_MemPool, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MemPool), sizeof(struct rt_mempool)},
#endif
#ifdef RT_USING_DEVICE
/* initialize object container - device */
{RT_Object_Class_Device, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Device), sizeof(struct rt_device)},
#endif
/* initialize object container - timer */
{RT_Object_Class_Timer, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Timer), sizeof(struct rt_timer)},
#ifdef RT_USING_MODULE
/* initialize object container - module */
{RT_Object_Class_Module, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Module), sizeof(struct rt_dlmodule)},
#endif
#ifdef RT_USING_HEAP
/* initialize object container - small memory */
{RT_Object_Class_Memory, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Memory), sizeof(struct rt_memory)},
#endif
#ifdef RT_USING_SMART
/* initialize object container - IPC channel and custom object */
{RT_Object_Class_Channel, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Channel), sizeof(struct rt_channel)},
{RT_Object_Class_Custom, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Custom), sizeof(struct rt_custom_object)},
#endif
};
#ifndef __on_rt_object_attach_hook
#define __on_rt_object_attach_hook(obj) __ON_HOOK_ARGS(rt_object_attach_hook, (obj))
#endif
#ifndef __on_rt_object_detach_hook
#define __on_rt_object_detach_hook(obj) __ON_HOOK_ARGS(rt_object_detach_hook, (obj))
#endif
#ifndef __on_rt_object_trytake_hook
#define __on_rt_object_trytake_hook(parent) __ON_HOOK_ARGS(rt_object_trytake_hook, (parent))
#endif
#ifndef __on_rt_object_take_hook
#define __on_rt_object_take_hook(parent) __ON_HOOK_ARGS(rt_object_take_hook, (parent))
#endif
#ifndef __on_rt_object_put_hook
#define __on_rt_object_put_hook(parent) __ON_HOOK_ARGS(rt_object_put_hook, (parent))
#endif
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
static void (*rt_object_attach_hook)(struct rt_object *object);
static void (*rt_object_detach_hook)(struct rt_object *object);
void (*rt_object_trytake_hook)(struct rt_object *object);
void (*rt_object_take_hook)(struct rt_object *object);
void (*rt_object_put_hook)(struct rt_object *object);
/**
* @addtogroup Hook
*/
/**@{*/
/**
* @brief This function will set a hook function, which will be invoked when object
* attaches to kernel object system.
*
* @param hook is the hook function.
*/
void rt_object_attach_sethook(void (*hook)(struct rt_object *object))
{
rt_object_attach_hook = hook;
}
/**
* @brief This function will set a hook function, which will be invoked when object
* detaches from kernel object system.
*
* @param hook is the hook function
*/
void rt_object_detach_sethook(void (*hook)(struct rt_object *object))
{
rt_object_detach_hook = hook;
}
/**
* @brief This function will set a hook function, which will be invoked when object
* is taken from kernel object system.
*
* The object being taken means:
* semaphore - semaphore is taken by thread
* mutex - mutex is taken by thread
* event - event is received by thread
* mailbox - mail is received by thread
* message queue - message is received by thread
*
* @param hook is the hook function.
*/
void rt_object_trytake_sethook(void (*hook)(struct rt_object *object))
{
rt_object_trytake_hook = hook;
}
/**
* @brief This function will set a hook function, which will be invoked when object
* has been taken from kernel object system.
*
* The object has been taken means:
* semaphore - semaphore has been taken by a thread
* mutex - mutex has been taken by a thread
* event - event has been received by a thread
* mailbox - mail has been received by a thread
* message queue - message has been received by a thread
* timer - timer is started
*
* @param hook the hook function.
*/
void rt_object_take_sethook(void (*hook)(struct rt_object *object))
{
rt_object_take_hook = hook;
}
/**
* @brief This function will set a hook function, which will be invoked when object
* is put to kernel object system.
*
* @param hook is the hook function
*/
void rt_object_put_sethook(void (*hook)(struct rt_object *object))
{
rt_object_put_hook = hook;
}
/**@}*/
#endif /* RT_USING_HOOK */
/**
* @addtogroup KernelObject
*/
/**@{*/
/**
* @brief This function will return the specified type of object information.
*
* @param type is the type of object, which can be
* RT_Object_Class_Thread/Semaphore/Mutex... etc
*
* @return the object type information or RT_NULL
*/
struct rt_object_information *
rt_object_get_information(enum rt_object_class_type type)
{
int index;
for (index = 0; index < RT_Object_Info_Unknown; index ++)
if (_object_container[index].type == type) return &_object_container[index];
return RT_NULL;
}
RTM_EXPORT(rt_object_get_information);
/**
* @brief This function will return the length of object list in object container.
*
* @param type is the type of object, which can be
* RT_Object_Class_Thread/Semaphore/Mutex... etc
*
* @return the length of object list
*/
int rt_object_get_length(enum rt_object_class_type type)
{
int count = 0;
rt_base_t level;
struct rt_list_node *node = RT_NULL;
struct rt_object_information *information = RT_NULL;
information = rt_object_get_information((enum rt_object_class_type)type);
if (information == RT_NULL) return 0;
level = rt_hw_interrupt_disable();
/* get the count of objects */
rt_list_for_each(node, &(information->object_list))
{
count ++;
}
rt_hw_interrupt_enable(level);
return count;
}
RTM_EXPORT(rt_object_get_length);
/**
* @brief This function will copy the object pointer of the specified type,
* with the maximum size specified by maxlen.
*
* @param type is the type of object, which can be
* RT_Object_Class_Thread/Semaphore/Mutex... etc
*
* @param pointers is the array where the object pointers will be saved.
*
* @param maxlen is the maximum number of pointers that can be saved.
*
* @return the copied number of object pointers.
*/
int rt_object_get_pointers(enum rt_object_class_type type, rt_object_t *pointers, int maxlen)
{
int index = 0;
rt_base_t level;
struct rt_object *object;
struct rt_list_node *node = RT_NULL;
struct rt_object_information *information = RT_NULL;
if (maxlen <= 0) return 0;
information = rt_object_get_information((enum rt_object_class_type)type);
if (information == RT_NULL) return 0;
level = rt_hw_interrupt_disable();
/* retrieve pointer of object */
rt_list_for_each(node, &(information->object_list))
{
object = rt_list_entry(node, struct rt_object, list);
pointers[index] = object;
index ++;
if (index >= maxlen) break;
}
rt_hw_interrupt_enable(level);
return index;
}
RTM_EXPORT(rt_object_get_pointers);
/**
* @brief This function will initialize an object and add it to object system
* management.
*
* @param object is the specified object to be initialized.
*
* @param type is the object type.
*
* @param name is the object name. In system, the object's name must be unique.
*/
void rt_object_init(struct rt_object *object,
enum rt_object_class_type type,
const char *name)
{
rt_base_t level;
#ifdef RT_DEBUG
struct rt_list_node *node = RT_NULL;
#endif
struct rt_object_information *information;
#ifdef RT_USING_MODULE
struct rt_dlmodule *module = dlmodule_self();
#endif /* RT_USING_MODULE */
/* get object information */
information = rt_object_get_information(type);
RT_ASSERT(information != RT_NULL);
#ifdef RT_DEBUG
/* check object type to avoid re-initialization */
/* enter critical */
rt_enter_critical();
/* try to find object */
for (node = information->object_list.next;
node != &(information->object_list);
node = node->next)
{
struct rt_object *obj;
obj = rt_list_entry(node, struct rt_object, list);
RT_ASSERT(obj != object);
}
/* leave critical */
rt_exit_critical();
#endif
/* initialize object's parameters */
/* set object type to static */
object->type = type | RT_Object_Class_Static;
#if RT_NAME_MAX > 0
rt_strncpy(object->name, name, RT_NAME_MAX); /* copy name */
#else
object->name = name;
#endif /* RT_NAME_MAX > 0 */
RT_OBJECT_HOOK_CALL(rt_object_attach_hook, (object));
/* lock interrupt */
level = rt_hw_interrupt_disable();
#ifdef RT_USING_MODULE
if (module)
{
rt_list_insert_after(&(module->object_list), &(object->list));
object->module_id = (void *)module;
}
else
#endif /* RT_USING_MODULE */
{
/* insert object into information object list */
rt_list_insert_after(&(information->object_list), &(object->list));
}
/* unlock interrupt */
rt_hw_interrupt_enable(level);
}
/**
* @brief This function will detach a static object from object system,
* and the memory of static object is not freed.
*
* @param object the specified object to be detached.
*/
void rt_object_detach(rt_object_t object)
{
rt_base_t level;
/* object check */
RT_ASSERT(object != RT_NULL);
RT_OBJECT_HOOK_CALL(rt_object_detach_hook, (object));
/* reset object type */
object->type = 0;
/* lock interrupt */
level = rt_hw_interrupt_disable();
/* remove from old list */
rt_list_remove(&(object->list));
/* unlock interrupt */
rt_hw_interrupt_enable(level);
}
#ifdef RT_USING_HEAP
/**
* @brief This function will allocate an object from object system.
*
* @param type is the type of object.
*
* @param name is the object name. In system, the object's name must be unique.
*
* @return object
*/
rt_object_t rt_object_allocate(enum rt_object_class_type type, const char *name)
{
struct rt_object *object;
rt_base_t level;
struct rt_object_information *information;
#ifdef RT_USING_MODULE
struct rt_dlmodule *module = dlmodule_self();
#endif /* RT_USING_MODULE */
RT_DEBUG_NOT_IN_INTERRUPT;
/* get object information */
information = rt_object_get_information(type);
RT_ASSERT(information != RT_NULL);
object = (struct rt_object *)RT_KERNEL_MALLOC(information->object_size);
if (object == RT_NULL)
{
/* no memory can be allocated */
return RT_NULL;
}
/* clean memory data of object */
rt_memset(object, 0x0, information->object_size);
/* initialize object's parameters */
/* set object type */
object->type = type;
/* set object flag */
object->flag = 0;
#if RT_NAME_MAX > 0
rt_strncpy(object->name, name, RT_NAME_MAX); /* copy name */
#else
object->name = name;
#endif /* RT_NAME_MAX > 0 */
RT_OBJECT_HOOK_CALL(rt_object_attach_hook, (object));
/* lock interrupt */
level = rt_hw_interrupt_disable();
#ifdef RT_USING_MODULE
if (module)
{
rt_list_insert_after(&(module->object_list), &(object->list));
object->module_id = (void *)module;
}
else
#endif /* RT_USING_MODULE */
{
/* insert object into information object list */
rt_list_insert_after(&(information->object_list), &(object->list));
}
/* unlock interrupt */
rt_hw_interrupt_enable(level);
/* return object */
return object;
}
/**
* @brief This function will delete an object and release object memory.
*
* @param object is the specified object to be deleted.
*/
void rt_object_delete(rt_object_t object)
{
rt_base_t level;
/* object check */
RT_ASSERT(object != RT_NULL);
RT_ASSERT(!(object->type & RT_Object_Class_Static));
RT_OBJECT_HOOK_CALL(rt_object_detach_hook, (object));
/* reset object type */
object->type = RT_Object_Class_Null;
/* lock interrupt */
level = rt_hw_interrupt_disable();
/* remove from old list */
rt_list_remove(&(object->list));
/* unlock interrupt */
rt_hw_interrupt_enable(level);
/* free the memory of object */
RT_KERNEL_FREE(object);
}
#endif /* RT_USING_HEAP */
/**
* @brief This function will judge the object is system object or not.
*
* @note Normally, the system object is a static object and the type
* of the object is set to RT_Object_Class_Static.
*
* @param object is the specified object to be judged.
*
* @return RT_TRUE if a system object, RT_FALSE for others.
*/
rt_bool_t rt_object_is_systemobject(rt_object_t object)
{
/* object check */
RT_ASSERT(object != RT_NULL);
if (object->type & RT_Object_Class_Static)
return RT_TRUE;
return RT_FALSE;
}
/**
* @brief This function will return the type of object without
* RT_Object_Class_Static flag.
*
* @param object is the specified object whose type is to be returned.
*
* @return the type of object.
*/
rt_uint8_t rt_object_get_type(rt_object_t object)
{
/* object check */
RT_ASSERT(object != RT_NULL);
return object->type & ~RT_Object_Class_Static;
}
/**
* @brief This function will find specified name object from object
* container.
*
* @param name is the specified name of object.
*
* @param type is the type of object
*
* @return the found object or RT_NULL if there is no such object
* in the object container.
*
* @note this function shall not be invoked in interrupt status.
*/
rt_object_t rt_object_find(const char *name, rt_uint8_t type)
{
struct rt_object *object = RT_NULL;
struct rt_list_node *node = RT_NULL;
struct rt_object_information *information = RT_NULL;
information = rt_object_get_information((enum rt_object_class_type)type);
/* parameter check */
if ((name == RT_NULL) || (information == RT_NULL)) return RT_NULL;
/* this function shall not be invoked in interrupt status */
RT_DEBUG_NOT_IN_INTERRUPT;
/* enter critical */
rt_enter_critical();
/* try to find object */
rt_list_for_each(node, &(information->object_list))
{
object = rt_list_entry(node, struct rt_object, list);
if (rt_strncmp(object->name, name, RT_NAME_MAX) == 0)
{
/* leave critical */
rt_exit_critical();
return object;
}
}
/* leave critical */
rt_exit_critical();
return RT_NULL;
}
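/*
* Illustrative lookup sketch; the thread name "tshell" is just an example:
*
*     rt_object_t obj = rt_object_find("tshell", RT_Object_Class_Thread);
*     if (obj != RT_NULL)
*         rt_kprintf("found thread object %s\n", obj->name);
*/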
/**
* @brief This function will return the name of the specified object container
*
* @param object the specified object whose name is to be copied
* @param name buffer to store the object name string
* @param name_size maximum size of the buffer to store object name
*
* @return -RT_EINVAL if any parameter is invalid or RT_EOK if the operation is successfully executed
*
* @note this function shall not be invoked in interrupt status
*/
rt_err_t rt_object_get_name(rt_object_t object, char *name, rt_uint8_t name_size)
{
rt_err_t result = -RT_EINVAL;
if ((object != RT_NULL) && (name != RT_NULL) && (name_size != 0U))
{
const char *obj_name = object->name;
(void) rt_strncpy(name, obj_name, (rt_size_t)name_size);
result = RT_EOK;
}
return result;
}
#ifdef RT_USING_HEAP
/**
* This function will create a custom object.
*
* @param name the specified name of object.
* @param data the custom data
* @param data_destroy the custom object destroy callback
*
* @return the created custom object or RT_NULL if the allocation failed.
*
* @note this function shall not be invoked in interrupt status.
*/
rt_object_t rt_custom_object_create(const char *name, void *data, rt_err_t (*data_destroy)(void *))
{
struct rt_custom_object *cobj = RT_NULL;
cobj = (struct rt_custom_object *)rt_object_allocate(RT_Object_Class_Custom, name);
if (!cobj)
{
return RT_NULL;
}
cobj->destroy = data_destroy;
cobj->data = data;
return (struct rt_object *)cobj;
}
/**
* This function will destroy a custom object.
*
* @param obj the custom object to be destroyed.
*
* @note this function shall not be invoked in interrupt status.
*/
rt_err_t rt_custom_object_destroy(rt_object_t obj)
{
rt_err_t ret = -1;
struct rt_custom_object *cobj = (struct rt_custom_object *)obj;
if (obj && obj->type == RT_Object_Class_Custom)
{
if (cobj->destroy)
{
ret = cobj->destroy(cobj->data);
}
rt_object_delete(obj);
}
return ret;
}
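/*
* Illustrative custom-object sketch; the payload and destroy callback below
* are hypothetical. The destroy callback runs when the object is destroyed.
*
*     static rt_err_t my_destroy(void *data)
*     {
*         rt_free(data);
*         return RT_EOK;
*     }
*     ...
*     void *data = rt_malloc(64);
*     rt_object_t obj = rt_custom_object_create("myobj", data, my_destroy);
*     if (obj != RT_NULL)
*         rt_custom_object_destroy(obj);
*/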
#endif
/**@}*/

758
src/scheduler_mp.c Normal file

@ -0,0 +1,758 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-17 Bernard the first version
* 2006-04-28 Bernard fix the scheduler algorithm
* 2006-04-30 Bernard add SCHEDULER_DEBUG
* 2006-05-27 Bernard fix the scheduler algorithm for same priority
* thread schedule
* 2006-06-04 Bernard rewrite the scheduler algorithm
* 2006-08-03 Bernard add hook support
* 2006-09-05 Bernard add 32 priority level support
* 2006-09-24 Bernard add rt_system_scheduler_start function
* 2009-09-16 Bernard fix _rt_scheduler_stack_check
* 2010-04-11 yi.qiu add module feature
* 2010-07-13 Bernard fix the maximal number of rt_scheduler_lock_nest
* issue found by kuronca
* 2010-12-13 Bernard add defunct list initialization even if not use heap.
* 2011-05-10 Bernard clean scheduler debug log.
* 2013-12-21 Grissiom add rt_critical_level
* 2018-11-22 Jesven remove the current task from ready queue
* add per cpu ready queue
* add _scheduler_get_highest_priority_thread to find highest priority task
* rt_schedule_insert_thread won't insert current task to ready queue
* in smp version, rt_hw_context_switch_interrupt maybe switch to
* new task directly
* 2022-01-07 Gabriel Moving __on_rt_xxxxx_hook to scheduler.c
* 2023-03-27 rose_man Split into scheduler_up.c and scheduler_mp.c
*/
#include <rtthread.h>
#include <rthw.h>
rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
rt_uint32_t rt_thread_ready_priority_group;
#if RT_THREAD_PRIORITY_MAX > 32
/* Maximum priority level, 256 */
rt_uint8_t rt_thread_ready_table[32];
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
#ifndef __on_rt_scheduler_hook
#define __on_rt_scheduler_hook(from, to) __ON_HOOK_ARGS(rt_scheduler_hook, (from, to))
#endif
#ifndef __on_rt_scheduler_switch_hook
#define __on_rt_scheduler_switch_hook(tid) __ON_HOOK_ARGS(rt_scheduler_switch_hook, (tid))
#endif
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);
static void (*rt_scheduler_switch_hook)(struct rt_thread *tid);
/**
* @addtogroup Hook
*/
/**@{*/
/**
* @brief This function will set a hook function, which will be invoked when thread
* switch happens.
*
* @param hook is the hook function.
*/
void rt_scheduler_sethook(void (*hook)(struct rt_thread *from, struct rt_thread *to))
{
rt_scheduler_hook = hook;
}
/**
* @brief This function will set a hook function, which will be invoked when context
* switch happens.
*
* @param hook is the hook function.
*/
void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid))
{
rt_scheduler_switch_hook = hook;
}
/**@}*/
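/*
* Illustrative hook sketch (the function below is hypothetical): trace every
* thread switch. Keep such hooks short, since they run on the scheduler path.
*
*     static void my_sched_hook(struct rt_thread *from, struct rt_thread *to)
*     {
*         rt_kprintf("%s -> %s\n", from->parent.name, to->parent.name);
*     }
*     ...
*     rt_scheduler_sethook(my_sched_hook);
*/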
#endif /* RT_USING_HOOK */
#ifdef RT_USING_OVERFLOW_CHECK
static void _scheduler_stack_check(struct rt_thread *thread)
{
RT_ASSERT(thread != RT_NULL);
#ifdef RT_USING_SMART
#ifndef ARCH_MM_MMU
struct rt_lwp *lwp = thread ? (struct rt_lwp *)thread->lwp : 0;
/* if the stack pointer is located in the user data section, skip the stack check. */
if (lwp && ((rt_uint32_t)thread->sp > (rt_uint32_t)lwp->data_entry &&
(rt_uint32_t)thread->sp <= (rt_uint32_t)lwp->data_entry + (rt_uint32_t)lwp->data_size))
{
return;
}
#endif /* not defined ARCH_MM_MMU */
#endif /* RT_USING_SMART */
#ifdef ARCH_CPU_STACK_GROWS_UPWARD
if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
#else
if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
(rt_ubase_t)thread->sp <= (rt_ubase_t)thread->stack_addr ||
(rt_ubase_t)thread->sp >
(rt_ubase_t)thread->stack_addr + (rt_ubase_t)thread->stack_size)
{
rt_base_t level;
rt_kprintf("thread:%s stack overflow\n", thread->parent.name);
level = rt_hw_interrupt_disable();
while (level);
}
#ifdef ARCH_CPU_STACK_GROWS_UPWARD
else if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
{
rt_kprintf("warning: %s stack is close to the top of stack address.\n",
thread->parent.name);
}
#else
else if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
{
rt_kprintf("warning: %s stack is close to end of stack address.\n",
thread->parent.name);
}
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
}
#endif /* RT_USING_OVERFLOW_CHECK */
/*
* get the highest priority thread in ready queue
*/
static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *highest_prio)
{
struct rt_thread *highest_priority_thread;
rt_ubase_t highest_ready_priority, local_highest_ready_priority;
struct rt_cpu* pcpu = rt_cpu_self();
#if RT_THREAD_PRIORITY_MAX > 32
rt_ubase_t number;
number = __rt_ffs(rt_thread_ready_priority_group) - 1;
highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
number = __rt_ffs(pcpu->priority_group) - 1;
local_highest_ready_priority = (number << 3) + __rt_ffs(pcpu->ready_table[number]) - 1;
#else
highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
local_highest_ready_priority = __rt_ffs(pcpu->priority_group) - 1;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
/* get highest ready priority thread */
if (highest_ready_priority < local_highest_ready_priority)
{
*highest_prio = highest_ready_priority;
highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
struct rt_thread,
tlist);
}
else
{
*highest_prio = local_highest_ready_priority;
highest_priority_thread = rt_list_entry(pcpu->priority_table[local_highest_ready_priority].next,
struct rt_thread,
tlist);
}
return highest_priority_thread;
}
/**
* @brief This function will initialize the system scheduler.
*/
void rt_system_scheduler_init(void)
{
int cpu;
rt_base_t offset;
RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("start scheduler: max priority 0x%02x\n",
RT_THREAD_PRIORITY_MAX));
for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
{
rt_list_init(&rt_thread_priority_table[offset]);
}
for (cpu = 0; cpu < RT_CPUS_NR; cpu++)
{
struct rt_cpu *pcpu = rt_cpu_index(cpu);
for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
{
rt_list_init(&pcpu->priority_table[offset]);
}
pcpu->irq_switch_flag = 0;
pcpu->current_priority = RT_THREAD_PRIORITY_MAX - 1;
pcpu->current_thread = RT_NULL;
pcpu->priority_group = 0;
#if RT_THREAD_PRIORITY_MAX > 32
rt_memset(pcpu->ready_table, 0, sizeof(pcpu->ready_table));
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
}
/* initialize ready priority group */
rt_thread_ready_priority_group = 0;
#if RT_THREAD_PRIORITY_MAX > 32
/* initialize ready table */
rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
}
/**
* @brief This function will start up the scheduler. It will select one thread
* with the highest priority level, then switch to it.
*/
void rt_system_scheduler_start(void)
{
struct rt_thread *to_thread;
rt_ubase_t highest_ready_priority;
to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
to_thread->oncpu = rt_hw_cpu_id();
rt_schedule_remove_thread(to_thread);
to_thread->stat = RT_THREAD_RUNNING;
/* switch to new thread */
rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp, to_thread);
/* never come back */
}
/**
* @addtogroup Thread
* @cond
*/
/**@{*/
/**
* @brief This function will handle IPI interrupt and do a scheduling in system.
*
* @param vector is the number of IPI interrupt for system scheduling.
*
* @param param is not used, and can be set to RT_NULL.
*
* @note this function should be invoked or registered as an ISR in the BSP.
*/
void rt_scheduler_ipi_handler(int vector, void *param)
{
rt_schedule();
}
/**
* @brief This function will perform one scheduling. It will select one thread
* with the highest priority level in global ready queue or local ready queue,
* then switch to it.
*/
void rt_schedule(void)
{
rt_base_t level;
struct rt_thread *to_thread;
struct rt_thread *current_thread;
struct rt_cpu *pcpu;
int cpu_id;
/* disable interrupt */
level = rt_hw_interrupt_disable();
cpu_id = rt_hw_cpu_id();
pcpu = rt_cpu_index(cpu_id);
current_thread = pcpu->current_thread;
/* if we are in an interrupt, defer the switch until the interrupt exits */
if (pcpu->irq_nest)
{
pcpu->irq_switch_flag = 1;
rt_hw_interrupt_enable(level);
goto __exit;
}
#ifdef RT_USING_SIGNALS
if ((current_thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
{
/* if current_thread signal is in pending */
if ((current_thread->stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL_PENDING)
{
#ifdef RT_USING_SMART
rt_thread_wakeup(current_thread);
#else
rt_thread_resume(current_thread);
#endif
}
}
#endif /* RT_USING_SIGNALS */
if (current_thread->scheduler_lock_nest == 1) /* whether the scheduler is locked */
{
rt_ubase_t highest_ready_priority;
if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
{
to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
current_thread->oncpu = RT_CPU_DETACHED;
if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
{
if (current_thread->bind_cpu == RT_CPUS_NR || current_thread->bind_cpu == cpu_id)
{
if (current_thread->current_priority < highest_ready_priority)
{
to_thread = current_thread;
}
else if (current_thread->current_priority == highest_ready_priority && (current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0)
{
to_thread = current_thread;
}
else
{
rt_schedule_insert_thread(current_thread);
}
}
else
{
rt_schedule_insert_thread(current_thread);
}
current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
}
to_thread->oncpu = cpu_id;
if (to_thread != current_thread)
{
/* if the destination thread is not the same as current thread */
pcpu->current_priority = (rt_uint8_t)highest_ready_priority;
RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));
rt_schedule_remove_thread(to_thread);
to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);
/* switch to new thread */
RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
("[%d]switch to priority#%d "
"thread:%.*s(sp:0x%08x), "
"from thread:%.*s(sp: 0x%08x)\n",
pcpu->irq_nest, highest_ready_priority,
RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
RT_NAME_MAX, current_thread->parent.name, current_thread->sp));
#ifdef RT_USING_OVERFLOW_CHECK
_scheduler_stack_check(to_thread);
#endif /* RT_USING_OVERFLOW_CHECK */
RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));
rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
(rt_ubase_t)&to_thread->sp, to_thread);
}
}
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
#ifdef RT_USING_SIGNALS
/* check stat of thread for signal */
level = rt_hw_interrupt_disable();
if (current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
{
extern void rt_thread_handle_sig(rt_bool_t clean_state);
current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
rt_hw_interrupt_enable(level);
/* check signal status */
rt_thread_handle_sig(RT_TRUE);
}
else
{
rt_hw_interrupt_enable(level);
}
#endif /* RT_USING_SIGNALS */
__exit:
return ;
}
/**
 * @brief This function checks whether scheduling is needed after IRQ processing. If so,
 * it will select the thread with the highest priority level, and then switch
 * to it.
*/
void rt_scheduler_do_irq_switch(void *context)
{
int cpu_id;
rt_base_t level;
struct rt_cpu* pcpu;
struct rt_thread *to_thread;
struct rt_thread *current_thread;
level = rt_hw_interrupt_disable();
cpu_id = rt_hw_cpu_id();
pcpu = rt_cpu_index(cpu_id);
current_thread = pcpu->current_thread;
#ifdef RT_USING_SIGNALS
if ((current_thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
{
        /* if the current thread has a pending signal */
if ((current_thread->stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL_PENDING)
{
#ifdef RT_USING_SMART
rt_thread_wakeup(current_thread);
#else
rt_thread_resume(current_thread);
#endif
}
}
#endif /* RT_USING_SIGNALS */
if (pcpu->irq_switch_flag == 0)
{
rt_hw_interrupt_enable(level);
return;
}
if (current_thread->scheduler_lock_nest == 1 && pcpu->irq_nest == 0)
{
rt_ubase_t highest_ready_priority;
/* clear irq switch flag */
pcpu->irq_switch_flag = 0;
if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
{
to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
current_thread->oncpu = RT_CPU_DETACHED;
if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
{
if (current_thread->bind_cpu == RT_CPUS_NR || current_thread->bind_cpu == cpu_id)
{
if (current_thread->current_priority < highest_ready_priority)
{
to_thread = current_thread;
}
else if (current_thread->current_priority == highest_ready_priority && (current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0)
{
to_thread = current_thread;
}
else
{
rt_schedule_insert_thread(current_thread);
}
}
else
{
rt_schedule_insert_thread(current_thread);
}
current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
}
to_thread->oncpu = cpu_id;
if (to_thread != current_thread)
{
/* if the destination thread is not the same as current thread */
pcpu->current_priority = (rt_uint8_t)highest_ready_priority;
RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));
rt_schedule_remove_thread(to_thread);
to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);
#ifdef RT_USING_OVERFLOW_CHECK
_scheduler_stack_check(to_thread);
#endif /* RT_USING_OVERFLOW_CHECK */
RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));
RT_ASSERT(current_thread->cpus_lock_nest > 0);
current_thread->cpus_lock_nest--;
current_thread->scheduler_lock_nest--;
RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));
rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp,
(rt_ubase_t)&to_thread->sp, to_thread);
}
}
}
rt_hw_interrupt_enable(level);
}
/**
 * @brief This function will insert a thread into the system ready queue. The state of
 * the thread will be set to READY and the thread will be removed from the suspend queue.
*
* @param thread is the thread to be inserted.
*
* @note Please do not invoke this function in user application.
*/
void rt_schedule_insert_thread(struct rt_thread *thread)
{
int cpu_id;
int bind_cpu;
rt_uint32_t cpu_mask;
rt_base_t level;
RT_ASSERT(thread != RT_NULL);
/* disable interrupt */
level = rt_hw_interrupt_disable();
    /* the thread is still attached to a CPU, so keep it in the RUNNING state */
if (thread->oncpu != RT_CPU_DETACHED)
{
thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
goto __exit;
}
/* READY thread, insert to ready queue */
thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);
cpu_id = rt_hw_cpu_id();
bind_cpu = thread->bind_cpu ;
/* insert thread to ready list */
if (bind_cpu == RT_CPUS_NR)
{
#if RT_THREAD_PRIORITY_MAX > 32
rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
rt_thread_ready_priority_group |= thread->number_mask;
        /* no time slices left (YIELD): insert the thread at the tail of the ready list */
if((thread->stat & RT_THREAD_STAT_YIELD_MASK) != 0)
{
rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
&(thread->tlist));
}
        /* time slices remain: insert the thread at the head of the ready list so it is scheduled first next time */
else
{
rt_list_insert_after(&(rt_thread_priority_table[thread->current_priority]),
&(thread->tlist));
}
cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
}
else
{
struct rt_cpu *pcpu = rt_cpu_index(bind_cpu);
#if RT_THREAD_PRIORITY_MAX > 32
pcpu->ready_table[thread->number] |= thread->high_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
pcpu->priority_group |= thread->number_mask;
        /* no time slices left (YIELD): insert the thread at the tail of the ready list */
if((thread->stat & RT_THREAD_STAT_YIELD_MASK) != 0)
{
rt_list_insert_before(&(rt_cpu_index(bind_cpu)->priority_table[thread->current_priority]),
&(thread->tlist));
}
        /* time slices remain: insert the thread at the head of the ready list so it is scheduled first next time */
else
{
rt_list_insert_after(&(rt_cpu_index(bind_cpu)->priority_table[thread->current_priority]),
&(thread->tlist));
}
if (cpu_id != bind_cpu)
{
cpu_mask = 1 << bind_cpu;
rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
}
}
RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
RT_NAME_MAX, thread->parent.name, thread->current_priority));
__exit:
/* enable interrupt */
rt_hw_interrupt_enable(level);
}
/**
* @brief This function will remove a thread from system ready queue.
*
* @param thread is the thread to be removed.
*
* @note Please do not invoke this function in user application.
*/
void rt_schedule_remove_thread(struct rt_thread *thread)
{
rt_base_t level;
RT_ASSERT(thread != RT_NULL);
/* disable interrupt */
level = rt_hw_interrupt_disable();
RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
RT_NAME_MAX, thread->parent.name,
thread->current_priority));
/* remove thread from ready list */
rt_list_remove(&(thread->tlist));
if (thread->bind_cpu == RT_CPUS_NR)
{
if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
{
#if RT_THREAD_PRIORITY_MAX > 32
rt_thread_ready_table[thread->number] &= ~thread->high_mask;
if (rt_thread_ready_table[thread->number] == 0)
{
rt_thread_ready_priority_group &= ~thread->number_mask;
}
#else
rt_thread_ready_priority_group &= ~thread->number_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
}
}
else
{
struct rt_cpu *pcpu = rt_cpu_index(thread->bind_cpu);
if (rt_list_isempty(&(pcpu->priority_table[thread->current_priority])))
{
#if RT_THREAD_PRIORITY_MAX > 32
pcpu->ready_table[thread->number] &= ~thread->high_mask;
if (pcpu->ready_table[thread->number] == 0)
{
pcpu->priority_group &= ~thread->number_mask;
}
#else
pcpu->priority_group &= ~thread->number_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
}
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
}
/**
* @brief This function will lock the thread scheduler.
*/
void rt_enter_critical(void)
{
rt_base_t level;
struct rt_thread *current_thread;
/* disable interrupt */
level = rt_hw_local_irq_disable();
current_thread = rt_cpu_self()->current_thread;
if (!current_thread)
{
rt_hw_local_irq_enable(level);
return;
}
/*
     * the maximal nesting depth is RT_UINT16_MAX, which is large
     * enough, so it is not checked here
*/
{
rt_uint16_t lock_nest = current_thread->cpus_lock_nest;
current_thread->cpus_lock_nest++;
RT_ASSERT(current_thread->cpus_lock_nest != 0);
if (lock_nest == 0)
{
current_thread->scheduler_lock_nest ++;
rt_hw_spin_lock(&_cpus_lock);
}
}
/* critical for local cpu */
current_thread->critical_lock_nest ++;
/* lock scheduler for local cpu */
current_thread->scheduler_lock_nest ++;
/* enable interrupt */
rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_enter_critical);
/**
* @brief This function will unlock the thread scheduler.
*/
void rt_exit_critical(void)
{
rt_base_t level;
struct rt_thread *current_thread;
/* disable interrupt */
level = rt_hw_local_irq_disable();
current_thread = rt_cpu_self()->current_thread;
if (!current_thread)
{
rt_hw_local_irq_enable(level);
return;
}
current_thread->scheduler_lock_nest --;
current_thread->critical_lock_nest --;
RT_ASSERT(current_thread->cpus_lock_nest > 0);
current_thread->cpus_lock_nest--;
if (current_thread->cpus_lock_nest == 0)
{
current_thread->scheduler_lock_nest --;
rt_hw_spin_unlock(&_cpus_lock);
}
if (current_thread->scheduler_lock_nest <= 0)
{
current_thread->scheduler_lock_nest = 0;
/* enable interrupt */
rt_hw_local_irq_enable(level);
rt_schedule();
}
else
{
/* enable interrupt */
rt_hw_local_irq_enable(level);
}
}
RTM_EXPORT(rt_exit_critical);
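/*
 * Usage sketch (illustrative only): protect a short critical region from
 * preemption on the local CPU. "shared_counter" is a hypothetical variable.
 *
 *     rt_enter_critical();
 *     shared_counter++;        (no thread switch can happen in between)
 *     rt_exit_critical();
 */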
/**
* @brief Get the scheduler lock level.
*
* @return the level of the scheduler lock. 0 means unlocked.
*/
rt_uint16_t rt_critical_level(void)
{
struct rt_thread *current_thread = rt_cpu_self()->current_thread;
return current_thread->critical_lock_nest;
}
RTM_EXPORT(rt_critical_level);
/**@}*/
/**@endcond*/

504
src/scheduler_up.c Normal file
View file

@ -0,0 +1,504 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-17 Bernard the first version
 * 2006-04-28 Bernard fix the scheduler algorithm
* 2006-04-30 Bernard add SCHEDULER_DEBUG
 * 2006-05-27 Bernard fix the scheduler algorithm for same priority
* thread schedule
* 2006-06-04 Bernard rewrite the scheduler algorithm
* 2006-08-03 Bernard add hook support
* 2006-09-05 Bernard add 32 priority level support
* 2006-09-24 Bernard add rt_system_scheduler_start function
* 2009-09-16 Bernard fix _rt_scheduler_stack_check
* 2010-04-11 yi.qiu add module feature
* 2010-07-13 Bernard fix the maximal number of rt_scheduler_lock_nest
* issue found by kuronca
 * 2010-12-13 Bernard add defunct list initialization even if heap is not used.
* 2011-05-10 Bernard clean scheduler debug log.
* 2013-12-21 Grissiom add rt_critical_level
* 2018-11-22 Jesven remove the current task from ready queue
* add per cpu ready queue
* add _scheduler_get_highest_priority_thread to find highest priority task
* rt_schedule_insert_thread won't insert current task to ready queue
 * in smp version, rt_hw_context_switch_interrupt may switch to
* new task directly
* 2022-01-07 Gabriel Moving __on_rt_xxxxx_hook to scheduler.c
 * 2023-03-27 rose_man Split into scheduler_up.c and scheduler_mp.c
*/
#include <rtthread.h>
#include <rthw.h>
rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
rt_uint32_t rt_thread_ready_priority_group;
#if RT_THREAD_PRIORITY_MAX > 32
/* Maximum priority level, 256 */
rt_uint8_t rt_thread_ready_table[32];
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
extern volatile rt_uint8_t rt_interrupt_nest;
static rt_int16_t rt_scheduler_lock_nest;
struct rt_thread *rt_current_thread = RT_NULL;
rt_uint8_t rt_current_priority;
#ifndef __on_rt_scheduler_hook
#define __on_rt_scheduler_hook(from, to) __ON_HOOK_ARGS(rt_scheduler_hook, (from, to))
#endif
#ifndef __on_rt_scheduler_switch_hook
#define __on_rt_scheduler_switch_hook(tid) __ON_HOOK_ARGS(rt_scheduler_switch_hook, (tid))
#endif
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);
static void (*rt_scheduler_switch_hook)(struct rt_thread *tid);
/**
* @addtogroup Hook
*/
/**@{*/
/**
* @brief This function will set a hook function, which will be invoked when thread
* switch happens.
*
* @param hook is the hook function.
*/
void rt_scheduler_sethook(void (*hook)(struct rt_thread *from, struct rt_thread *to))
{
rt_scheduler_hook = hook;
}
/**
* @brief This function will set a hook function, which will be invoked when context
* switch happens.
*
* @param hook is the hook function.
*/
void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid))
{
rt_scheduler_switch_hook = hook;
}
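/*
 * Usage sketch (illustrative only): trace every thread switch. The hook
 * name "switch_trace" is hypothetical; it is registered once at startup.
 *
 *     static void switch_trace(struct rt_thread *from, struct rt_thread *to)
 *     {
 *         rt_kprintf("switch: %s -> %s\n", from->parent.name, to->parent.name);
 *     }
 *
 *     rt_scheduler_sethook(switch_trace);
 */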
/**@}*/
#endif /* RT_USING_HOOK */
#ifdef RT_USING_OVERFLOW_CHECK
static void _scheduler_stack_check(struct rt_thread *thread)
{
RT_ASSERT(thread != RT_NULL);
#ifdef RT_USING_SMART
#ifndef ARCH_MM_MMU
struct rt_lwp *lwp = thread ? (struct rt_lwp *)thread->lwp : 0;
    /* if the stack pointer is located in the user data section, skip the stack check. */
if (lwp && ((rt_uint32_t)thread->sp > (rt_uint32_t)lwp->data_entry &&
(rt_uint32_t)thread->sp <= (rt_uint32_t)lwp->data_entry + (rt_uint32_t)lwp->data_size))
{
return;
}
#endif /* not defined ARCH_MM_MMU */
#endif /* RT_USING_SMART */
#ifdef ARCH_CPU_STACK_GROWS_UPWARD
if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
#else
if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
(rt_ubase_t)thread->sp <= (rt_ubase_t)thread->stack_addr ||
(rt_ubase_t)thread->sp >
(rt_ubase_t)thread->stack_addr + (rt_ubase_t)thread->stack_size)
{
rt_base_t level;
rt_kprintf("thread:%s stack overflow\n", thread->parent.name);
level = rt_hw_interrupt_disable();
while (level);
}
#ifdef ARCH_CPU_STACK_GROWS_UPWARD
else if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
{
rt_kprintf("warning: %s stack is close to the top of stack address.\n",
thread->parent.name);
}
#else
else if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
{
rt_kprintf("warning: %s stack is close to end of stack address.\n",
thread->parent.name);
}
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
}
#endif /* RT_USING_OVERFLOW_CHECK */
static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *highest_prio)
{
struct rt_thread *highest_priority_thread;
rt_ubase_t highest_ready_priority;
#if RT_THREAD_PRIORITY_MAX > 32
rt_ubase_t number;
number = __rt_ffs(rt_thread_ready_priority_group) - 1;
highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
#else
highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
/* get highest ready priority thread */
highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
struct rt_thread,
tlist);
*highest_prio = highest_ready_priority;
return highest_priority_thread;
}
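/*
 * Worked example (32-priority build): if rt_thread_ready_priority_group is
 * 0x00000028 (bits 3 and 5 set), __rt_ffs() returns 4, so the highest ready
 * priority is 3 and the first thread in rt_thread_priority_table[3] is chosen.
 */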
/**
* @brief This function will initialize the system scheduler.
*/
void rt_system_scheduler_init(void)
{
rt_base_t offset;
rt_scheduler_lock_nest = 0;
RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("start scheduler: max priority 0x%02x\n",
RT_THREAD_PRIORITY_MAX));
for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
{
rt_list_init(&rt_thread_priority_table[offset]);
}
/* initialize ready priority group */
rt_thread_ready_priority_group = 0;
#if RT_THREAD_PRIORITY_MAX > 32
/* initialize ready table */
rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
}
/**
* @brief This function will startup the scheduler. It will select one thread
* with the highest priority level, then switch to it.
*/
void rt_system_scheduler_start(void)
{
struct rt_thread *to_thread;
rt_ubase_t highest_ready_priority;
to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
rt_current_thread = to_thread;
rt_schedule_remove_thread(to_thread);
to_thread->stat = RT_THREAD_RUNNING;
/* switch to new thread */
rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp);
/* never come back */
}
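/*
 * Note (informational, typical boot flow): rt_system_scheduler_init() is
 * called early from rtthread_startup(), application threads are created
 * afterwards, and rt_system_scheduler_start() is invoked last; it switches to
 * the highest priority ready thread and never returns to its caller.
 */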
/**
* @addtogroup Thread
* @cond
*/
/**@{*/
/**
* @brief This function will perform scheduling once. It will select one thread
* with the highest priority, and switch to it immediately.
*/
void rt_schedule(void)
{
rt_base_t level;
struct rt_thread *to_thread;
struct rt_thread *from_thread;
/* disable interrupt */
level = rt_hw_interrupt_disable();
    /* check whether the scheduler is enabled */
if (rt_scheduler_lock_nest == 0)
{
rt_ubase_t highest_ready_priority;
if (rt_thread_ready_priority_group != 0)
{
/* need_insert_from_thread: need to insert from_thread to ready queue */
int need_insert_from_thread = 0;
to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
if ((rt_current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
{
if (rt_current_thread->current_priority < highest_ready_priority)
{
to_thread = rt_current_thread;
}
else if (rt_current_thread->current_priority == highest_ready_priority && (rt_current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0)
{
to_thread = rt_current_thread;
}
else
{
need_insert_from_thread = 1;
}
rt_current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
}
if (to_thread != rt_current_thread)
{
/* if the destination thread is not the same as current thread */
rt_current_priority = (rt_uint8_t)highest_ready_priority;
from_thread = rt_current_thread;
rt_current_thread = to_thread;
RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));
if (need_insert_from_thread)
{
rt_schedule_insert_thread(from_thread);
}
rt_schedule_remove_thread(to_thread);
to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);
/* switch to new thread */
RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
("[%d]switch to priority#%d "
"thread:%.*s(sp:0x%08x), "
"from thread:%.*s(sp: 0x%08x)\n",
rt_interrupt_nest, highest_ready_priority,
RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
RT_NAME_MAX, from_thread->parent.name, from_thread->sp));
#ifdef RT_USING_OVERFLOW_CHECK
_scheduler_stack_check(to_thread);
#endif /* RT_USING_OVERFLOW_CHECK */
if (rt_interrupt_nest == 0)
{
extern void rt_thread_handle_sig(rt_bool_t clean_state);
RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));
rt_hw_context_switch((rt_ubase_t)&from_thread->sp,
(rt_ubase_t)&to_thread->sp);
/* enable interrupt */
rt_hw_interrupt_enable(level);
#ifdef RT_USING_SIGNALS
/* check stat of thread for signal */
level = rt_hw_interrupt_disable();
if (rt_current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
{
extern void rt_thread_handle_sig(rt_bool_t clean_state);
rt_current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
rt_hw_interrupt_enable(level);
/* check signal status */
rt_thread_handle_sig(RT_TRUE);
}
else
{
rt_hw_interrupt_enable(level);
}
#endif /* RT_USING_SIGNALS */
goto __exit;
}
else
{
RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));
rt_hw_context_switch_interrupt((rt_ubase_t)&from_thread->sp,
(rt_ubase_t)&to_thread->sp, from_thread, to_thread);
}
}
else
{
rt_schedule_remove_thread(rt_current_thread);
rt_current_thread->stat = RT_THREAD_RUNNING | (rt_current_thread->stat & ~RT_THREAD_STAT_MASK);
}
}
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
__exit:
return;
}
/**
 * @brief This function will insert a thread into the system ready queue. The state of
 * the thread will be set to READY and the thread will be removed from the suspend queue.
*
* @param thread is the thread to be inserted.
*
* @note Please do not invoke this function in user application.
*/
void rt_schedule_insert_thread(struct rt_thread *thread)
{
rt_base_t level;
RT_ASSERT(thread != RT_NULL);
/* disable interrupt */
level = rt_hw_interrupt_disable();
    /* it is the current thread, so it should remain a RUNNING thread */
if (thread == rt_current_thread)
{
thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
goto __exit;
}
/* READY thread, insert to ready queue */
thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);
    /* no time slices left (YIELD): insert the thread at the tail of the ready list */
if((thread->stat & RT_THREAD_STAT_YIELD_MASK) != 0)
{
rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
&(thread->tlist));
}
    /* time slices remain: insert the thread at the head of the ready list so it is scheduled first next time */
else
{
rt_list_insert_after(&(rt_thread_priority_table[thread->current_priority]),
&(thread->tlist));
}
RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
RT_NAME_MAX, thread->parent.name, thread->current_priority));
/* set priority mask */
#if RT_THREAD_PRIORITY_MAX > 32
rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
rt_thread_ready_priority_group |= thread->number_mask;
__exit:
/* enable interrupt */
rt_hw_interrupt_enable(level);
}
/**
* @brief This function will remove a thread from system ready queue.
*
* @param thread is the thread to be removed.
*
* @note Please do not invoke this function in user application.
*/
void rt_schedule_remove_thread(struct rt_thread *thread)
{
rt_base_t level;
RT_ASSERT(thread != RT_NULL);
/* disable interrupt */
level = rt_hw_interrupt_disable();
RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
RT_NAME_MAX, thread->parent.name,
thread->current_priority));
/* remove thread from ready list */
rt_list_remove(&(thread->tlist));
if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
{
#if RT_THREAD_PRIORITY_MAX > 32
rt_thread_ready_table[thread->number] &= ~thread->high_mask;
if (rt_thread_ready_table[thread->number] == 0)
{
rt_thread_ready_priority_group &= ~thread->number_mask;
}
#else
rt_thread_ready_priority_group &= ~thread->number_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
}
/**
* @brief This function will lock the thread scheduler.
*/
void rt_enter_critical(void)
{
rt_base_t level;
/* disable interrupt */
level = rt_hw_interrupt_disable();
/*
     * the maximal nesting depth is RT_UINT16_MAX, which is large
     * enough, so it is not checked here
*/
rt_scheduler_lock_nest ++;
/* enable interrupt */
rt_hw_interrupt_enable(level);
}
RTM_EXPORT(rt_enter_critical);
/**
* @brief This function will unlock the thread scheduler.
*/
void rt_exit_critical(void)
{
rt_base_t level;
/* disable interrupt */
level = rt_hw_interrupt_disable();
rt_scheduler_lock_nest --;
if (rt_scheduler_lock_nest <= 0)
{
rt_scheduler_lock_nest = 0;
/* enable interrupt */
rt_hw_interrupt_enable(level);
if (rt_current_thread)
{
/* if scheduler is started, do a schedule */
rt_schedule();
}
}
else
{
/* enable interrupt */
rt_hw_interrupt_enable(level);
}
}
RTM_EXPORT(rt_exit_critical);
/**
* @brief Get the scheduler lock level.
*
* @return the level of the scheduler lock. 0 means unlocked.
*/
rt_uint16_t rt_critical_level(void)
{
return rt_scheduler_lock_nest;
}
RTM_EXPORT(rt_critical_level);
/**@}*/
/**@endcond*/

656
src/signal.c Normal file
View file

@ -0,0 +1,656 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017/10/5 Bernard the first version
* 2018/09/17 Jesven fix: in _signal_deliver RT_THREAD_STAT_MASK to RT_THREAD_STAT_SIGNAL_MASK
* 2018/11/22 Jesven in smp version rt_hw_context_switch_to add a param
*/
#include <stdint.h>
#include <string.h>
#include <rthw.h>
#include <rtthread.h>
#ifdef RT_USING_SIGNALS
#ifndef RT_SIG_INFO_MAX
#define RT_SIG_INFO_MAX 32
#endif /* RT_SIG_INFO_MAX */
#define DBG_TAG "SIGN"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
#define sig_mask(sig_no) (1u << sig_no)
#define sig_valid(sig_no) (sig_no >= 0 && sig_no < RT_SIG_MAX)
struct siginfo_node
{
siginfo_t si;
struct rt_slist_node list;
};
static struct rt_mempool *_siginfo_pool;
static void _signal_deliver(rt_thread_t tid);
void rt_thread_handle_sig(rt_bool_t clean_state);
static void _signal_default_handler(int signo)
{
LOG_I("handled signo[%d] with default action.", signo);
return ;
}
static void _signal_entry(void *parameter)
{
rt_thread_t tid = rt_thread_self();
/* handle signal */
rt_thread_handle_sig(RT_FALSE);
#ifdef RT_USING_SMP
{
struct rt_cpu* pcpu = rt_cpu_self();
RT_ASSERT(pcpu->current_thread->cpus_lock_nest > 0);
pcpu->current_thread->cpus_lock_nest--;
if (pcpu->current_thread->cpus_lock_nest == 0)
{
pcpu->current_thread->scheduler_lock_nest--;
}
}
#else
/* return to thread */
tid->sp = tid->sig_ret;
tid->sig_ret = RT_NULL;
#endif /* RT_USING_SMP */
LOG_D("switch back to: 0x%08x\n", tid->sp);
tid->stat &= ~RT_THREAD_STAT_SIGNAL;
#ifdef RT_USING_SMP
rt_hw_context_switch_to((rt_base_t)&parameter, tid);
#else
rt_hw_context_switch_to((rt_ubase_t)&(tid->sp));
#endif /* RT_USING_SMP */
}
/*
 * To deliver a signal to a thread, there are two cases:
 * 1. When the thread is suspended, this function resumes the thread and
 *    sets its signal state;
 * 2. When the thread is ready:
 *    - If the signal is delivered to the current thread, it is handled
 *      directly.
 *    - If the signal is delivered to another ready thread, the OS
 *      builds a separate context slice to handle it.
*/
static void _signal_deliver(rt_thread_t tid)
{
rt_base_t level;
level = rt_hw_interrupt_disable();
    /* thread is not interested in the pending signals */
if (!(tid->sig_pending & tid->sig_mask))
{
rt_hw_interrupt_enable(level);
return;
}
if ((tid->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
{
/* resume thread to handle signal */
#ifdef RT_USING_SMART
rt_thread_wakeup(tid);
#else
rt_thread_resume(tid);
#endif
/* add signal state */
tid->stat |= (RT_THREAD_STAT_SIGNAL | RT_THREAD_STAT_SIGNAL_PENDING);
rt_hw_interrupt_enable(level);
/* re-schedule */
rt_schedule();
}
else
{
if (tid == rt_thread_self())
{
/* add signal state */
tid->stat |= RT_THREAD_STAT_SIGNAL;
rt_hw_interrupt_enable(level);
/* do signal action in self thread context */
if (rt_interrupt_get_nest() == 0)
{
rt_thread_handle_sig(RT_TRUE);
}
}
else if (!((tid->stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL))
{
/* add signal state */
tid->stat |= (RT_THREAD_STAT_SIGNAL | RT_THREAD_STAT_SIGNAL_PENDING);
#ifdef RT_USING_SMP
{
int cpu_id;
cpu_id = tid->oncpu;
if ((cpu_id != RT_CPU_DETACHED) && (cpu_id != rt_hw_cpu_id()))
{
rt_uint32_t cpu_mask;
cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
}
}
#else
/* point to the signal handle entry */
tid->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
tid->sig_ret = tid->sp;
tid->sp = rt_hw_stack_init((void *)_signal_entry, RT_NULL,
(void *)((char *)tid->sig_ret - 32), RT_NULL);
#endif /* RT_USING_SMP */
rt_hw_interrupt_enable(level);
LOG_D("signal stack pointer @ 0x%08x", tid->sp);
/* re-schedule */
rt_schedule();
}
else
{
rt_hw_interrupt_enable(level);
}
}
}
#ifdef RT_USING_SMP
void *rt_signal_check(void* context)
{
rt_base_t level;
int cpu_id;
struct rt_cpu* pcpu;
struct rt_thread *current_thread;
level = rt_hw_interrupt_disable();
cpu_id = rt_hw_cpu_id();
pcpu = rt_cpu_index(cpu_id);
current_thread = pcpu->current_thread;
if (pcpu->irq_nest)
{
rt_hw_interrupt_enable(level);
return context;
}
if (current_thread->cpus_lock_nest == 1)
{
if (current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
{
void *sig_context;
current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
rt_hw_interrupt_enable(level);
sig_context = rt_hw_stack_init((void *)_signal_entry, context,
(void*)((char*)context - 32), RT_NULL);
return sig_context;
}
}
rt_hw_interrupt_enable(level);
return context;
}
#endif /* RT_USING_SMP */
/**
* @brief This function will install a processing function to a specific
* signal and return the old processing function of this signal.
*
* @note This function needs to be used in conjunction with the
* rt_signal_unmask() function to make the signal effective.
*
* @see rt_signal_unmask()
*
* @param signo is a specific signal value (range: 0 ~ RT_SIG_MAX).
*
 * @param handler is the handler to be installed for this signal.
 *
 * @return Return the old processing function of this signal. Only when the
 * return value is SIG_ERR has the operation failed.
*/
rt_sighandler_t rt_signal_install(int signo, rt_sighandler_t handler)
{
rt_base_t level;
rt_sighandler_t old = RT_NULL;
rt_thread_t tid = rt_thread_self();
if (!sig_valid(signo)) return SIG_ERR;
level = rt_hw_interrupt_disable();
if (tid->sig_vectors == RT_NULL)
{
rt_thread_alloc_sig(tid);
}
if (tid->sig_vectors)
{
old = tid->sig_vectors[signo];
if (handler == SIG_IGN) tid->sig_vectors[signo] = RT_NULL;
else if (handler == SIG_DFL) tid->sig_vectors[signo] = _signal_default_handler;
else tid->sig_vectors[signo] = handler;
}
rt_hw_interrupt_enable(level);
return old;
}
/**
* @brief This function will block the specified signal.
*
 * @note This function will block the specified signal; even if the
 * rt_thread_kill() function is called to send this signal to
 * the current thread, it will not take effect.
*
* @see rt_thread_kill()
*
* @param signo is a specific signal value (range: 0 ~ RT_SIG_MAX).
*/
void rt_signal_mask(int signo)
{
rt_base_t level;
rt_thread_t tid = rt_thread_self();
level = rt_hw_interrupt_disable();
tid->sig_mask &= ~sig_mask(signo);
rt_hw_interrupt_enable(level);
}
/**
* @brief This function will unblock the specified signal.
*
* @note This function will unblock the specified signal. After calling
* the rt_thread_kill() function to send this signal to the current
* thread, it will take effect.
*
* @see rt_thread_kill()
*
* @param signo is a specific signal value (range: 0 ~ RT_SIG_MAX).
*/
void rt_signal_unmask(int signo)
{
rt_base_t level;
rt_thread_t tid = rt_thread_self();
level = rt_hw_interrupt_disable();
tid->sig_mask |= sig_mask(signo);
    /* let the thread handle its pending signals */
if (tid->sig_mask & tid->sig_pending)
{
rt_hw_interrupt_enable(level);
_signal_deliver(tid);
}
else
{
rt_hw_interrupt_enable(level);
}
}
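/*
 * Usage sketch (illustrative only): install a handler and unmask the signal
 * so that it can be delivered to the current thread. "SIGUSR1" and
 * "my_sig_handler" are assumptions, not part of this file.
 *
 *     static void my_sig_handler(int signo)
 *     {
 *         rt_kprintf("received signal %d\n", signo);
 *     }
 *
 *     rt_signal_install(SIGUSR1, my_sig_handler);
 *     rt_signal_unmask(SIGUSR1);
 */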
/**
 * @brief This function will wait for the arrival of one of the signals in the given set. If none of
 * them is already pending, the thread will be suspended until one of them arrives or the waiting
 * time exceeds the specified timeout.
*
* @param set is the set of signal values to be waited for. Use the function
* sigaddset() to add the signal.
*
 * @param si is a pointer to the buffer that receives the signal info. It must not be RT_NULL (the call returns -RT_EINVAL otherwise).
*
* @param timeout is a timeout period (unit: an OS tick).
*
* @return Return the operation status. When the return value is RT_EOK, the operation is successful.
* If the return value is any other values, it means that the signal wait failed.
*/
int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout)
{
int ret = RT_EOK;
rt_base_t level;
rt_thread_t tid = rt_thread_self();
struct siginfo_node *si_node = RT_NULL, *si_prev = RT_NULL;
/* current context checking */
RT_DEBUG_IN_THREAD_CONTEXT;
/* parameters check */
if (set == NULL || *set == 0 || si == NULL )
{
ret = -RT_EINVAL;
goto __done_return;
}
/* clear siginfo to avoid unknown value */
memset(si, 0x0, sizeof(rt_siginfo_t));
level = rt_hw_interrupt_disable();
/* already pending */
if (tid->sig_pending & *set) goto __done;
if (timeout == 0)
{
ret = -RT_ETIMEOUT;
goto __done_int;
}
/* suspend self thread */
rt_thread_suspend_with_flag(tid, RT_UNINTERRUPTIBLE);
/* set thread stat as waiting for signal */
tid->stat |= RT_THREAD_STAT_SIGNAL_WAIT;
/* start timeout timer */
if (timeout != RT_WAITING_FOREVER)
{
/* reset the timeout of thread timer and start it */
rt_timer_control(&(tid->thread_timer),
RT_TIMER_CTRL_SET_TIME,
&timeout);
rt_timer_start(&(tid->thread_timer));
}
rt_hw_interrupt_enable(level);
/* do thread scheduling */
rt_schedule();
level = rt_hw_interrupt_disable();
/* remove signal waiting flag */
tid->stat &= ~RT_THREAD_STAT_SIGNAL_WAIT;
/* check errno of thread */
if (tid->error == -RT_ETIMEOUT)
{
tid->error = RT_EOK;
rt_hw_interrupt_enable(level);
/* timer timeout */
ret = -RT_ETIMEOUT;
goto __done_return;
}
__done:
/* to get the first matched pending signals */
si_node = (struct siginfo_node *)tid->si_list;
while (si_node)
{
int signo;
signo = si_node->si.si_signo;
if (sig_mask(signo) & *set)
{
*si = si_node->si;
LOG_D("sigwait: %d sig raised!", signo);
if (si_prev) si_prev->list.next = si_node->list.next;
else
{
struct siginfo_node *node_next;
if (si_node->list.next)
{
node_next = (void *)rt_slist_entry(si_node->list.next, struct siginfo_node, list);
tid->si_list = node_next;
}
else
{
tid->si_list = RT_NULL;
}
}
/* clear pending */
tid->sig_pending &= ~sig_mask(signo);
rt_mp_free(si_node);
break;
}
si_prev = si_node;
if (si_node->list.next)
{
si_node = (void *)rt_slist_entry(si_node->list.next, struct siginfo_node, list);
}
else
{
si_node = RT_NULL;
}
}
__done_int:
rt_hw_interrupt_enable(level);
__done_return:
return ret;
}
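/*
 * Usage sketch (illustrative only): wait for a signal, building the set with
 * the libc sigemptyset()/sigaddset() helpers as the description above
 * suggests. It is assumed here that these helpers operate on rt_sigset_t.
 *
 *     rt_sigset_t set;
 *     rt_siginfo_t si;
 *     sigemptyset(&set);
 *     sigaddset(&set, SIGUSR1);
 *     if (rt_signal_wait(&set, &si, RT_WAITING_FOREVER) == RT_EOK)
 *     {
 *         rt_kprintf("got signal %d\n", si.si_signo);
 *     }
 */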
void rt_thread_handle_sig(rt_bool_t clean_state)
{
rt_base_t level;
rt_thread_t tid = rt_thread_self();
struct siginfo_node *si_node;
level = rt_hw_interrupt_disable();
if (tid->sig_pending & tid->sig_mask)
{
/* if thread is not waiting for signal */
if (!(tid->stat & RT_THREAD_STAT_SIGNAL_WAIT))
{
while (tid->sig_pending & tid->sig_mask)
{
int signo, error;
rt_sighandler_t handler;
si_node = (struct siginfo_node *)tid->si_list;
if (!si_node) break;
/* remove this sig info node from list */
if (si_node->list.next == RT_NULL)
tid->si_list = RT_NULL;
else
tid->si_list = (void *)rt_slist_entry(si_node->list.next, struct siginfo_node, list);
signo = si_node->si.si_signo;
handler = tid->sig_vectors[signo];
tid->sig_pending &= ~sig_mask(signo);
rt_hw_interrupt_enable(level);
LOG_D("handle signal: %d, handler 0x%08x", signo, handler);
if (handler) handler(signo);
level = rt_hw_interrupt_disable();
error = -RT_EINTR;
rt_mp_free(si_node); /* release this siginfo node */
/* set errno in thread tcb */
tid->error = error;
}
/* whether clean signal status */
if (clean_state == RT_TRUE)
{
tid->stat &= ~RT_THREAD_STAT_SIGNAL;
}
else
{
return;
}
}
}
rt_hw_interrupt_enable(level);
}
void rt_thread_alloc_sig(rt_thread_t tid)
{
int index;
rt_base_t level;
rt_sighandler_t *vectors;
vectors = (rt_sighandler_t *)RT_KERNEL_MALLOC(sizeof(rt_sighandler_t) * RT_SIG_MAX);
RT_ASSERT(vectors != RT_NULL);
for (index = 0; index < RT_SIG_MAX; index ++)
{
vectors[index] = _signal_default_handler;
}
level = rt_hw_interrupt_disable();
tid->sig_vectors = vectors;
rt_hw_interrupt_enable(level);
}
void rt_thread_free_sig(rt_thread_t tid)
{
rt_base_t level;
struct siginfo_node *si_node;
rt_sighandler_t *sig_vectors;
level = rt_hw_interrupt_disable();
si_node = (struct siginfo_node *)tid->si_list;
tid->si_list = RT_NULL;
sig_vectors = tid->sig_vectors;
tid->sig_vectors = RT_NULL;
rt_hw_interrupt_enable(level);
if (si_node)
{
struct rt_slist_node *node;
struct rt_slist_node *node_to_free;
LOG_D("free signal info list");
node = &(si_node->list);
do
{
node_to_free = node;
node = node->next;
si_node = rt_slist_entry(node_to_free, struct siginfo_node, list);
rt_mp_free(si_node);
} while (node);
}
if (sig_vectors)
{
RT_KERNEL_FREE(sig_vectors);
}
}
/**
* @brief This function can be used to send any signal to any thread.
*
* @param tid is a pointer to the thread that receives the signal.
*
* @param sig is a specific signal value (range: 0 ~ RT_SIG_MAX).
*
* @return Return the operation status. When the return value is RT_EOK, the operation is successful.
* If the return value is any other values, it means that the signal send failed.
*/
int rt_thread_kill(rt_thread_t tid, int sig)
{
siginfo_t si;
rt_base_t level;
struct siginfo_node *si_node;
RT_ASSERT(tid != RT_NULL);
if (!sig_valid(sig)) return -RT_EINVAL;
LOG_I("send signal: %d", sig);
si.si_signo = sig;
si.si_code = SI_USER;
si.si_value.sival_ptr = RT_NULL;
level = rt_hw_interrupt_disable();
if (tid->sig_pending & sig_mask(sig))
{
        /* has this signal already been emitted? */
struct rt_slist_node *node;
struct siginfo_node *entry;
si_node = (struct siginfo_node *)tid->si_list;
if (si_node)
node = (struct rt_slist_node *)&si_node->list;
else
node = RT_NULL;
/* update sig info */
for (; (node) != RT_NULL; node = node->next)
{
entry = rt_slist_entry(node, struct siginfo_node, list);
if (entry->si.si_signo == sig)
{
memcpy(&(entry->si), &si, sizeof(siginfo_t));
rt_hw_interrupt_enable(level);
return 0;
}
}
}
rt_hw_interrupt_enable(level);
si_node = (struct siginfo_node *) rt_mp_alloc(_siginfo_pool, 0);
if (si_node)
{
rt_slist_init(&(si_node->list));
memcpy(&(si_node->si), &si, sizeof(siginfo_t));
level = rt_hw_interrupt_disable();
if (tid->si_list)
{
struct siginfo_node *si_list;
si_list = (struct siginfo_node *)tid->si_list;
rt_slist_append(&(si_list->list), &(si_node->list));
}
else
{
tid->si_list = si_node;
}
/* a new signal */
tid->sig_pending |= sig_mask(sig);
rt_hw_interrupt_enable(level);
}
else
{
LOG_E("The allocation of signal info node failed.");
}
/* deliver signal to this thread */
_signal_deliver(tid);
return RT_EOK;
}
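/*
 * Usage sketch (illustrative only): send a signal to another thread by its
 * handle. "worker_tid" and "SIGUSR1" are assumptions for this example.
 *
 *     rt_thread_kill(worker_tid, SIGUSR1);
 */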
int rt_system_signal_init(void)
{
_siginfo_pool = rt_mp_create("signal", RT_SIG_INFO_MAX, sizeof(struct siginfo_node));
if (_siginfo_pool == RT_NULL)
{
LOG_E("create memory pool for signal info failed.");
RT_ASSERT(0);
}
return 0;
}
#endif /* RT_USING_SIGNALS */

854
src/slab.c Normal file
View file

@ -0,0 +1,854 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* File : slab.c
*
* Change Logs:
* Date Author Notes
* 2008-07-12 Bernard the first version
* 2010-07-13 Bernard fix RT_ALIGN issue found by kuronca
* 2010-10-23 yi.qiu add module memory allocator
* 2010-12-18 yi.qiu fix zone release bug
*/
/*
* KERN_SLABALLOC.C - Kernel SLAB memory allocator
*
* Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
*
* This code is derived from software contributed to The DragonFly Project
* by Matthew Dillon <dillon@backplane.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of The DragonFly Project nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific, prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <rthw.h>
#include <rtthread.h>
#if defined (RT_USING_SLAB)
/*
* slab allocator implementation
*
* A slab allocator reserves a ZONE for each chunk size, then lays the
* chunks out in an array within the zone. Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
* to a fixed worst-case amount.
*
* The downside of this slab implementation is in the chunk size
* multiplied by the number of zones. ~80 zones * 128K = 10MB of VM per cpu.
* In a kernel implementation all this memory will be physical so
* the zone size is adjusted downward on machines with less physical
* memory. The upside is that overhead is bounded... this is the *worst*
* case overhead.
*
* Slab management is done on a per-cpu basis and no locking or mutexes
* are required, only a critical section. When one cpu frees memory
* belonging to another cpu's slab manager an asynchronous IPI message
* will be queued to execute the operation. In addition, both the
* high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
* the linked list of chunks.
*
* XXX Balancing is needed between cpus. Balance will be handled through
* asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
*
* XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
* the new zone should be restricted to M_USE_RESERVE requests only.
*
* Alloc Size Chunking Number of zones
* 0-127 8 16
* 128-255 16 8
* 256-511 32 8
* 512-1023 64 8
* 1024-2047 128 8
* 2048-4095 256 8
* 4096-8191 512 8
* 8192-16383 1024 8
* 16384-32767 2048 8
* (if RT_MM_PAGE_SIZE is 4K the maximum zone allocation is 16383)
*
* Allocations >= zone_limit go directly to kmem.
*
* API REQUIREMENTS AND SIDE EFFECTS
*
* To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
* have remained compatible with the following API requirements:
*
* + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
* + all power-of-2 sized allocations are power-of-2 aligned (twe)
* + malloc(0) is allowed and returns non-RT_NULL (ahc driver)
* + ability to allocate arbitrarily large chunks of memory
*/
#define ZALLOC_SLAB_MAGIC 0x51ab51ab
#define ZALLOC_ZONE_LIMIT (16 * 1024) /* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE (32 * 1024) /* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE (128 * 1024) /* maximum zone size */
#define ZONE_RELEASE_THRESH 2 /* threshold number of zones */
/*
* Misc constants. Note that allocations that are exact multiples of
* RT_MM_PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
*/
#define MIN_CHUNK_SIZE 8 /* in bytes */
#define MIN_CHUNK_MASK (MIN_CHUNK_SIZE - 1)
/*
* Array of descriptors that describe the contents of each page
*/
#define PAGE_TYPE_FREE 0x00
#define PAGE_TYPE_SMALL 0x01
#define PAGE_TYPE_LARGE 0x02
#define btokup(addr) \
(&slab->memusage[((rt_ubase_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS])
/**
* Base structure of slab memory object
*/
/*
* The IN-BAND zone header is placed at the beginning of each zone.
*/
struct rt_slab_zone
{
rt_uint32_t z_magic; /**< magic number for sanity check */
rt_uint32_t z_nfree; /**< total free chunks / ualloc space in zone */
rt_uint32_t z_nmax; /**< maximum free chunks */
struct rt_slab_zone *z_next; /**< zoneary[] link if z_nfree non-zero */
rt_uint8_t *z_baseptr; /**< pointer to start of chunk array */
rt_uint32_t z_uindex; /**< current initial allocation index */
rt_uint32_t z_chunksize; /**< chunk size for validation */
rt_uint32_t z_zoneindex; /**< zone index */
struct rt_slab_chunk *z_freechunk; /**< free chunk list */
};
/*
* Chunk structure for free elements
*/
struct rt_slab_chunk
{
struct rt_slab_chunk *c_next;
};
struct rt_slab_memusage
{
rt_uint32_t type: 2 ; /**< page type */
rt_uint32_t size: 30; /**< pages allocated or offset from zone */
};
/*
* slab page allocator
*/
struct rt_slab_page
{
struct rt_slab_page *next; /**< next valid page */
rt_size_t page; /**< number of page */
/* dummy */
char dummy[RT_MM_PAGE_SIZE - (sizeof(struct rt_slab_page *) + sizeof(rt_size_t))];
};
#define RT_SLAB_NZONES 72 /* number of zones */
/*
* slab object
*/
struct rt_slab
{
struct rt_memory parent; /**< inherit from rt_memory */
rt_ubase_t heap_start; /**< memory start address */
rt_ubase_t heap_end; /**< memory end address */
struct rt_slab_memusage *memusage;
struct rt_slab_zone *zone_array[RT_SLAB_NZONES]; /* linked list of zones NFree > 0 */
struct rt_slab_zone *zone_free; /* whole zones that have become free */
rt_uint32_t zone_free_cnt;
rt_uint32_t zone_size;
rt_uint32_t zone_limit;
rt_uint32_t zone_page_cnt;
struct rt_slab_page *page_list;
};
/**
* @brief Alloc memory size by page.
*
* @param m the slab memory management object.
*
* @param npages the number of pages.
*/
void *rt_slab_page_alloc(rt_slab_t m, rt_size_t npages)
{
struct rt_slab_page *b, *n;
struct rt_slab_page **prev;
struct rt_slab *slab = (struct rt_slab *)m;
if (npages == 0)
return RT_NULL;
for (prev = &slab->page_list; (b = *prev) != RT_NULL; prev = &(b->next))
{
if (b->page > npages)
{
            /* split pages */
n = b + npages;
n->next = b->next;
n->page = b->page - npages;
*prev = n;
break;
}
if (b->page == npages)
{
            /* this node fits exactly, remove it from the list */
*prev = b->next;
break;
}
}
return b;
}
/**
* @brief Free memory by page.
*
* @param m the slab memory management object.
*
* @param addr is the head address of first page.
*
* @param npages is the number of pages.
*/
void rt_slab_page_free(rt_slab_t m, void *addr, rt_size_t npages)
{
struct rt_slab_page *b, *n;
struct rt_slab_page **prev;
struct rt_slab *slab = (struct rt_slab *)m;
RT_ASSERT(addr != RT_NULL);
RT_ASSERT((rt_ubase_t)addr % RT_MM_PAGE_SIZE == 0);
RT_ASSERT(npages != 0);
n = (struct rt_slab_page *)addr;
for (prev = &slab->page_list; (b = *prev) != RT_NULL; prev = &(b->next))
{
RT_ASSERT(b->page > 0);
RT_ASSERT(b > n || b + b->page <= n);
if (b + b->page == n)
{
if (b + (b->page += npages) == b->next)
{
b->page += b->next->page;
b->next = b->next->next;
}
return;
}
if (b == n + npages)
{
n->page = b->page + npages;
n->next = b->next;
*prev = n;
return;
}
if (b > n + npages)
break;
}
n->page = npages;
n->next = b;
*prev = n;
}
/*
* Initialize the page allocator
*/
static void rt_slab_page_init(struct rt_slab *slab, void *addr, rt_size_t npages)
{
RT_ASSERT(addr != RT_NULL);
RT_ASSERT(npages != 0);
slab->page_list = RT_NULL;
rt_slab_page_free((rt_slab_t)(&slab->parent), addr, npages);
}
/**
* @brief This function will init slab memory management algorithm
*
* @param name is the name of the slab memory management object.
*
* @param begin_addr the beginning address of system page.
*
* @param size is the size of the memory.
*
* @return Return a pointer to the slab memory object.
*/
rt_slab_t rt_slab_init(const char *name, void *begin_addr, rt_size_t size)
{
rt_uint32_t limsize, npages;
rt_ubase_t start_addr, begin_align, end_align;
struct rt_slab *slab;
slab = (struct rt_slab *)RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
start_addr = (rt_ubase_t)slab + sizeof(*slab);
/* align begin and end addr to page */
begin_align = RT_ALIGN((rt_ubase_t)start_addr, RT_MM_PAGE_SIZE);
end_align = RT_ALIGN_DOWN((rt_ubase_t)begin_addr + size, RT_MM_PAGE_SIZE);
if (begin_align >= end_align)
{
rt_kprintf("slab init errr. wrong address[0x%x - 0x%x]\n",
(rt_ubase_t)begin_addr, (rt_ubase_t)begin_addr + size);
return RT_NULL;
}
limsize = end_align - begin_align;
npages = limsize / RT_MM_PAGE_SIZE;
RT_DEBUG_LOG(RT_DEBUG_SLAB, ("heap[0x%x - 0x%x], size 0x%x, 0x%x pages\n",
begin_align, end_align, limsize, npages));
rt_memset(slab, 0, sizeof(*slab));
/* initialize slab memory object */
rt_object_init(&(slab->parent.parent), RT_Object_Class_Memory, name);
slab->parent.algorithm = "slab";
slab->parent.address = begin_align;
slab->parent.total = limsize;
slab->parent.used = 0;
slab->parent.max = 0;
slab->heap_start = begin_align;
slab->heap_end = end_align;
/* init pages */
rt_slab_page_init(slab, (void *)slab->heap_start, npages);
/* calculate zone size */
slab->zone_size = ZALLOC_MIN_ZONE_SIZE;
while (slab->zone_size < ZALLOC_MAX_ZONE_SIZE && (slab->zone_size << 1) < (limsize / 1024))
slab->zone_size <<= 1;
slab->zone_limit = slab->zone_size / 4;
if (slab->zone_limit > ZALLOC_ZONE_LIMIT)
slab->zone_limit = ZALLOC_ZONE_LIMIT;
slab->zone_page_cnt = slab->zone_size / RT_MM_PAGE_SIZE;
RT_DEBUG_LOG(RT_DEBUG_SLAB, ("zone size 0x%x, zone page count 0x%x\n",
slab->zone_size, slab->zone_page_cnt));
/* allocate slab->memusage array */
limsize = npages * sizeof(struct rt_slab_memusage);
limsize = RT_ALIGN(limsize, RT_MM_PAGE_SIZE);
slab->memusage = rt_slab_page_alloc((rt_slab_t)(&slab->parent), limsize / RT_MM_PAGE_SIZE);
RT_DEBUG_LOG(RT_DEBUG_SLAB, ("slab->memusage 0x%x, size 0x%x\n",
(rt_ubase_t)slab->memusage, limsize));
return &slab->parent;
}
RTM_EXPORT(rt_slab_init);
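/*
 * Usage sketch (illustrative only): manage a dedicated memory region with the
 * slab allocator. "region" and its size are assumptions for this example.
 *
 *     static rt_uint8_t region[256 * 1024];
 *     rt_slab_t slab = rt_slab_init("app_slab", region, sizeof(region));
 *     void *buf = rt_slab_alloc(slab, 128);
 *     rt_slab_free(slab, buf);
 */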
/**
* @brief This function will remove a slab object from the system.
*
* @param m the slab memory management object.
*
* @return RT_EOK
*/
rt_err_t rt_slab_detach(rt_slab_t m)
{
struct rt_slab *slab = (struct rt_slab *)m;
RT_ASSERT(slab != RT_NULL);
RT_ASSERT(rt_object_get_type(&slab->parent.parent) == RT_Object_Class_Memory);
RT_ASSERT(rt_object_is_systemobject(&slab->parent.parent));
rt_object_detach(&(slab->parent.parent));
return RT_EOK;
}
RTM_EXPORT(rt_slab_detach);
/*
* Calculate the zone index for the allocation request size and set the
* allocation request size to that particular zone's chunk size.
*/
rt_inline int zoneindex(rt_size_t *bytes)
{
/* unsigned for shift opt */
rt_ubase_t n = (rt_ubase_t)(*bytes);
if (n < 128)
{
*bytes = n = (n + 7) & ~7;
/* 8 byte chunks, 16 zones */
return (n / 8 - 1);
}
if (n < 256)
{
*bytes = n = (n + 15) & ~15;
return (n / 16 + 7);
}
if (n < 8192)
{
if (n < 512)
{
*bytes = n = (n + 31) & ~31;
return (n / 32 + 15);
}
if (n < 1024)
{
*bytes = n = (n + 63) & ~63;
return (n / 64 + 23);
}
if (n < 2048)
{
*bytes = n = (n + 127) & ~127;
return (n / 128 + 31);
}
if (n < 4096)
{
*bytes = n = (n + 255) & ~255;
return (n / 256 + 39);
}
*bytes = n = (n + 511) & ~511;
return (n / 512 + 47);
}
if (n < 16384)
{
*bytes = n = (n + 1023) & ~1023;
return (n / 1024 + 55);
}
rt_kprintf("Unexpected byte count %d", n);
return 0;
}
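/*
 * Worked example: a request of 100 bytes is rounded up to 104
 * ((100 + 7) & ~7) and maps to zone index 12 (104 / 8 - 1); a request of
 * 300 bytes is rounded up to 320 and maps to zone index 25 (320 / 32 + 15).
 */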
/**
* @addtogroup MM
*/
/**@{*/
/**
* @brief This function will allocate a block from slab object.
*
 * @note RT_NULL is returned if
 *       - the requested size is zero, or
 *       - there is no memory block of the requested size available in the system.
*
* @param m the slab memory management object.
*
* @param size is the size of memory to be allocated.
*
* @return the allocated memory.
*/
void *rt_slab_alloc(rt_slab_t m, rt_size_t size)
{
struct rt_slab_zone *z;
rt_int32_t zi;
struct rt_slab_chunk *chunk;
struct rt_slab_memusage *kup;
struct rt_slab *slab = (struct rt_slab *)m;
/* zero size, return RT_NULL */
if (size == 0)
return RT_NULL;
/*
* Handle large allocations directly. There should not be very many of
* these so performance is not a big issue.
*/
if (size >= slab->zone_limit)
{
size = RT_ALIGN(size, RT_MM_PAGE_SIZE);
chunk = rt_slab_page_alloc(m, size >> RT_MM_PAGE_BITS);
if (chunk == RT_NULL)
return RT_NULL;
/* set kup */
kup = btokup(chunk);
kup->type = PAGE_TYPE_LARGE;
kup->size = size >> RT_MM_PAGE_BITS;
RT_DEBUG_LOG(RT_DEBUG_SLAB,
("alloc a large memory 0x%x, page cnt %d, kup %d\n",
size,
size >> RT_MM_PAGE_BITS,
((rt_ubase_t)chunk - slab->heap_start) >> RT_MM_PAGE_BITS));
/* mem stat */
slab->parent.used += size;
if (slab->parent.used > slab->parent.max)
slab->parent.max = slab->parent.used;
return chunk;
}
/*
* Attempt to allocate out of an existing zone. First try the free list,
* then allocate out of unallocated space. If we find a good zone move
* it to the head of the list so later allocations find it quickly
* (we might have thousands of zones in the list).
*
     * Note: zoneindex() will panic if size is too large.
*/
zi = zoneindex(&size);
RT_ASSERT(zi < RT_SLAB_NZONES);
RT_DEBUG_LOG(RT_DEBUG_SLAB, ("try to alloc 0x%x on zone: %d\n", size, zi));
if ((z = slab->zone_array[zi]) != RT_NULL)
{
RT_ASSERT(z->z_nfree > 0);
/* Remove us from the zone_array[] when we become full */
if (--z->z_nfree == 0)
{
slab->zone_array[zi] = z->z_next;
z->z_next = RT_NULL;
}
/*
* No chunks are available but nfree said we had some memory, so
* it must be available in the never-before-used-memory area
* governed by uindex. The consequences are very serious if our zone
         * got corrupted so we use an explicit rt_kprintf rather than a KASSERT.
*/
if (z->z_uindex + 1 != z->z_nmax)
{
z->z_uindex = z->z_uindex + 1;
chunk = (struct rt_slab_chunk *)(z->z_baseptr + z->z_uindex * size);
}
else
{
/* find on free chunk list */
chunk = z->z_freechunk;
/* remove this chunk from list */
z->z_freechunk = z->z_freechunk->c_next;
}
/* mem stats */
slab->parent.used += z->z_chunksize;
if (slab->parent.used > slab->parent.max)
slab->parent.max = slab->parent.used;
return chunk;
}
/*
* If all zones are exhausted we need to allocate a new zone for this
* index.
*
* At least one subsystem, the tty code (see CROUND) expects power-of-2
* allocations to be power-of-2 aligned. We maintain compatibility by
* adjusting the base offset below.
*/
{
rt_uint32_t off;
if ((z = slab->zone_free) != RT_NULL)
{
/* remove zone from free zone list */
slab->zone_free = z->z_next;
-- slab->zone_free_cnt;
}
else
{
/* allocate a zone from page */
z = rt_slab_page_alloc(m, slab->zone_size / RT_MM_PAGE_SIZE);
if (z == RT_NULL)
{
return RT_NULL;
}
RT_DEBUG_LOG(RT_DEBUG_SLAB, ("alloc a new zone: 0x%x\n",
(rt_ubase_t)z));
/* set message usage */
for (off = 0, kup = btokup(z); off < slab->zone_page_cnt; off ++)
{
kup->type = PAGE_TYPE_SMALL;
kup->size = off;
kup ++;
}
}
/* clear to zero */
rt_memset(z, 0, sizeof(struct rt_slab_zone));
/* offset of slab zone struct in zone */
off = sizeof(struct rt_slab_zone);
/*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
* Otherwise just 8-byte align the data.
*/
if ((size | (size - 1)) + 1 == (size << 1))
off = (off + size - 1) & ~(size - 1);
else
off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
z->z_magic = ZALLOC_SLAB_MAGIC;
z->z_zoneindex = zi;
z->z_nmax = (slab->zone_size - off) / size;
z->z_nfree = z->z_nmax - 1;
z->z_baseptr = (rt_uint8_t *)z + off;
z->z_uindex = 0;
z->z_chunksize = size;
chunk = (struct rt_slab_chunk *)(z->z_baseptr + z->z_uindex * size);
/* link to zone array */
z->z_next = slab->zone_array[zi];
slab->zone_array[zi] = z;
/* mem stats */
slab->parent.used += z->z_chunksize;
if (slab->parent.used > slab->parent.max)
slab->parent.max = slab->parent.used;
}
return chunk;
}
RTM_EXPORT(rt_slab_alloc);
/**
* @brief This function will change the size of previously allocated memory block.
*
* @param m the slab memory management object.
*
* @param ptr is the previously allocated memory block.
*
* @param size is the new size of memory block.
*
* @return the allocated memory.
*/
void *rt_slab_realloc(rt_slab_t m, void *ptr, rt_size_t size)
{
void *nptr;
struct rt_slab_zone *z;
struct rt_slab_memusage *kup;
struct rt_slab *slab = (struct rt_slab *)m;
if (ptr == RT_NULL)
return rt_slab_alloc(m, size);
if (size == 0)
{
rt_slab_free(m, ptr);
return RT_NULL;
}
/*
* Get the original allocation's zone. If the new request winds up
* using the same chunk size we do not have to do anything.
*/
kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
if (kup->type == PAGE_TYPE_LARGE)
{
rt_size_t osize;
osize = kup->size << RT_MM_PAGE_BITS;
if ((nptr = rt_slab_alloc(m, size)) == RT_NULL)
return RT_NULL;
rt_memcpy(nptr, ptr, size > osize ? osize : size);
rt_slab_free(m, ptr);
return nptr;
}
else if (kup->type == PAGE_TYPE_SMALL)
{
z = (struct rt_slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
kup->size * RT_MM_PAGE_SIZE);
RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
zoneindex(&size);
if (z->z_chunksize == size)
return (ptr); /* same chunk */
/*
* Allocate memory for the new request size. Note that zoneindex has
* already adjusted the request size to the appropriate chunk size, which
* should optimize our bcopy(). Then copy and return the new pointer.
*/
if ((nptr = rt_slab_alloc(m, size)) == RT_NULL)
return RT_NULL;
rt_memcpy(nptr, ptr, size > z->z_chunksize ? z->z_chunksize : size);
rt_slab_free(m, ptr);
return nptr;
}
return RT_NULL;
}
RTM_EXPORT(rt_slab_realloc);
/**
* @brief This function will release the previous allocated memory block by rt_slab_alloc.
*
* @note The released memory block is taken back to system heap.
*
* @param m the slab memory management object.
* @param ptr is the address of memory which will be released
*/
void rt_slab_free(rt_slab_t m, void *ptr)
{
struct rt_slab_zone *z;
struct rt_slab_chunk *chunk;
struct rt_slab_memusage *kup;
struct rt_slab *slab = (struct rt_slab *)m;
/* free a RT_NULL pointer */
if (ptr == RT_NULL)
return ;
/* get memory usage */
#if RT_DEBUG_SLAB
{
rt_ubase_t addr = ((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
RT_DEBUG_LOG(RT_DEBUG_SLAB,
("free a memory 0x%x and align to 0x%x, kup index %d\n",
(rt_ubase_t)ptr,
(rt_ubase_t)addr,
((rt_ubase_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS));
}
#endif /* RT_DEBUG_SLAB */
kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
/* release large allocation */
if (kup->type == PAGE_TYPE_LARGE)
{
rt_ubase_t size;
/* clear page counter */
size = kup->size;
kup->size = 0;
/* mem stats */
slab->parent.used -= size * RT_MM_PAGE_SIZE;
RT_DEBUG_LOG(RT_DEBUG_SLAB,
("free large memory block 0x%x, page count %d\n",
(rt_ubase_t)ptr, size));
/* free this page */
rt_slab_page_free(m, ptr, size);
return;
}
/* zone case. get out zone. */
z = (struct rt_slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
kup->size * RT_MM_PAGE_SIZE);
RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
chunk = (struct rt_slab_chunk *)ptr;
chunk->c_next = z->z_freechunk;
z->z_freechunk = chunk;
/* mem stats */
slab->parent.used -= z->z_chunksize;
/*
* Bump the number of free chunks. If it becomes non-zero the zone
* must be added back onto the appropriate list.
*/
if (z->z_nfree++ == 0)
{
z->z_next = slab->zone_array[z->z_zoneindex];
slab->zone_array[z->z_zoneindex] = z;
}
/*
* If the zone becomes totally free, and there are other zones we
* can allocate from, move this zone to the FreeZones list. Since
* this code can be called from an IPI callback, do *NOT* try to mess
* with kernel_map here. Hysteresis will be performed at malloc() time.
*/
if (z->z_nfree == z->z_nmax &&
(z->z_next || slab->zone_array[z->z_zoneindex] != z))
{
struct rt_slab_zone **pz;
RT_DEBUG_LOG(RT_DEBUG_SLAB, ("free zone 0x%x\n",
(rt_ubase_t)z, z->z_zoneindex));
/* remove zone from zone array list */
for (pz = &slab->zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)
;
*pz = z->z_next;
/* reset zone */
z->z_magic = RT_UINT32_MAX;
/* insert to free zone list */
z->z_next = slab->zone_free;
slab->zone_free = z;
++ slab->zone_free_cnt;
/* release zone to page allocator */
if (slab->zone_free_cnt > ZONE_RELEASE_THRESH)
{
register rt_uint32_t i;
z = slab->zone_free;
slab->zone_free = z->z_next;
-- slab->zone_free_cnt;
/* set memory usage */
for (i = 0, kup = btokup(z); i < slab->zone_page_cnt; i ++)
{
kup->type = PAGE_TYPE_FREE;
kup->size = 0;
kup ++;
}
/* release pages */
rt_slab_page_free(m, z, slab->zone_size / RT_MM_PAGE_SIZE);
return;
}
}
}
RTM_EXPORT(rt_slab_free);
#endif /* defined (RT_USING_SLAB) */

1163
src/thread.c Normal file

File diff suppressed because it is too large

893
src/timer.c Normal file
View file

@ -0,0 +1,893 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-12 Bernard first version
* 2006-04-29 Bernard implement thread timer
* 2006-06-04 Bernard implement rt_timer_control
* 2006-08-10 Bernard fix the periodic timer bug
* 2006-09-03 Bernard implement rt_timer_detach
* 2009-11-11 LiJin add soft timer
* 2010-05-12 Bernard fix the timer check bug.
* 2010-11-02 Charlie re-implement tick overflow issue
* 2012-12-15 Bernard fix the next timeout issue in soft timer
* 2014-07-12 Bernard does not lock scheduler when invoking soft-timer
* timeout function.
* 2021-08-15 supperthomas add the comment
* 2022-01-07 Gabriel Moving __on_rt_xxxxx_hook to timer.c
* 2022-04-19 Stanley Correct descriptions
*/
#include <rtthread.h>
#include <rthw.h>
/* hard timer list */
static rt_list_t _timer_list[RT_TIMER_SKIP_LIST_LEVEL];
#ifdef RT_USING_TIMER_SOFT
#define RT_SOFT_TIMER_IDLE 1
#define RT_SOFT_TIMER_BUSY 0
#ifndef RT_TIMER_THREAD_STACK_SIZE
#define RT_TIMER_THREAD_STACK_SIZE 512
#endif /* RT_TIMER_THREAD_STACK_SIZE */
#ifndef RT_TIMER_THREAD_PRIO
#define RT_TIMER_THREAD_PRIO 0
#endif /* RT_TIMER_THREAD_PRIO */
/* soft timer status */
static rt_uint8_t _soft_timer_status = RT_SOFT_TIMER_IDLE;
/* soft timer list */
static rt_list_t _soft_timer_list[RT_TIMER_SKIP_LIST_LEVEL];
static struct rt_thread _timer_thread;
rt_align(RT_ALIGN_SIZE)
static rt_uint8_t _timer_thread_stack[RT_TIMER_THREAD_STACK_SIZE];
#endif /* RT_USING_TIMER_SOFT */
#ifndef __on_rt_object_take_hook
#define __on_rt_object_take_hook(parent) __ON_HOOK_ARGS(rt_object_take_hook, (parent))
#endif
#ifndef __on_rt_object_put_hook
#define __on_rt_object_put_hook(parent) __ON_HOOK_ARGS(rt_object_put_hook, (parent))
#endif
#ifndef __on_rt_timer_enter_hook
#define __on_rt_timer_enter_hook(t) __ON_HOOK_ARGS(rt_timer_enter_hook, (t))
#endif
#ifndef __on_rt_timer_exit_hook
#define __on_rt_timer_exit_hook(t) __ON_HOOK_ARGS(rt_timer_exit_hook, (t))
#endif
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
extern void (*rt_object_take_hook)(struct rt_object *object);
extern void (*rt_object_put_hook)(struct rt_object *object);
static void (*rt_timer_enter_hook)(struct rt_timer *timer);
static void (*rt_timer_exit_hook)(struct rt_timer *timer);
/**
* @addtogroup Hook
*/
/**@{*/
/**
* @brief This function will set a hook function on the timer,
* which will be invoked when entering the timer timeout callback function.
*
* @param hook is the hook function pointer of the timer
*/
void rt_timer_enter_sethook(void (*hook)(struct rt_timer *timer))
{
rt_timer_enter_hook = hook;
}
/**
* @brief This function will set a hook function, which will be
* invoked when exiting the timer timeout callback function.
*
* @param hook is the hook function pointer of the timer
*/
void rt_timer_exit_sethook(void (*hook)(struct rt_timer *timer))
{
rt_timer_exit_hook = hook;
}
/**@}*/
#endif /* RT_USING_HOOK */
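/*
 * Usage sketch (illustrative): installing timer enter/exit hooks to trace
 * timeout callbacks. Requires RT_USING_HOOK and RT_HOOK_USING_FUNC_PTR;
 * the hook bodies and names below are hypothetical.
 */
#if 0 /* example only */
static void my_timer_enter(struct rt_timer *timer)
{
    rt_kprintf("timer %s enter\n", timer->parent.name);
}
static void my_timer_exit(struct rt_timer *timer)
{
    rt_kprintf("timer %s exit\n", timer->parent.name);
}
static void install_timer_trace(void)
{
    rt_timer_enter_sethook(my_timer_enter);
    rt_timer_exit_sethook(my_timer_exit);
}
#endif /* example only */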
/**
* @brief [internal] The init function of timer
*
* The internal function called by rt_timer_init
*
* @see rt_timer_init
*
* @param timer is the timer object
*
* @param timeout is the timeout function
*
* @param parameter is the parameter of the timeout function
*
* @param time is the timeout ticks of the timer
*
* @param flag is the flag of the timer
*/
static void _timer_init(rt_timer_t timer,
void (*timeout)(void *parameter),
void *parameter,
rt_tick_t time,
rt_uint8_t flag)
{
int i;
/* set flag */
timer->parent.flag = flag;
/* set deactivated */
timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
timer->timeout_func = timeout;
timer->parameter = parameter;
timer->timeout_tick = 0;
timer->init_tick = time;
/* initialize timer list */
for (i = 0; i < RT_TIMER_SKIP_LIST_LEVEL; i++)
{
rt_list_init(&(timer->row[i]));
}
}
/**
* @brief Find the next timeout tick in a timer list
*
* @param timer_list is the array of timer lists (one per skip-list level)
*
* @param timeout_tick is the output pointer that receives the next timeout tick
*
* @return the operation status; RT_EOK on success, any other value indicates failure.
*/
static rt_err_t _timer_list_next_timeout(rt_list_t timer_list[], rt_tick_t *timeout_tick)
{
struct rt_timer *timer;
rt_base_t level;
/* disable interrupt */
level = rt_hw_interrupt_disable();
if (!rt_list_isempty(&timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
{
timer = rt_list_entry(timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1].next,
struct rt_timer, row[RT_TIMER_SKIP_LIST_LEVEL - 1]);
*timeout_tick = timer->timeout_tick;
/* enable interrupt */
rt_hw_interrupt_enable(level);
return RT_EOK;
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
return -RT_ERROR;
}
/**
* @brief Remove the timer
*
* @param timer the pointer to the timer
*/
rt_inline void _timer_remove(rt_timer_t timer)
{
int i;
for (i = 0; i < RT_TIMER_SKIP_LIST_LEVEL; i++)
{
rt_list_remove(&timer->row[i]);
}
}
#if RT_DEBUG_TIMER
/**
* @brief Count the skip-list height of a timer
*
* @param timer is the timer to measure
*
* @return the number of skip-list levels the timer occupies
*/
static int _timer_count_height(struct rt_timer *timer)
{
int i, cnt = 0;
for (i = 0; i < RT_TIMER_SKIP_LIST_LEVEL; i++)
{
if (!rt_list_isempty(&timer->row[i]))
cnt++;
}
return cnt;
}
/**
* @brief dump all timer information
*
* @param timer_heads is the array of timer list heads
*/
void rt_timer_dump(rt_list_t timer_heads[])
{
rt_list_t *list;
for (list = timer_heads[RT_TIMER_SKIP_LIST_LEVEL - 1].next;
list != &timer_heads[RT_TIMER_SKIP_LIST_LEVEL - 1];
list = list->next)
{
struct rt_timer *timer = rt_list_entry(list,
struct rt_timer,
row[RT_TIMER_SKIP_LIST_LEVEL - 1]);
rt_kprintf("%d", _timer_count_height(timer));
}
rt_kprintf("\n");
}
#endif /* RT_DEBUG_TIMER */
/**
* @addtogroup Clock
*/
/**@{*/
/**
* @brief This function will initialize a timer;
* normally it is used to initialize a static timer object.
*
* @param timer is the pointer to the timer
*
* @param name is a pointer to the name of the timer
*
* @param timeout is the timeout callback of the timer
*
* @param parameter is the parameter of the callback
*
* @param time is the timeout ticks of the timer
*
* NOTE: The max timeout tick should be no more than (RT_TICK_MAX/2 - 1).
*
* @param flag is the flag of timer
*
*/
void rt_timer_init(rt_timer_t timer,
const char *name,
void (*timeout)(void *parameter),
void *parameter,
rt_tick_t time,
rt_uint8_t flag)
{
/* parameter check */
RT_ASSERT(timer != RT_NULL);
RT_ASSERT(timeout != RT_NULL);
RT_ASSERT(time < RT_TICK_MAX / 2);
/* timer object initialization */
rt_object_init(&(timer->parent), RT_Object_Class_Timer, name);
_timer_init(timer, timeout, parameter, time, flag);
}
RTM_EXPORT(rt_timer_init);
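/*
 * Usage sketch (illustrative): initializing and starting a static, periodic
 * hard timer, then detaching it when it is no longer needed. The callback,
 * names and the 100-tick period below are hypothetical.
 */
#if 0 /* example only */
static struct rt_timer demo_timer;
static void demo_timeout(void *parameter)
{
    rt_kprintf("tick %d\n", rt_tick_get());
}
static void demo_timer_setup(void)
{
    rt_timer_init(&demo_timer, "demo",
                  demo_timeout, RT_NULL,
                  100,                      /* timeout ticks, < RT_TICK_MAX/2 */
                  RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_HARD_TIMER);
    rt_timer_start(&demo_timer);
}
static void demo_timer_teardown(void)
{
    rt_timer_detach(&demo_timer);           /* also stops the timer */
}
#endif /* example only */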
/**
* @brief This function will detach a timer from timer management.
*
* @param timer is the timer to be detached
*
* @return the status of detach
*/
rt_err_t rt_timer_detach(rt_timer_t timer)
{
rt_base_t level;
/* parameter check */
RT_ASSERT(timer != RT_NULL);
RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
RT_ASSERT(rt_object_is_systemobject(&timer->parent));
/* disable interrupt */
level = rt_hw_interrupt_disable();
_timer_remove(timer);
/* stop timer */
timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
/* enable interrupt */
rt_hw_interrupt_enable(level);
rt_object_detach(&(timer->parent));
return RT_EOK;
}
RTM_EXPORT(rt_timer_detach);
#ifdef RT_USING_HEAP
/**
* @brief This function will create a timer
*
* @param name is the name of timer
*
* @param timeout is the timeout function
*
* @param parameter is the parameter of timeout function
*
* @param time is timeout ticks of the timer
*
* NOTE: The max timeout tick should be no more than (RT_TICK_MAX/2 - 1).
*
* @param flag is the flag of the timer. The timer invokes the timeout function according to the flags selected from the following values:
*
* RT_TIMER_FLAG_ONE_SHOT One shot timing
* RT_TIMER_FLAG_PERIODIC Periodic timing
*
* RT_TIMER_FLAG_HARD_TIMER Hardware timer
* RT_TIMER_FLAG_SOFT_TIMER Software timer
*
* NOTE:
* You can combine multiple values with the "|" operator. By default, the system uses RT_TIMER_FLAG_HARD_TIMER.
*
* @return the created timer object
*/
rt_timer_t rt_timer_create(const char *name,
void (*timeout)(void *parameter),
void *parameter,
rt_tick_t time,
rt_uint8_t flag)
{
struct rt_timer *timer;
/* parameter check */
RT_ASSERT(timeout != RT_NULL);
RT_ASSERT(time < RT_TICK_MAX / 2);
/* allocate an object */
timer = (struct rt_timer *)rt_object_allocate(RT_Object_Class_Timer, name);
if (timer == RT_NULL)
{
return RT_NULL;
}
_timer_init(timer, timeout, parameter, time, flag);
return timer;
}
RTM_EXPORT(rt_timer_create);
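/*
 * Usage sketch (illustrative): creating a dynamic one-shot timer from the
 * heap, starting it, and deleting it later with rt_timer_delete(). Names
 * and the 50-tick timeout are hypothetical; RT_USING_HEAP must be enabled.
 */
#if 0 /* example only */
static void oneshot_timeout(void *parameter)
{
    rt_kprintf("one-shot timer fired\n");
}
static rt_timer_t create_oneshot(void)
{
    rt_timer_t timer = rt_timer_create("shot", oneshot_timeout, RT_NULL,
                                       50, RT_TIMER_FLAG_ONE_SHOT);
    if (timer != RT_NULL)
        rt_timer_start(timer);
    return timer;                       /* delete with rt_timer_delete() */
}
#endif /* example only */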
/**
* @brief This function will delete a timer and release timer memory
*
* @param timer the timer to be deleted
*
* @return the operation status, RT_EOK on OK; -RT_ERROR on error
*/
rt_err_t rt_timer_delete(rt_timer_t timer)
{
rt_base_t level;
/* parameter check */
RT_ASSERT(timer != RT_NULL);
RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
RT_ASSERT(rt_object_is_systemobject(&timer->parent) == RT_FALSE);
/* disable interrupt */
level = rt_hw_interrupt_disable();
_timer_remove(timer);
/* stop timer */
timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
/* enable interrupt */
rt_hw_interrupt_enable(level);
rt_object_delete(&(timer->parent));
return RT_EOK;
}
RTM_EXPORT(rt_timer_delete);
#endif /* RT_USING_HEAP */
/**
* @brief This function will start the timer
*
* @param timer the timer to be started
*
* @return the operation status, RT_EOK on OK, -RT_ERROR on error
*/
rt_err_t rt_timer_start(rt_timer_t timer)
{
unsigned int row_lvl;
rt_list_t *timer_list;
rt_base_t level;
rt_bool_t need_schedule;
rt_list_t *row_head[RT_TIMER_SKIP_LIST_LEVEL];
unsigned int tst_nr;
static unsigned int random_nr;
/* parameter check */
RT_ASSERT(timer != RT_NULL);
RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
need_schedule = RT_FALSE;
/* stop timer firstly */
level = rt_hw_interrupt_disable();
/* remove timer from list */
_timer_remove(timer);
/* change status of timer */
timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(timer->parent)));
timer->timeout_tick = rt_tick_get() + timer->init_tick;
#ifdef RT_USING_TIMER_SOFT
if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
{
/* insert timer to soft timer list */
timer_list = _soft_timer_list;
}
else
#endif /* RT_USING_TIMER_SOFT */
{
/* insert timer to system timer list */
timer_list = _timer_list;
}
row_head[0] = &timer_list[0];
for (row_lvl = 0; row_lvl < RT_TIMER_SKIP_LIST_LEVEL; row_lvl++)
{
for (; row_head[row_lvl] != timer_list[row_lvl].prev;
row_head[row_lvl] = row_head[row_lvl]->next)
{
struct rt_timer *t;
rt_list_t *p = row_head[row_lvl]->next;
/* fix up the entry pointer */
t = rt_list_entry(p, struct rt_timer, row[row_lvl]);
/* If we have two timers that time out at the same tick, the timer
* inserted earlier should be called earlier. So insert the new timer
* at the end of the same-timeout timer list.
*/
if ((t->timeout_tick - timer->timeout_tick) == 0)
{
continue;
}
else if ((t->timeout_tick - timer->timeout_tick) < RT_TICK_MAX / 2)
{
break;
}
}
if (row_lvl != RT_TIMER_SKIP_LIST_LEVEL - 1)
row_head[row_lvl + 1] = row_head[row_lvl] + 1;
}
/* Interestingly, this super simple timer-insert counter works very well
* at distributing the list height uniformly. By "very well", I mean it
* easily beats the randomness of timer->timeout_tick (in fact, the
* timeout_tick is not random at all and is easy to attack). */
random_nr++;
tst_nr = random_nr;
rt_list_insert_after(row_head[RT_TIMER_SKIP_LIST_LEVEL - 1],
&(timer->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
for (row_lvl = 2; row_lvl <= RT_TIMER_SKIP_LIST_LEVEL; row_lvl++)
{
if (!(tst_nr & RT_TIMER_SKIP_LIST_MASK))
rt_list_insert_after(row_head[RT_TIMER_SKIP_LIST_LEVEL - row_lvl],
&(timer->row[RT_TIMER_SKIP_LIST_LEVEL - row_lvl]));
else
break;
/* Shift over the bits we have tested. Works well with 1 bit and 2
* bits. */
tst_nr >>= (RT_TIMER_SKIP_LIST_MASK + 1) >> 1;
}
timer->parent.flag |= RT_TIMER_FLAG_ACTIVATED;
#ifdef RT_USING_TIMER_SOFT
if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
{
/* check whether timer thread is ready */
if ((_soft_timer_status == RT_SOFT_TIMER_IDLE) &&
((_timer_thread.stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK))
{
/* resume timer thread to check soft timer */
rt_thread_resume(&_timer_thread);
need_schedule = RT_TRUE;
}
}
#endif /* RT_USING_TIMER_SOFT */
/* enable interrupt */
rt_hw_interrupt_enable(level);
if (need_schedule)
{
rt_schedule();
}
return RT_EOK;
}
RTM_EXPORT(rt_timer_start);
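/*
 * Usage sketch (illustrative): a one-shot timer that re-arms itself from its
 * own timeout callback. rt_timer_check()/rt_soft_timer_check() detect the
 * "started again" case and do not treat such a timer as finished. The timer
 * object, names and the 20-tick period below are hypothetical.
 */
#if 0 /* example only */
static struct rt_timer poll_timer;
static void poll_timeout(void *parameter)
{
    /* do the periodic work here, then re-arm for the next round */
    rt_timer_start(&poll_timer);
}
static void poll_timer_setup(void)
{
    rt_timer_init(&poll_timer, "poll", poll_timeout, RT_NULL,
                  20, RT_TIMER_FLAG_ONE_SHOT);
    rt_timer_start(&poll_timer);
}
#endif /* example only */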
/**
* @brief This function will stop the timer
*
* @param timer the timer to be stopped
*
* @return the operation status, RT_EOK on OK, -RT_ERROR on error
*/
rt_err_t rt_timer_stop(rt_timer_t timer)
{
rt_base_t level;
/* disable interrupt */
level = rt_hw_interrupt_disable();
/* timer check */
RT_ASSERT(timer != RT_NULL);
RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
if (!(timer->parent.flag & RT_TIMER_FLAG_ACTIVATED))
{
rt_hw_interrupt_enable(level);
return -RT_ERROR;
}
RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(timer->parent)));
_timer_remove(timer);
/* change status */
timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
/* enable interrupt */
rt_hw_interrupt_enable(level);
return RT_EOK;
}
RTM_EXPORT(rt_timer_stop);
/**
* @brief This function will get or set some options of the timer
*
* @param timer the timer to get or set
* @param cmd the control command
* @param arg the argument
*
* @return the status of the control operation
*/
rt_err_t rt_timer_control(rt_timer_t timer, int cmd, void *arg)
{
rt_base_t level;
/* parameter check */
RT_ASSERT(timer != RT_NULL);
RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
level = rt_hw_interrupt_disable();
switch (cmd)
{
case RT_TIMER_CTRL_GET_TIME:
*(rt_tick_t *)arg = timer->init_tick;
break;
case RT_TIMER_CTRL_SET_TIME:
RT_ASSERT((*(rt_tick_t *)arg) < RT_TICK_MAX / 2);
timer->init_tick = *(rt_tick_t *)arg;
break;
case RT_TIMER_CTRL_SET_ONESHOT:
timer->parent.flag &= ~RT_TIMER_FLAG_PERIODIC;
break;
case RT_TIMER_CTRL_SET_PERIODIC:
timer->parent.flag |= RT_TIMER_FLAG_PERIODIC;
break;
case RT_TIMER_CTRL_GET_STATE:
if(timer->parent.flag & RT_TIMER_FLAG_ACTIVATED)
{
/* timer is started and running */
*(rt_uint32_t *)arg = RT_TIMER_FLAG_ACTIVATED;
}
else
{
/* timer is stopped */
*(rt_uint32_t *)arg = RT_TIMER_FLAG_DEACTIVATED;
}
break;
case RT_TIMER_CTRL_GET_REMAIN_TIME:
*(rt_tick_t *)arg = timer->timeout_tick;
break;
case RT_TIMER_CTRL_GET_FUNC:
*(void **)arg = (void *)timer->timeout_func;
break;
case RT_TIMER_CTRL_SET_FUNC:
timer->timeout_func = (void (*)(void*))arg;
break;
case RT_TIMER_CTRL_GET_PARM:
*(void **)arg = timer->parameter;
break;
case RT_TIMER_CTRL_SET_PARM:
timer->parameter = arg;
break;
default:
break;
}
rt_hw_interrupt_enable(level);
return RT_EOK;
}
RTM_EXPORT(rt_timer_control);
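/*
 * Usage sketch (illustrative): changing a timer's period and querying its
 * state through rt_timer_control(). Stopping before changing the period and
 * restarting afterwards is the usual pattern; the helper name is hypothetical.
 */
#if 0 /* example only */
static void change_period(rt_timer_t timer, rt_tick_t new_ticks)
{
    rt_uint32_t state;

    rt_timer_control(timer, RT_TIMER_CTRL_GET_STATE, &state);
    if (state == RT_TIMER_FLAG_ACTIVATED)
        rt_timer_stop(timer);

    rt_timer_control(timer, RT_TIMER_CTRL_SET_TIME, &new_ticks);
    rt_timer_start(timer);
}
#endif /* example only */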
/**
* @brief This function will check the timer list; if a timeout event happens,
* the corresponding timeout function will be invoked.
*
* @note This function shall be invoked in the operating system timer interrupt.
*/
void rt_timer_check(void)
{
struct rt_timer *t;
rt_tick_t current_tick;
rt_base_t level;
rt_list_t list;
rt_list_init(&list);
RT_DEBUG_LOG(RT_DEBUG_TIMER, ("timer check enter\n"));
current_tick = rt_tick_get();
/* disable interrupt */
level = rt_hw_interrupt_disable();
while (!rt_list_isempty(&_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
{
t = rt_list_entry(_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1].next,
struct rt_timer, row[RT_TIMER_SKIP_LIST_LEVEL - 1]);
/*
* It assumes that the elapsed ticks are less than half of RT_TICK_MAX.
*/
if ((current_tick - t->timeout_tick) < RT_TICK_MAX / 2)
{
RT_OBJECT_HOOK_CALL(rt_timer_enter_hook, (t));
/* remove timer from timer list firstly */
_timer_remove(t);
if (!(t->parent.flag & RT_TIMER_FLAG_PERIODIC))
{
t->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
}
/* add timer to temporary list */
rt_list_insert_after(&list, &(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
/* call timeout function */
t->timeout_func(t->parameter);
/* re-get tick */
current_tick = rt_tick_get();
RT_OBJECT_HOOK_CALL(rt_timer_exit_hook, (t));
RT_DEBUG_LOG(RT_DEBUG_TIMER, ("current tick: %d\n", current_tick));
/* Check whether the timer object is detached or started again */
if (rt_list_isempty(&list))
{
continue;
}
rt_list_remove(&(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
if ((t->parent.flag & RT_TIMER_FLAG_PERIODIC) &&
(t->parent.flag & RT_TIMER_FLAG_ACTIVATED))
{
/* start it */
t->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
rt_timer_start(t);
}
}
else break;
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
RT_DEBUG_LOG(RT_DEBUG_TIMER, ("timer check leave\n"));
}
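/*
 * Illustrative helper (not part of the upstream file): the wrap-safe tick
 * comparison used above. Because rt_tick_t is unsigned, the subtraction
 * wraps around, so "current has reached deadline" holds whenever the
 * distance is smaller than RT_TICK_MAX / 2.
 */
#if 0 /* example only */
rt_inline rt_bool_t tick_reached(rt_tick_t current, rt_tick_t deadline)
{
    return (current - deadline) < RT_TICK_MAX / 2 ? RT_TRUE : RT_FALSE;
}
#endif /* example only */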
/**
* @brief This function will return the next timeout tick in the system.
*
* @return the next timeout tick in the system
*/
rt_tick_t rt_timer_next_timeout_tick(void)
{
rt_tick_t next_timeout = RT_TICK_MAX;
_timer_list_next_timeout(_timer_list, &next_timeout);
return next_timeout;
}
#ifdef RT_USING_TIMER_SOFT
/**
* @brief This function will check the software-timer list; if a timeout event happens,
* the corresponding timeout function will be invoked.
*/
void rt_soft_timer_check(void)
{
rt_tick_t current_tick;
struct rt_timer *t;
rt_base_t level;
rt_list_t list;
rt_list_init(&list);
RT_DEBUG_LOG(RT_DEBUG_TIMER, ("software timer check enter\n"));
/* disable interrupt */
level = rt_hw_interrupt_disable();
while (!rt_list_isempty(&_soft_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
{
t = rt_list_entry(_soft_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1].next,
struct rt_timer, row[RT_TIMER_SKIP_LIST_LEVEL - 1]);
current_tick = rt_tick_get();
/*
* It assumes that the elapsed ticks are less than half of RT_TICK_MAX.
*/
if ((current_tick - t->timeout_tick) < RT_TICK_MAX / 2)
{
RT_OBJECT_HOOK_CALL(rt_timer_enter_hook, (t));
/* remove timer from timer list firstly */
_timer_remove(t);
if (!(t->parent.flag & RT_TIMER_FLAG_PERIODIC))
{
t->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
}
/* add timer to temporary list */
rt_list_insert_after(&list, &(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
_soft_timer_status = RT_SOFT_TIMER_BUSY;
/* enable interrupt */
rt_hw_interrupt_enable(level);
/* call timeout function */
t->timeout_func(t->parameter);
RT_OBJECT_HOOK_CALL(rt_timer_exit_hook, (t));
RT_DEBUG_LOG(RT_DEBUG_TIMER, ("current tick: %d\n", current_tick));
/* disable interrupt */
level = rt_hw_interrupt_disable();
_soft_timer_status = RT_SOFT_TIMER_IDLE;
/* Check whether the timer object is detached or started again */
if (rt_list_isempty(&list))
{
continue;
}
rt_list_remove(&(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
if ((t->parent.flag & RT_TIMER_FLAG_PERIODIC) &&
(t->parent.flag & RT_TIMER_FLAG_ACTIVATED))
{
/* start it */
t->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
rt_timer_start(t);
}
}
else break; /* not check anymore */
}
/* enable interrupt */
rt_hw_interrupt_enable(level);
RT_DEBUG_LOG(RT_DEBUG_TIMER, ("software timer check leave\n"));
}
/**
* @brief System timer thread entry
*
* @param parameter is the arg of the thread
*/
static void _timer_thread_entry(void *parameter)
{
rt_tick_t next_timeout;
while (1)
{
/* get the next timeout tick */
if (_timer_list_next_timeout(_soft_timer_list, &next_timeout) != RT_EOK)
{
/* no software timer exists, suspend self. */
rt_thread_suspend_with_flag(rt_thread_self(), RT_UNINTERRUPTIBLE);
rt_schedule();
}
else
{
rt_tick_t current_tick;
/* get current tick */
current_tick = rt_tick_get();
if ((next_timeout - current_tick) < RT_TICK_MAX / 2)
{
/* get the delta timeout tick */
next_timeout = next_timeout - current_tick;
rt_thread_delay(next_timeout);
}
}
/* check software timer */
rt_soft_timer_check();
}
}
#endif /* RT_USING_TIMER_SOFT */
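/*
 * Usage sketch (illustrative): a periodic soft timer. Its timeout callback
 * runs in the "timer" thread rather than in interrupt context, so it may
 * block briefly. Requires RT_USING_TIMER_SOFT and RT_USING_HEAP; names and
 * the 500-tick period are hypothetical.
 */
#if 0 /* example only */
static void soft_timeout(void *parameter)
{
    rt_kprintf("soft timer: running in thread context\n");
}
static void soft_timer_demo(void)
{
    rt_timer_t timer = rt_timer_create("soft", soft_timeout, RT_NULL, 500,
                                       RT_TIMER_FLAG_PERIODIC |
                                       RT_TIMER_FLAG_SOFT_TIMER);
    if (timer != RT_NULL)
        rt_timer_start(timer);
}
#endif /* example only */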
/**
* @ingroup SystemInit
*
* @brief This function will initialize system timer
*/
void rt_system_timer_init(void)
{
rt_size_t i;
for (i = 0; i < sizeof(_timer_list) / sizeof(_timer_list[0]); i++)
{
rt_list_init(_timer_list + i);
}
}
/**
* @ingroup SystemInit
*
* @brief This function will initialize system timer thread
*/
void rt_system_timer_thread_init(void)
{
#ifdef RT_USING_TIMER_SOFT
int i;
for (i = 0;
i < sizeof(_soft_timer_list) / sizeof(_soft_timer_list[0]);
i++)
{
rt_list_init(_soft_timer_list + i);
}
/* start software timer thread */
rt_thread_init(&_timer_thread,
"timer",
_timer_thread_entry,
RT_NULL,
&_timer_thread_stack[0],
sizeof(_timer_thread_stack),
RT_TIMER_THREAD_PRIO,
10);
/* startup */
rt_thread_startup(&_timer_thread);
#endif /* RT_USING_TIMER_SOFT */
}
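/*
 * Initialization sketch (illustrative): the timer lists must be initialized
 * before the first timer is used, and the soft-timer thread before the
 * scheduler starts. The routine below is hypothetical; the real call sites
 * live in the system startup code.
 */
#if 0 /* example only */
static void timer_subsystem_bringup(void)
{
    rt_system_timer_init();            /* hard timer lists */
    rt_system_timer_thread_init();     /* soft timer lists + "timer" thread */
}
#endif /* example only */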
/**@}*/