import RT-Thread@9217865c without bsp, libcpu and components/net

Zihao Yu 2023-05-20 16:23:33 +08:00
commit e2376a3709
1414 changed files with 390370 additions and 0 deletions

21
components/mm/SConscript Normal file

@@ -0,0 +1,21 @@
import os
from building import *

objs = []

if GetDepend('ARCH_ARM_CORTEX_A') or GetDepend('ARCH_ARMV8') or GetDepend('ARCH_RISCV64'):
    cwd = GetCurrentDir()
    src = Glob('*.c') + Glob('*_gcc.S')
    CPPPATH = [cwd]

    group = DefineGroup('mm', src, depend = ['ARCH_MM_MMU'], CPPPATH = CPPPATH)

    objs = [group]
    list = os.listdir(cwd)

    for d in list:
        path = os.path.join(cwd, d)
        if os.path.isfile(os.path.join(path, 'SConscript')):
            objs = objs + SConscript(os.path.join(d, 'SConscript'))

Return('objs')

180
components/mm/avl_adpt.c Normal file

@@ -0,0 +1,180 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-14 WangXiaoyao the first version
*/
#include <rtdef.h>
#include <avl.h>
#include "avl_adpt.h"
#include "mm_aspace.h"
#include "mm_private.h"
#define DBG_TAG "MM"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
/**
* @brief Adapter Layer for lwp AVL BST
*/
rt_err_t _aspace_bst_init(struct rt_aspace *aspace)
{
aspace->tree.tree.root_node = AVL_ROOT;
return RT_EOK;
}
static int compare_overlap(void *as, void *ae, void *bs, void *be)
{
LOG_D("as %lx, ae %lx, bs %lx, be %lx", as, ae, bs, be);
int cmp;
if (as > be)
{
cmp = 1;
}
else if (ae < bs)
{
cmp = -1;
}
else
{
cmp = 0;
}
LOG_D("ret %d", cmp);
return cmp;
}
static int compare_exceed(void *as, void *ae, void *bs, void *be)
{
LOG_D("as %lx, ae %lx, bs %lx, be %lx", as, ae, bs, be);
int cmp;
if (as > bs)
{
cmp = 1;
}
else if (as < bs)
{
cmp = -1;
}
else
{
cmp = 0;
}
LOG_D("ret %d", cmp);
return cmp;
}
static struct rt_varea *search(struct util_avl_root *root,
struct _mm_range range,
int (*compare)(void *as, void *ae, void *bs,
void *be))
{
struct util_avl_struct *node = root->root_node;
while (node)
{
rt_varea_t varea = VAREA_ENTRY(node);
int cmp = compare(range.start, range.end, varea->start,
(char *)varea->start + varea->size - 1);
if (cmp < 0)
{
node = node->avl_left;
}
else if (cmp > 0)
{
node = node->avl_right;
}
else
{
return varea;
}
}
return NULL;
}
struct rt_varea *_aspace_bst_search(struct rt_aspace *aspace, void *key)
{
struct util_avl_root *root = &aspace->tree.tree;
struct _mm_range range = {key, key};
return search(root, range, compare_overlap);
}
rt_varea_t _aspace_bst_search_exceed(struct rt_aspace *aspace, void *start)
{
struct util_avl_root *root = &aspace->tree.tree;
struct util_avl_struct *node = root->root_node;
rt_varea_t closest = NULL;
ptrdiff_t min_off = PTRDIFF_MAX;
while (node)
{
rt_varea_t varea = VAREA_ENTRY(node);
void *va_s = varea->start;
int cmp = compare_exceed(start, start, va_s, va_s);
if (cmp < 0)
{
/* varea starts beyond start */
ptrdiff_t off = (char *)va_s - (char *)start;
if (off < min_off)
{
min_off = off;
closest = varea;
}
node = node->avl_left;
}
else if (cmp > 0)
{
/* search right for a varea with a larger start address */
node = node->avl_right;
}
else
{
return varea;
}
}
return closest;
}
struct rt_varea *_aspace_bst_search_overlap(struct rt_aspace *aspace,
struct _mm_range range)
{
struct util_avl_root *root = &aspace->tree.tree;
return search(root, range, compare_overlap);
}
void _aspace_bst_insert(struct rt_aspace *aspace, struct rt_varea *varea)
{
struct util_avl_root *root = &aspace->tree.tree;
struct util_avl_struct *current = NULL;
struct util_avl_struct **next = &(root->root_node);
rt_ubase_t key = (rt_ubase_t)varea->start;
/* Figure out where to put new node */
while (*next)
{
current = *next;
struct rt_varea *data = VAREA_ENTRY(current);
if (key < (rt_ubase_t)data->start)
next = &(current->avl_left);
else if (key > (rt_ubase_t)data->start)
next = &(current->avl_right);
else
return;
}
/* Add new node and rebalance tree. */
util_avl_link(&varea->node.node, current, next);
util_avl_rebalance(current, root);
return;
}
void _aspace_bst_remove(struct rt_aspace *aspace, struct rt_varea *varea)
{
struct util_avl_struct *node = &varea->node.node;
util_avl_remove(node, &aspace->tree.tree);
}

40
components/mm/avl_adpt.h Normal file

@@ -0,0 +1,40 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-14 WangXiaoyao the first version
*/
#ifndef __MM_AVL_ADPT_H__
#define __MM_AVL_ADPT_H__
#include <avl.h>
#include <rtdef.h>
#include <rtthread.h>
#include <stdint.h>
#define VAREA_ENTRY(pnode) \
(pnode) \
? rt_container_of(rt_container_of(pnode, struct _aspace_node, node), \
struct rt_varea, node) \
: 0
#define ASPACE_VAREA_NEXT(pva) (VAREA_ENTRY(util_avl_next(&pva->node.node)))
#define ASPACE_VAREA_FIRST(aspace) (VAREA_ENTRY(util_avl_first(&aspace->tree.tree)))
#define ASPACE_VAREA_LAST(aspace) (VAREA_ENTRY(util_avl_last(&aspace->tree.tree)))
#define ASPACE_VAREA_PREV(pva) (VAREA_ENTRY(util_avl_prev(&pva->node.node)))
typedef struct _aspace_node
{
struct util_avl_struct node;
} *_aspace_node_t;
typedef struct _aspace_tree
{
struct util_avl_root tree;
} *_aspace_tree_t;
#endif /* __MM_AVL_ADPT_H__ */
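
A minimal sketch (not part of this commit) of how the traversal macros above are meant to be used; it assumes the RD_LOCK/RD_UNLOCK helpers and the rt_varea layout from mm_aspace.h, and mirrors what rt_aspace_traversal() in mm_aspace.c does:

static void walk_vareas(rt_aspace_t aspace)
{
    RD_LOCK(aspace);
    /* in-order walk over every varea registered in the address space */
    for (rt_varea_t va = ASPACE_VAREA_FIRST(aspace); va != RT_NULL;
         va = ASPACE_VAREA_NEXT(va))
    {
        rt_kprintf("varea [%p - %p]\n", va->start,
                   (char *)va->start + va->size);
    }
    RD_UNLOCK(aspace);
}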

123
components/mm/ioremap.c Normal file

@@ -0,0 +1,123 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-06 Jesven first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <mmu.h>
#include <mm_aspace.h>
#include <ioremap.h>
void *rt_ioremap_start;
size_t rt_ioremap_size;
#ifdef RT_USING_SMART
#include <lwp_mm.h>
#define DBG_TAG "mm.ioremap"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>
enum ioremap_type
{
MM_AREA_TYPE_PHY,
MM_AREA_TYPE_PHY_WT,
MM_AREA_TYPE_PHY_CACHED
};
static void *_ioremap_type(void *paddr, size_t size, enum ioremap_type type)
{
char *v_addr = NULL;
size_t attr;
size_t lo_off;
int err;
lo_off = (rt_ubase_t)paddr & ARCH_PAGE_MASK;
struct rt_mm_va_hint hint = {
.prefer = RT_NULL,
.map_size = RT_ALIGN(size + lo_off, ARCH_PAGE_SIZE),
.flags = 0,
.limit_start = rt_ioremap_start,
.limit_range_size = rt_ioremap_size,
};
switch (type)
{
case MM_AREA_TYPE_PHY:
attr = MMU_MAP_K_DEVICE;
break;
case MM_AREA_TYPE_PHY_WT:
attr = MMU_MAP_K_RW;
break;
case MM_AREA_TYPE_PHY_CACHED:
attr = MMU_MAP_K_RWCB;
break;
default:
return v_addr;
}
err = rt_aspace_map_phy(&rt_kernel_space, &hint, attr, MM_PA_TO_OFF(paddr), (void **)&v_addr);
if (err)
{
LOG_W("IOREMAP 0x%lx failed %d\n", paddr, err);
v_addr = NULL;
}
else
{
v_addr = v_addr + lo_off;
}
return v_addr;
}
void *rt_ioremap(void *paddr, size_t size)
{
return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY);
}
void *rt_ioremap_nocache(void *paddr, size_t size)
{
return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY);
}
void *rt_ioremap_wt(void *paddr, size_t size)
{
return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY_WT);
}
void *rt_ioremap_cached(void *paddr, size_t size)
{
return _ioremap_type(paddr, size, MM_AREA_TYPE_PHY_CACHED);
}
void rt_iounmap(volatile void *vaddr)
{
rt_aspace_unmap(&rt_kernel_space, (void *)vaddr);
}
#else
void *rt_ioremap(void *paddr, size_t size)
{
return paddr;
}
void *rt_ioremap_nocache(void *paddr, size_t size)
{
return paddr;
}
void *rt_ioremap_cached(void *paddr, size_t size)
{
return paddr;
}
void rt_iounmap(volatile void *vaddr)
{
}
#endif

46
components/mm/ioremap.h Normal file

@@ -0,0 +1,46 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-06 Jesven first version
*/
#ifndef __IOREMAP_H__
#define __IOREMAP_H__
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * IOREMAP family
 * `rt_ioremap` defaults to mapping physical memory in the MMIO region as
 * DEVICE memory into kernel space. There are 3 variants currently supported.
 *
 * name               | attribution
 * ------------------ | -----------
 * rt_ioremap_nocache | Device (MMU_MAP_K_DEVICE)
 * rt_ioremap_cached  | Normal memory (MMU_MAP_K_RWCB)
 * rt_ioremap_wt      | Normal memory, but each write access is guaranteed
 *                    | to go to system memory directly
 *                    | (currently mapped as non-cacheable)
 */
void *rt_ioremap(void *paddr, size_t size);
void *rt_ioremap_nocache(void *paddr, size_t size);
void *rt_ioremap_cached(void *paddr, size_t size);
void *rt_ioremap_wt(void *paddr, size_t size);
void rt_iounmap(volatile void *addr);
extern void *rt_ioremap_start;
extern size_t rt_ioremap_size;
#ifdef __cplusplus
}
#endif
#endif /* __IOREMAP_H__ */
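
A hedged usage sketch (not part of this commit): the physical base 0x10000000 and the 0x1000 window size are hypothetical placeholders for a real device's MMIO region.

#include <rtthread.h>
#include <ioremap.h>

static void probe_device(void)
{
    /* map the device registers as non-cacheable DEVICE memory */
    volatile rt_uint32_t *regs = rt_ioremap((void *)0x10000000, 0x1000);
    if (regs)
    {
        rt_kprintf("device id: 0x%x\n", regs[0]);  /* MMIO read via the VA */
        rt_iounmap(regs);                          /* drop the mapping */
    }
}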

950
components/mm/mm_aspace.c Normal file

@@ -0,0 +1,950 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-14 WangXiaoyao the first version
*/
/**
* @brief Virtual Address Space
*/
#include <rtdef.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
#define DBG_TAG "mm.aspace"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "avl_adpt.h"
#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include "mm_page.h"
#include "mm_private.h"
#include <mmu.h>
#include <tlb.h>
static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
void *limit_start, rt_size_t limit_size,
mm_flag_t flags);
static void _varea_uninstall(rt_varea_t varea);
struct rt_aspace rt_kernel_space;
static int _init_lock(rt_aspace_t aspace)
{
int err;
MM_PGTBL_LOCK_INIT(aspace);
err = rt_mutex_init(&aspace->bst_lock, "aspace", RT_IPC_FLAG_FIFO);
return err;
}
rt_err_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl)
{
int err = RT_EOK;
if (pgtbl)
{
aspace->page_table = pgtbl;
aspace->start = start;
aspace->size = length;
err = _aspace_bst_init(aspace);
if (err == RT_EOK)
{
            /**
             * This has the side effect that the lock will be added to the
             * object system management, so it must be paired with a detach
             * once the initialization returns successfully.
             */
err = _init_lock(aspace);
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl)
{
rt_aspace_t aspace = NULL;
int err;
RT_ASSERT(length <= 0 - (rt_size_t)start);
aspace = (rt_aspace_t)rt_malloc(sizeof(*aspace));
if (aspace)
{
rt_memset(aspace, 0, sizeof(*aspace));
err = rt_aspace_init(aspace, start, length, pgtbl);
if (err != RT_EOK)
{
LOG_W("%s(%p, %lx, %p): failed with code %d\n", __func__,
start, length, pgtbl, err);
rt_free(aspace);
aspace = RT_NULL;
}
}
return aspace;
}
void rt_aspace_detach(rt_aspace_t aspace)
{
WR_LOCK(aspace);
rt_varea_t varea = ASPACE_VAREA_FIRST(aspace);
while (varea)
{
        rt_varea_t prev = varea;
        varea = ASPACE_VAREA_NEXT(varea);
        _varea_uninstall(prev);
if (!(prev->flag & MMF_STATIC_ALLOC))
{
rt_free(prev);
}
}
WR_UNLOCK(aspace);
rt_mutex_detach(&aspace->bst_lock);
}
void rt_aspace_delete(rt_aspace_t aspace)
{
RT_ASSERT(aspace);
rt_aspace_detach(aspace);
rt_free(aspace);
}
static int _do_named_map(rt_aspace_t aspace, void *vaddr, rt_size_t length,
rt_size_t offset, rt_size_t attr)
{
LOG_D("%s: va %p length %p", __func__, vaddr, length);
int err = RT_EOK;
/* it's ensured by caller that (void*)end will not overflow */
void *phyaddr = (void *)(offset << MM_PAGE_SHIFT);
void *ret = rt_hw_mmu_map(aspace, vaddr, phyaddr, length, attr);
if (ret == RT_NULL)
{
err = -RT_ERROR;
}
if (err == RT_EOK)
rt_hw_tlb_invalidate_range(aspace, vaddr, length, ARCH_PAGE_SIZE);
return err;
}
rt_inline void _do_page_fault(struct rt_aspace_fault_msg *msg, rt_size_t off,
void *vaddr, rt_mem_obj_t mem_obj,
rt_varea_t varea)
{
msg->off = off;
msg->fault_vaddr = vaddr;
msg->fault_op = MM_FAULT_OP_READ;
msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
msg->response.vaddr = 0;
msg->response.size = 0;
mem_obj->on_page_fault(varea, msg);
}
int _varea_map_with_msg(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
{
int err = -RT_ERROR;
if (msg->response.status == MM_FAULT_STATUS_OK)
{
        /**
         * the page returned by the handler is not checked,
         * because few assumptions can be made about it
         */
char *store = msg->response.vaddr;
rt_size_t store_sz = msg->response.size;
if ((char *)msg->fault_vaddr + store_sz > (char *)varea->start + varea->size)
{
LOG_W("%s: too much (0x%lx) of buffer on vaddr %p is provided",
__func__, store_sz, msg->fault_vaddr);
}
else
{
void *map;
void *v_addr = msg->fault_vaddr;
void *p_addr = store + PV_OFFSET;
map = rt_hw_mmu_map(varea->aspace, v_addr, p_addr, store_sz, varea->attr);
if (!map)
{
LOG_W("%s: MMU mapping failed for va %p to %p of %lx", __func__,
msg->fault_vaddr, store + PV_OFFSET, store_sz);
}
else
{
rt_hw_tlb_invalidate_range(varea->aspace, v_addr, store_sz, ARCH_PAGE_SIZE);
err = RT_EOK;
}
}
}
else if (msg->response.status == MM_FAULT_STATUS_OK_MAPPED)
{
if (rt_hw_mmu_v2p(varea->aspace, msg->fault_vaddr) == ARCH_MAP_FAILED)
{
LOG_W("%s: no page is mapped on %p", __func__, msg->fault_vaddr);
}
else
{
err = RT_EOK;
}
}
else
{
LOG_W("%s: failed on va %p inside varea %p(%s)", __func__, msg->fault_vaddr, varea,
varea->mem_obj->get_name ? varea->mem_obj->get_name(varea) : "unknown");
}
return err;
}
/* allocate memory page for mapping range */
static int _do_prefetch(rt_aspace_t aspace, rt_varea_t varea, void *start,
rt_size_t size)
{
int err = RT_EOK;
    /* it's ensured by the caller that start & size are page-aligned */
char *end = (char *)start + size;
char *vaddr = start;
rt_size_t off = varea->offset + ((vaddr - (char *)varea->start) >> ARCH_PAGE_SHIFT);
while (vaddr != end)
{
/* TODO try to map with huge TLB, when flag & HUGEPAGE */
struct rt_aspace_fault_msg msg;
_do_page_fault(&msg, off, vaddr, varea->mem_obj, varea);
if (_varea_map_with_msg(varea, &msg))
{
err = -RT_ENOMEM;
break;
}
        /**
         * It's hard to identify the mapping pattern of a customized handler,
         * so we terminate the prefetch process in that case
         */
if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
break;
vaddr += msg.response.size;
off += msg.response.size >> ARCH_PAGE_SHIFT;
}
return err;
}
/* caller must hold the aspace lock */
static int _varea_install(rt_aspace_t aspace, rt_varea_t varea, rt_mm_va_hint_t hint)
{
void *alloc_va;
int err = RT_EOK;
    /**
     * find a suitable va range.
     * even though this may sleep, it's still ok for the startup routine
     */
alloc_va =
_find_free(aspace, hint->prefer, hint->map_size, hint->limit_start,
hint->limit_range_size, hint->flags);
/* TODO try merge surrounding regions to optimize memory footprint */
if (alloc_va != RT_NULL)
{
varea->start = alloc_va;
_aspace_bst_insert(aspace, varea);
}
else
{
err = -RT_ENOSPC;
}
return err;
}
static inline void _varea_post_install(rt_varea_t varea, rt_aspace_t aspace,
rt_size_t attr, rt_size_t flags,
rt_mem_obj_t mem_obj, rt_size_t offset)
{
varea->aspace = aspace;
varea->attr = attr;
varea->mem_obj = mem_obj;
varea->flag = flags;
varea->offset = offset;
varea->frames = NULL;
if (varea->mem_obj && varea->mem_obj->on_varea_open)
varea->mem_obj->on_varea_open(varea);
}
/**
* restore context modified by varea install
* caller must NOT hold the aspace lock
*/
static void _varea_uninstall(rt_varea_t varea)
{
rt_aspace_t aspace = varea->aspace;
if (varea->mem_obj && varea->mem_obj->on_varea_close)
varea->mem_obj->on_varea_close(varea);
rt_hw_mmu_unmap(aspace, varea->start, varea->size);
rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);
rt_varea_pgmgr_pop_all(varea);
WR_LOCK(aspace);
_aspace_bst_remove(aspace, varea);
WR_UNLOCK(aspace);
}
static int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t varea, rt_size_t attr,
mm_flag_t flags, rt_mem_obj_t mem_obj,
rt_size_t offset)
{
int err = RT_EOK;
WR_LOCK(aspace);
    /**
     * @brief .prefer & .map_size are taken from the varea set up by the caller;
     * .limit_start & .limit_range_size default to the range of the aspace;
     * .flags comes from the parameter and is filled into the varea if the
     * install succeeds
     */
struct rt_mm_va_hint hint = {.prefer = varea->start,
.map_size = varea->size,
.limit_start = aspace->start,
.limit_range_size = aspace->size,
.flags = flags};
if (mem_obj->hint_free)
{
        /* the mem object can adjust the mapping range by modifying the hint */
mem_obj->hint_free(&hint);
}
/* try to allocate a virtual address region for varea */
err = _varea_install(aspace, varea, &hint);
WR_UNLOCK(aspace);
if (err == RT_EOK)
{
/* fill in varea data */
_varea_post_install(varea, aspace, attr, flags, mem_obj, offset);
if (MMF_TEST_CNTL(flags, MMF_PREFETCH))
{
/* do the MMU & TLB business */
err = _do_prefetch(aspace, varea, varea->start, varea->size);
if (err)
{
/* restore data structure and MMU */
_varea_uninstall(varea);
}
}
}
return err;
}
rt_varea_t _varea_create(void *start, rt_size_t size)
{
rt_varea_t varea;
varea = (rt_varea_t)rt_malloc(sizeof(struct rt_varea));
if (varea)
{
varea->start = start;
varea->size = size;
}
return varea;
}
#define _IS_OVERFLOW(start, length) ((length) > (0ul - (uintptr_t)(start)))
#define _IS_OVERSIZE(start, length, limit_s, limit_sz) (((length) + (rt_size_t)((char *)(start) - (char *)(limit_s))) > (limit_sz))
static inline int _not_in_range(void *start, rt_size_t length,
void *limit_start, rt_size_t limit_size)
{
if (start != RT_NULL)
LOG_D("%s: [%p : %p] [%p : %p]", __func__, start, length, limit_start, limit_size);
/* assuming (base + length) will not overflow except (0) */
return start != RT_NULL
? (_IS_OVERFLOW(start, length) || start < limit_start ||
_IS_OVERSIZE(start, length, limit_start, limit_size))
: length > limit_size;
}
static inline int _not_align(void *start, rt_size_t length, rt_size_t mask)
{
return (start != RT_NULL) &&
(((uintptr_t)start & mask) || (length & mask));
}
static inline int _not_support(rt_size_t flags)
{
rt_size_t support_ops = (MMF_PREFETCH | MMF_MAP_FIXED | MMF_TEXT |
MMF_STATIC_ALLOC | MMF_REQUEST_ALIGN);
return flags & ~(support_ops | _MMF_ALIGN_MASK);
}
int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length,
rt_size_t attr, mm_flag_t flags, rt_mem_obj_t mem_obj,
rt_size_t offset)
{
/* TODO check not in atomic context: irq, spinlock, local intr disable... */
int err;
rt_varea_t varea;
if (!aspace || !addr || !mem_obj || length == 0)
{
err = -RT_EINVAL;
LOG_I("%s(%p, %p, %lx, %lx, %lx, %p, %lx): Invalid input",
__func__, aspace, addr, length, attr, flags, mem_obj, offset);
}
else if (_not_in_range(*addr, length, aspace->start, aspace->size))
{
err = -RT_EINVAL;
LOG_I("%s(addr:%p, len:%lx): out of range", __func__, *addr, length);
}
else if (_not_support(flags))
{
LOG_I("%s: no support flags 0x%lx", __func__, flags);
err = -RT_ENOSYS;
}
else
{
/* allocate the varea and fill in start and size */
varea = _varea_create(*addr, length);
if (varea)
{
err = _mm_aspace_map(aspace, varea, attr, flags, mem_obj, offset);
if (err != RT_EOK)
{
rt_free(varea);
}
else
{
*addr = varea->start;
}
}
else
{
LOG_W("%s: memory allocation failed", __func__);
err = -RT_ENOMEM;
}
}
if (err != RT_EOK)
{
*addr = NULL;
}
return err;
}
int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
rt_size_t length, rt_size_t attr, mm_flag_t flags,
rt_mem_obj_t mem_obj, rt_size_t offset)
{
int err;
if (!aspace || !varea || !addr || !mem_obj || length == 0 ||
_not_in_range(*addr, length, aspace->start, aspace->size))
{
err = -RT_EINVAL;
LOG_W("%s: Invalid input", __func__);
}
else if (_not_support(flags))
{
LOG_W("%s: no support flags", __func__);
err = -RT_ENOSYS;
}
else
{
varea->size = length;
varea->start = *addr;
flags |= MMF_STATIC_ALLOC;
err = _mm_aspace_map(aspace, varea, attr, flags, mem_obj, offset);
}
if (err != RT_EOK)
{
*addr = NULL;
}
else
{
*addr = varea->start;
}
return err;
}
int _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
void **ret_va)
{
int err;
void *vaddr;
if (!aspace || !hint || !hint->limit_range_size || !hint->map_size)
{
LOG_W("%s: Invalid input", __func__);
err = -RT_EINVAL;
}
else if (_not_align(hint->prefer, hint->map_size, ARCH_PAGE_MASK))
{
LOG_W("%s: not aligned", __func__);
err = -RT_EINVAL;
}
else if (_not_in_range(hint->limit_start, hint->limit_range_size, aspace->start,
aspace->size) ||
_not_in_range(hint->prefer, hint->map_size, aspace->start,
aspace->size))
{
LOG_W("%s: not in range", __func__);
err = -RT_EINVAL;
}
else
{
WR_LOCK(aspace);
err = _varea_install(aspace, varea, hint);
WR_UNLOCK(aspace);
if (err == RT_EOK)
{
_varea_post_install(varea, aspace, attr, hint->flags, NULL, pa_off);
vaddr = varea->start;
err = _do_named_map(aspace, varea->start, varea->size,
(rt_size_t)pa_off, attr);
if (err != RT_EOK)
{
_varea_uninstall(varea);
}
}
}
if (ret_va)
{
if (err == RT_EOK)
*ret_va = vaddr;
else
*ret_va = RT_NULL;
}
return err;
}
int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
rt_size_t pa_off, void **ret_va)
{
int err;
if (hint)
{
rt_varea_t varea = _varea_create(hint->prefer, hint->map_size);
if (varea)
{
err = _mm_aspace_map_phy(aspace, varea, hint, attr, pa_off, ret_va);
if (err != RT_EOK)
{
rt_free(varea);
}
}
else
{
err = -RT_ENOMEM;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
rt_mm_va_hint_t hint, rt_size_t attr,
rt_size_t pa_off, void **ret_va)
{
int err;
if (varea && hint)
{
varea->start = hint->prefer;
varea->size = hint->map_size;
hint->flags |= (MMF_MAP_FIXED | MMF_STATIC_ALLOC);
LOG_D("%s: start %p size %p phy at %p", __func__, varea->start, varea->size, pa_off << MM_PAGE_SHIFT);
err = _mm_aspace_map_phy(aspace, varea, hint, attr, pa_off, ret_va);
}
else
{
err = -RT_EINVAL;
}
return err;
}
void _aspace_unmap(rt_aspace_t aspace, void *addr)
{
WR_LOCK(aspace);
rt_varea_t varea = _aspace_bst_search(aspace, addr);
WR_UNLOCK(aspace);
    if (varea == RT_NULL)
    {
        LOG_I("%s: No such entry found at %p\n", __func__, addr);
        return;
    }
    _varea_uninstall(varea);
if (!(varea->flag & MMF_STATIC_ALLOC))
{
rt_free(varea);
}
}
int rt_aspace_unmap(rt_aspace_t aspace, void *addr)
{
if (!aspace)
{
LOG_I("%s: Invalid input", __func__);
return -RT_EINVAL;
}
if (_not_in_range(addr, 1, aspace->start, aspace->size))
{
LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
aspace->start, (char *)aspace->start + aspace->size);
return -RT_EINVAL;
}
_aspace_unmap(aspace, addr);
return RT_EOK;
}
static inline void *_lower(void *a, void *b)
{
return a < b ? a : b;
}
static inline void *_align(void *va, rt_ubase_t align_mask)
{
return (void *)((rt_ubase_t)((char *)va + ~align_mask) & align_mask);
}
static void *_ascending_search(rt_varea_t varea, rt_size_t req_size,
rt_ubase_t align_mask, struct _mm_range limit)
{
void *ret = RT_NULL;
while (varea && varea->start < limit.end)
{
char *candidate = (char *)varea->start + varea->size;
candidate = _align(candidate, align_mask);
if (candidate > (char *)limit.end || (char *)limit.end - candidate + 1 < req_size)
break;
rt_varea_t nx_va = ASPACE_VAREA_NEXT(varea);
if (nx_va)
{
rt_size_t gap_size =
(char *)_lower(limit.end, (char *)nx_va->start - 1) - candidate + 1;
if (gap_size >= req_size)
{
ret = candidate;
break;
}
}
else
{
ret = candidate;
}
varea = nx_va;
}
return ret;
}
/** find suitable place in [limit_start, limit_end] */
static void *_find_head_and_asc_search(rt_aspace_t aspace, rt_size_t req_size,
rt_ubase_t align_mask,
struct _mm_range limit)
{
void *va = RT_NULL;
rt_varea_t varea = _aspace_bst_search_exceed(aspace, limit.start);
if (varea)
{
char *candidate = _align(limit.start, align_mask);
rt_size_t gap_size = (char *)varea->start - candidate;
if (gap_size >= req_size)
{
rt_varea_t former = _aspace_bst_search(aspace, limit.start);
if (former)
{
candidate = _align((char *)former->start + former->size, align_mask);
gap_size = (char *)varea->start - candidate;
if (gap_size >= req_size)
va = candidate;
else
va = _ascending_search(varea, req_size, align_mask, limit);
}
else
{
va = candidate;
}
}
else
{
va = _ascending_search(varea, req_size, align_mask, limit);
}
}
else
{
char *candidate;
rt_size_t gap_size;
candidate = limit.start;
candidate = _align(candidate, align_mask);
gap_size = (char *)limit.end - candidate + 1;
if (gap_size >= req_size)
va = candidate;
}
return va;
}
static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
void *limit_start, rt_size_t limit_size,
mm_flag_t flags)
{
rt_varea_t varea = NULL;
void *va = RT_NULL;
struct _mm_range limit = {limit_start, (char *)limit_start + limit_size - 1};
rt_ubase_t align_mask = ~0ul;
if (flags & MMF_REQUEST_ALIGN)
{
        align_mask = ~((1ul << MMF_GET_ALIGN(flags)) - 1);
}
if (prefer != RT_NULL)
{
        /* if a preferred address is given and that region is free, just return it */
prefer = _align(prefer, align_mask);
struct _mm_range range = {prefer, (char *)prefer + req_size - 1};
varea = _aspace_bst_search_overlap(aspace, range);
if (!varea)
{
va = prefer;
}
else if (flags & MMF_MAP_FIXED)
{
/* OVERLAP */
}
else
{
/* search from `varea` in ascending order */
va = _ascending_search(varea, req_size, align_mask, limit);
if (va == RT_NULL)
{
/* rewind to first range */
limit.end = (char *)varea->start - 1;
va = _find_head_and_asc_search(aspace, req_size, align_mask,
limit);
}
}
}
else
{
va = _find_head_and_asc_search(aspace, req_size, align_mask, limit);
}
return va;
}
int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
{
int err = RT_EOK;
rt_varea_t varea;
char *end = (char *)addr + (npage << ARCH_PAGE_SHIFT);
WR_LOCK(aspace);
varea = _aspace_bst_search(aspace, addr);
WR_UNLOCK(aspace);
if (!varea)
{
LOG_W("%s: varea not exist", __func__);
err = -RT_ENOENT;
}
else if ((char *)addr >= end || (rt_size_t)addr & ARCH_PAGE_MASK ||
_not_in_range(addr, npage << ARCH_PAGE_SHIFT, varea->start,
varea->size))
{
LOG_W("%s: Unaligned parameter or out of range", __func__);
err = -RT_EINVAL;
}
else
{
err = _do_prefetch(aspace, varea, addr, npage << ARCH_PAGE_SHIFT);
}
return err;
}
int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page)
{
int err = RT_EOK;
void *page_pa = rt_kmem_v2p(page);
if (!varea || !vaddr || !page)
{
LOG_W("%s(%p,%p,%p): invalid input", __func__, varea, vaddr, page);
err = -RT_EINVAL;
}
else if (page_pa == ARCH_MAP_FAILED)
{
LOG_W("%s: page is not in kernel space", __func__);
err = -RT_ERROR;
}
else if (_not_in_range(vaddr, ARCH_PAGE_SIZE, varea->start, varea->size))
{
LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
vaddr, ARCH_PAGE_SIZE, varea->start, varea->size);
err = -RT_EINVAL;
}
else
{
err = _do_named_map(
varea->aspace,
vaddr,
ARCH_PAGE_SIZE,
MM_PA_TO_OFF(page_pa),
varea->attr
);
}
return err;
}
#define ALIGNED(addr) (!((rt_size_t)(addr) & ARCH_PAGE_MASK))
int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length)
{
int err;
if (!varea || !vaddr || !paddr || !length ||
!ALIGNED(vaddr) || !ALIGNED(paddr) || !(ALIGNED(length)))
{
LOG_W("%s(%p,%p,%p,%lx): invalid input", __func__, varea, vaddr, paddr, length);
err = -RT_EINVAL;
}
else if (_not_in_range(vaddr, length, varea->start, varea->size))
{
LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
vaddr, length, varea->start, varea->size);
err = -RT_EINVAL;
}
else
{
err = _do_named_map(
varea->aspace,
vaddr,
length,
MM_PA_TO_OFF(paddr),
varea->attr
);
}
return err;
}
int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
{
return -RT_ENOSYS;
}
int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd)
{
int err;
rt_varea_t varea;
WR_LOCK(aspace);
varea = _aspace_bst_search(aspace, addr);
WR_UNLOCK(aspace);
if (varea)
{
err = rt_hw_mmu_control(aspace, varea->start, varea->size, cmd);
if (err == RT_EOK)
{
rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);
}
}
else
{
err = -RT_ENOENT;
}
return err;
}
int rt_aspace_traversal(rt_aspace_t aspace,
int (*fn)(rt_varea_t varea, void *arg), void *arg)
{
rt_varea_t varea;
WR_LOCK(aspace);
varea = ASPACE_VAREA_FIRST(aspace);
while (varea)
{
fn(varea, arg);
varea = ASPACE_VAREA_NEXT(varea);
}
WR_UNLOCK(aspace);
return 0;
}
static int _dump(rt_varea_t varea, void *arg)
{
if (varea->mem_obj && varea->mem_obj->get_name)
{
rt_kprintf("[%p - %p] %s\n", varea->start, (char *)varea->start + varea->size,
varea->mem_obj->get_name(varea));
}
else
{
rt_kprintf("[%p - %p] phy-map\n", varea->start, (char *)varea->start + varea->size);
rt_kprintf("\t\\_ paddr = %p\n", varea->offset << MM_PAGE_SHIFT);
}
return 0;
}
void rt_aspace_print_all(rt_aspace_t aspace)
{
rt_aspace_traversal(aspace, _dump, NULL);
}
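
A minimal usage sketch (not part of this commit) for the central rt_aspace_map() path above: an anonymous, prefetched kernel mapping backed by rt_mm_dummy_mapper. The MMU_MAP_K_RWCB attribute is assumed to come from the architecture's mmu.h, as elsewhere in this commit.

static int demo_anonymous_map(void)
{
    void *vaddr = RT_NULL;   /* RT_NULL lets _find_free() pick the address */
    int err = rt_aspace_map(&rt_kernel_space, &vaddr, 4 * ARCH_PAGE_SIZE,
                            MMU_MAP_K_RWCB, MMF_PREFETCH,
                            &rt_mm_dummy_mapper, 0);
    if (err == RT_EOK)
    {
        rt_memset(vaddr, 0, 4 * ARCH_PAGE_SIZE);   /* pages were prefetched */
        rt_aspace_unmap(&rt_kernel_space, vaddr);
    }
    return err;
}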

239
components/mm/mm_aspace.h Normal file

@@ -0,0 +1,239 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-14 WangXiaoyao the first version
*/
#ifndef __MM_ASPACE_H__
#define __MM_ASPACE_H__
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include "avl_adpt.h"
#include "mm_fault.h"
#include "mm_flag.h"
#define MM_PAGE_SHIFT 12
#define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
#define PV_OFFSET (rt_kmem_pvoff())
#ifndef RT_USING_SMP
typedef rt_spinlock_t mm_spinlock;
#define MM_PGTBL_LOCK_INIT(aspace)
#define MM_PGTBL_LOCK(aspace) (rt_hw_spin_lock(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_UNLOCK(aspace) (rt_hw_spin_unlock(&((aspace)->pgtbl_lock)))
#else
typedef struct rt_spinlock mm_spinlock;
#define MM_PGTBL_LOCK_INIT(aspace) (rt_spin_lock_init(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_LOCK(aspace) (rt_spin_lock(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_UNLOCK(aspace) (rt_spin_unlock(&((aspace)->pgtbl_lock)))
#endif /* RT_USING_SMP */
struct rt_aspace;
struct rt_varea;
struct rt_mem_obj;
extern struct rt_aspace rt_kernel_space;
typedef struct rt_aspace
{
void *start;
rt_size_t size;
void *page_table;
mm_spinlock pgtbl_lock;
struct _aspace_tree tree;
struct rt_mutex bst_lock;
rt_uint64_t asid;
} *rt_aspace_t;
typedef struct rt_varea
{
void *start;
rt_size_t size;
rt_size_t offset;
rt_size_t attr;
rt_size_t flag;
struct rt_aspace *aspace;
struct rt_mem_obj *mem_obj;
struct _aspace_node node;
struct rt_page *frames;
void *data;
} *rt_varea_t;
typedef struct rt_mm_va_hint
{
void *limit_start;
rt_size_t limit_range_size;
void *prefer;
const rt_size_t map_size;
mm_flag_t flags;
} *rt_mm_va_hint_t;
typedef struct rt_mem_obj
{
void (*hint_free)(rt_mm_va_hint_t hint);
void (*on_page_fault)(struct rt_varea *varea, struct rt_aspace_fault_msg *msg);
    /* do pre-open business like increasing a reference count */
void (*on_varea_open)(struct rt_varea *varea);
    /* do post-close business like dropping a reference count */
void (*on_varea_close)(struct rt_varea *varea);
void (*on_page_offload)(struct rt_varea *varea, void *vaddr, rt_size_t size);
const char *(*get_name)(rt_varea_t varea);
} *rt_mem_obj_t;
extern struct rt_mem_obj rt_mm_dummy_mapper;
enum rt_mmu_cntl
{
MMU_CNTL_NONCACHE,
MMU_CNTL_CACHE,
MMU_CNTL_READONLY,
MMU_CNTL_READWRITE,
MMU_CNTL_DUMMY_END,
};
/**
* @brief Lock to access page table of address space
*/
#define WR_LOCK(aspace) \
rt_thread_self() ? rt_mutex_take(&(aspace)->bst_lock, RT_WAITING_FOREVER) \
: 0
#define WR_UNLOCK(aspace) \
rt_thread_self() ? rt_mutex_release(&(aspace)->bst_lock) : 0
#define RD_LOCK(aspace) WR_LOCK(aspace)
#define RD_UNLOCK(aspace) WR_UNLOCK(aspace)
rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);
rt_err_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl);
void rt_aspace_delete(rt_aspace_t aspace);
void rt_aspace_detach(rt_aspace_t aspace);
/**
* @brief Memory Map on Virtual Address Space to Mappable Object
 * *INFO There is no restriction on using a NULL address (physical/virtual).
 *       The vaddr passed in addr must be page aligned. If vaddr is RT_NULL,
 *       a suitable address will be chosen automatically.
*
* @param aspace target virtual address space
* @param addr virtual address of the mapping
* @param length length of mapping region
* @param attr MMU attribution
* @param flags desired memory protection and behaviour of the mapping
* @param mem_obj memory map backing store object
* @param offset offset of mapping in 4KB page for mem_obj
 * @return int RT_EOK on success, with addr set to the vaddr of the mapping;
 *         a negative error code (e.g. -RT_EINVAL) on failure
*/
int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length, rt_size_t attr,
mm_flag_t flags, rt_mem_obj_t mem_obj, rt_size_t offset);
/** no malloc routines are called */
int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
rt_size_t length, rt_size_t attr, mm_flag_t flags,
rt_mem_obj_t mem_obj, rt_size_t offset);
/**
* @brief Memory Map on Virtual Address Space to Physical Memory
*
* @param aspace target virtual address space
* @param hint hint of mapping va
* @param attr MMU attribution
* @param pa_off (physical address >> 12)
* @param ret_va pointer to the location to store va
 * @return int RT_EOK on success, with ret_va set to the vaddr of the mapping;
 *         a negative error code (e.g. -RT_EINVAL) on failure
*/
int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
rt_size_t pa_off, void **ret_va);
/** no malloc routines are called */
int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
void **ret_va);
/**
 * @brief Remove any mappings overlapping the range [addr, addr + bytes)
*
* @param aspace
* @param addr
* @return int
*/
int rt_aspace_unmap(rt_aspace_t aspace, void *addr);
int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd);
int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage);
int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage);
int rt_aspace_traversal(rt_aspace_t aspace,
int (*fn)(rt_varea_t varea, void *arg), void *arg);
void rt_aspace_print_all(rt_aspace_t aspace);
/**
* @brief Map one page to varea
*
* @param varea target varea
* @param addr user address
* @param page the page frame to be mapped
* @return int
*/
int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);
/**
* @brief Map a range of physical address to varea
*
* @param varea target varea
* @param vaddr user address
* @param paddr physical address
* @param length map range
* @return int
*/
int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length);
/**
* @brief Insert page to page manager of varea
* The page will be freed by varea on uninstall automatically
*
* @param varea target varea
* @param page_addr the page frame to be added
*/
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr);
rt_ubase_t rt_kmem_pvoff(void);
void rt_kmem_pvoff_set(rt_ubase_t pvoff);
int rt_kmem_map_phy(void *va, void *pa, rt_size_t length, rt_size_t attr);
void *rt_kmem_v2p(void *vaddr);
void rt_kmem_list(void);
#endif /* __MM_ASPACE_H__ */
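
A hedged sketch of the physical-mapping path declared above, mirroring the hint that rt_ioremap() and rt_kmem_map_phy() build internally; the physical base 0x40000000 and the MMU_MAP_K_DEVICE attribute (from the architecture's mmu.h) are assumptions for illustration.

static void *demo_map_phy(void)
{
    void *vaddr = RT_NULL;
    struct rt_mm_va_hint hint = {
        .prefer           = RT_NULL,            /* let the allocator pick */
        .map_size         = 2 * ARCH_PAGE_SIZE, /* must be page aligned */
        .flags            = 0,
        .limit_start      = rt_kernel_space.start,
        .limit_range_size = rt_kernel_space.size,
    };
    int err = rt_aspace_map_phy(&rt_kernel_space, &hint, MMU_MAP_K_DEVICE,
                                MM_PA_TO_OFF(0x40000000), &vaddr);
    return err == RT_EOK ? vaddr : RT_NULL;
}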

128
components/mm/mm_fault.c Normal file

@@ -0,0 +1,128 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-12-06 WangXiaoyao the first version
*/
#include <rtthread.h>
#ifdef RT_USING_SMART
#define DBG_TAG "mm.fault"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <lwp.h>
#include <lwp_syscall.h>
#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include "mm_private.h"
#include <mmu.h>
#include <tlb.h>
#define UNRECOVERABLE 0
#define RECOVERABLE 1
static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
msg->response.vaddr = 0;
msg->response.size = 0;
if (varea->mem_obj && varea->mem_obj->on_page_fault)
{
varea->mem_obj->on_page_fault(varea, msg);
err = _varea_map_with_msg(varea, msg);
err = (err == RT_EOK ? RECOVERABLE : UNRECOVERABLE);
}
return err;
}
static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
{
RT_ASSERT(pa == ARCH_MAP_FAILED);
RT_ASSERT(!(varea->flag & MMF_PREFETCH));
err = _fetch_page(varea, msg);
}
else
{
/* signal a fault to user? */
}
return err;
}
static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
{
RT_ASSERT(pa == ARCH_MAP_FAILED);
RT_ASSERT(!(varea->flag & MMF_PREFETCH));
err = _fetch_page(varea, msg);
}
else if (msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT &&
varea->flag & MMF_COW)
{
}
else
{
/* signal a fault to user? */
}
return err;
}
static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
int err = UNRECOVERABLE;
if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
{
RT_ASSERT(pa == ARCH_MAP_FAILED);
RT_ASSERT(!(varea->flag & MMF_PREFETCH));
err = _fetch_page(varea, msg);
}
return err;
}
int rt_aspace_fault_try_fix(struct rt_aspace_fault_msg *msg)
{
struct rt_lwp *lwp = lwp_self();
int err = UNRECOVERABLE;
uintptr_t va = (uintptr_t)msg->fault_vaddr;
va &= ~ARCH_PAGE_MASK;
msg->fault_vaddr = (void *)va;
if (lwp)
{
rt_aspace_t aspace = lwp->aspace;
rt_varea_t varea = _aspace_bst_search(aspace, msg->fault_vaddr);
if (varea)
{
void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
msg->off = ((char *)msg->fault_vaddr - (char *)varea->start) >> ARCH_PAGE_SHIFT;
/* permission checked by fault op */
switch (msg->fault_op)
{
case MM_FAULT_OP_READ:
err = _read_fault(varea, pa, msg);
break;
case MM_FAULT_OP_WRITE:
err = _write_fault(varea, pa, msg);
break;
case MM_FAULT_OP_EXECUTE:
err = _exec_fault(varea, pa, msg);
break;
}
}
}
return err;
}
#endif /* RT_USING_SMART */

58
components/mm/mm_fault.h Normal file

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-12-06 WangXiaoyao the first version
*/
#ifndef __MM_FAULT_H__
#define __MM_FAULT_H__
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
/* fast path fault handler, a page frame in kernel space is returned */
#define MM_FAULT_STATUS_OK 0
/* customized fault handler, done by using rt_varea_map_* */
#define MM_FAULT_STATUS_OK_MAPPED 1
#define MM_FAULT_STATUS_UNRECOVERABLE 4
struct rt_mm_fault_res
{
void *vaddr;
rt_size_t size;
int status;
};
enum rt_mm_fault_op
{
MM_FAULT_OP_READ = 1,
MM_FAULT_OP_WRITE,
MM_FAULT_OP_EXECUTE,
};
enum rt_mm_fault_type
{
MM_FAULT_TYPE_ACCESS_FAULT,
MM_FAULT_TYPE_PAGE_FAULT,
MM_FAULT_TYPE_BUS_ERROR,
MM_FAULT_TYPE_GENERIC,
};
struct rt_aspace_fault_msg
{
enum rt_mm_fault_op fault_op;
enum rt_mm_fault_type fault_type;
rt_size_t off;
void *fault_vaddr;
struct rt_mm_fault_res response;
};
/* MMU base page fault handler; returns 1 if the fault was fixed (recoverable), 0 otherwise */
int rt_aspace_fault_try_fix(struct rt_aspace_fault_msg *msg);
#endif /* __MM_FAULT_H__ */
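
A hedged sketch of how an architecture-level trap handler is expected to feed rt_aspace_fault_try_fix() on the RT_USING_SMART path; how fault_va and is_write are decoded from the trap frame is hypothetical and arch-specific.

/* returns 1 when the fault was fixed (RECOVERABLE), 0 otherwise */
static int arch_handle_data_abort(void *fault_va, int is_write)
{
    struct rt_aspace_fault_msg msg;

    msg.fault_vaddr = fault_va;
    msg.fault_op    = is_write ? MM_FAULT_OP_WRITE : MM_FAULT_OP_READ;
    msg.fault_type  = MM_FAULT_TYPE_PAGE_FAULT;
    return rt_aspace_fault_try_fix(&msg);
}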

91
components/mm/mm_flag.h Normal file

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-23 WangXiaoyao the first version
*/
#ifndef __MM_FLAG_H__
#define __MM_FLAG_H__
/**
* @brief mm_flag_t
* |max ------- 7|6 ----- 0|
* | control | align |
*
* there should be no more than 25 flags
*/
typedef unsigned long mm_flag_t;
#define _MMF_CNTL_SHIFT 7
#define _MMF_ALIGN_MASK 0x7f
#define _MMF_CNTL_MASK (~((1 << _MMF_CNTL_SHIFT) - 1))
#define _DEF_FLAG(index) (1 << (_MMF_CNTL_SHIFT + (index)))
enum mm_flag_cntl
{
/**
* @brief Indicate a possible COW mapping
*/
MMF_MAP_PRIVATE = _DEF_FLAG(0),
MMF_COW = _DEF_FLAG(1),
/**
* @brief [POSIX MAP_FIXED] When MAP_FIXED is set in the flags argument, the
* implementation is informed that the value of pa shall be addr, exactly.
* If a MAP_FIXED request is successful, the mapping established
* by mmap() replaces any previous mappings for the pages in the range
* [pa,pa+len) of the process.
*/
MMF_MAP_FIXED = _DEF_FLAG(2),
    /**
     * @brief The backing page frame is allocated and set up only when it is
     * truly needed by the user
     */
MMF_PREFETCH = _DEF_FLAG(3),
MMF_HUGEPAGE = _DEF_FLAG(4),
MMF_TEXT = _DEF_FLAG(5),
MMF_STATIC_ALLOC = _DEF_FLAG(6),
    /**
     * @brief Non-locked memory can be swapped out when required; this is
     * reserved for the future
     */
MMF_NONLOCKED = _DEF_FLAG(20),
    /**
     * @brief An alignment that the mapping must honor is specified in the flags
     */
MMF_REQUEST_ALIGN = _DEF_FLAG(21),
};
#define MMF_GET_ALIGN(src) ((src & _MMF_ALIGN_MASK))
#define MMF_SET_ALIGN(src, align) \
((src & ~_MMF_ALIGN_MASK) | (__builtin_ffsl(align) - 1))
#define MMF_GET_CNTL(src) (src & _MMF_CNTL_MASK)
#define MMF_TEST_CNTL(src, flag) (src & flag)
#define MMF_SET_CNTL(src, flag) ((src) | (flag))
#define MMF_CLEAR_CNTL(src, flag) ((src) & ~(flag))
/**
* @brief Create Flags
*
* example: MMF_CREATE(0, 0)
 * MMF_CREATE(MMF_MAP_FIXED, 0x2000)
*
* Direct use of flag is also acceptable: (MMF_MAP_FIXED | MMF_PREFETCH)
*/
#define MMF_CREATE(cntl, align) \
((align) ? (MMF_SET_CNTL((mm_flag_t)0, (cntl) | MMF_REQUEST_ALIGN) | \
MMF_SET_ALIGN((mm_flag_t)0, (align))) \
: (MMF_SET_CNTL((mm_flag_t)0, (cntl) & ~MMF_REQUEST_ALIGN)))
#undef _DEF_FLAG
#endif /* __MM_FLAG_H__ */
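
A tiny sketch of the flag builder described above: combine control bits with an optional alignment request (the 64 KB value is only an example, not taken from this commit).

static mm_flag_t demo_flags(void)
{
    /* prefetch the backing pages and require a 64 KB (2^16) aligned VA;
     * MMF_CREATE also sets MMF_REQUEST_ALIGN because align != 0 */
    return MMF_CREATE(MMF_PREFETCH, 0x10000);
}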

80
components/mm/mm_kmem.c Normal file

@@ -0,0 +1,80 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-14 WangXiaoyao the first version
*/
#include <rtthread.h>
#define DBG_TAG "mm.kmem"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "mm_aspace.h"
#include "mm_private.h"
#include <mmu.h>
static void list_kmem(void)
{
rt_aspace_print_all(&rt_kernel_space);
}
MSH_CMD_EXPORT(list_kmem, List varea in kernel virtual memory space);
void rt_kmem_list(void) __attribute__((alias("list_kmem")));
static rt_ubase_t rt_pv_offset;
rt_ubase_t rt_kmem_pvoff(void)
{
return rt_pv_offset;
}
void rt_kmem_pvoff_set(rt_ubase_t pvoff)
{
rt_pv_offset = pvoff;
}
#define _KMEM_LO_OFF(addr) ((rt_ubase_t)(addr) & ARCH_PAGE_MASK)
int rt_kmem_map_phy(void *va, void *pa, rt_size_t length, rt_size_t attr)
{
int err;
size_t lo_off;
lo_off = _KMEM_LO_OFF(pa);
if (va == RT_NULL)
{
LOG_E("%s: va NULL is not a valid input", __func__);
err = -RT_EINVAL;
}
else if (_KMEM_LO_OFF(pa) != _KMEM_LO_OFF(va))
{
LOG_E("%s: misaligned PA(%p) to VA(%p)", __func__, pa, va);
err = -RT_EINVAL;
}
else
{
struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
.limit_range_size = rt_kernel_space.size,
.limit_start = rt_kernel_space.start,
.prefer = va,
.map_size = RT_ALIGN(length + lo_off, ARCH_PAGE_SIZE)};
err = rt_aspace_map_phy(&rt_kernel_space, &hint, attr, MM_PA_TO_OFF(pa), &va);
if (err)
{
LOG_W("%s: map %p to %p (%p bytes) failed(err %d)", __func__, pa, va, length, err);
}
}
return err;
}
void *rt_kmem_v2p(void *vaddr)
{
return rt_hw_mmu_v2p(&rt_kernel_space, vaddr);
}
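
A hedged sketch of the fixed-VA mapping helper above; both addresses are hypothetical, they must respect the page-offset check enforced by rt_kmem_map_phy(), and MMU_MAP_K_RWCB is assumed to come from the architecture's mmu.h.

static void demo_kmem_map(void)
{
    void *va = (void *)0xF0000000UL;   /* hypothetical kernel VA */
    void *pa = (void *)0x80000000UL;   /* hypothetical physical base */

    if (rt_kmem_map_phy(va, pa, 2 * ARCH_PAGE_SIZE, MMU_MAP_K_RWCB) == RT_EOK)
    {
        /* the mapping is now queryable through the kernel page table */
        rt_kprintf("mapped: %p -> %p\n", va, rt_kmem_v2p(va));
    }
}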

109
components/mm/mm_object.c Normal file

@@ -0,0 +1,109 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-30 WangXiaoyao the first version
*/
#include <rtthread.h>
#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_page.h"
#include <mmu.h>
#define DBG_TAG "mm.object"
#define DBG_LVL DBG_INFO
#include "rtdbg.h"
static const char *get_name(rt_varea_t varea)
{
return "dummy-mapper";
}
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
{
rt_page_t page = rt_page_addr2page(page_addr);
if (varea->frames == NULL)
{
varea->frames = page;
page->next = NULL;
}
else
{
varea->frames->pre = page;
page->next = varea->frames;
varea->frames = page;
}
}
void rt_varea_pgmgr_pop_all(rt_varea_t varea)
{
rt_page_t page = varea->frames;
while (page)
{
rt_page_t next = page->next;
void *pg_va = rt_page_page2addr(page);
rt_pages_free(pg_va, 0);
page = next;
}
}
void rt_varea_pgmgr_pop(rt_varea_t varea, void *vaddr, rt_size_t size)
{
void *vend = (char *)vaddr + size;
while (vaddr != vend)
{
rt_page_t page = rt_page_addr2page(vaddr);
page->pre->next = page->next;
page->next->pre = page->pre;
rt_pages_free(vaddr, 0);
vaddr = (char *)vaddr + ARCH_PAGE_SIZE;
}
}
static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
void *page;
page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
if (!page)
{
LOG_W("%s: page alloc failed", __func__);
return;
}
msg->response.status = MM_FAULT_STATUS_OK;
msg->response.size = ARCH_PAGE_SIZE;
msg->response.vaddr = page;
rt_varea_pgmgr_insert(varea, page);
}
static void on_varea_open(struct rt_varea *varea)
{
varea->data = NULL;
}
static void on_varea_close(struct rt_varea *varea)
{
}
static void on_page_offload(rt_varea_t varea, void *vaddr, rt_size_t size)
{
rt_varea_pgmgr_pop(varea, vaddr, size);
}
struct rt_mem_obj rt_mm_dummy_mapper = {
.get_name = get_name,
.on_page_fault = on_page_fault,
.hint_free = NULL,
.on_varea_open = on_varea_open,
.on_varea_close = on_varea_close,
.on_page_offload = on_page_offload,
};
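
A hedged sketch (names hypothetical) of a custom rt_mem_obj built on the same protocol as rt_mm_dummy_mapper above: every fault inside the varea is served from one shared page frame.

static void *shared_frame;   /* assumed to be a page obtained elsewhere via
                              * rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE) */

static const char *shared_get_name(rt_varea_t varea)
{
    return "shared-frame";
}

static void shared_on_page_fault(struct rt_varea *varea,
                                 struct rt_aspace_fault_msg *msg)
{
    /* fast path: hand back a kernel-space page frame for the faulting VA */
    msg->response.status = MM_FAULT_STATUS_OK;
    msg->response.vaddr  = shared_frame;
    msg->response.size   = ARCH_PAGE_SIZE;
}

struct rt_mem_obj shared_mapper = {
    .get_name      = shared_get_name,
    .on_page_fault = shared_on_page_fault,
};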

857
components/mm/mm_page.c Normal file

@@ -0,0 +1,857 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-11-01 Jesven The first version
* 2022-12-13 WangXiaoyao Hot-pluggable, extensible
* page management algorithm
* 2023-02-20 WangXiaoyao Multi-list page-management
*/
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "mm_fault.h"
#include "mm_private.h"
#include "mm_aspace.h"
#include "mm_flag.h"
#include "mm_page.h"
#include <mmu.h>
#define DBG_TAG "mm.page"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
RT_CTASSERT(order_huge_pg, RT_PAGE_MAX_ORDER > ARCH_PAGE_SHIFT - 2);
RT_CTASSERT(size_width, sizeof(rt_size_t) == sizeof(void *));
#ifdef RT_USING_SMART
#include "lwp_arch_comm.h"
#endif /* RT_USING_SMART */
static rt_size_t init_mpr_align_start;
static rt_size_t init_mpr_align_end;
static void *init_mpr_cont_start;
static struct rt_varea mpr_varea;
static struct rt_page *page_list_low[RT_PAGE_MAX_ORDER];
static struct rt_page *page_list_high[RT_PAGE_MAX_ORDER];
#define page_start ((rt_page_t)rt_mpr_start)
static rt_size_t page_nr;
static rt_size_t early_offset;
static const char *get_name(rt_varea_t varea)
{
return "master-page-record";
}
static void hint_free(rt_mm_va_hint_t hint)
{
hint->flags = MMF_MAP_FIXED;
hint->limit_start = rt_kernel_space.start;
hint->limit_range_size = rt_kernel_space.size;
hint->prefer = rt_mpr_start;
}
static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
char *init_start = (void *)init_mpr_align_start;
char *init_end = (void *)init_mpr_align_end;
if ((char *)msg->fault_vaddr < init_end && (char *)msg->fault_vaddr >= init_start)
{
rt_size_t offset = (char *)msg->fault_vaddr - init_start;
msg->response.status = MM_FAULT_STATUS_OK;
msg->response.vaddr = (char *)init_mpr_cont_start + offset;
msg->response.size = ARCH_PAGE_SIZE;
}
else
{
rt_mm_dummy_mapper.on_page_fault(varea, msg);
}
}
static struct rt_mem_obj mm_page_mapper = {
.get_name = get_name,
.on_page_fault = on_page_fault,
.hint_free = hint_free,
};
#ifdef RT_DEBUG_PAGE_LEAK
static volatile int enable;
static rt_page_t _trace_head;
#define TRACE_ALLOC(pg, size) _trace_alloc(pg, __builtin_return_address(0), size)
#define TRACE_FREE(pgaddr, size) _trace_free(pgaddr, __builtin_return_address(0), size)
void rt_page_leak_trace_start()
{
// TODO multicore safety
_trace_head = NULL;
enable = 1;
}
MSH_CMD_EXPORT(rt_page_leak_trace_start, start page leak tracer);
static void _collect()
{
rt_page_t page = _trace_head;
if (!page)
{
rt_kputs("ok!\n");
}
while (page)
{
rt_page_t next = page->next;
void *pg_va = rt_page_page2addr(page);
LOG_W("LEAK: %p, allocator: %p, size bits: %lx", pg_va, page->caller, page->trace_size);
rt_pages_free(pg_va, page->trace_size);
page = next;
}
}
void rt_page_leak_trace_stop()
{
// TODO multicore safety
enable = 0;
_collect();
}
MSH_CMD_EXPORT(rt_page_leak_trace_stop, stop page leak tracer);
static void _trace_alloc(rt_page_t page, void *caller, size_t size_bits)
{
if (enable)
{
page->caller = caller;
page->trace_size = size_bits;
page->tl_prev = NULL;
page->tl_next = NULL;
if (_trace_head == NULL)
{
_trace_head = page;
}
else
{
_trace_head->tl_prev = page;
page->tl_next = _trace_head;
_trace_head = page;
}
}
}
void _report(rt_page_t page, size_t size_bits, char *msg)
{
void *pg_va = rt_page_page2addr(page);
LOG_W("%s: %p, allocator: %p, size bits: %lx", msg, pg_va, page->caller, page->trace_size);
rt_kputs("backtrace\n");
rt_hw_backtrace(0, 0);
}
static void _trace_free(rt_page_t page, void *caller, size_t size_bits)
{
if (enable)
{
/* free after free */
if (page->trace_size == 0xabadcafe)
{
_report("free after free")
return ;
}
else if (page->trace_size != size_bits)
{
rt_kprintf("free with size bits %lx\n", size_bits);
_report("incompatible size bits parameter");
return ;
}
if (page->ref_cnt == 1)
{
if (page->tl_prev)
page->tl_prev->tl_next = page->tl_next;
if (page->tl_next)
page->tl_next->tl_prev = page->tl_prev;
if (page == _trace_head)
                _trace_head = page->tl_next;
page->tl_prev = NULL;
page->tl_next = NULL;
page->trace_size = 0xabadcafe;
}
}
}
#else
#define TRACE_ALLOC(x, y)
#define TRACE_FREE(x, y)
#endif
static inline void *page_to_addr(rt_page_t page)
{
return (void *)(((page - page_start) << ARCH_PAGE_SHIFT) - PV_OFFSET);
}
static inline rt_page_t addr_to_page(rt_page_t pg_start, void *addr)
{
addr = (char *)addr + PV_OFFSET;
return &pg_start[((rt_ubase_t)addr >> ARCH_PAGE_SHIFT)];
}
#define FLOOR(val, align) (((rt_size_t)(val) + (align)-1) & ~((align)-1))
const rt_size_t shadow_mask =
((1ul << (RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1)) - 1);
const rt_size_t rt_mpr_size = FLOOR(
((1ul << (ARCH_VADDR_WIDTH - ARCH_PAGE_SHIFT))) * sizeof(struct rt_page),
ARCH_PAGE_SIZE);
void *rt_mpr_start;
rt_weak int rt_hw_clz(unsigned long n)
{
return __builtin_clzl(n);
}
rt_weak int rt_hw_ctz(unsigned long n)
{
return __builtin_ctzl(n);
}
rt_size_t rt_page_bits(rt_size_t size)
{
int bit = sizeof(rt_size_t) * 8 - rt_hw_clz(size) - 1;
if ((size ^ (1UL << bit)) != 0)
{
bit++;
}
bit -= ARCH_PAGE_SHIFT;
if (bit < 0)
{
bit = 0;
}
return bit;
}
struct rt_page *rt_page_addr2page(void *addr)
{
return addr_to_page(page_start, addr);
}
void *rt_page_page2addr(struct rt_page *p)
{
return page_to_addr(p);
}
static inline struct rt_page *_buddy_get(struct rt_page *p,
rt_uint32_t size_bits)
{
rt_size_t addr;
addr = (rt_size_t)rt_page_page2addr(p);
addr ^= (1UL << (size_bits + ARCH_PAGE_SHIFT));
return rt_page_addr2page((void *)addr);
}
static void _page_remove(rt_page_t page_list[], struct rt_page *p, rt_uint32_t size_bits)
{
if (p->pre)
{
p->pre->next = p->next;
}
else
{
page_list[size_bits] = p->next;
}
if (p->next)
{
p->next->pre = p->pre;
}
p->size_bits = ARCH_ADDRESS_WIDTH_BITS;
}
static void _page_insert(rt_page_t page_list[], struct rt_page *p, rt_uint32_t size_bits)
{
p->next = page_list[size_bits];
if (p->next)
{
p->next->pre = p;
}
p->pre = 0;
page_list[size_bits] = p;
p->size_bits = size_bits;
}
static void _pages_ref_inc(struct rt_page *p, rt_uint32_t size_bits)
{
struct rt_page *page_head;
int idx;
/* find page group head */
idx = p - page_start;
idx = idx & ~((1UL << size_bits) - 1);
page_head = page_start + idx;
page_head = (void *)((char *)page_head + early_offset);
page_head->ref_cnt++;
}
static int _pages_ref_get(struct rt_page *p, rt_uint32_t size_bits)
{
struct rt_page *page_head;
int idx;
/* find page group head */
idx = p - page_start;
idx = idx & ~((1UL << size_bits) - 1);
page_head = page_start + idx;
return page_head->ref_cnt;
}
static int _pages_free(rt_page_t page_list[], struct rt_page *p, rt_uint32_t size_bits)
{
rt_uint32_t level = size_bits;
struct rt_page *buddy;
RT_ASSERT(p >= page_start);
RT_ASSERT((char *)p < (char *)rt_mpr_start + rt_mpr_size);
RT_ASSERT(rt_kmem_v2p(p));
RT_ASSERT(p->ref_cnt > 0);
RT_ASSERT(p->size_bits == ARCH_ADDRESS_WIDTH_BITS);
RT_ASSERT(size_bits < RT_PAGE_MAX_ORDER);
p->ref_cnt--;
if (p->ref_cnt != 0)
{
return 0;
}
while (level < RT_PAGE_MAX_ORDER - 1)
{
buddy = _buddy_get(p, level);
if (buddy && buddy->size_bits == level)
{
_page_remove(page_list, buddy, level);
p = (p < buddy) ? p : buddy;
level++;
}
else
{
break;
}
}
_page_insert(page_list, p, level);
return 1;
}
static struct rt_page *_pages_alloc(rt_page_t page_list[], rt_uint32_t size_bits)
{
struct rt_page *p;
if (page_list[size_bits])
{
p = page_list[size_bits];
_page_remove(page_list, p, size_bits);
}
else
{
rt_uint32_t level;
for (level = size_bits + 1; level < RT_PAGE_MAX_ORDER; level++)
{
if (page_list[level])
{
break;
}
}
if (level == RT_PAGE_MAX_ORDER)
{
return 0;
}
p = page_list[level];
_page_remove(page_list, p, level);
while (level > size_bits)
{
_page_insert(page_list, p, level - 1);
p = _buddy_get(p, level - 1);
level--;
}
}
p->size_bits = ARCH_ADDRESS_WIDTH_BITS;
p->ref_cnt = 1;
return p;
}
static void _early_page_remove(rt_page_t page_list[], rt_page_t page, rt_uint32_t size_bits)
{
rt_page_t page_cont = (rt_page_t)((char *)page + early_offset);
if (page_cont->pre)
{
rt_page_t pre_cont = (rt_page_t)((char *)page_cont->pre + early_offset);
pre_cont->next = page_cont->next;
}
else
{
page_list[size_bits] = page_cont->next;
}
if (page_cont->next)
{
rt_page_t next_cont = (rt_page_t)((char *)page_cont->next + early_offset);
next_cont->pre = page_cont->pre;
}
page_cont->size_bits = ARCH_ADDRESS_WIDTH_BITS;
}
static void _early_page_insert(rt_page_t page_list[], rt_page_t page, int size_bits)
{
RT_ASSERT((void *)page >= rt_mpr_start &&
((char *)page - (char *)rt_mpr_start) < rt_mpr_size);
rt_page_t page_cont = (rt_page_t)((char *)page + early_offset);
page_cont->next = page_list[size_bits];
if (page_cont->next)
{
rt_page_t next_cont = (rt_page_t)((char *)page_cont->next + early_offset);
next_cont->pre = page;
}
page_cont->pre = 0;
page_list[size_bits] = page;
page_cont->size_bits = size_bits;
}
static struct rt_page *_early_pages_alloc(rt_page_t page_list[], rt_uint32_t size_bits)
{
struct rt_page *p;
if (page_list[size_bits])
{
p = page_list[size_bits];
_early_page_remove(page_list, p, size_bits);
}
else
{
rt_uint32_t level;
for (level = size_bits + 1; level < RT_PAGE_MAX_ORDER; level++)
{
if (page_list[level])
{
break;
}
}
if (level == RT_PAGE_MAX_ORDER)
{
return 0;
}
p = page_list[level];
_early_page_remove(page_list, p, level);
while (level > size_bits)
{
_early_page_insert(page_list, p, level - 1);
p = _buddy_get(p, level - 1);
level--;
}
}
rt_page_t page_cont = (rt_page_t)((char *)p + early_offset);
page_cont->size_bits = ARCH_ADDRESS_WIDTH_BITS;
page_cont->ref_cnt = 1;
return p;
}
static rt_page_t *_get_page_list(void *vaddr)
{
rt_ubase_t pa_int = (rt_ubase_t)vaddr + PV_OFFSET;
rt_page_t *list;
if (pa_int > UINT32_MAX)
{
list = page_list_high;
}
else
{
list = page_list_low;
}
return list;
}
int rt_page_ref_get(void *addr, rt_uint32_t size_bits)
{
struct rt_page *p;
rt_base_t level;
int ref;
p = rt_page_addr2page(addr);
level = rt_hw_interrupt_disable();
ref = _pages_ref_get(p, size_bits);
rt_hw_interrupt_enable(level);
return ref;
}
void rt_page_ref_inc(void *addr, rt_uint32_t size_bits)
{
struct rt_page *p;
rt_base_t level;
p = rt_page_addr2page(addr);
level = rt_hw_interrupt_disable();
_pages_ref_inc(p, size_bits);
rt_hw_interrupt_enable(level);
}
static rt_page_t (*pages_alloc_handler)(rt_page_t page_list[], rt_uint32_t size_bits);
/* if no high-memory pages were configured, skip searching page_list_high */
static size_t _high_page_configured = 0;
static rt_page_t *_flag_to_page_list(size_t flags)
{
rt_page_t *page_list;
if (_high_page_configured && (flags & PAGE_ANY_AVAILABLE))
{
page_list = page_list_high;
}
else
{
page_list = page_list_low;
}
return page_list;
}
static void *_do_pages_alloc(rt_uint32_t size_bits, size_t flags)
{
void *alloc_buf = RT_NULL;
struct rt_page *p;
rt_base_t level;
rt_page_t *page_list = _flag_to_page_list(flags);
level = rt_hw_interrupt_disable();
p = pages_alloc_handler(page_list, size_bits);
rt_hw_interrupt_enable(level);
if (!p && page_list != page_list_low)
{
/* fall back */
page_list = page_list_low;
level = rt_hw_interrupt_disable();
p = pages_alloc_handler(page_list, size_bits);
rt_hw_interrupt_enable(level);
}
if (p)
{
alloc_buf = page_to_addr(p);
#ifdef RT_DEBUG_PAGE_LEAK
level = rt_hw_interrupt_disable();
TRACE_ALLOC(p, size_bits);
rt_hw_interrupt_enable(level);
#endif
}
return alloc_buf;
}
void *rt_pages_alloc(rt_uint32_t size_bits)
{
return _do_pages_alloc(size_bits, 0);
}
void *rt_pages_alloc_ext(rt_uint32_t size_bits, size_t flags)
{
return _do_pages_alloc(size_bits, flags);
}
int rt_pages_free(void *addr, rt_uint32_t size_bits)
{
struct rt_page *p;
rt_page_t *page_list = _get_page_list(addr);
int real_free = 0;
p = rt_page_addr2page(addr);
if (p)
{
rt_base_t level;
level = rt_hw_interrupt_disable();
real_free = _pages_free(page_list, p, size_bits);
if (real_free)
TRACE_FREE(p, size_bits);
rt_hw_interrupt_enable(level);
}
return real_free;
}
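/*
 * Sketch, not part of this commit: the typical alloc/free pairing for the
 * buddy allocator above.  size_bits selects a naturally aligned block of
 * 2^size_bits pages; PAGE_ANY_AVAILABLE allows falling back across zones.
 */
static void demo_pages_usage(void)
{
    void *buf = rt_pages_alloc_ext(2, PAGE_ANY_AVAILABLE); /* 4 pages */
    if (buf)
    {
        rt_memset(buf, 0, 4 * ARCH_PAGE_SIZE);
        rt_pages_free(buf, 2);    /* must pass the same size_bits back */
    }
}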
void rt_page_list(void) __attribute__((alias("list_page")));
#warning TODO: improve list page
void list_page(void)
{
int i;
rt_size_t total = 0;
rt_base_t level;
level = rt_hw_interrupt_disable();
for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
{
struct rt_page *p = page_list_low[i];
rt_kprintf("level %d ", i);
while (p)
{
total += (1UL << i);
rt_kprintf("[0x%08p]", rt_page_page2addr(p));
p = p->next;
}
rt_kprintf("\n");
}
for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
{
struct rt_page *p = page_list_high[i];
rt_kprintf("level %d ", i);
while (p)
{
total += (1UL << i);
rt_kprintf("[0x%08p]", rt_page_page2addr(p));
p = p->next;
}
rt_kprintf("\n");
}
rt_hw_interrupt_enable(level);
rt_kprintf("free pages is 0x%08lx (%ld KB)\n", total, total * ARCH_PAGE_SIZE / 1024);
rt_kprintf("-------------------------------\n");
}
MSH_CMD_EXPORT(list_page, show page info);
void rt_page_get_info(rt_size_t *total_nr, rt_size_t *free_nr)
{
int i;
rt_size_t total_free = 0;
rt_base_t level;
level = rt_hw_interrupt_disable();
for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
{
struct rt_page *p = page_list_low[i];
while (p)
{
total_free += (1UL << i);
p = p->next;
}
}
for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
{
struct rt_page *p = page_list_high[i];
while (p)
{
total_free += (1UL << i);
p = p->next;
}
}
rt_hw_interrupt_enable(level);
*total_nr = page_nr;
*free_nr = total_free;
}
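/*
* Descriptive note on _install_page(): the region is first widened to its
* enclosing shadow block (shadow_mask alignment), and the descriptors that
* fall inside the shadow range but outside the real region are marked illegal
* with ARCH_ADDRESS_WIDTH_BITS. The remaining pages are then inserted
* greedily: each step takes the largest block that is both aligned at
* region.start (align_bits) and no larger than what is left (size_bits),
* capped at max_order. For example, with ARCH_PAGE_SHIFT = 12 and a
* sufficiently large max_order, installing [0x203000, 0x400000) inserts a
* 4 KiB block at 0x203000, 16 KiB at 0x204000, 32 KiB at 0x208000, and so on
* until region.start reaches region.end.
*/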
static void _install_page(rt_page_t mpr_head, rt_region_t region, void *insert_handler)
{
void (*insert)(rt_page_t *page_list, rt_page_t page, int size_bits) = insert_handler;
rt_region_t shadow;
shadow.start = region.start & ~shadow_mask;
shadow.end = FLOOR(region.end, shadow_mask + 1);
if (shadow.end + PV_OFFSET > UINT32_MAX)
_high_page_configured = 1;
rt_page_t shad_head = addr_to_page(mpr_head, (void *)shadow.start);
rt_page_t shad_tail = addr_to_page(mpr_head, (void *)shadow.end);
rt_page_t head = addr_to_page(mpr_head, (void *)region.start);
rt_page_t tail = addr_to_page(mpr_head, (void *)region.end);
/* mark shadow pages as illegal */
for (rt_page_t iter = shad_head; iter < head; iter++)
{
iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
}
for (rt_page_t iter = tail; iter < shad_tail; iter++)
{
iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
}
/* insert the region's free pages into the buddy lists */
const int max_order = RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1;
while (region.start != region.end)
{
struct rt_page *p;
int align_bits;
int size_bits;
size_bits =
ARCH_ADDRESS_WIDTH_BITS - 1 - rt_hw_clz(region.end - region.start);
align_bits = rt_hw_ctz(region.start);
if (align_bits < size_bits)
{
size_bits = align_bits;
}
if (size_bits > max_order)
{
size_bits = max_order;
}
p = addr_to_page(mpr_head, (void *)region.start);
p->size_bits = ARCH_ADDRESS_WIDTH_BITS;
p->ref_cnt = 0;
/* insert to list */
rt_page_t *page_list = _get_page_list((void *)region.start);
insert(page_list, (rt_page_t)((char *)p - early_offset), size_bits - ARCH_PAGE_SHIFT);
region.start += (1UL << size_bits);
}
}
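/*
* Descriptive note on rt_page_init(): the bootstrap sequence is
*   1. page-align the region and compute page_nr;
*   2. map the MPR (the array of page descriptors) into the kernel address
*      space;
*   3. borrow the first pages of the managed region itself to hold the page
*      descriptors, recording the displacement in early_offset;
*   4. mark the region's descriptors as not-free, advance reg.start past the
*      borrowed area, install the remaining pages with _early_page_insert, and
*      switch pages_alloc_handler to _early_pages_alloc;
*   5. back the MPR area used so far with page frames and, if the kernel page
*      table is already the active one, call rt_page_cleanup() to switch over
*      to the normal allocator.
*/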
void rt_page_init(rt_region_t reg)
{
int i;
rt_region_t shadow;
/* inclusive start, exclusive end */
reg.start += ARCH_PAGE_MASK;
reg.start &= ~ARCH_PAGE_MASK;
reg.end &= ~ARCH_PAGE_MASK;
if (reg.end <= reg.start)
{
LOG_E("region end(%p) must greater than start(%p)", reg.start, reg.end);
RT_ASSERT(0);
}
page_nr = ((reg.end - reg.start) >> ARCH_PAGE_SHIFT);
shadow.start = reg.start & ~shadow_mask;
shadow.end = FLOOR(reg.end, shadow_mask + 1);
LOG_D("[Init page] start: 0x%lx, end: 0x%lx, total: 0x%lx", reg.start,
reg.end, page_nr);
int err;
/* init free list */
for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
{
page_list_low[i] = 0;
page_list_high[i] = 0;
}
/* map MPR area */
err = rt_aspace_map_static(&rt_kernel_space, &mpr_varea, &rt_mpr_start,
rt_mpr_size, MMU_MAP_K_RWCB, MMF_MAP_FIXED,
&mm_page_mapper, 0);
if (err != RT_EOK)
{
LOG_E("MPR map failed with size %lx at %p", rt_mpr_size, rt_mpr_start);
RT_ASSERT(0);
}
/* calculate footprint */
init_mpr_align_start =
(rt_size_t)addr_to_page(page_start, (void *)shadow.start) &
~ARCH_PAGE_MASK;
init_mpr_align_end =
FLOOR(addr_to_page(page_start, (void *)shadow.end), ARCH_PAGE_SIZE);
rt_size_t init_mpr_size = init_mpr_align_end - init_mpr_align_start;
rt_size_t init_mpr_npage = init_mpr_size >> ARCH_PAGE_SHIFT;
init_mpr_cont_start = (void *)reg.start;
rt_size_t init_mpr_cont_end = (rt_size_t)init_mpr_cont_start + init_mpr_size;
early_offset = (rt_size_t)init_mpr_cont_start - init_mpr_align_start;
rt_page_t mpr_cont = (void *)((char *)rt_mpr_start + early_offset);
/* mark init mpr pages as illegal */
rt_page_t head_cont = addr_to_page(mpr_cont, (void *)reg.start);
rt_page_t tail_cont = addr_to_page(mpr_cont, (void *)reg.end);
for (rt_page_t iter = head_cont; iter < tail_cont; iter++)
{
iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
}
reg.start = init_mpr_cont_end;
_install_page(mpr_cont, reg, _early_page_insert);
pages_alloc_handler = _early_pages_alloc;
/* do the page table business: back the MPR area used so far with page frames */
if (rt_aspace_load_page(&rt_kernel_space, (void *)init_mpr_align_start, init_mpr_npage))
{
LOG_E("%s: failed to load pages", __func__);
RT_ASSERT(0);
}
if (rt_hw_mmu_tbl_get() == rt_kernel_space.page_table)
rt_page_cleanup();
}
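/*
* Illustrative call (a sketch; the symbol names are assumptions, not part of
* this file): platform start-up code hands the managed RAM range, expressed
* as kernel virtual addresses, to the allocator once the kernel address space
* exists.
*
*     rt_region_t init_page_region = {
*         (rt_size_t)RAM_PAGE_BEGIN,   // hypothetical board-level symbols
*         (rt_size_t)RAM_PAGE_END,
*     };
*     rt_page_init(init_page_region);
*/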
static int _load_mpr_area(void *head, void *tail)
{
int err = 0;
char *iter = (char *)((rt_ubase_t)head & ~ARCH_PAGE_MASK);
tail = (void *)FLOOR(tail, ARCH_PAGE_SIZE);
while (iter != tail)
{
void *paddr = rt_kmem_v2p(iter);
if (paddr == ARCH_MAP_FAILED)
{
err = rt_aspace_load_page(&rt_kernel_space, iter, 1);
if (err != RT_EOK)
{
LOG_E("%s: failed to load page", __func__);
break;
}
}
iter += ARCH_PAGE_SIZE;
}
return err;
}
int rt_page_install(rt_region_t region)
{
int err = -RT_EINVAL;
if (region.end != region.start && !(region.start & ARCH_PAGE_MASK) &&
!(region.end & ARCH_PAGE_MASK) &&
!((region.end - region.start) & shadow_mask))
{
void *head = addr_to_page(page_start, (void *)region.start);
void *tail = addr_to_page(page_start, (void *)region.end);
page_nr += ((region.end - region.start) >> ARCH_PAGE_SHIFT);
err = _load_mpr_area(head, tail);
if (err == RT_EOK)
{
_install_page(rt_mpr_start, region, _page_insert);
}
}
return err;
}
void rt_page_cleanup(void)
{
early_offset = 0;
pages_alloc_handler = _pages_alloc;
}

112
components/mm/mm_page.h Normal file
View file

@ -0,0 +1,112 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-11-01 Jesven The first version
* 2022-12-13 WangXiaoyao Hot-pluggable, extensible
* page management algorithm
*/
#ifndef __MM_PAGE_H__
#define __MM_PAGE_H__
#include <rthw.h>
#include <rtthread.h>
#include <stdint.h>
#define GET_FLOOR(type) \
(1ul << (8 * sizeof(rt_size_t) - __builtin_clzl(2 * sizeof(type) - 1) - 1))
#define DEF_PAGE_T(fields) \
typedef struct rt_page {\
union {struct {fields}; char _padding[GET_FLOOR(struct {fields})];};\
} *rt_page_t
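/*
* Note: despite its name, GET_FLOOR() rounds the size of the field struct up
* to the next power of two, and DEF_PAGE_T() pads struct rt_page to that size,
* so every descriptor occupies a power-of-two number of bytes in the MPR. For
* example, with two pointers and two rt_uint32_t fields on a 64-bit target
* (and no debug fields), the raw size is 24 bytes and each descriptor is
* padded to 32 bytes.
*/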
/**
* @brief PAGE ALLOC FLAGS
*
* @info PAGE_ANY_AVAILABLE
* Page allocation defaults to the lower region; this behavior can be changed
* by setting PAGE_ANY_AVAILABLE.
*/
#define PAGE_ANY_AVAILABLE 0x1ul
#ifdef RT_DEBUG_PAGE_LEAK
#define DEBUG_FIELD { \
/* trace list */ \
struct rt_page *tl_next; \
struct rt_page *tl_prev; \
void *caller; \
size_t trace_size; \
}
#else
#define DEBUG_FIELD
#endif
DEF_PAGE_T(
struct rt_page *next; /* same level next */
struct rt_page *pre; /* same level pre */
DEBUG_FIELD
rt_uint32_t size_bits; /* if is ARCH_ADDRESS_WIDTH_BITS, means not free */
rt_uint32_t ref_cnt; /* page group ref count */
);
#undef GET_FLOOR
#undef DEF_PAGE_T
typedef struct tag_region
{
rt_size_t start;
rt_size_t end;
} rt_region_t;
extern const rt_size_t rt_mpr_size;
extern void *rt_mpr_start;
void rt_page_init(rt_region_t reg);
void rt_page_cleanup(void);
void *rt_pages_alloc(rt_uint32_t size_bits);
void *rt_pages_alloc_ext(rt_uint32_t size_bits, size_t flags);
void rt_page_ref_inc(void *addr, rt_uint32_t size_bits);
int rt_page_ref_get(void *addr, rt_uint32_t size_bits);
int rt_pages_free(void *addr, rt_uint32_t size_bits);
void rt_page_list(void);
rt_size_t rt_page_bits(rt_size_t size);
void rt_page_get_info(rt_size_t *total_nr, rt_size_t *free_nr);
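/*
* Usage sketch (illustrative): query the page pool statistics, e.g. from a
* monitoring thread.
*
*     rt_size_t total, free;
*     rt_page_get_info(&total, &free);
*     rt_kprintf("page: %ld/%ld free\n", free, total);
*/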
void *rt_page_page2addr(struct rt_page *p);
struct rt_page *rt_page_addr2page(void *addr);
/**
* @brief Install page frames at run-time
* Region size must be aligned to 2^(RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1)
* bytes currently (typically 2 MB).
*
* !WARNING this API does NOT check whether the region is valid or already
* installed in the free lists
*
* @param region region.start is the first page frame (inclusive),
*               region.end is the first page frame after the free region (exclusive)
* @return int 0 on success
*/
int rt_page_install(rt_region_t region);
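/*
* Usage sketch (illustrative; the symbols describing the hot-plugged RAM are
* assumptions for the example only):
*
*     rt_region_t hot = {
*         (rt_size_t)new_ram_vstart,                   // hypothetical, page aligned
*         (rt_size_t)new_ram_vstart + new_ram_size,    // size meets the required alignment
*     };
*     if (rt_page_install(hot) != 0)
*         rt_kprintf("page install failed\n");
*/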
void rt_page_leak_trace_start(void);
void rt_page_leak_trace_stop(void);
#endif /* __MM_PAGE_H__ */

99
components/mm/mm_private.h Normal file
View file

@ -0,0 +1,99 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-14 WangXiaoyao the first version
*/
#ifndef __MM_PRIVATE_H__
#define __MM_PRIVATE_H__
#include "mm_aspace.h"
#include <rtdef.h>
#include <stddef.h>
/**
* @brief DATA STRUCTURE & API USED INTERNALLY
*
* This is mainly a wrapper layer over the actual data structure.
* In this way, we can switch to any BST we like by adding new
* wrapper code.
* Every BST must implement this API to support MM.
*
* *INFO: varea range convention
* For the API, a range is specified by a base address and its length.
* This provides a clear interface without ambiguity.
* For the implementation, a range is specified by a [start, end] tuple
* where both start and end are inclusive.
*/
struct _mm_range
{
void *start;
void *end;
};
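/*
* Example of the convention above (illustrative): a single 4 KiB varea based
* at 0x1000 is described by the API as base 0x1000 with length 0x1000, and
* stored internally as the inclusive range
* { .start = (void *)0x1000, .end = (void *)0x1FFF }.
*/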
/**
* @brief Initialize the varea BST of an address space
*
* @param aspace the address space to initialize
* @return rt_err_t RT_EOK on success
*/
rt_err_t _aspace_bst_init(struct rt_aspace *aspace);
/**
* @brief Retrieve any varea whose range [varea->start, varea->end] contains start
*
* @param aspace
* @param start
* @return struct rt_varea*
*/
struct rt_varea *_aspace_bst_search(struct rt_aspace *aspace, void *start);
/**
* @brief Retrieve the lowest varea satisfying (varea->start >= start)
*
* @param aspace
* @param start
* @return struct rt_varea*
*/
struct rt_varea *_aspace_bst_search_exceed(struct rt_aspace *aspace,
void *start);
/**
* @brief Retrieve any varea that overlaps the specified address range
*
* @param aspace
* @param range
* @return struct rt_varea*
*/
struct rt_varea *_aspace_bst_search_overlap(struct rt_aspace *aspace,
struct _mm_range range);
/**
* @brief Insert a varea into the bst
*
* @param aspace
* @param varea
*/
void _aspace_bst_insert(struct rt_aspace *aspace, struct rt_varea *varea);
/**
* @brief Remove a varea from the bst
*
* @param aspace
* @param varea
*/
void _aspace_bst_remove(struct rt_aspace *aspace, struct rt_varea *varea);
void rt_varea_pgmgr_pop(rt_varea_t varea, void *vaddr, rt_size_t size);
void rt_varea_pgmgr_pop_all(rt_varea_t varea);
int _varea_map_with_msg(rt_varea_t varea, struct rt_aspace_fault_msg *msg);
#endif /* __MM_PRIVATE_H__ */