import RT-Thread@9217865c without bsp, libcpu and components/net

This commit is contained in:
Zihao Yu 2023-05-20 16:23:33 +08:00
commit e2376a3709
1414 changed files with 390370 additions and 0 deletions

View file

@ -0,0 +1,13 @@
# SCons build script: collects the IPC device-driver sources into a build group.
from building import *

cwd = GetCurrentDir()
# pick up every C source file in this directory
src = Glob('*.c')
CPPPATH = [cwd + '/../include']

# dataqueue and pipe allocate from the heap; drop them when RT_USING_HEAP is off
if not GetDepend('RT_USING_HEAP'):
    SrcRemove(src, 'dataqueue.c')
    SrcRemove(src, 'pipe.c')

# the whole group is compiled only when RT_USING_DEVICE_IPC is enabled
group = DefineGroup('DeviceDrivers', src, depend = ['RT_USING_DEVICE_IPC'], CPPPATH = CPPPATH)

Return('group')

View file

@ -0,0 +1,161 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-09-30 Bernard first version.
* 2021-08-18 chenyingchun add comments
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#define RT_COMPLETED 1
#define RT_UNCOMPLETED 0
/**
* @brief This function will initialize a completion object.
*
* @param completion is a pointer to a completion object.
*/
void rt_completion_init(struct rt_completion *completion)
{
    rt_base_t irq_state;

    RT_ASSERT(completion != RT_NULL);

    irq_state = rt_hw_interrupt_disable();
    /* start out not-completed, with nobody waiting */
    completion->flag = RT_UNCOMPLETED;
    rt_list_init(&completion->suspended_list);
    rt_hw_interrupt_enable(irq_state);
}
RTM_EXPORT(rt_completion_init);
/**
* @brief This function will wait for a completion, if the completion is unavailable, the thread shall wait for
* the completion up to a specified time.
*
* @param completion is a pointer to a completion object.
*
* @param timeout is a timeout period (unit: OS ticks). If the completion is unavailable, the thread will wait for
* the completion done up to the amount of time specified by the argument.
* NOTE: Generally, we use the macro RT_WAITING_FOREVER to set this parameter, which means that when the
* completion is unavailable, the thread will be waiting forever.
*
* @return Return the operation status. ONLY when the return value is RT_EOK, the operation is successful.
* If the return value is any other values, it means that the completion wait failed.
*
* @warning This function can ONLY be called in the thread context. It MUST NOT be called in interrupt context.
*/
rt_err_t rt_completion_wait(struct rt_completion *completion,
                            rt_int32_t timeout)
{
    rt_err_t result;
    rt_base_t level;
    rt_thread_t thread;

    RT_ASSERT(completion != RT_NULL);

    /* blocking (timeout != 0) is only legal when the scheduler is available */
    RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);

    result = RT_EOK;
    thread = rt_thread_self();

    level = rt_hw_interrupt_disable();
    if (completion->flag != RT_COMPLETED)
    {
        /* only one thread can suspend on complete */
        RT_ASSERT(rt_list_isempty(&(completion->suspended_list)));

        if (timeout == 0)
        {
            /* non-blocking poll: not completed yet, fail immediately */
            result = -RT_ETIMEOUT;
            goto __exit;
        }
        else
        {
            /* reset thread error number */
            thread->error = RT_EOK;

            /* suspend thread */
            rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
            /* add to suspended list */
            rt_list_insert_before(&(completion->suspended_list),
                                  &(thread->tlist));

            /* current context checking */
            RT_DEBUG_NOT_IN_INTERRUPT;

            /* start timer when the wait is bounded */
            if (timeout > 0)
            {
                /* reset the timeout of thread timer and start it */
                rt_timer_control(&(thread->thread_timer),
                                 RT_TIMER_CTRL_SET_TIME,
                                 &timeout);
                rt_timer_start(&(thread->thread_timer));
            }
            /* enable interrupt */
            rt_hw_interrupt_enable(level);

            /* do schedule */
            rt_schedule();

            /* thread is waked up: thread->error is RT_EOK after
             * rt_completion_done(), or -RT_ETIMEOUT if the timer expired */
            result = thread->error;

            level = rt_hw_interrupt_disable();
        }
    }
    /* clear the completed flag so the completion object can be reused
     * (note: this also runs on the timeout path) */
    completion->flag = RT_UNCOMPLETED;

__exit:
    rt_hw_interrupt_enable(level);

    return result;
}
RTM_EXPORT(rt_completion_wait);
/**
* @brief This function indicates a completion has done.
*
* @param completion is a pointer to a completion object.
*/
void rt_completion_done(struct rt_completion *completion)
{
    rt_base_t level;

    RT_ASSERT(completion != RT_NULL);

    level = rt_hw_interrupt_disable();

    /* Test the flag INSIDE the critical section: rt_completion_wait()
     * clears it under interrupt-disable, so an unlocked early-out here
     * could race with a waiter and silently drop a completion signal. */
    if (completion->flag == RT_COMPLETED)
    {
        rt_hw_interrupt_enable(level);
        return;
    }

    completion->flag = RT_COMPLETED;

    if (!rt_list_isempty(&(completion->suspended_list)))
    {
        /* at most one thread can be suspended on a completion */
        struct rt_thread *thread;

        /* get thread entry */
        thread = rt_list_entry(completion->suspended_list.next,
                               struct rt_thread,
                               tlist);

        /* resume it */
        rt_thread_resume(thread);
        rt_hw_interrupt_enable(level);

        /* perform a schedule */
        rt_schedule();
    }
    else
    {
        rt_hw_interrupt_enable(level);
    }
}
RTM_EXPORT(rt_completion_done);

View file

@ -0,0 +1,498 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-09-30 Bernard first version.
* 2016-10-31 armink fix some resume push and pop thread bugs
*/
#include <rtthread.h>
#include <rtdevice.h>
#include <rthw.h>
#define DATAQUEUE_MAGIC 0xbead0e0e
/* One slot in the data queue: the queue stores only this pointer/size pair,
 * not a copy of the data — the pointed-to memory must stay valid until the
 * item is popped (NOTE(review): ownership contract not shown here; confirm
 * with callers). */
struct rt_data_item
{
    const void *data_ptr;   /* pointer to the queued data (not copied) */
    rt_size_t data_size;    /* size in bytes of the data at data_ptr */
};
/**
* @brief This function will initialize the data queue. Calling this function will
* initialize the data queue control block and set the notification callback function.
*
* @param queue is a pointer to the data queue object.
*
* @param size is the maximum number of data in the data queue.
*
* @param lwm is low water mark.
* When the number of data in the data queue is less than this value, this function will
* wake up the thread waiting for write data.
*
* @param evt_notify is the notification callback function.
*
* @return Return the operation status. When the return value is RT_EOK, the initialization is successful.
* When the return value is -RT_ENOMEM, it means insufficient memory allocation failed.
*/
rt_err_t
rt_data_queue_init(struct rt_data_queue *queue,
                   rt_uint16_t size,
                   rt_uint16_t lwm,
                   void (*evt_notify)(struct rt_data_queue *queue, rt_uint32_t event))
{
    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(size > 0);

    /* Allocate the slot array FIRST: the original code stamped the magic
     * before the allocation, so a failed init left a control block that
     * passed every DATAQUEUE_MAGIC assert while queue->queue was NULL,
     * leading to a NULL dereference on first push/pop. */
    queue->queue = (struct rt_data_item *)rt_malloc(sizeof(struct rt_data_item) * size);
    if (queue->queue == RT_NULL)
    {
        return -RT_ENOMEM;
    }

    queue->evt_notify = evt_notify;

    /* mark the control block valid only after the allocation succeeded */
    queue->magic = DATAQUEUE_MAGIC;

    queue->size = size;
    queue->lwm = lwm;

    queue->get_index = 0;
    queue->put_index = 0;
    queue->is_empty = 1;
    queue->is_full = 0;

    rt_list_init(&(queue->suspended_push_list));
    rt_list_init(&(queue->suspended_pop_list));

    return RT_EOK;
}
RTM_EXPORT(rt_data_queue_init);
/**
* @brief This function will write data to the data queue. If the data queue is full,
* the thread will suspend for the specified amount of time.
*
* @param queue is a pointer to the data queue object.
*
* @param data_ptr is the buffer pointer of the data to be written.
*
* @param size is the size in bytes of the data to be written.
*
* @param timeout is the waiting time.
*
* @return Return the operation status. When the return value is RT_EOK, the operation is successful.
* When the return value is -RT_ETIMEOUT, it means the specified time out.
*/
rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
                            const void *data_ptr,
                            rt_size_t data_size,
                            rt_int32_t timeout)
{
    rt_base_t level;
    rt_thread_t thread;
    rt_err_t result;

    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);

    /* blocking (timeout != 0) requires a running scheduler */
    RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);

    result = RT_EOK;
    thread = rt_thread_self();

    level = rt_hw_interrupt_disable();

    /* loop: the queue may be full again by the time this thread is
     * resumed, so the state must be re-checked after every wakeup */
    while (queue->is_full)
    {
        /* queue is full */
        if (timeout == 0)
        {
            result = -RT_ETIMEOUT;

            goto __exit;
        }

        /* reset thread error number */
        thread->error = RT_EOK;

        /* suspend thread on the push list */
        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
        rt_list_insert_before(&(queue->suspended_push_list), &(thread->tlist));
        /* start timer when the wait is bounded */
        if (timeout > 0)
        {
            /* reset the timeout of thread timer and start it */
            rt_timer_control(&(thread->thread_timer),
                             RT_TIMER_CTRL_SET_TIME,
                             &timeout);
            rt_timer_start(&(thread->thread_timer));
        }

        /* enable interrupt */
        rt_hw_interrupt_enable(level);

        /* do schedule */
        rt_schedule();

        /* thread is waked up: RT_EOK on a normal resume,
         * -RT_ETIMEOUT when the thread timer expired */
        result = thread->error;
        level = rt_hw_interrupt_disable();
        if (result != RT_EOK) goto __exit;
    }

    /* store the item and advance the put cursor (ring wraps at size) */
    queue->queue[queue->put_index].data_ptr = data_ptr;
    queue->queue[queue->put_index].data_size = data_size;
    queue->put_index += 1;
    if (queue->put_index == queue->size)
    {
        queue->put_index = 0;
    }
    queue->is_empty = 0;
    if (queue->put_index == queue->get_index)
    {
        queue->is_full = 1;
    }

    /* there is at least one thread in suspended list */
    if (!rt_list_isempty(&(queue->suspended_pop_list)))
    {
        /* get thread entry */
        thread = rt_list_entry(queue->suspended_pop_list.next,
                               struct rt_thread,
                               tlist);

        /* resume it */
        rt_thread_resume(thread);
        rt_hw_interrupt_enable(level);

        /* perform a schedule */
        rt_schedule();

        /* NOTE(review): this early return skips the PUSH event
         * notification below — confirm that is intended */
        return result;
    }

__exit:
    rt_hw_interrupt_enable(level);
    if ((result == RT_EOK) && queue->evt_notify != RT_NULL)
    {
        queue->evt_notify(queue, RT_DATAQUEUE_EVENT_PUSH);
    }

    return result;
}
RTM_EXPORT(rt_data_queue_push);
/**
* @brief This function will pop data from the data queue. If the data queue is empty, the thread
* will suspend for the specified amount of time.
*
* @note When the number of data in the data queue is less than lwm(low water mark), will
* wake up the thread waiting for write data.
*
* @param queue is a pointer to the data queue object.
*
* @param data_ptr is the buffer pointer of the data to be fetched.
*
* @param size is the size in bytes of the data to be fetched.
*
* @param timeout is the waiting time.
*
* @return Return the operation status. When the return value is RT_EOK, the operation is successful.
* When the return value is -RT_ETIMEOUT, it means the specified time out.
*/
rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
                           const void **data_ptr,
                           rt_size_t *size,
                           rt_int32_t timeout)
{
    rt_base_t level;
    rt_thread_t thread;
    rt_err_t result;

    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
    RT_ASSERT(data_ptr != RT_NULL);
    RT_ASSERT(size != RT_NULL);

    /* blocking (timeout != 0) requires a running scheduler */
    RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);

    result = RT_EOK;
    thread = rt_thread_self();

    level = rt_hw_interrupt_disable();

    /* loop: the queue may be empty again by the time this thread is
     * resumed, so the state must be re-checked after every wakeup */
    while (queue->is_empty)
    {
        /* queue is empty */
        if (timeout == 0)
        {
            result = -RT_ETIMEOUT;
            goto __exit;
        }

        /* reset thread error number */
        thread->error = RT_EOK;

        /* suspend thread on the pop list */
        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
        rt_list_insert_before(&(queue->suspended_pop_list), &(thread->tlist));
        /* start timer when the wait is bounded */
        if (timeout > 0)
        {
            /* reset the timeout of thread timer and start it */
            rt_timer_control(&(thread->thread_timer),
                             RT_TIMER_CTRL_SET_TIME,
                             &timeout);
            rt_timer_start(&(thread->thread_timer));
        }

        /* enable interrupt */
        rt_hw_interrupt_enable(level);

        /* do schedule */
        rt_schedule();

        /* thread is waked up: RT_EOK on a normal resume,
         * -RT_ETIMEOUT when the thread timer expired */
        result = thread->error;
        level = rt_hw_interrupt_disable();
        if (result != RT_EOK)
            goto __exit;
    }

    /* fetch the item and advance the get cursor (ring wraps at size) */
    *data_ptr = queue->queue[queue->get_index].data_ptr;
    *size = queue->queue[queue->get_index].data_size;
    queue->get_index += 1;
    if (queue->get_index == queue->size)
    {
        queue->get_index = 0;
    }
    queue->is_full = 0;
    if (queue->put_index == queue->get_index)
    {
        queue->is_empty = 1;
    }

    /* at or below the low-water mark: give a suspended producer a chance */
    if (rt_data_queue_len(queue) <= queue->lwm)
    {
        /* there is at least one thread in suspended list */
        if (!rt_list_isempty(&(queue->suspended_push_list)))
        {
            /* get thread entry */
            thread = rt_list_entry(queue->suspended_push_list.next,
                                   struct rt_thread,
                                   tlist);

            /* resume it */
            rt_thread_resume(thread);
            rt_hw_interrupt_enable(level);

            /* perform a schedule */
            rt_schedule();
        }
        else
        {
            rt_hw_interrupt_enable(level);
        }

        /* NOTE(review): this path fires LWM and returns early, skipping
         * the POP notification below — confirm that is intended */
        if (queue->evt_notify != RT_NULL)
            queue->evt_notify(queue, RT_DATAQUEUE_EVENT_LWM);

        return result;
    }

__exit:
    rt_hw_interrupt_enable(level);
    if ((result == RT_EOK) && (queue->evt_notify != RT_NULL))
    {
        queue->evt_notify(queue, RT_DATAQUEUE_EVENT_POP);
    }

    return result;
}
RTM_EXPORT(rt_data_queue_pop);
/**
* @brief This function will fetch but retaining data in the data queue.
*
* @param queue is a pointer to the data queue object.
*
* @param data_ptr is the buffer pointer of the data to be fetched.
*
* @param size is the size in bytes of the data to be fetched.
*
* @return Return the operation status. When the return value is RT_EOK, the operation is successful.
* When the return value is -RT_EEMPTY, it means the data queue is empty.
*/
rt_err_t rt_data_queue_peek(struct rt_data_queue *queue,
                            const void **data_ptr,
                            rt_size_t *size)
{
    rt_base_t level;
    rt_err_t result = RT_EOK;

    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
    /* same out-pointer contract as rt_data_queue_pop() */
    RT_ASSERT(data_ptr != RT_NULL);
    RT_ASSERT(size != RT_NULL);

    level = rt_hw_interrupt_disable();

    /* Test emptiness INSIDE the critical section: the original checked
     * is_empty before disabling interrupts, so a consumer running in
     * between could drain the queue and the reads below would return a
     * stale slot. */
    if (queue->is_empty)
    {
        result = -RT_EEMPTY;
    }
    else
    {
        *data_ptr = queue->queue[queue->get_index].data_ptr;
        *size = queue->queue[queue->get_index].data_size;
    }

    rt_hw_interrupt_enable(level);

    return result;
}
RTM_EXPORT(rt_data_queue_peek);
/**
* @brief This function will reset the data queue.
*
* @note Calling this function will wake up all threads on the data queue
* that are hanging and waiting.
*
* @param queue is a pointer to the data queue object.
*/
void rt_data_queue_reset(struct rt_data_queue *queue)
{
    rt_base_t level;
    struct rt_thread *thread;

    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);

    /* put the ring back into its initial (empty) state */
    level = rt_hw_interrupt_disable();

    queue->get_index = 0;
    queue->put_index = 0;
    queue->is_empty = 1;
    queue->is_full = 0;

    rt_hw_interrupt_enable(level);

    /* lock the scheduler so resumed threads cannot run until every
     * waiter has been released */
    rt_enter_critical();
    /* wakeup all suspend threads */

    /* resume on pop list */
    while (!rt_list_isempty(&(queue->suspended_pop_list)))
    {
        /* disable interrupt */
        level = rt_hw_interrupt_disable();

        /* get next suspend thread */
        thread = rt_list_entry(queue->suspended_pop_list.next,
                               struct rt_thread,
                               tlist);
        /* set error code to -RT_ERROR so the waiter sees the reset */
        thread->error = -RT_ERROR;

        /*
         * resume thread
         * In rt_thread_resume function, it will remove current thread from
         * suspend list
         */
        rt_thread_resume(thread);

        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }

    /* resume on push list */
    while (!rt_list_isempty(&(queue->suspended_push_list)))
    {
        /* disable interrupt */
        level = rt_hw_interrupt_disable();

        /* get next suspend thread */
        thread = rt_list_entry(queue->suspended_push_list.next,
                               struct rt_thread,
                               tlist);
        /* set error code to -RT_ERROR so the waiter sees the reset */
        thread->error = -RT_ERROR;

        /*
         * resume thread
         * In rt_thread_resume function, it will remove current thread from
         * suspend list
         */
        rt_thread_resume(thread);

        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }
    rt_exit_critical();

    rt_schedule();
}
RTM_EXPORT(rt_data_queue_reset);
/**
* @brief This function will deinit the data queue.
*
* @param queue is a pointer to the data queue object.
*
* @return Return the operation status. When the return value is RT_EOK, the operation is successful.
*/
rt_err_t rt_data_queue_deinit(struct rt_data_queue *queue)
{
    rt_base_t saved;

    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);

    /* kick every waiting thread off the queue before tearing it down */
    rt_data_queue_reset(queue);

    saved = rt_hw_interrupt_disable();
    /* invalidate the control block so later use trips the magic assert */
    queue->magic = 0;
    rt_hw_interrupt_enable(saved);

    rt_free(queue->queue);

    return RT_EOK;
}
RTM_EXPORT(rt_data_queue_deinit);
/**
* @brief This function will get the number of data in the data queue.
*
* @param queue is a pointer to the data queue object.
*
* @return Return the number of data in the data queue.
*/
rt_uint16_t rt_data_queue_len(struct rt_data_queue *queue)
{
    rt_base_t level;
    /* the original used rt_int16_t here, which overflows for queues with
     * size > 32767 even though the function returns rt_uint16_t */
    rt_uint16_t len;

    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);

    level = rt_hw_interrupt_disable();

    /* evaluate the emptiness flag and the indices atomically: the original
     * tested is_empty before disabling interrupts, so the state could
     * change between the check and the arithmetic below */
    if (queue->is_empty)
    {
        len = 0;
    }
    else if (queue->put_index > queue->get_index)
    {
        len = queue->put_index - queue->get_index;
    }
    else
    {
        /* put cursor has wrapped (or the queue is full) */
        len = queue->size + queue->put_index - queue->get_index;
    }

    rt_hw_interrupt_enable(level);

    return len;
}
RTM_EXPORT(rt_data_queue_len);

View file

@ -0,0 +1,767 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-09-30 Bernard first version.
* 2017-11-08 JasonJiaJie fix memory leak issue when close a pipe.
*/
#include <rthw.h>
#include <rtdevice.h>
#include <stdint.h>
#include <sys/errno.h>
#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_POSIX_PIPE)
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <dfs_file.h>
#include <resource_id.h>
/* check RT_UNAMED_PIPE_NUMBER */
#ifndef RT_UNAMED_PIPE_NUMBER
#define RT_UNAMED_PIPE_NUMBER 64
#endif
#define BITS(x) _BITS(x)
#define _BITS(x) (sizeof(#x) - 1)
struct check_rt_unamed_pipe_number
{
/* -4 for "pipe" prefix */
/* -1 for '\0' postfix */
char _check[RT_NAME_MAX - 4 - 1 - BITS(RT_UNAMED_PIPE_NUMBER)];
};
/* check end */
static void *resoure_id[RT_UNAMED_PIPE_NUMBER];
static resource_id_t id_mgr = RESOURCE_ID_INIT(RT_UNAMED_PIPE_NUMBER, resoure_id);
/**
* @brief This function will open a pipe.
*
* @param fd is the file descriptor.
*
* @return Return the operation status.
* When the return value is 0, it means the operation is successful.
* When the return value is -1, it means the file descriptor is invalid.
* When the return value is -RT_ENOMEM, it means insufficient memory allocation failed.
*/
static int pipe_fops_open(struct dfs_file *fd)
{
    int rc = 0;
    rt_pipe_t *pipe;

    pipe = (rt_pipe_t *)fd->vnode->data;
    if (!pipe)
    {
        return -1;
    }

    rt_mutex_take(&pipe->lock, RT_WAITING_FOREVER);

    /* NOTE(review): O_RDONLY is conventionally 0, so this test is true for
     * every open mode — verify whether only read-ends should set reader */
    if ((fd->flags & O_RDONLY) == O_RDONLY)
    {
        pipe->reader = 1;
    }
    if ((fd->flags & O_WRONLY) == O_WRONLY)
    {
        pipe->writer = 1;
    }

    /* first descriptor on this vnode: lazily create the FIFO ring buffer */
    if (fd->vnode->ref_count == 1)
    {
        pipe->fifo = rt_ringbuffer_create(pipe->bufsz);
        if (pipe->fifo == RT_NULL)
        {
            rc = -RT_ENOMEM;
            goto __exit;
        }
    }

__exit:
    rt_mutex_release(&pipe->lock);

    return rc;
}
/**
* @brief This function will close a pipe.
*
* @param fd is the file descriptor.
*
* @return Return the operation status.
* When the return value is 0, it means the operation is successful.
* When the return value is -1, it means the file descriptor is invalid.
*/
static int pipe_fops_close(struct dfs_file *fd)
{
    rt_device_t device;
    rt_pipe_t *pipe;

    pipe = (rt_pipe_t *)fd->vnode->data;
    if (!pipe)
    {
        return -1;
    }
    device = &pipe->parent;

    rt_mutex_take(&pipe->lock, RT_WAITING_FOREVER);

    /* NOTE(review): O_RDONLY is conventionally 0, so this clears reader on
     * every close regardless of mode — confirm intent */
    if ((fd->flags & O_RDONLY) == O_RDONLY)
    {
        pipe->reader = 0;
    }
    if ((fd->flags & O_WRONLY) == O_WRONLY)
    {
        pipe->writer = 0;

        /* the write end is gone: wake every blocked reader so it can
         * observe writer == 0 and return end-of-file */
        while (!rt_list_isempty(&pipe->reader_queue.waiting_list))
        {
            rt_wqueue_wakeup(&pipe->reader_queue, (void*)POLLIN);
        }
    }

    /* last descriptor on this vnode: release the FIFO ring buffer */
    if (fd->vnode->ref_count == 1)
    {
        if (pipe->fifo != RT_NULL)
        {
            rt_ringbuffer_destroy(pipe->fifo);
        }
        pipe->fifo = RT_NULL;
    }
    rt_mutex_release(&pipe->lock);

    /* unnamed (anonymous) pipes are destroyed on the last close */
    if (fd->vnode->ref_count == 1 && pipe->is_named == RT_FALSE)
    {
        /* delete the unnamed pipe */
        rt_pipe_delete(device->parent.name);
    }

    return 0;
}
/**
* @brief This function will get the pipe space size depends on the command.
*
* @param fd is the file descriptor.
*
* @param cmd is the command. It determines what data will get.
*
* FIONREAD The command to get the number of bytes in the pipe.
*
* FIONWRITE The command to get the number of bytes can be written to the pipe.
*
* @param args is the pointer to the data to store the read data.
*
* @return Return the operation status.
* When the return value is 0, it means the operation is successful.
* When the return value is -EINVAL, it means the command is invalid.
*/
static int pipe_fops_ioctl(struct dfs_file *fd, int cmd, void *args)
{
    rt_pipe_t *pipe = (rt_pipe_t *)fd->vnode->data;

    /* FIONREAD reports bytes buffered; FIONWRITE reports free space */
    if (cmd == FIONREAD)
    {
        *((int*)args) = rt_ringbuffer_data_len(pipe->fifo);
    }
    else if (cmd == FIONWRITE)
    {
        *((int*)args) = rt_ringbuffer_space_len(pipe->fifo);
    }
    else
    {
        return -EINVAL;
    }

    return 0;
}
/**
* @brief This function will read data from pipe.
*
* @param fd is the file descriptor.
*
* @param buf is the buffer to store the read data.
*
* @param count is the length of data to be read.
*
* @return Return the length of data read.
* When the return value is 0, it means O_NONBLOCK is enabled and there is no thread that has the pipe open for writing.
* When the return value is -EAGAIN, it means there are no data to be read.
*/
static int pipe_fops_read(struct dfs_file *fd, void *buf, size_t count)
{
    int len = 0;
    rt_pipe_t *pipe;

    pipe = (rt_pipe_t *)fd->vnode->data;

    rt_mutex_take(&pipe->lock, RT_WAITING_FOREVER);

    while (1)
    {
        len = rt_ringbuffer_get(pipe->fifo, buf, count);
        /* stop on data, or when no process has the pipe open for writing
         * (writer == 0): len == 0 then means end-of-file */
        if (len > 0 || pipe->writer == 0)
        {
            break;
        }
        else
        {
            if (fd->flags & O_NONBLOCK)
            {
                len = -EAGAIN;
                goto out;
            }

            /* empty pipe: drop the lock, nudge any writer, and sleep until
             * the reader queue is woken (by a write or a closing write end) */
            rt_mutex_release(&pipe->lock);
            rt_wqueue_wakeup(&pipe->writer_queue, (void*)POLLOUT);
            rt_wqueue_wait(&pipe->reader_queue, 0, -1);
            rt_mutex_take(&pipe->lock, RT_WAITING_FOREVER);
        }
    }
    /* wakeup writer: reading just freed space in the ring buffer */
    rt_wqueue_wakeup(&pipe->writer_queue, (void*)POLLOUT);

out:
    rt_mutex_release(&pipe->lock);
    return len;
}
/**
* @brief This function will write data to pipe.
*
* @param fd is the file descriptor.
*
* @param buf is a pointer to the data buffer to be written.
*
* @param count is the length of data to be write.
*
* @return Return the length of data written.
* When the return value is -EAGAIN, it means O_NONBLOCK is enabled and there are no space to be written.
* When the return value is -EPIPE, it means there is no thread that has the pipe open for reading.
*/
static int pipe_fops_write(struct dfs_file *fd, const void *buf, size_t count)
{
    int len;
    rt_pipe_t *pipe;
    int wakeup = 0;
    int ret = 0;
    uint8_t *pbuf;

    pipe = (rt_pipe_t *)fd->vnode->data;

    /* NOTE(review): the header doc mentions -EPIPE when no reader exists,
     * but this code never checks pipe->reader — confirm intended behavior */

    if (count == 0)
    {
        return 0;
    }

    pbuf = (uint8_t*)buf;
    rt_mutex_take(&pipe->lock, -1); /* -1: wait forever */

    while (1)
    {
        /* copy as much as currently fits, advancing through the buffer */
        len = rt_ringbuffer_put(pipe->fifo, pbuf, count - ret);
        ret += len;
        pbuf += len;
        wakeup = 1;
        if (ret == count)
        {
            break;
        }
        else
        {
            if (fd->flags & O_NONBLOCK)
            {
                /* non-blocking: a partial write returns the partial count;
                 * only zero progress reports -EAGAIN */
                if (ret == 0)
                {
                    ret = -EAGAIN;
                }

                break;
            }
        }

        /* pipe is full: release the lock, wake readers, and block until a
         * reader frees some space */
        rt_mutex_release(&pipe->lock);
        rt_wqueue_wakeup(&pipe->reader_queue, (void*)POLLIN);
        /* pipe full, waiting on suspended write list */
        rt_wqueue_wait(&pipe->writer_queue, 0, -1);

        rt_mutex_take(&pipe->lock, -1);
    }
    rt_mutex_release(&pipe->lock);

    if (wakeup)
    {
        rt_wqueue_wakeup(&pipe->reader_queue, (void*)POLLIN);
    }

    return ret;
}
/**
* @brief This function will get the pipe status.
*
* @param fd is the file descriptor.
*
* @param req is the request type.
*
* @return mask of the pipe status.
* POLLIN means there is data to be read.
* POLLHUP means there is no thread that occupied the pipe to open for writing.
* POLLOUT means there is space to be written.
* POLLERR means there is no thread that occupied the pipe to open for reading.
*/
static int pipe_fops_poll(struct dfs_file *fd, rt_pollreq_t *req)
{
    rt_pipe_t *pipe = (rt_pipe_t *)fd->vnode->data;
    int events = 0;
    int acc = fd->flags & O_ACCMODE;

    /* register on both wait queues so either side can wake the poller */
    rt_poll_add(&pipe->reader_queue, req);
    rt_poll_add(&pipe->writer_queue, req);

    /* readable end: POLLIN when data is buffered */
    if (acc == O_RDONLY || acc == O_RDWR)
    {
        if (rt_ringbuffer_data_len(pipe->fifo) != 0)
        {
            events |= POLLIN;
        }
    }

    /* writable end: POLLOUT when free space remains */
    if (acc == O_WRONLY || acc == O_RDWR)
    {
        if (rt_ringbuffer_space_len(pipe->fifo) != 0)
        {
            events |= POLLOUT;
        }
    }

    return events;
}
/* file-operation table wired into the DFS vnode for pipe devices
 * (positional init — member order must match struct dfs_file_ops;
 * NOTE(review): confirm slot names against dfs_file.h) */
static const struct dfs_file_ops pipe_fops =
{
    pipe_fops_open,
    pipe_fops_close,
    pipe_fops_ioctl,
    pipe_fops_read,
    pipe_fops_write,
    RT_NULL,            /* unimplemented slot */
    RT_NULL,            /* unimplemented slot */
    RT_NULL,            /* unimplemented slot */
    pipe_fops_poll,
};
#endif /* defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_POSIX_PIPE) */
/**
* @brief This function will open the pipe and actually creates the pipe buffer.
*
* @param device is a pointer to the pipe device descriptor.
*
* @param oflag is the open method, but it is not used yet.
*
* @return Return the operation status.
* When the return value is RT_EOK, the operation is successful.
* When the return value is -RT_EINVAL, it means the device handle is empty.
* When the return value is -RT_ENOMEM, it means insufficient memory allocation failed.
*/
rt_err_t rt_pipe_open(rt_device_t device, rt_uint16_t oflag)
{
    rt_pipe_t *pipe = (rt_pipe_t *)device;
    rt_err_t err = RT_EOK;

    if (device == RT_NULL)
    {
        return -RT_EINVAL;
    }

    rt_mutex_take(&pipe->lock, RT_WAITING_FOREVER);

    /* create the ring buffer lazily on the first open */
    if (pipe->fifo == RT_NULL)
    {
        pipe->fifo = rt_ringbuffer_create(pipe->bufsz);
        if (pipe->fifo == RT_NULL)
        {
            err = -RT_ENOMEM;
        }
    }

    rt_mutex_release(&pipe->lock);

    return err;
}
/**
* @brief This function will close the pipe and release the pipe buffer.
*
* @param device is a pointer to the pipe device descriptor.
*
* @return Return the operation status.
* When the return value is RT_EOK, the operation is successful.
* When the return value is -RT_EINVAL, it means the device handle is empty.
*/
rt_err_t rt_pipe_close(rt_device_t device)
{
    rt_pipe_t *pipe = (rt_pipe_t *)device;

    if (device == RT_NULL)
    {
        return -RT_EINVAL;
    }

    rt_mutex_take(&pipe->lock, RT_WAITING_FOREVER);

    /* The fifo may already be gone (device never opened, or released by
     * pipe_fops_close); guard the destroy like the other teardown paths
     * (pipe_fops_close, rt_pipe_delete) do instead of passing NULL. */
    if (pipe->fifo != RT_NULL)
    {
        rt_ringbuffer_destroy(pipe->fifo);
        pipe->fifo = RT_NULL;
    }

    rt_mutex_release(&pipe->lock);

    return RT_EOK;
}
/**
* @brief This function will read the specified length of data from the pipe.
*
* @param device is a pointer to the pipe device descriptor.
*
* @param pos is a parameter compatible with POSIX standard interface (currently meaningless, just pass in 0).
*
* @param buffer is a pointer to the buffer to store the read data.
*
* @param count is the length of data to be read.
*
* @return Return the length of data read.
* When the return value is 0, it means the pipe device handle is empty or the count is 0.
*/
rt_ssize_t rt_pipe_read(rt_device_t device, rt_off_t pos, void *buffer, rt_size_t count)
{
    rt_pipe_t *pipe = (rt_pipe_t *)device;
    uint8_t *dst;
    rt_size_t done = 0;

    if (device == RT_NULL)
    {
        rt_set_errno(-EINVAL);
        return 0;
    }
    if (count == 0)
    {
        return 0;
    }

    dst = (uint8_t*)buffer;

    rt_mutex_take(&pipe->lock, RT_WAITING_FOREVER);

    /* non-blocking drain: take whatever the ring buffer currently holds,
     * up to count bytes */
    for (;;)
    {
        int got;

        if (done >= count)
        {
            break;
        }
        got = rt_ringbuffer_get(pipe->fifo, &dst[done], count - done);
        if (got <= 0)
        {
            break;
        }
        done += got;
    }

    rt_mutex_release(&pipe->lock);

    return done;
}
/**
* @brief This function will write the specified length of data to the pipe.
*
* @param device is a pointer to the pipe device descriptor.
*
* @param pos is a parameter compatible with POSIX standard interface (currently meaningless, just pass in 0).
*
* @param buffer is a pointer to the data buffer to be written.
*
* @param count is the length of data to be written.
*
* @return Return the length of data written.
* When the return value is 0, it means the pipe device handle is empty or the count is 0.
*/
rt_ssize_t rt_pipe_write(rt_device_t device, rt_off_t pos, const void *buffer, rt_size_t count)
{
    rt_pipe_t *pipe = (rt_pipe_t *)device;
    uint8_t *src;
    rt_size_t done = 0;

    if (device == RT_NULL)
    {
        rt_set_errno(-EINVAL);
        return 0;
    }
    if (count == 0)
    {
        return 0;
    }

    src = (uint8_t*)buffer;

    rt_mutex_take(&pipe->lock, -1);

    /* non-blocking fill: stuff the ring buffer until it is full or all
     * count bytes have been consumed */
    for (;;)
    {
        int put;

        if (done >= count)
        {
            break;
        }
        put = rt_ringbuffer_put(pipe->fifo, &src[done], count - done);
        if (put <= 0)
        {
            break;
        }
        done += put;
    }

    rt_mutex_release(&pipe->lock);

    return done;
}
/**
* @brief This function is not used yet.
*
* @param dev is not used yet.
*
* @param cmd is not used yet.
*
* @param args is not used yet.
*
* @return Always return RT_EOK.
*/
rt_err_t rt_pipe_control(rt_device_t dev, int cmd, void *args)
{
    /* no device-specific control commands are implemented yet; every
     * request is accepted and ignored */
    return RT_EOK;
}
#ifdef RT_USING_DEVICE_OPS
/* device-operation table used when RT_USING_DEVICE_OPS is enabled
 * (positional init — order must match struct rt_device_ops) */
const static struct rt_device_ops pipe_ops =
{
    RT_NULL,            /* init: nothing to initialize */
    rt_pipe_open,
    rt_pipe_close,
    rt_pipe_read,
    rt_pipe_write,
    rt_pipe_control,
};
#endif /* RT_USING_DEVICE_OPS */
/**
* @brief This function will initialize a pipe device.
* The system allocates a pipe handle from dynamic heap memory, initializes the pipe handle
* with the specified value, and registers the pipe device with the system.
*
* @param name is the name of pipe device.
*
* @param bufsz is the size of pipe buffer.
*
* @return Return the pointer to the pipe device.
* When the return value is RT_NULL, it means the initialization failed.
*/
rt_pipe_t *rt_pipe_create(const char *name, int bufsz)
{
    rt_pipe_t *pipe;
    rt_device_t dev;

    pipe = (rt_pipe_t *)rt_malloc(sizeof(rt_pipe_t));
    if (pipe == RT_NULL) return RT_NULL;

    rt_memset(pipe, 0, sizeof(rt_pipe_t));
    pipe->is_named = RT_TRUE; /* initialize as a named pipe */
#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_POSIX_PIPE)
    /* no unnamed-pipe id slot reserved yet; pipe() sets this later */
    pipe->pipeno = -1;
#endif
    rt_mutex_init(&pipe->lock, name, RT_IPC_FLAG_FIFO);
    rt_wqueue_init(&pipe->reader_queue);
    rt_wqueue_init(&pipe->writer_queue);
    pipe->writer = 0;
    pipe->reader = 0;

    /* guard the upper bound of the requested buffer size */
    RT_ASSERT(bufsz < 0xFFFF);
    pipe->bufsz = bufsz;

    dev = &pipe->parent;
    dev->type = RT_Device_Class_Pipe;
#ifdef RT_USING_DEVICE_OPS
    dev->ops = &pipe_ops;
#else
    dev->init = RT_NULL;
    dev->open = rt_pipe_open;
    dev->read = rt_pipe_read;
    dev->write = rt_pipe_write;
    dev->close = rt_pipe_close;
    dev->control = rt_pipe_control;
#endif
    dev->rx_indicate = RT_NULL;
    dev->tx_complete = RT_NULL;

    if (rt_device_register(&pipe->parent, name, RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE) != 0)
    {
        rt_mutex_detach(&pipe->lock);
#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_POSIX_PIPE)
        /* NOTE(review): pipeno is still -1 at this point — confirm
         * resource_id_put() tolerates an unreserved id */
        resource_id_put(&id_mgr, pipe->pipeno);
#endif
        rt_free(pipe);
        return RT_NULL;
    }
#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_POSIX_PIPE)
    dev->fops = (void *)&pipe_fops;
#endif

    return pipe;
}
/**
* @brief This function will delete a pipe device.
* The system will release the pipe handle and unregister the pipe device from the system.
*
* @param pipe is the pointer to the pipe device.
*
* @return Return the operation status.
* When the return value is 0, it means the operation is successful.
* When the return value is -ENODEV, it means the pipe device is not found, or the device found is not a pipe.
*/
int rt_pipe_delete(const char *name)
{
    int result = 0;
    rt_device_t device;

    device = rt_device_find(name);
    if (device)
    {
        if (device->type == RT_Device_Class_Pipe)
        {
            rt_pipe_t *pipe;

            pipe = (rt_pipe_t *)device;

            rt_mutex_detach(&pipe->lock);
#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_POSIX_PIPE)
            /* release the unnamed-pipe id slot (named pipes hold -1 here) */
            resource_id_put(&id_mgr, pipe->pipeno);
#endif
            rt_device_unregister(device);

            /* close fifo ringbuffer */
            if (pipe->fifo)
            {
                rt_ringbuffer_destroy(pipe->fifo);
                pipe->fifo = RT_NULL;
            }
            rt_free(pipe);
        }
        else
        {
            /* a device with this name exists but it is not a pipe */
            result = -ENODEV;
        }
    }
    else
    {
        /* no device registered under this name */
        result = -ENODEV;
    }

    return result;
}
#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_POSIX_PIPE)
/**
* @brief This function will create an anonymous pipe.
*
* @param fildes[0] is the read handle.
* fildes[1] is the write handle.
*
* @return Return the operation status.
* When the return value is 0, it means the operation is successful.
* When the return value is -1, it means the operation is failed.
*/
int pipe(int fildes[2])
{
    rt_pipe_t *pipe;
    char dname[8];
    char dev_name[32];
    int pipeno = 0;

    /* reserve an unnamed-pipe id slot and derive the device name from it */
    pipeno = resource_id_get(&id_mgr);
    if (pipeno == -1)
    {
        return -1;
    }
    rt_snprintf(dname, sizeof(dname), "pipe%d", pipeno);

    pipe = rt_pipe_create(dname, RT_USING_POSIX_PIPE_SIZE);
    if (pipe == RT_NULL)
    {
        resource_id_put(&id_mgr, pipeno);
        return -1;
    }

    pipe->is_named = RT_FALSE; /* unnamed pipe */
    pipe->pipeno = pipeno;
    rt_snprintf(dev_name, sizeof(dev_name), "/dev/%s", dname);

    fildes[0] = open(dev_name, O_RDONLY, 0);
    if (fildes[0] < 0)
    {
        /* Nothing holds the device open, so no close path will ever reclaim
         * it: delete the pipe here (which also returns the id slot) instead
         * of leaking the device as the original code did. */
        rt_pipe_delete(dname);
        return -1;
    }

    fildes[1] = open(dev_name, O_WRONLY, 0);
    if (fildes[1] < 0)
    {
        /* closing the read end deletes the unnamed pipe via pipe_fops_close */
        close(fildes[0]);
        return -1;
    }

    return 0;
}
/**
* @brief This function will create a named pipe.
*
* @param path is the name of pipe device.
*
* @param mode is not used yet.
*
* @return Return the operation status.
* When the return value is 0, it means the operation is successful.
* When the return value is -1, it means the operation is failed.
*/
int mkfifo(const char *path, mode_t mode)
{
    /* mode is accepted for POSIX compatibility but not used; success is
     * simply whether the named pipe device could be created */
    return (rt_pipe_create(path, RT_USING_POSIX_PIPE_SIZE) == RT_NULL) ? -1 : 0;
}
#endif /* defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_POSIX_PIPE) */

View file

@ -0,0 +1,596 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-08-25 armink the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
/**
* ring block buffer object initialization
*
* @param rbb ring block buffer object
* @param buf buffer
* @param buf_size buffer size
* @param block_set block set
* @param blk_max_num max block number
*
* @note When your application need align access, please make the buffer address is aligned.
*/
void rt_rbb_init(rt_rbb_t rbb, rt_uint8_t *buf, rt_size_t buf_size, rt_rbb_blk_t block_set, rt_size_t blk_max_num)
{
    rt_size_t idx;

    RT_ASSERT(rbb);
    RT_ASSERT(buf);
    RT_ASSERT(block_set);

    /* bind the caller-provided storage to the control object */
    rbb->buf = buf;
    rbb->buf_size = buf_size;
    rbb->blk_set = block_set;
    rbb->blk_max_num = blk_max_num;
    rbb->tail = &rbb->blk_list;
    rt_slist_init(&rbb->blk_list);
    rt_slist_init(&rbb->free_list);

    /* mark every block descriptor unused and push it onto the free list */
    for (idx = 0; idx < blk_max_num; idx++)
    {
        rt_rbb_blk_t blk = &block_set[idx];
        blk->status = RT_RBB_BLK_UNUSED;
        rt_slist_init(&blk->list);
        rt_slist_insert(&rbb->free_list, &blk->list);
    }
}
RTM_EXPORT(rt_rbb_init);
#ifdef RT_USING_HEAP
/**
* ring block buffer object create
*
* @param buf_size buffer size
* @param blk_max_num max block number
*
* @return != RT_NULL: ring block buffer object
* RT_NULL: create failed
*/
rt_rbb_t rt_rbb_create(rt_size_t buf_size, rt_size_t blk_max_num)
{
    rt_rbb_t rbb;
    rt_uint8_t *pool;
    rt_rbb_blk_t blocks;

    /* allocate the control object, the data pool and the block set,
     * rolling everything back on the first failure */
    rbb = (rt_rbb_t)rt_malloc(sizeof(struct rt_rbb));
    if (rbb == RT_NULL)
        return RT_NULL;

    pool = (rt_uint8_t *)rt_malloc(buf_size);
    if (pool == RT_NULL)
        goto err_free_rbb;

    blocks = (rt_rbb_blk_t)rt_malloc(sizeof(struct rt_rbb_blk) * blk_max_num);
    if (blocks == RT_NULL)
        goto err_free_pool;

    rt_rbb_init(rbb, pool, buf_size, blocks, blk_max_num);
    return rbb;

err_free_pool:
    rt_free(pool);
err_free_rbb:
    rt_free(rbb);
    return RT_NULL;
}
RTM_EXPORT(rt_rbb_create);
/**
* ring block buffer object destroy
*
* @param rbb ring block buffer object
*/
void rt_rbb_destroy(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    /* free the data pool and the block set before the control object */
    rt_free(rbb->buf);
    rt_free(rbb->blk_set);
    rt_free(rbb);
}
RTM_EXPORT(rt_rbb_destroy);
#endif
/* Pop one unused block descriptor from the free list, or return RT_NULL. */
static rt_rbb_blk_t find_empty_blk_in_set(rt_rbb_t rbb)
{
    struct rt_rbb_blk *blk = RT_NULL;

    RT_ASSERT(rbb);

    if (!rt_slist_isempty(&rbb->free_list))
    {
        blk = rt_slist_first_entry(&rbb->free_list, struct rt_rbb_blk, list);
        rt_slist_remove(&rbb->free_list, &blk->list);
        RT_ASSERT(blk->status == RT_RBB_BLK_UNUSED);
    }
    return blk;
}
/* Link node 'n' behind the current tail and remember it as the new tail. */
rt_inline void list_append(rt_rbb_t rbb, rt_slist_t *n)
{
    n->next = RT_NULL;
    rbb->tail->next = n;
    rbb->tail = n;
}
/* Unlink node 'n' from the block list, keeping rbb->tail consistent.
 * Returns the list head. */
rt_inline rt_slist_t *list_remove(rt_rbb_t rbb, rt_slist_t *n)
{
    rt_slist_t *head = &rbb->blk_list;
    rt_slist_t *prev = head;

    /* walk to the node directly in front of 'n' */
    while (prev->next != RT_NULL && prev->next != n)
        prev = prev->next;

    if (prev->next != RT_NULL)
    {
        /* unlink 'n'; when it was the tail, the tail moves backwards */
        prev->next = n->next;
        n->next = RT_NULL;
        if (rbb->tail == n)
            rbb->tail = prev;
    }
    return head;
}
/**
* Allocate a block by given size. The block will add to blk_list when allocate success.
*
* @param rbb ring block buffer object
* @param blk_size block size
*
 * @note When your application needs aligned access, please make sure blk_size is aligned.
*
* @return != RT_NULL: allocated block
* RT_NULL: allocate failed
*/
rt_rbb_blk_t rt_rbb_blk_alloc(rt_rbb_t rbb, rt_size_t blk_size)
{
    rt_base_t level;
    rt_size_t empty1 = 0, empty2 = 0;
    rt_rbb_blk_t head, tail, new_rbb = RT_NULL;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_size < (1L << 24));

    level = rt_hw_interrupt_disable();

    /* take an unused descriptor off the free list; it must be returned
     * there if no buffer space can be found (fix: previously it was
     * simply dropped, leaking the descriptor permanently) */
    new_rbb = find_empty_blk_in_set(rbb);
    if (new_rbb)
    {
        if (rt_slist_isempty(&rbb->blk_list) == 0)
        {
            head = rt_slist_first_entry(&rbb->blk_list, struct rt_rbb_blk, list);
            /* get tail rbb blk object */
            tail = rt_slist_entry(rbb->tail, struct rt_rbb_blk, list);
            if (head->buf <= tail->buf)
            {
                /**
                 *                      head                     tail
                 * +--------------------------------------+-----------------+------------------+
                 * | empty2 | block1 | block2 |           block3            |      empty1      |
                 * +--------------------------------------+-----------------+------------------+
                 * rbb->buf
                 */
                empty1 = (rbb->buf + rbb->buf_size) - (tail->buf + tail->size);
                empty2 = head->buf - rbb->buf;

                /* prefer the gap behind the tail; otherwise wrap to the
                 * gap in front of the head */
                if (empty1 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = tail->buf + tail->size;
                    new_rbb->size = blk_size;
                }
                else if (empty2 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = rbb->buf;
                    new_rbb->size = blk_size;
                }
                else
                {
                    /* no space: recycle the descriptor onto the free list */
                    rt_slist_insert(&rbb->free_list, &new_rbb->list);
                    new_rbb = RT_NULL;
                }
            }
            else
            {
                /**
                 *        tail                                              head
                 * +----------------+-------------------------------------+--------+-----------+
                 * |     block3     |            empty1                   | block1 |  block2   |
                 * +----------------+-------------------------------------+--------+-----------+
                 * rbb->buf
                 */
                empty1 = head->buf - (tail->buf + tail->size);

                if (empty1 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = tail->buf + tail->size;
                    new_rbb->size = blk_size;
                }
                else
                {
                    /* no space: recycle the descriptor onto the free list */
                    rt_slist_insert(&rbb->free_list, &new_rbb->list);
                    new_rbb = RT_NULL;
                }
            }
        }
        else
        {
            /* the list is empty */
            list_append(rbb, &new_rbb->list);
            new_rbb->status = RT_RBB_BLK_INITED;
            new_rbb->buf = rbb->buf;
            new_rbb->size = blk_size;
        }
    }
    else
    {
        /* no free block descriptor available */
        new_rbb = RT_NULL;
    }

    rt_hw_interrupt_enable(level);

    return new_rbb;
}
RTM_EXPORT(rt_rbb_blk_alloc);
/**
* put a block to ring block buffer object
*
* @param block the block
*/
void rt_rbb_blk_put(rt_rbb_blk_t block)
{
    RT_ASSERT(block);
    RT_ASSERT(block->status == RT_RBB_BLK_INITED);

    /* mark the block as filled so consumers can fetch it */
    block->status = RT_RBB_BLK_PUT;
}
RTM_EXPORT(rt_rbb_blk_put);
/**
* get a block from the ring block buffer object
*
* @param rbb ring block buffer object
*
* @return != RT_NULL: block
* RT_NULL: get failed
*/
rt_rbb_blk_t rt_rbb_blk_get(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_rbb_blk_t found = RT_NULL;
    rt_slist_t *node;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_hw_interrupt_disable();
    for (node = rt_slist_first(&rbb->blk_list); node != RT_NULL; node = rt_slist_next(node))
    {
        rt_rbb_blk_t blk = rt_slist_entry(node, struct rt_rbb_blk, list);
        if (blk->status == RT_RBB_BLK_PUT)
        {
            /* claim the first filled block */
            blk->status = RT_RBB_BLK_GET;
            found = blk;
            break;
        }
    }
    rt_hw_interrupt_enable(level);
    return found;
}
RTM_EXPORT(rt_rbb_blk_get);
/**
* return the block size
*
* @param block the block
*
* @return block size
*/
rt_size_t rt_rbb_blk_size(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    /* payload size in bytes of this block */
    return block->size;
}
RTM_EXPORT(rt_rbb_blk_size);
/**
* return the block buffer
*
* @param block the block
*
* @return block buffer
*/
rt_uint8_t *rt_rbb_blk_buf(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    /* pointer to the block's payload inside the ring buffer */
    return block->buf;
}
RTM_EXPORT(rt_rbb_blk_buf);
/**
* free the block
*
* @param rbb ring block buffer object
* @param block the block
*/
void rt_rbb_blk_free(rt_rbb_t rbb, rt_rbb_blk_t block)
{
    rt_base_t level;

    RT_ASSERT(rbb);
    RT_ASSERT(block);
    RT_ASSERT(block->status != RT_RBB_BLK_UNUSED);

    level = rt_hw_interrupt_disable();
    /* detach the block from the busy list, then recycle its descriptor */
    list_remove(rbb, &block->list);
    block->status = RT_RBB_BLK_UNUSED;
    rt_slist_insert(&rbb->free_list, &block->list);
    rt_hw_interrupt_enable(level);
}
RTM_EXPORT(rt_rbb_blk_free);
/**
* get a continuous block to queue by given size
*
* tail head
* +------------------+---------------+--------+----------+--------+
* | block3 | empty1 | block1 | block2 |fragment|
* +------------------+------------------------+----------+--------+
* |<-- return_size -->| |
* |<--- queue_data_len --->|
*
* tail head
* +------------------+---------------+--------+----------+--------+
* | block3 | empty1 | block1 | block2 |fragment|
* +------------------+------------------------+----------+--------+
* |<-- return_size -->| out of len(b1+b2+b3) |
* |<-------------------- queue_data_len -------------------->|
*
* @param rbb ring block buffer object
 * @param queue_data_len The maximum queue data size; the returned total size will not exceed it.
* @param queue continuous block queue
*
* @return the block queue data total size
*/
rt_size_t rt_rbb_blk_queue_get(rt_rbb_t rbb, rt_size_t queue_data_len, rt_rbb_blk_queue_t blk_queue)
{
    rt_base_t level;
    rt_size_t data_total_size = 0;
    rt_slist_t *node, *tmp = RT_NULL;
    rt_rbb_blk_t last_block = RT_NULL, block;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_hw_interrupt_disable();

    /* Fetch each node's successor before inspecting it. Fix: the old
     * increment "node = tmp, tmp = rt_slist_next(node)" evaluated
     * rt_slist_next(NULL) on the final iteration -- a NULL dereference. */
    for (node = rt_slist_first(&rbb->blk_list); node != RT_NULL; node = tmp)
    {
        tmp = rt_slist_next(node);
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status == RT_RBB_BLK_PUT)
            {
                /* save the first put status block to queue */
                blk_queue->blocks = last_block;
                blk_queue->blk_num = 0;
            }
            else
            {
                /* the first block must be put status */
                last_block = RT_NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * these following conditions will break the loop:
             * 1. the current block is not put status
             * 2. the last block and current block is not continuous
             * 3. the data_total_size will out of range
             */
            if (block->status != RT_RBB_BLK_PUT ||
                last_block->buf > block->buf ||
                data_total_size + block->size > queue_data_len)
            {
                break;
            }
            /* backup last block */
            last_block = block;
        }
        /* accumulate the block and mark it as taken by the consumer */
        data_total_size += last_block->size;
        last_block->status = RT_RBB_BLK_GET;
        blk_queue->blk_num++;
    }

    rt_hw_interrupt_enable(level);

    return data_total_size;
}
RTM_EXPORT(rt_rbb_blk_queue_get);
/**
* get all block length on block queue
*
* @param blk_queue the block queue
*
* @return total length
*/
rt_size_t rt_rbb_blk_queue_len(rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t idx, total = 0;
    rt_rbb_blk_t blk;

    RT_ASSERT(blk_queue);

    /* walk blk_num consecutive blocks starting at the queue head */
    blk = blk_queue->blocks;
    for (idx = 0; idx < blk_queue->blk_num; idx++)
    {
        total += blk->size;
        blk = rt_slist_entry(blk->list.next, struct rt_rbb_blk, list);
    }
    return total;
}
RTM_EXPORT(rt_rbb_blk_queue_len);
/**
* return the block queue buffer
*
* @param blk_queue the block queue
*
* @return block queue buffer
*/
rt_uint8_t *rt_rbb_blk_queue_buf(rt_rbb_blk_queue_t blk_queue)
{
    RT_ASSERT(blk_queue);

    /* the queue's data starts at the first block's payload */
    return blk_queue->blocks[0].buf;
}
RTM_EXPORT(rt_rbb_blk_queue_buf);
/**
* free the block queue
*
* @param rbb ring block buffer object
* @param blk_queue the block queue
*/
void rt_rbb_blk_queue_free(rt_rbb_t rbb, rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t idx;
    rt_rbb_blk_t cur, next;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    cur = blk_queue->blocks;
    for (idx = 0; idx < blk_queue->blk_num; idx++)
    {
        /* capture the successor first: rt_rbb_blk_free() unlinks 'cur' */
        next = rt_slist_entry(cur->list.next, struct rt_rbb_blk, list);
        rt_rbb_blk_free(rbb, cur);
        cur = next;
    }
}
RTM_EXPORT(rt_rbb_blk_queue_free);
/**
 * Blocks that are in put status and whose buffers are continuous can form a block queue.
 * This function returns the total length of the next such run of blocks.
*
* @param rbb ring block buffer object
*
* @return the next can be make block queue's length
*/
rt_size_t rt_rbb_next_blk_queue_len(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_size_t data_len = 0;
    rt_slist_t *node;
    rt_rbb_blk_t last_block = RT_NULL, block;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_hw_interrupt_disable();

    /* scan for the first run of blocks in put status whose buffers are
     * laid out in ascending address order, summing their sizes */
    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status != RT_RBB_BLK_PUT)
            {
                /* the first block must be put status */
                last_block = RT_NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * these following conditions will break the loop:
             * 1. the current block is not put status
             * 2. the last block and current block is not continuous
             */
            if (block->status != RT_RBB_BLK_PUT || last_block->buf > block->buf)
            {
                break;
            }
            /* backup last block */
            last_block = block;
        }

        /* accumulate the size of every block in the run */
        data_len += last_block->size;
    }

    rt_hw_interrupt_enable(level);

    return data_len;
}
RTM_EXPORT(rt_rbb_next_blk_queue_len);
/**
* get the ring block buffer object buffer size
*
* @param rbb ring block buffer object
*
* @return buffer size
*/
rt_size_t rt_rbb_get_buf_size(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    /* total capacity of the underlying data pool in bytes */
    return rbb->buf_size;
}
RTM_EXPORT(rt_rbb_get_buf_size);

View file

@ -0,0 +1,467 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2012-09-30 Bernard first version.
* 2013-05-08 Grissiom reimplement
* 2016-08-18 heyuanjie add interface
* 2021-07-20 arminker fix write_index bug in function rt_ringbuffer_put_force
* 2021-08-14 Jackistang add comments for function interface.
*/
#include <rtthread.h>
#include <rtdevice.h>
#include <string.h>
/* Classify the ring buffer: equal indexes mean empty or full, and the
 * mirror bits disambiguate which of the two it is. */
rt_inline enum rt_ringbuffer_state rt_ringbuffer_status(struct rt_ringbuffer *rb)
{
    if (rb->read_index != rb->write_index)
        return RT_RINGBUFFER_HALFFULL;

    return (rb->read_mirror == rb->write_mirror) ? RT_RINGBUFFER_EMPTY
                                                 : RT_RINGBUFFER_FULL;
}
/**
* @brief Initialize the ring buffer object.
*
* @param rb A pointer to the ring buffer object.
* @param pool A pointer to the buffer.
* @param size The size of the buffer in bytes.
*/
void rt_ringbuffer_init(struct rt_ringbuffer *rb,
                        rt_uint8_t *pool,
                        rt_int32_t size)
{
    RT_ASSERT(rb != RT_NULL);
    RT_ASSERT(size > 0);

    /* start empty: both cursors at index 0 on the same mirror side */
    rb->read_mirror = 0;
    rb->read_index = 0;
    rb->write_mirror = 0;
    rb->write_index = 0;

    /* the usable size is rounded down to the platform alignment */
    rb->buffer_ptr = pool;
    rb->buffer_size = RT_ALIGN_DOWN(size, RT_ALIGN_SIZE);
}
RTM_EXPORT(rt_ringbuffer_init);
/**
* @brief Put a block of data into the ring buffer. If the capacity of ring buffer is insufficient, it will discard out-of-range data.
*
* @param rb A pointer to the ring buffer object.
* @param ptr A pointer to the data buffer.
* @param length The size of data in bytes.
*
* @return Return the data size we put into the ring buffer.
*/
rt_size_t rt_ringbuffer_put(struct rt_ringbuffer *rb,
                            const rt_uint8_t *ptr,
                            rt_uint32_t length)
{
    rt_uint32_t size;

    RT_ASSERT(rb != RT_NULL);

    /* whether has enough space */
    size = rt_ringbuffer_space_len(rb);

    /* no space */
    if (size == 0)
        return 0;

    /* drop some data */
    if (size < length)
        length = size;

    if (rb->buffer_size - rb->write_index > length)
    {
        /* read_index - write_index = empty space */
        rt_memcpy(&rb->buffer_ptr[rb->write_index], ptr, length);
        /* this should not cause overflow because there is enough space for
         * length of data in current mirror */
        rb->write_index += length;
        return length;
    }

    /* the write wraps: copy the part that fits at the end of the buffer,
     * then the remainder at its start */
    rt_memcpy(&rb->buffer_ptr[rb->write_index],
              &ptr[0],
              rb->buffer_size - rb->write_index);
    rt_memcpy(&rb->buffer_ptr[0],
              &ptr[rb->buffer_size - rb->write_index],
              length - (rb->buffer_size - rb->write_index));

    /* we are going into the other side of the mirror */
    rb->write_mirror = ~rb->write_mirror;
    rb->write_index = length - (rb->buffer_size - rb->write_index);

    return length;
}
RTM_EXPORT(rt_ringbuffer_put);
/**
* @brief Put a block of data into the ring buffer. If the capacity of ring buffer is insufficient, it will overwrite the existing data in the ring buffer.
*
* @param rb A pointer to the ring buffer object.
* @param ptr A pointer to the data buffer.
* @param length The size of data in bytes.
*
* @return Return the data size we put into the ring buffer.
*/
rt_size_t rt_ringbuffer_put_force(struct rt_ringbuffer *rb,
                                  const rt_uint8_t *ptr,
                                  rt_uint32_t length)
{
    rt_uint32_t space_length;

    RT_ASSERT(rb != RT_NULL);

    space_length = rt_ringbuffer_space_len(rb);

    /* longer than the whole buffer: only the last buffer_size bytes survive */
    if (length > rb->buffer_size)
    {
        ptr = &ptr[length - rb->buffer_size];
        length = rb->buffer_size;
    }

    if (rb->buffer_size - rb->write_index > length)
    {
        /* read_index - write_index = empty space */
        rt_memcpy(&rb->buffer_ptr[rb->write_index], ptr, length);
        /* this should not cause overflow because there is enough space for
         * length of data in current mirror */
        rb->write_index += length;

        /* old data was overwritten: the read cursor follows the write cursor */
        if (length > space_length)
            rb->read_index = rb->write_index;

        return length;
    }

    /* the write wraps: copy up to the end of the buffer, then the rest
     * from its start */
    rt_memcpy(&rb->buffer_ptr[rb->write_index],
              &ptr[0],
              rb->buffer_size - rb->write_index);
    rt_memcpy(&rb->buffer_ptr[0],
              &ptr[rb->buffer_size - rb->write_index],
              length - (rb->buffer_size - rb->write_index));

    /* we are going into the other side of the mirror */
    rb->write_mirror = ~rb->write_mirror;
    rb->write_index = length - (rb->buffer_size - rb->write_index);

    if (length > space_length)
    {
        /* keep the read cursor on the correct mirror side after overwrite */
        if (rb->write_index <= rb->read_index)
            rb->read_mirror = ~rb->read_mirror;
        rb->read_index = rb->write_index;
    }

    return length;
}
RTM_EXPORT(rt_ringbuffer_put_force);
/**
* @brief Get data from the ring buffer.
*
* @param rb A pointer to the ring buffer.
* @param ptr A pointer to the data buffer.
* @param length The size of the data we want to read from the ring buffer.
*
* @return Return the data size we read from the ring buffer.
*/
rt_size_t rt_ringbuffer_get(struct rt_ringbuffer *rb,
                            rt_uint8_t *ptr,
                            rt_uint32_t length)
{
    rt_size_t size;

    RT_ASSERT(rb != RT_NULL);

    /* whether has enough data */
    size = rt_ringbuffer_data_len(rb);

    /* no data */
    if (size == 0)
        return 0;

    /* less data */
    if (size < length)
        length = size;

    if (rb->buffer_size - rb->read_index > length)
    {
        /* copy all of data */
        rt_memcpy(ptr, &rb->buffer_ptr[rb->read_index], length);
        /* this should not cause overflow because there is enough space for
         * length of data in current mirror */
        rb->read_index += length;
        return length;
    }

    /* the read wraps: copy up to the end of the buffer, then the
     * remainder from its start */
    rt_memcpy(&ptr[0],
              &rb->buffer_ptr[rb->read_index],
              rb->buffer_size - rb->read_index);
    rt_memcpy(&ptr[rb->buffer_size - rb->read_index],
              &rb->buffer_ptr[0],
              length - (rb->buffer_size - rb->read_index));

    /* we are going into the other side of the mirror */
    rb->read_mirror = ~rb->read_mirror;
    rb->read_index = length - (rb->buffer_size - rb->read_index);

    return length;
}
RTM_EXPORT(rt_ringbuffer_get);
/**
* @brief Get the first readable byte of the ring buffer.
*
* @param rb A pointer to the ringbuffer.
* @param ptr When this function return, *ptr is a pointer to the first readable byte of the ring buffer.
*
* @note It is recommended to read only one byte, otherwise it may cause buffer overflow.
*
* @return Return the size of the ring buffer.
*/
rt_size_t rt_ringbuffer_peek(struct rt_ringbuffer *rb, rt_uint8_t **ptr)
{
    rt_size_t size;

    RT_ASSERT(rb != RT_NULL);

    *ptr = RT_NULL;

    /* whether has enough data */
    size = rt_ringbuffer_data_len(rb);

    /* no data */
    if (size == 0)
        return 0;

    *ptr = &rb->buffer_ptr[rb->read_index];

    /* NOTE(review): despite the name, this call consumes the data it
     * exposes -- the read index is advanced below */
    if ((rt_size_t)(rb->buffer_size - rb->read_index) > size)
    {
        rb->read_index += size;
        return size;
    }

    /* only the contiguous run up to the end of the buffer is returned */
    size = rb->buffer_size - rb->read_index;

    /* we are going into the other side of the mirror */
    rb->read_mirror = ~rb->read_mirror;
    rb->read_index = 0;

    return size;
}
RTM_EXPORT(rt_ringbuffer_peek);
/**
* @brief Put a byte into the ring buffer. If ring buffer is full, this operation will fail.
*
* @param rb A pointer to the ring buffer object.
* @param ch A byte put into the ring buffer.
*
* @return Return the data size we put into the ring buffer. The ring buffer is full if returns 0. Otherwise, it will return 1.
*/
rt_size_t rt_ringbuffer_putchar(struct rt_ringbuffer *rb, const rt_uint8_t ch)
{
    RT_ASSERT(rb != RT_NULL);

    /* reject the byte when the buffer is already full */
    if (rt_ringbuffer_space_len(rb) == 0)
        return 0;

    rb->buffer_ptr[rb->write_index] = ch;

    if (rb->write_index < rb->buffer_size - 1)
    {
        rb->write_index++;
    }
    else
    {
        /* wrapped around: flip the mirror bit and restart at index 0 */
        rb->write_mirror = ~rb->write_mirror;
        rb->write_index = 0;
    }

    return 1;
}
RTM_EXPORT(rt_ringbuffer_putchar);
/**
* @brief Put a byte into the ring buffer. If ring buffer is full, it will discard an old data and put into a new data.
*
* @param rb A pointer to the ring buffer object.
* @param ch A byte put into the ring buffer.
*
* @return Return the data size we put into the ring buffer. Always return 1.
*/
rt_size_t rt_ringbuffer_putchar_force(struct rt_ringbuffer *rb, const rt_uint8_t ch)
{
    enum rt_ringbuffer_state old_state;

    RT_ASSERT(rb != RT_NULL);

    /* remember whether the buffer was full before this write */
    old_state = rt_ringbuffer_status(rb);

    rb->buffer_ptr[rb->write_index] = ch;

    /* flip mirror */
    if (rb->write_index == rb->buffer_size - 1)
    {
        rb->write_mirror = ~rb->write_mirror;
        rb->write_index = 0;
        if (old_state == RT_RINGBUFFER_FULL)
        {
            /* the oldest byte was overwritten: advance the read cursor and
             * keep it on the matching mirror side */
            rb->read_mirror = ~rb->read_mirror;
            rb->read_index = rb->write_index;
        }
    }
    else
    {
        rb->write_index++;
        /* when full, the read cursor follows the write cursor */
        if (old_state == RT_RINGBUFFER_FULL)
            rb->read_index = rb->write_index;
    }

    return 1;
}
RTM_EXPORT(rt_ringbuffer_putchar_force);
/**
* @brief Get a byte from the ring buffer.
*
* @param rb The pointer to the ring buffer object.
* @param ch A pointer to the buffer, used to store one byte.
*
* @return 0 The ring buffer is empty.
* @return 1 Success
*/
rt_size_t rt_ringbuffer_getchar(struct rt_ringbuffer *rb, rt_uint8_t *ch)
{
    RT_ASSERT(rb != RT_NULL);

    /* nothing to read */
    if (rt_ringbuffer_data_len(rb) == 0)
        return 0;

    *ch = rb->buffer_ptr[rb->read_index];

    if (rb->read_index < rb->buffer_size - 1)
    {
        rb->read_index++;
    }
    else
    {
        /* wrapped around: flip the mirror bit and restart at index 0 */
        rb->read_mirror = ~rb->read_mirror;
        rb->read_index = 0;
    }

    return 1;
}
RTM_EXPORT(rt_ringbuffer_getchar);
/**
* @brief Get the size of data in the ring buffer in bytes.
*
* @param rb The pointer to the ring buffer object.
*
* @return Return the size of data in the ring buffer in bytes.
*/
rt_size_t rt_ringbuffer_data_len(struct rt_ringbuffer *rb)
{
    rt_size_t wi, ri;

    switch (rt_ringbuffer_status(rb))
    {
    case RT_RINGBUFFER_EMPTY:
        return 0;
    case RT_RINGBUFFER_FULL:
        return rb->buffer_size;
    case RT_RINGBUFFER_HALFFULL:
    default:
        /* snapshot both indexes, then handle the wrapped layout */
        wi = rb->write_index;
        ri = rb->read_index;
        return (wi > ri) ? (wi - ri) : (rb->buffer_size - (ri - wi));
    }
}
RTM_EXPORT(rt_ringbuffer_data_len);
/**
* @brief Reset the ring buffer object, and clear all contents in the buffer.
*
* @param rb A pointer to the ring buffer object.
*/
void rt_ringbuffer_reset(struct rt_ringbuffer *rb)
{
    RT_ASSERT(rb != RT_NULL);

    /* drop all content: both cursors back to index 0 on the same mirror */
    rb->read_mirror = 0;
    rb->write_mirror = 0;
    rb->read_index = 0;
    rb->write_index = 0;
}
RTM_EXPORT(rt_ringbuffer_reset);
#ifdef RT_USING_HEAP
/**
* @brief Create a ring buffer object with a given size.
*
* @param size The size of the buffer in bytes.
*
* @return Return a pointer to ring buffer object. When the return value is RT_NULL, it means this creation failed.
*/
struct rt_ringbuffer *rt_ringbuffer_create(rt_uint32_t size)
{
    struct rt_ringbuffer *rb;
    rt_uint8_t *pool;

    RT_ASSERT(size > 0);

    /* match the alignment applied by rt_ringbuffer_init() */
    size = RT_ALIGN_DOWN(size, RT_ALIGN_SIZE);

    rb = (struct rt_ringbuffer *)rt_malloc(sizeof(struct rt_ringbuffer));
    if (rb == RT_NULL)
        return RT_NULL;

    pool = (rt_uint8_t *)rt_malloc(size);
    if (pool == RT_NULL)
    {
        /* roll back the control object on pool allocation failure */
        rt_free(rb);
        return RT_NULL;
    }

    rt_ringbuffer_init(rb, pool, size);
    return rb;
}
RTM_EXPORT(rt_ringbuffer_create);
/**
* @brief Destroy the ring buffer object, which is created by rt_ringbuffer_create() .
*
* @param rb A pointer to the ring buffer object.
*/
void rt_ringbuffer_destroy(struct rt_ringbuffer *rb)
{
    RT_ASSERT(rb != RT_NULL);

    /* free the data pool first, then the control structure */
    rt_free(rb->buffer_ptr);
    rt_free(rb);
}
RTM_EXPORT(rt_ringbuffer_destroy);
#endif

View file

@ -0,0 +1,200 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/06/26 Bernard Fix the wait queue issue when wakeup a soon
* to blocked thread.
* 2022-01-24 THEWON let rt_wqueue_wait return thread->error when using signal
*/
#include <stdint.h>
#include <rthw.h>
#include <rtdevice.h>
#include <rtservice.h>
/**
* @brief This function will insert a node to the wait queue.
*
* @param queue is a pointer to the wait queue.
*
* @param node is a pointer to the node to be inserted.
*/
void rt_wqueue_add(rt_wqueue_t *queue, struct rt_wqueue_node *node)
{
    rt_base_t level = rt_hw_interrupt_disable();

    /* queue the node at the end of the waiting list */
    rt_list_insert_before(&(queue->waiting_list), &(node->list));
    rt_hw_interrupt_enable(level);
}
/**
* @brief This function will remove a node from the wait queue.
*
* @param node is a pointer to the node to be removed.
*/
void rt_wqueue_remove(struct rt_wqueue_node *node)
{
    rt_base_t level = rt_hw_interrupt_disable();

    /* detach the node from whatever waiting list it is on */
    rt_list_remove(&(node->list));
    rt_hw_interrupt_enable(level);
}
/**
* @brief This function is the default wakeup function, but it doesn't do anything in actual.
* It always return 0, user should define their own wakeup function.
*
* @param wait is a pointer to the wait queue.
*
* @param key is the wakeup condition.
*
* @return always return 0.
*/
int __wqueue_default_wake(struct rt_wqueue_node *wait, void *key)
{
    /* default policy: return 0, which rt_wqueue_wakeup() treats as
     * "this waiter accepts the wakeup" */
    return 0;
}
/**
* @brief This function will wake up a pending thread on the specified waiting queue that meets the conditions.
*
* @param queue is a pointer to the wait queue.
*
* @param key is the wakeup conditions, but it is not effective now, because
* default wakeup function always return 0.
* If user wants to use it, user should define their own wakeup function.
*/
void rt_wqueue_wakeup(rt_wqueue_t *queue, void *key)
{
    rt_base_t level;
    int need_schedule = 0;

    rt_list_t *queue_list;
    struct rt_list_node *node;
    struct rt_wqueue_node *entry;

    queue_list = &(queue->waiting_list);

    level = rt_hw_interrupt_disable();
    /* set wakeup flag in the queue */
    queue->flag = RT_WQ_FLAG_WAKEUP;

    if (!(rt_list_isempty(queue_list)))
    {
        for (node = queue_list->next; node != queue_list; node = node->next)
        {
            entry = rt_list_entry(node, struct rt_wqueue_node, list);
            /* a wakeup callback returning 0 accepts the wakeup; only the
             * first matching waiter is resumed */
            if (entry->wakeup(entry, key) == 0)
            {
                rt_thread_resume(entry->polling_thread);
                need_schedule = 1;

                rt_wqueue_remove(entry);
                break;
            }
        }
    }
    rt_hw_interrupt_enable(level);

    /* reschedule only when a waiter was actually resumed */
    if (need_schedule)
        rt_schedule();
}
/**
* @brief This function will join a thread to the specified waiting queue, the thread will holds a wait or
* timeout return on the specified wait queue.
*
* @param queue is a pointer to the wait queue.
*
* @param condition is parameters compatible with POSIX standard interface (currently meaningless, just pass in 0).
*
* @param msec is the timeout value, unit is millisecond.
*
* @return Return 0 if the thread is woken up.
*/
static int _rt_wqueue_wait(rt_wqueue_t *queue, int condition, int msec, int suspend_flag)
{
    int tick;
    rt_thread_t tid = rt_thread_self();
    rt_timer_t tmr = &(tid->thread_timer);
    struct rt_wqueue_node __wait;
    rt_base_t level;
    rt_err_t ret;

    /* current context checking */
    RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);

    tick = rt_tick_from_millisecond(msec);

    /* condition already satisfied, or a zero timeout: return at once */
    if ((condition) || (tick == 0))
        return 0;

    /* stack-allocated wait node with the default wakeup callback */
    __wait.polling_thread = rt_thread_self();
    __wait.key = 0;
    __wait.wakeup = __wqueue_default_wake;
    rt_list_init(&__wait.list);

    level = rt_hw_interrupt_disable();

    /* reset thread error */
    tid->error = RT_EOK;

    if (queue->flag == RT_WQ_FLAG_WAKEUP)
    {
        /* already wakeup */
        goto __exit_wakeup;
    }

    ret = rt_thread_suspend_with_flag(tid, suspend_flag);
    if (ret != RT_EOK)
    {
        rt_hw_interrupt_enable(level);
        /* suspend failed */
        return -RT_EINTR;
    }

    rt_wqueue_add(queue, &__wait);

    /* start timer */
    if (tick != RT_WAITING_FOREVER)
    {
        rt_timer_control(tmr,
                         RT_TIMER_CTRL_SET_TIME,
                         &tick);

        rt_timer_start(tmr);
    }
    rt_hw_interrupt_enable(level);

    /* switch away; execution resumes here after wakeup or timeout */
    rt_schedule();

    level = rt_hw_interrupt_disable();

__exit_wakeup:
    /* consume the wakeup flag so the next wait blocks again */
    queue->flag = RT_WQ_FLAG_CLEAN;
    rt_hw_interrupt_enable(level);

    /* safe even when the node was never added: it was init'ed above */
    rt_wqueue_remove(&__wait);

    return tid->error;
}
/**
 * @brief Block on the wait queue with the RT_UNINTERRUPTIBLE suspend flag.
 */
int rt_wqueue_wait(rt_wqueue_t *queue, int condition, int msec)
{
    return _rt_wqueue_wait(queue, condition, msec, RT_UNINTERRUPTIBLE);
}

/**
 * @brief Same wait, but suspends the thread with the RT_KILLABLE flag.
 */
int rt_wqueue_wait_killable(rt_wqueue_t *queue, int condition, int msec)
{
    return _rt_wqueue_wait(queue, condition, msec, RT_KILLABLE);
}

/**
 * @brief Same wait, but suspends the thread with the RT_INTERRUPTIBLE flag.
 */
int rt_wqueue_wait_interruptible(rt_wqueue_t *queue, int condition, int msec)
{
    return _rt_wqueue_wait(queue, condition, msec, RT_INTERRUPTIBLE);
}

View file

@ -0,0 +1,502 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2017-02-27 Bernard fix the re-work issue.
* 2021-08-01 Meco Man remove rt_delayed_work_init()
* 2021-08-14 Jackistang add comments for function interface
* 2022-01-16 Meco Man add rt_work_urgent()
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#ifdef RT_USING_HEAP
static void _delayed_work_timeout_handler(void *parameter);
/* Normalize the queue's semaphore back to value 0 after a work item has
 * run: a trytake timeout (value was 0) is answered with a release and a
 * retry, so the loop always exits having consumed exactly one count. */
rt_inline rt_err_t _workqueue_work_completion(struct rt_workqueue *queue)
{
    rt_err_t result;

    rt_enter_critical();
    while (1)
    {
        /* try to take condition semaphore */
        result = rt_sem_trytake(&(queue->sem));
        if (result == -RT_ETIMEOUT)
        {
            /* it's timeout, release this semaphore */
            rt_sem_release(&(queue->sem));
        }
        else if (result == RT_EOK)
        {
            /* keep the sem value = 0 */
            result = RT_EOK;
            break;
        }
        else
        {
            /* unexpected semaphore error */
            result = -RT_ERROR;
            break;
        }
    }
    rt_exit_critical();

    return result;
}
/* Entry of the workqueue worker thread: loop forever, suspending while
 * the work list is empty and executing queued work items otherwise. */
static void _workqueue_thread_entry(void *parameter)
{
    rt_base_t level;
    struct rt_work *work;
    struct rt_workqueue *queue;

    queue = (struct rt_workqueue *) parameter;
    RT_ASSERT(queue != RT_NULL);

    while (1)
    {
        level = rt_hw_interrupt_disable();
        if (rt_list_isempty(&(queue->work_list)))
        {
            /* no software timer exist, suspend self. */
            rt_thread_suspend_with_flag(rt_thread_self(), RT_UNINTERRUPTIBLE);
            rt_hw_interrupt_enable(level);
            rt_schedule();
            continue;
        }

        /* we have work to do with. */
        work = rt_list_entry(queue->work_list.next, struct rt_work, list);
        rt_list_remove(&(work->list));
        queue->work_current = work;
        work->flags &= ~RT_WORK_STATE_PENDING;
        work->workqueue = RT_NULL;
        rt_hw_interrupt_enable(level);

        /* do work -- the callback runs with interrupts enabled */
        work->work_func(work, work->work_data);
        /* clean current work */
        queue->work_current = RT_NULL;

        /* ack work completion */
        _workqueue_work_completion(queue);
    }
}
/* Queue 'work' for execution: immediately when ticks == 0, or via a
 * one-shot soft timer when 0 < ticks < RT_TICK_MAX / 2. */
static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
                                       struct rt_work *work, rt_tick_t ticks)
{
    rt_base_t level;
    level = rt_hw_interrupt_disable();

    /* remove list -- a re-submitted work item is first detached */
    rt_list_remove(&(work->list));
    work->flags &= ~RT_WORK_STATE_PENDING;

    if (ticks == 0)
    {
        /* append to the tail of the pending list */
        rt_list_insert_after(queue->work_list.prev, &(work->list));
        work->flags |= RT_WORK_STATE_PENDING;
        work->workqueue = queue;

        /* whether the workqueue is doing work */
        if (queue->work_current == RT_NULL &&
            ((queue->work_thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK))
        {
            /* resume work thread */
            rt_thread_resume(queue->work_thread);
            rt_hw_interrupt_enable(level);
            rt_schedule();
        }
        else
        {
            rt_hw_interrupt_enable(level);
        }
        return RT_EOK;
    }
    else if (ticks < RT_TICK_MAX / 2)
    {
        /* Timer started */
        if (work->flags & RT_WORK_STATE_SUBMITTING)
        {
            /* re-submission while delayed: retarget the existing timer */
            rt_timer_stop(&work->timer);
            rt_timer_control(&work->timer, RT_TIMER_CTRL_SET_TIME, &ticks);
        }
        else
        {
            rt_timer_init(&(work->timer), "work", _delayed_work_timeout_handler,
                          work, ticks, RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_SOFT_TIMER);
            work->flags |= RT_WORK_STATE_SUBMITTING;
        }
        work->workqueue = queue;
        /* insert delay work list */
        rt_list_insert_after(queue->delayed_list.prev, &(work->list));
        rt_hw_interrupt_enable(level);
        rt_timer_start(&(work->timer));
        return RT_EOK;
    }
    rt_hw_interrupt_enable(level);
    /* delay out of range */
    return -RT_ERROR;
}
/* Detach 'work' from the queue's lists and stop its delay timer.
 * Returns -RT_EBUSY when the work item is currently being executed. */
static rt_err_t _workqueue_cancel_work(struct rt_workqueue *queue, struct rt_work *work)
{
    rt_base_t level;
    rt_err_t err;

    level = rt_hw_interrupt_disable();
    rt_list_remove(&(work->list));
    work->flags &= ~RT_WORK_STATE_PENDING;
    /* Timer started */
    if (work->flags & RT_WORK_STATE_SUBMITTING)
    {
        rt_timer_stop(&(work->timer));
        rt_timer_detach(&(work->timer));
        work->flags &= ~RT_WORK_STATE_SUBMITTING;
    }
    err = queue->work_current != work ? RT_EOK : -RT_EBUSY;
    work->workqueue = RT_NULL;
    rt_hw_interrupt_enable(level);
    return err;
}
/* One-shot soft-timer callback: move a delayed work item onto its queue's
 * pending list and wake the worker thread if it is idle. */
static void _delayed_work_timeout_handler(void *parameter)
{
    struct rt_work *work;
    struct rt_workqueue *queue;
    rt_base_t level;

    work = (struct rt_work *)parameter;
    queue = work->workqueue;

    RT_ASSERT(queue != RT_NULL);

    level = rt_hw_interrupt_disable();
    /* the timer is one-shot; release its resources now */
    rt_timer_detach(&(work->timer));
    work->flags &= ~RT_WORK_STATE_SUBMITTING;
    /* remove delay list */
    rt_list_remove(&(work->list));
    /* insert work queue */
    if (queue->work_current != work)
    {
        rt_list_insert_after(queue->work_list.prev, &(work->list));
        work->flags |= RT_WORK_STATE_PENDING;
    }
    /* whether the workqueue is doing work */
    if (queue->work_current == RT_NULL &&
        ((queue->work_thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK))
    {
        /* resume work thread */
        rt_thread_resume(queue->work_thread);
        rt_hw_interrupt_enable(level);
        rt_schedule();
    }
    else
    {
        rt_hw_interrupt_enable(level);
    }
}
/**
* @brief Initialize a work item, binding with a callback function.
*
* @param work is a pointer to the work item object.
*
* @param work_func is a callback function that will be called when this work item is executed.
*
* @param work_data is a user data passed to the callback function as the second parameter.
*/
void rt_work_init(struct rt_work *work,
                  void (*work_func)(struct rt_work *work, void *work_data),
                  void *work_data)
{
    RT_ASSERT(work != RT_NULL);
    RT_ASSERT(work_func != RT_NULL);

    /* start from a detached, non-pending state */
    rt_list_init(&(work->list));
    work->workqueue = RT_NULL;
    work->flags = 0;
    work->type = 0;

    /* bind the callback and its user data */
    work->work_func = work_func;
    work->work_data = work_data;
}
/**
 * @brief Create a work queue with a worker thread inside.
 *
 * @param name is a name of the work queue thread.
 *
 * @param stack_size is stack size of the work queue thread.
 *
 * @param priority is a priority of the work queue thread.
 *
 * @return Return a pointer to the workqueue object. It will return RT_NULL if failed.
 */
struct rt_workqueue *rt_workqueue_create(const char *name, rt_uint16_t stack_size, rt_uint8_t priority)
{
    struct rt_workqueue *queue = RT_NULL;

    queue = (struct rt_workqueue *)RT_KERNEL_MALLOC(sizeof(struct rt_workqueue));
    if (queue != RT_NULL)
    {
        /* initialize work list */
        rt_list_init(&(queue->work_list));
        rt_list_init(&(queue->delayed_list));
        queue->work_current = RT_NULL;
        rt_sem_init(&(queue->sem), "wqueue", 0, RT_IPC_FLAG_FIFO);
        /* create the work thread */
        queue->work_thread = rt_thread_create(name, _workqueue_thread_entry, queue, stack_size, priority, 10);
        if (queue->work_thread == RT_NULL)
        {
            /* rt_sem_init registered the semaphore in the kernel object
             * container; detach it before freeing the queue, otherwise the
             * container keeps a dangling reference into freed memory */
            rt_sem_detach(&(queue->sem));
            RT_KERNEL_FREE(queue);
            return RT_NULL;
        }
        rt_thread_startup(queue->work_thread);
    }
    return queue;
}
/**
 * @brief Destroy a work queue.
 *
 * @param queue is a pointer to the workqueue object.
 *
 * @return RT_EOK Success.
 */
rt_err_t rt_workqueue_destroy(struct rt_workqueue *queue)
{
    RT_ASSERT(queue != RT_NULL);
    /* drain both the pending and delayed lists before teardown */
    rt_workqueue_cancel_all_work(queue);
    /* stop the worker thread, then release the semaphore and the memory.
     * NOTE(review): rt_thread_delete terminates the worker even if a work
     * item is mid-execution — callers should ensure no work is in flight;
     * confirm against the worker entry's execution protocol */
    rt_thread_delete(queue->work_thread);
    rt_sem_detach(&(queue->sem));
    RT_KERNEL_FREE(queue);
    return RT_EOK;
}
/**
 * @brief Queue a work item for immediate execution (no delay).
 *
 * @param queue is a pointer to the workqueue object.
 *
 * @param work is a pointer to the work item object.
 *
 * @return RT_EOK Success.
 *         -RT_EBUSY This work item is executing.
 */
rt_err_t rt_workqueue_dowork(struct rt_workqueue *queue, struct rt_work *work)
{
    RT_ASSERT(work != RT_NULL);
    RT_ASSERT(queue != RT_NULL);
    /* a zero tick count means "submit right now" */
    return _workqueue_submit_work(queue, work, 0);
}
/**
 * @brief Submit a work item to the work queue with a delay.
 *
 * @param queue is a pointer to the workqueue object.
 *
 * @param work is a pointer to the work item object.
 *
 * @param ticks is the delay ticks for the work item to be submitted to the work queue.
 *
 *              NOTE: The max timeout tick should be no more than (RT_TICK_MAX/2 - 1)
 *
 * @return RT_EOK   Success.
 *         -RT_EBUSY This work item is executing.
 *         -RT_ERROR The ticks parameter is invalid.
 */
rt_err_t rt_workqueue_submit_work(struct rt_workqueue *queue, struct rt_work *work, rt_tick_t ticks)
{
    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);
    /* enforce the documented -RT_ERROR contract at runtime: the previous
     * RT_ASSERT is compiled out in release builds, letting out-of-range
     * delays slip through silently */
    if (ticks >= RT_TICK_MAX / 2)
        return -RT_ERROR;
    return _workqueue_submit_work(queue, work, ticks);
}
/**
* @brief Submit a work item to the work queue without delay. This work item will be executed after the current work item.
*
* @param queue is a pointer to the workqueue object.
*
* @param work is a pointer to the work item object.
*
* @return RT_EOK Success.
*/
rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *work)
{
rt_base_t level;
RT_ASSERT(queue != RT_NULL);
RT_ASSERT(work != RT_NULL);
level = rt_hw_interrupt_disable();
/* NOTE: the work MUST be initialized firstly */
rt_list_remove(&(work->list));
rt_list_insert_after(&queue->work_list, &(work->list));
/* whether the workqueue is doing work */
if (queue->work_current == RT_NULL &&
((queue->work_thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK))
{
/* resume work thread */
rt_thread_resume(queue->work_thread);
rt_hw_interrupt_enable(level);
rt_schedule();
}
else
{
rt_hw_interrupt_enable(level);
}
return RT_EOK;
}
/**
 * @brief Cancel a work item in the work queue.
 *
 * @param queue is a pointer to the workqueue object.
 *
 * @param work is a pointer to the work item object.
 *
 * @return RT_EOK   Success.
 *         -RT_EBUSY This work item is executing.
 */
rt_err_t rt_workqueue_cancel_work(struct rt_workqueue *queue, struct rt_work *work)
{
    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);
    /* delegate to the common cancellation helper */
    return _workqueue_cancel_work(queue, work);
}
/**
 * @brief Cancel a work item in the work queue. If the work item is executing, this function will block until it is done.
 *
 * @param queue is a pointer to the workqueue object.
 *
 * @param work is a pointer to the work item object.
 *
 * @return RT_EOK Success.
 */
rt_err_t rt_workqueue_cancel_work_sync(struct rt_workqueue *queue, struct rt_work *work)
{
    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(work != RT_NULL);
    /* NOTE(review): work_current is read here without disabling interrupts;
     * presumably a stale read only costs taking the wrong branch once —
     * confirm against the worker entry's sem_release protocol */
    if (queue->work_current == work) /* it's current work in the queue */
    {
        /* wait for work completion; the worker releases the semaphore
         * when it finishes the current item */
        rt_sem_take(&(queue->sem), RT_WAITING_FOREVER);
    }
    else
    {
        _workqueue_cancel_work(queue, work);
    }
    return RT_EOK;
}
/**
 * @brief Cancel every work item, pending or delayed, in the work queue.
 *
 * @param queue is a pointer to the workqueue object.
 *
 * @return RT_EOK Success.
 */
rt_err_t rt_workqueue_cancel_all_work(struct rt_workqueue *queue)
{
    struct rt_work *item;
    RT_ASSERT(queue != RT_NULL);
    /* lock the scheduler so both lists are drained atomically with
     * respect to the worker thread */
    rt_enter_critical();
    /* drain the pending list */
    while (!rt_list_isempty(&queue->work_list))
    {
        item = rt_list_first_entry(&queue->work_list, struct rt_work, list);
        _workqueue_cancel_work(queue, item);
    }
    /* drain the delayed list */
    while (!rt_list_isempty(&queue->delayed_list))
    {
        item = rt_list_first_entry(&queue->delayed_list, struct rt_work, list);
        _workqueue_cancel_work(queue, item);
    }
    rt_exit_critical();
    return RT_EOK;
}
#ifdef RT_USING_SYSTEM_WORKQUEUE
static struct rt_workqueue *sys_workq; /* system work queue */
/**
 * @brief Submit a work item to the system work queue with a delay.
 *
 * @param work is a pointer to the work item object.
 *
 * @param ticks is the delay OS ticks for the work item to be submitted to the work queue.
 *
 *              NOTE: The max timeout tick should be no more than (RT_TICK_MAX/2 - 1)
 *
 * @return RT_EOK   Success.
 *         -RT_EBUSY This work item is executing.
 *         -RT_ERROR The ticks parameter is invalid.
 */
rt_err_t rt_work_submit(struct rt_work *work, rt_tick_t ticks)
{
    /* sys_workq is created during board-level init (INIT_PREV_EXPORT below) */
    return rt_workqueue_submit_work(sys_workq, work, ticks);
}
/**
 * @brief Submit a work item to the system work queue without delay. This work item will be executed after the current work item.
 *
 * @param work is a pointer to the work item object.
 *
 * @return RT_EOK Success.
 */
rt_err_t rt_work_urgent(struct rt_work *work)
{
    /* head-insert into the system queue: runs right after the current item */
    return rt_workqueue_urgent_work(sys_workq, work);
}
/**
 * @brief Cancel a work item in the system work queue.
 *
 * @param work is a pointer to the work item object.
 *
 * @return RT_EOK   Success.
 *         -RT_EBUSY This work item is executing.
 */
rt_err_t rt_work_cancel(struct rt_work *work)
{
    /* sys_workq is created during board-level init (INIT_PREV_EXPORT below) */
    return rt_workqueue_cancel_work(sys_workq, work);
}
static int rt_work_sys_workqueue_init(void)
{
if (sys_workq != RT_NULL)
return RT_EOK;
sys_workq = rt_workqueue_create("sys workq", RT_SYSTEM_WORKQUEUE_STACKSIZE,
RT_SYSTEM_WORKQUEUE_PRIORITY);
RT_ASSERT(sys_workq != RT_NULL);
return RT_EOK;
}
INIT_PREV_EXPORT(rt_work_sys_workqueue_init);
#endif /* RT_USING_SYSTEM_WORKQUEUE */
#endif /* RT_USING_HEAP */