diff --git a/Cargo.lock b/Cargo.lock index fff8951..b691e8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,7 +10,7 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "lrnrtos" -version = "0.4.2" +version = "0.4.5" dependencies = [ "arrayvec", ] diff --git a/Cargo.toml b/Cargo.toml index f3833d1..30b7052 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,17 +1,22 @@ [package] name = "lrnrtos" -version = "0.4.2" +version = "0.4.5" edition = "2024" [[bin]] -name= "lrnrtos" +name = "lrnrtos" test = false [dependencies] -arrayvec = {version = "0.7.6", default-features = false} +arrayvec = { version = "0.7.6", default-features = false } [features] default = ["logs", "kprint"] +# Enable kernel logs logs = [] +# Enable early print using statically define uart device kprint = [] +# Switch to test mode test = [] +# Enable the idle task. +idle_task = [] diff --git a/Documentation/kernel/data_structure.md b/Documentation/kernel/data_structure.md new file mode 100644 index 0000000..f135c7d --- /dev/null +++ b/Documentation/kernel/data_structure.md @@ -0,0 +1,57 @@ +# Kernel data structure + + +- [Kernel data structure](#kernel-data-structure) + - [Description](#description) + - [RingBuffer](#ringbuffer) + - [Invariants](#invariants) + - [AlignedStack16](#alignedstack16) + - [Invariants](#invariants-1) + - [IndexedLinkedList](#indexedlinkedlist) + - [Invariants](#invariants-2) + + +## Description + +The kernel as multiple data structure implementation inside the codebase, they are useful to store and manipulate data inside the kernel. +These are all the data structure implemented inside the kernel. + +### RingBuffer + +A simple Ring buffer, used as an FIFO. If the RingBuffer is full and you try to push anyways, it will be abort. +It's like a close RingBuffer, you can't push if it's full and you don't pop before. + +#### Invariants + +- Length is always in [0, capacity]. 
+- The usable capacity is the backing array length minus 1; there's always an empty slot in the array. +- Head and tail always remain within the backing array bounds. +- Push is only valid when the buffer is not full; violating this is a logic error (abort). +- Pop is only valid when the buffer is not empty; violating this is a logic error (abort). + +### AlignedStack16 + +This is just a structure wrapping a buffer of bytes, but the structure uses the `#[repr(align(16))]`. +This type is used when you need a stack on a buffer, and the `sp` must be aligned on `16 bytes`. + +#### Invariants + +- The backing storage is always 16-byte aligned. +- Any stack pointer derived from this type must remain 16-byte aligned at all call boundaries. +- This type provides a memory-layout guarantee only; it does not validate stack usage correctness. + +### IndexedLinkedList + +A linked list stored in an array, so each node is accessed from its index in the array. +It is better to use this data structure as side storage; for example, we use it to store blocked tasks. +Tasks are already stored in a TaskList, so in the IndexedLinkedList used for blocked tasks we only store the task id and the task awake tick for now. + +#### Invariants + +- All nodes should be accessible from the head node. +- The list is sorted naturally from the `value` field of a node. +- The `count` field should reflect the number of accessible nodes in the list. +- The list is empty when `count`, `head` and `tail` are equal to 0. +- If the `next_node` of a node is `Some`, this `next_node` is valid. +- If the `next_node` of a node is `None`, then this node is the `tail`. +- The node `id` is unique; you can't add the same `id` twice to the list. 
diff --git a/Documentation/kernel/primitives.md b/Documentation/kernel/primitives.md new file mode 100644 index 0000000..aaf7412 --- /dev/null +++ b/Documentation/kernel/primitives.md @@ -0,0 +1,78 @@ +# Kernel primitives types and functions + + +- [Kernel primitives types and functions](#kernel-primitives-types-and-functions) + - [Description](#description) + - [Primitive type](#primitive-type) + - [Description](#description-1) + - [Task primitive](#task-primitive) + - [Description](#description-2) + - [yield](#yield) + - [sleep](#sleep) + - [task_awake_blocked](#taskawakeblocked) + - [Invariants](#invariants) + + +## Description + +This document describes the kernel primitives. + +In the context of this kernel, a *primitive* is defined as a low-level construct that +**directly affects kernel state or execution context**. +If using a type or function can change global kernel behavior, scheduling state, +or execution flow, it is considered a primitive. + +Two categories of primitives are documented here: + +- **Primitive types**: low-level types whose correct usage is required to preserve + kernel invariants. These types are not mere data structures; they encode execution, + synchronization, or memory-layout guarantees that the kernel relies on. + Examples include synchronization objects such as mutexes or types enforcing + strict alignment or execution constraints. + +- **Task primitives**: execution control operations that may only be used from + task context. These primitives modify the scheduling or blocking state of the + current task and therefore have observable effects on global kernel execution. + +Pure data structures that do not alter kernel state or execution context are +documented separately and are not considered primitives, even if they are used +internally by primitive implementations. +You can find data structure type here: `Documentation/kernel/data_structure.md`. 
+ +### Primitive type + +#### Description + +There are currently no primitive types implemented in the kernel. + +### Task primitive + +#### Description + +To handle tasks correctly, the kernel needs some primitives that are only used in a task context. +These types can't and must not be used anywhere other than inside a task. + +#### yield + +Used in cooperative scheduling: when a task uses `yield`, it will save its context and trigger a re-schedule. +It is used when you want a task to let another task take control. + +#### sleep + +Puts the task using the `sleep` primitive function to sleep for the given time. +It blocks the task until the kernel `GLOBAL_TICK` is greater than or equal to the current tick + the given tick. +You can consider the given tick as `1ms`. + +#### task_awake_blocked + +Awakes the oldest blocked task if possible. +This primitive is called from a timer interrupt, and only from a timer interrupt. +The timer interrupt will give the primitive `task_awake_blocked` the current `GLOBAL_TICK`, after updating it from the current interrupt. +The primitive will get the `oldest blocked task` from the `BLOCKED_QUEUE`, then it'll check the reason why this task is blocked, and awake it if possible. + +#### Invariants + +- Task primitives must only be called from task context. +- The scheduler must be initialized before any task primitive is used. +- Time-based primitives rely on a functional timer subsystem. +- Principal data structures such as `RUN_QUEUE` and `BLOCKED_QUEUE` must be initialized before any task primitive is used. 
diff --git a/Documentation/kernel/scheduler.md b/Documentation/kernel/scheduler.md new file mode 100644 index 0000000..4569adc --- /dev/null +++ b/Documentation/kernel/scheduler.md @@ -0,0 +1,57 @@ +# Kernel scheduler + + +- [Kernel scheduler](#kernel-scheduler) + - [Description](#description) + - [Queues](#queues) + - [Run queue](#run-queue) + - [Blocked queue](#blocked-queue) + - [Preemption](#preemption) + - [Scheduling model](#scheduling-model) + - [Invariants](#invariants) + + +## Description + +The kernel scheduler is in charge of managing which task to run, updating the run queue and the blocked queue, and updating the task state. + +## Queues + +There are 2 queues used in the scheduler: the `run queue` and the `blocked queue`. Each queue is specific to a `CPU core`. The `CPU core 1` has a different `run queue` than the `CPU core 2`. + +### Run queue + +The `run queue` contains all the tasks ready to be run, in the `ready` task state. + +The `run queue` works using `FIFO` queues; there's a queue for each `priority`, up to `32 queues`. +To find which queue to use to execute a task, instead of iterating over all queues, we use a `bitmap`. +There are only `32 priorities`, so we use a `u32 bitmap`, each bit representing a `run queue`; if the bit is set, there's at least 1 task to run. +Else, the queue is empty. + +### Blocked queue + +The `blocked queue` contains all the tasks currently blocked, with different block reasons. +But currently the `blocked queue` only contains the tasks blocked using the `sleep` task primitive. + +The `blocked queue` works using an `indexed linked list`; the list is sorted from the shortest awake tick to the largest awake tick. +The list is managed using `head` and `tail`, a bit like a `ring buffer` data structure. +So when we need to check the next task to awake, we just check the `head` of the `blocked queue`. 
+If it can be awake, it will update the `need_resched` flag, then it'll trigger a reschedule, the scheduler will be able to awake the task, and move it from the `blocked queue` to the `run queue`. + +## Preemption + +The scheduler is preemptive, meaning that if a task as a higher priority than the current task, it will run this higher priority task. +The current task having a lowest priority, will be saved, and re-execute when there's no higher priority task to run. + +## Scheduling model + +The current scheduling model is a `cooperative priority based`. Meaning that if there's `2 task` with the same `priority`, if they don't call `yield`, one task will `always run`. +Making the other task `starving`. +So if you want to switch from a `task` to another one, you need to use `cooperative` functions, like `yield` or `sleep`. + +## Invariants + +- The scheduler need at least one task in the `run queue`, if the `run queue` is empty, it will try to run the idle task. Make sure that there's always at least one task in the `run queue`, or enable the `idle task` feature. +- The scheduler assume that the `CpusState` is initialized to access the `CPU core scheduler state`. +- The scheduler can be called from the `trap epilogue`, if so, a `trap frame`, should be available and accessible for the scheduler to run on. +- For now the scheduler assume that there's always a `current running task`, if not, and the scheduler is triggered, this could lead to UB. diff --git a/Documentation/kernel/task.md b/Documentation/kernel/task.md index 6ce2749..71e4913 100644 --- a/Documentation/kernel/task.md +++ b/Documentation/kernel/task.md @@ -6,6 +6,7 @@ - [Purpose](#purpose) - [Structure](#structure) - [How task is store](#how-task-is-store) + - [Idle task](#idle-task) - [Invariants](#invariants) - [References](#references) @@ -31,11 +32,20 @@ enum TaskState { Terminated, } +pub enum TaskBlockControl { + // Store the awake tick for task awakening. 
+ AwakeTick(usize), + // No reason for the task block + None, +} + #[repr(C)] struct Task { // Arch dependant context, don't handle this field in task, only use struct method when // interacting with it. context: TaskContext, + // Task block control, define the reason the task is blocked. + block_control: TaskBlockControl, // Fn ptr to task entry point, this must never return. // This will surely disappear func: fn() -> !, @@ -53,6 +63,8 @@ pub struct TaskContext { pub address_space: [u32; 2], pub pc: u32, pub sp: u32, + pub ra: u32, + pub mstatus: u32, pub flags: [u8; 3], pub instruction_register: u8, } @@ -76,6 +88,12 @@ pub struct TaskList { } ``` +## Idle task + +The idle task is used to ensure that the kernel as always at least one task able to run. +This task is created at the lowest priority to ensure it does not use any CPU time if there are higher priority application tasks in the run queue. +It is not possible to update the idle task, it's a static defined task. + ## Invariants - The task's function must never return. diff --git a/Documentation/kernel/timing_helpers.md b/Documentation/kernel/timing_helpers.md new file mode 100644 index 0000000..2a207fd --- /dev/null +++ b/Documentation/kernel/timing_helpers.md @@ -0,0 +1,22 @@ +# Kernel timing helpers + + +- [Kernel timing helpers](#kernel-timing-helpers) + - [Description](#description) + - [delay](#delay) + - [Invariants](#invariants) + + +## Description + +The kernel sometimes need to use some helpers to handle or manage timing. Here's a list of some helpers to help with that. + +### delay + +Block the CPU for the given time, in `ms`. +This is not really recommended to use, it will not put the CPU to sleep, just waiting for the next timer interrupt. +If you need a task to wait or something else, prefer the use of `yield`. + +### Invariants + +- The scheduler must be initialized before any timing helpers is used. 
diff --git a/linkers/linker_test_mode.ld b/linkers/linker_test_mode.ld index 3663103..16c830f 100644 --- a/linkers/linker_test_mode.ld +++ b/linkers/linker_test_mode.ld @@ -1,9 +1,9 @@ ENTRY(kstart) MEMORY { - RAM (rwx) : ORIGIN = 0x80200000, LENGTH = 128K + RAM (rwx) : ORIGIN = 0x80200000, LENGTH = 256K /* Not real ROM or flash from qemu virt machine, just use another RAM reg for now */ - ROM (rx) : ORIGIN = 0x80000000, LENGTH = 200K + ROM (rx) : ORIGIN = 0x80000000, LENGTH = 256K } SECTIONS { diff --git a/src/arch/riscv32/asm/mod.rs b/src/arch/riscv32/asm/mod.rs index 431ae31..3922002 100644 --- a/src/arch/riscv32/asm/mod.rs +++ b/src/arch/riscv32/asm/mod.rs @@ -10,6 +10,8 @@ global_asm! { include_str!("set_kernel_sp.S"), // Yield function for context switch and scheduling include_str!("yield.S"), + // Sleep function for task and scheduling + include_str!("sleep.S"), // Scheduler context switch include_str!("sched_context.S"), // All task context offsets diff --git a/src/arch/riscv32/asm/restore_context.S b/src/arch/riscv32/asm/restore_context.S index 4ae7603..2e89eba 100644 --- a/src/arch/riscv32/asm/restore_context.S +++ b/src/arch/riscv32/asm/restore_context.S @@ -19,4 +19,31 @@ restore_context: load_gp_context %i .set i, i+1 .endr + # Update mstatus + li t0, ((3 << 11) | (1 << 3)) + csrrs x0, mstatus, t0 ret + +.global trap_restore_context +.type trap_restore_context, @function +trap_restore_context: + # Move current task context struct save in caller in a0 reg to t6 + mv t6, a0 + # Update sp + # Restore sp from current task context structure + lw t0, OFFSET_SP(t6) + mv sp, t0 + # Update mepc + lw t0, OFFSET_PC(t6) + csrw mepc, t0 + # Restore current task context using GNU macro + # GNU macros from `src/arch/riscv32/asm/gnu_macro.S` + .set i, 1 + .rept 31 + load_gp_context %i + .set i, i+1 + .endr + # Update mstatus + li t0, ((3 << 11) | (1 << 7)) + csrrs x0, mstatus, t0 + mret diff --git a/src/arch/riscv32/asm/save_context.S 
b/src/arch/riscv32/asm/save_context.S index 97f0b54..9829e8e 100644 --- a/src/arch/riscv32/asm/save_context.S +++ b/src/arch/riscv32/asm/save_context.S @@ -3,12 +3,37 @@ .global save_context .type save_context, @function save_context: + # Clear mstatus.MPP and mstatus.mie to avoid interruption while saving and switching context + li t0, ((3 << 11) | (1 << 3)) + csrrc x0, mstatus, t0 # Move current task context struct save in caller in a0 reg to t6 mv t6, a0 # Store ra in task context structure sw a1, OFFSET_RA(t6) # Store word from t0(sp) in context structure sw a2, OFFSET_SP(t6) + sw a3, OFFSET_PC(t6) + # Save current task context using GNU macro + # GNU macros from `src/arch/riscv32/asm/gnu_macro.S` + .set i, 1 + .rept 31 + save_gp_context %i + .set i, i+1 + .endr + ret + +.global trap_save_context +.type trap_save_context, @function +trap_save_context: + # Clear mstatus.MPP and mstatus.mie to avoid interruption while saving and switching context + li t0, ((3 << 11) | (1 << 3)) + csrrc x0, mstatus, t0 + # Move current task context struct save in caller in a0 reg to t6 + mv t6, a0 + # Store ra in task context structure + sw a1, OFFSET_PC(t6) + # Store word from t0(sp) in context structure + sw a2, OFFSET_SP(t6) # Save current task context using GNU macro # GNU macros from `src/arch/riscv32/asm/gnu_macro.S` .set i, 1 diff --git a/src/arch/riscv32/asm/sleep.S b/src/arch/riscv32/asm/sleep.S new file mode 100644 index 0000000..8ebc97c --- /dev/null +++ b/src/arch/riscv32/asm/sleep.S @@ -0,0 +1,18 @@ +.global sleep +.type sleep, @function +sleep: + # Move tick to another reg, a0 is used in save_context + mv t5, a0 + # Get current task ptr + la t0, TASK_HANDLER # Address in RAM + lw t1, 0(t0) # Get the value behind the ref + mv a0, t1 + # Save current ra + mv a1, ra + # Save current sp + mv a2, sp + # Call the save context function + call save_context + # Once save_context return, call the task_set_wake_tick fn + mv a0, t5 + call task_set_wake_tick diff --git 
a/src/arch/riscv32/asm/trap_entry.S b/src/arch/riscv32/asm/trap_entry.S index 1deabe0..9d574a6 100644 --- a/src/arch/riscv32/asm/trap_entry.S +++ b/src/arch/riscv32/asm/trap_entry.S @@ -11,22 +11,19 @@ .global trap_entry .type trap_entry, @function trap_entry: - # Read mscratch into t6 + # Save task context + # Get current task ptr + la t0, TASK_HANDLER # Address in RAM + lw t1, 0(t0) # Get the value behind the ref + mv a0, t1 + # Save current pc + csrr a1, mepc + # Save current sp + mv a2, sp + # Call the save context function + call trap_save_context + # Read mscratch into t7 csrr t6, mscratch - # Save all GP registers using GNU macro - # Just a loop - .set i, 1 - .rept 31 - save_gp %i, t6 - .set i, i+1 - .endr - # Read CSR and store word in correct offset of trap frame structure - csrr t0, satp - sw t0, OFFSET_SATP(t6) # store satp - csrr t0, mhartid - sw t0, OFFSET_HARTID(t6) # store hartid (optional, redundant) - sw sp, (OFFSET_TRAP_STACK - 4)(t6) # store original sp just before trap_stack slot - # Load content of trap stack offset into t1 lw t1, OFFSET_TRAP_STACK(t6) # t1 = trap_stack pointer # Branch instruction to check if t1 = 0 beqz t1, trap_no_trapstack @@ -38,19 +35,22 @@ trap_entry: csrr a2, mcause csrr a3, mhartid csrr a4, mstatus - # Move trap frame structure into a3 function argument register - mv a3, t6 + # Move trap frame structure into a5 function argument register + mv a5, t6 # Call trap_handler rust function call trap_handler - # Load all register back to previous state - csrr t6, mscratch - # Restore all GP registers - .set i, 1 - .rept 31 - load_gp %i - .set i, i+1 - .endr - mret + + # Check if a re-schedule is needed or not. 
+ call read_need_reschedule + # If a0 != 0, goto 1f, else mret + bnez a0, 1f + # If there's no need to a reschedule, restore the task context + la t0, TASK_HANDLER # Address in RAM + lw t1, 0(t0) # Get the value behind the ref + mv a0, t1 + call trap_restore_context +1: + call scheduler # Function used when the trap strack = 0, just infinite loop for debuging purpose trap_no_trapstack: diff --git a/src/arch/riscv32/asm/yield.S b/src/arch/riscv32/asm/yield.S index e23cd0f..3f4d010 100644 --- a/src/arch/riscv32/asm/yield.S +++ b/src/arch/riscv32/asm/yield.S @@ -12,4 +12,4 @@ yield: # Call the save context function call save_context # Once save_context return, call the scheduler - call dispatch + call scheduler diff --git a/src/arch/riscv32/helpers.rs b/src/arch/riscv32/helpers.rs new file mode 100644 index 0000000..34db408 --- /dev/null +++ b/src/arch/riscv32/helpers.rs @@ -0,0 +1,7 @@ +use core::arch::asm; + +pub fn current_cpu_core() -> usize { + let mut id: usize = 0; + unsafe { asm!("csrr {}, mhartid", out(reg) id) }; + id +} diff --git a/src/arch/riscv32/mod.rs b/src/arch/riscv32/mod.rs index 65da303..f724b04 100644 --- a/src/arch/riscv32/mod.rs +++ b/src/arch/riscv32/mod.rs @@ -1,4 +1,5 @@ pub mod asm; +pub mod helpers; pub mod mem; pub mod scheduler; pub mod start; diff --git a/src/arch/riscv32/task/mod.rs b/src/arch/riscv32/task/mod.rs index f227d84..b4916f1 100644 --- a/src/arch/riscv32/task/mod.rs +++ b/src/arch/riscv32/task/mod.rs @@ -2,8 +2,6 @@ pub mod task_context; // Asm function for task context switch unsafe extern "C" { - // Yield function for cooperative scheduling - pub fn r#yield(); // Restore the task context pub fn restore_context(context: usize); // Save the current task context diff --git a/src/arch/riscv32/task/task_context.rs b/src/arch/riscv32/task/task_context.rs index 09d4165..3ba7a60 100644 --- a/src/arch/riscv32/task/task_context.rs +++ b/src/arch/riscv32/task/task_context.rs @@ -25,7 +25,7 @@ pub struct TaskContext { pub pc: u32, // 
Offset 136 pub sp: u32, // Offset 140 pub ra: u32, // Offset 144 - pub flags: [u8; 3], // Offset 148 (first index 144; second index 145, third index 146) + pub flags: [u8; 3], // Offset 148 (first index 148; second index 149, third index 150) pub instruction_register: u8, // Offset 151 } @@ -37,6 +37,7 @@ impl TaskContext { pc: func as usize as u32, sp: size[0] as u32, ra: func as usize as u32, + // Set mstatus to 8 by default to enable mie flags: [0u8; 3], instruction_register: 0, } diff --git a/src/arch/riscv32/traps/handler.rs b/src/arch/riscv32/traps/handler.rs index effb92b..edab33b 100644 --- a/src/arch/riscv32/traps/handler.rs +++ b/src/arch/riscv32/traps/handler.rs @@ -21,7 +21,11 @@ Tests files: use crate::{ config::TICK_DURATION, - ktime::{set_ktime_ms, tick::increment_tick}, + ktime::{ + set_ktime_ms, + tick::{get_tick, increment_tick}, + }, + task::primitives::task_awake_blocked, }; use super::trap_frame::TrapFrame; @@ -90,5 +94,7 @@ fn timer_interrupt(hart: usize) { if hart == 0 { increment_tick(); } + let tick = get_tick(); + task_awake_blocked(tick); set_ktime_ms(TICK_DURATION); } diff --git a/src/arch/riscv32/traps/interrupt.rs b/src/arch/riscv32/traps/interrupt.rs index bac5507..3543b39 100644 --- a/src/arch/riscv32/traps/interrupt.rs +++ b/src/arch/riscv32/traps/interrupt.rs @@ -98,6 +98,12 @@ pub fn disable_mstatus_mie() { unsafe { asm!("csrrc zero, mstatus, {}", in(reg) MIE) }; } +pub fn read_mstatus() -> u32 { + let value: u32; + unsafe { asm!("csrr {}, mstatus", out(reg) value) }; + value +} + // Machine Trap-Vector CSR pub fn mtvec_switch_to_vectored_mode() { diff --git a/src/config.rs b/src/config.rs index 2400fe5..454be6e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -17,7 +17,10 @@ pub static LOG_LEVEL: LogLevel = LogLevel::Debug; // Define the uart address to use in kprint pub static KPRINT_ADDRESS: usize = 0x1000_0000; - +// ———————————————————————————————————————————————————————————— +// ——————— Define the max priority available for 
a task ——————— +// ———————————————————————————————————————————————————————————— +pub static TASK_MAX_PRIORITY: usize = 32; // ———————————————————————————————————————————————————————————— // ———————— Define the max size of devices sub-systems ———————— // ———————————————————————————————————————————————————————————— @@ -35,6 +38,16 @@ pub static FDT_MAX_PROPS: usize = 128; // ————————————— Define the max size of Task list ————————————— // ———————————————————————————————————————————————————————————— pub static TASK_LIST_MAX_SIZE: usize = 4; +// ———————————————————————————————————————————————————————————— +// ———— Define the max size of the task run/blocked queue ————— +// ———————————————————————————————————————————————————————————— +// The run queue is len - 1, if the size is 4, it will only use 3 slot in the queue. +pub static RUN_QUEUE_MAX_SIZE: usize = 3; +pub static BLOCK_QUEUE_MAX_SIZE: usize = 3; +// ———————————————————————————————————————————————————————————— +// ————————————— Define the number of CPU core ———————————————— +// ———————————————————————————————————————————————————————————— +pub static CPU_CORE_NUMBER: usize = 1; // Kernel stack size // WARNING diff --git a/src/info.rs b/src/info.rs index d3760f2..f9b25c2 100644 --- a/src/info.rs +++ b/src/info.rs @@ -1,4 +1,4 @@ // The kernel exposes build-time version information at runtime. // This information is immutable and reflects the exact binary currently running. // It is intended for debugging, logging, and diagnostic purposes. 
-pub static KERNEL_VERSION: &str = "0.4.2"; +pub static KERNEL_VERSION: &str = "0.4.5"; diff --git a/src/main.rs b/src/main.rs index fe4f2a7..f01ba09 100644 --- a/src/main.rs +++ b/src/main.rs @@ -6,6 +6,7 @@ #![deny(clippy::expect_used)] #![deny(clippy::todo)] #![deny(clippy::unimplemented)] +#![feature(stmt_expr_attributes)] // Config module pub mod config; @@ -58,10 +59,9 @@ pub mod tests; use core::panic::PanicInfo; use logs::LogLevel; use mem::mem_kernel_stack_info; -use primitives::ring_buff::RingBuffer; -// Static buffer to use as a ready queue for task. -pub static mut BUFFER: RingBuffer = RingBuffer::init(); +#[cfg(feature = "idle_task")] +use task::task_idle_task; #[unsafe(no_mangle)] unsafe extern "C" fn main() -> ! { @@ -74,8 +74,10 @@ unsafe extern "C" fn main() -> ! { kernel_stack.bottom ); log!(LogLevel::Info, "LrnRTOS started!"); + #[cfg(feature = "idle_task")] + task_idle_task(); loop { - log!(LogLevel::Debug, "Main loop uptime."); + log!(LogLevel::Debug, "Main loop."); unsafe { arch::traps::interrupt::enable_and_halt(); } diff --git a/src/misc.rs b/src/misc.rs index 3abefa1..54461d9 100644 --- a/src/misc.rs +++ b/src/misc.rs @@ -1,5 +1,105 @@ +/* +File info: Miscellaneous + +Test coverage: ... + +Tested: + +Not tested: + +Reasons: +- Will be refactor and tested in the scheduler update task. + +Tests files: +- ... +*/ + +use crate::{arch, config::CPU_CORE_NUMBER}; + #[repr(C)] pub struct RawTraitObject { pub data: *const (), pub vtable: *const (), } + +static mut CPUS_STATE: CpusState = CpusState::init(); + +#[repr(C)] +struct CpusState { + // Flags for the CPU states, not used yet. + cpu_state: [u8; CPU_CORE_NUMBER], + // Flags for the CPU scheduler state. + // bit 0: scheduler state, init or not. + // bit 1: need reschedule or not. + // bit 2:7: reschedule reason. 
+ scheduler_state: [u8; CPU_CORE_NUMBER], +} + +impl CpusState { + const fn init() -> Self { + CpusState { + cpu_state: [0u8; CPU_CORE_NUMBER], + scheduler_state: [0u8; CPU_CORE_NUMBER], + } + } + + fn read_scheduler_flag(&self, core: usize) -> &u8 { + &self.scheduler_state[core] + } + + fn scheduler_set_reschedule_bit(&mut self, core: usize) { + let mut state = self.scheduler_state[core]; + let mask = 1 << 1; + // Set need reschedule bit. + state |= mask; + self.scheduler_state[core] = state; + } + + fn scheduler_clear_reschedule_bit(&mut self, core: usize) { + let mut state = self.scheduler_state[core]; + let mask = 0 << 1; + // Clear need reschedule bit. + state &= mask; + self.scheduler_state[core] = state; + } + + fn scheduler_read_reschedule_bit(&self, core: usize) -> bool { + let state = self.scheduler_state[core]; + // Get the bit 1 + let flag = (state >> 1) & 1; + flag == 1 + } +} + +pub fn need_reschedule() { + let current_core = arch::helpers::current_cpu_core(); + #[allow(static_mut_refs)] + unsafe { + CPUS_STATE.scheduler_set_reschedule_bit(current_core) + }; +} + +pub fn clear_reschedule() { + let current_core = arch::helpers::current_cpu_core(); + #[allow(static_mut_refs)] + unsafe { + CPUS_STATE.scheduler_clear_reschedule_bit(current_core) + }; +} + +#[unsafe(no_mangle)] +pub fn read_need_reschedule() -> bool { + let current_core = arch::helpers::current_cpu_core(); + #[allow(static_mut_refs)] + unsafe { + CPUS_STATE.scheduler_read_reschedule_bit(current_core) + } +} + +pub fn read_scheduler_flag<'a>() -> &'a u8 { + let current_core = arch::helpers::current_cpu_core(); + #[allow(static_mut_refs)] + unsafe { + CPUS_STATE.read_scheduler_flag(current_core) + } +} diff --git a/src/primitives/bitmap.rs b/src/primitives/bitmap.rs new file mode 100644 index 0000000..6f4aa8e --- /dev/null +++ b/src/primitives/bitmap.rs @@ -0,0 +1,44 @@ +#[derive(Copy, Clone)] +pub struct Bitmap { + pub map: u32, +} + +impl Bitmap { + #[allow(clippy::new_without_default)] + 
pub const fn new() -> Self { + Bitmap { map: 0 } + } + + /// Set the given bit to 1. + pub fn set_bit(&mut self, bit: usize) { + let mask = 1 << bit; + self.map |= mask; + } + + /// Clear the given bit. Set it to 0. + pub fn clear_bit(&mut self, bit: usize) { + self.map &= !(1 << bit); + } + + /// Iterate over the bitmap and return the heavier bit. + /// Example: bitmap set to u8 -> map: 01001010. The function will return 6, because the first + /// bit set to 1, from the highest bit, is the bit 6. + /// Arguments: + /// &mut self: call the map initialized. Must be mutable. + pub fn find_leading_bit(&mut self) -> usize { + let bits = core::mem::size_of::() * 8; + let mut value: usize = 0; + for i in (0..bits).rev() { + let bit = (self.map >> i) & 1; + if bit == 1 { + value = i; + break; + } + } + value + } + + pub fn is_bitmap_zero(&self) -> bool { + self.map == 0 + } +} diff --git a/src/primitives/indexed_linked_list.rs b/src/primitives/indexed_linked_list.rs new file mode 100644 index 0000000..d2c79ec --- /dev/null +++ b/src/primitives/indexed_linked_list.rs @@ -0,0 +1,288 @@ +/* +File info: IndexedLinkedList primitive type. + +Test coverage: 80 + +Tested: +- push +- pop +- get_head + +Not tested: + +Reasons: +- Lot of other methods are used in push, pop, and get_head, that why the test coverage is at 80 for me. + +Tests files: +- 'src/tests/primitives/indexed_linked_list.rs' + +References: +*/ + +use crate::LogLevel; +use crate::log; + +#[derive(Clone, Copy)] +pub struct IndexedLinkedList { + list: [Option; N], + head: usize, + tail: usize, + count: usize, +} + +impl IndexedLinkedList { + #[allow(clippy::new_without_default)] + pub const fn new() -> Self { + IndexedLinkedList { + list: [const { None }; N], + // Oldest node, the top of the linked list + head: 0, + // Newest node, the bottom of the linked list, doesn't have a node below it. + tail: 0, + count: 0, + } + } + + /// Push the new node in the linked list. Can update the current node in it. 
+ /// Avoid duplication on id. The id is unique in the list. + pub fn push(&mut self, id: usize, value: usize) { + // Get the size of the list + let size = self.size(); + if size == self.list.len() { + log!(LogLevel::Warn, "The delta-list is full, abort push."); + return; + } + // If the list is empty, push the new node to index 0 of the list + if size == 0 { + self.list[0] = Some(IndexedLinkedListNode { + id, + value, + next_node: None, + }); + self.head = 0; + self.count += 1; + return; + } + let mut current_node: usize = self.head; + let mut available_index: Option = None; + // Check if there's no id duplication possible by iterating over the linked list + { + let mut current_node: usize = self.head; + for _ in 0..self.list.len() { + // Allow expect use, if we can't get the current node, the linked-list is wrong, + // want to fail-fast + #[allow(clippy::expect_used)] + let get_current_node = self + .get_node(current_node) + .expect("Failed to get the asked node, linked-list may be empty or corrupted."); + let node = get_current_node; + if node.id == id { + log!( + LogLevel::Warn, + "The indexed-linked-list has already the id: {}, abort push.", + id + ); + return; + } else { + if let Some(next_node) = node.next_node { + current_node = next_node; + continue; + } else { + break; + } + } + } + } + // Iterate to find an available index. + for i in 0..self.list.len() { + let find_available_index = &self.list[i]; + if find_available_index.is_none() && available_index.is_none() { + available_index = Some(i); + } + } + if available_index.is_none() { + log!( + LogLevel::Error, + "The delta-list is full, abort push. Consider increasing the blocked queue size." + ); + return; + } + let mut new_node = IndexedLinkedListNode { + id, + value, + next_node: None, + }; + let mut prev_node_ptr: Option = None; + for _ in 0..self.list.len() { + // Allow expect, if we can't get the wanted node, the list may be corrupted. 
+ #[allow(clippy::expect_used)] + let node: &mut IndexedLinkedListNode = self + .get_node(current_node) + .expect("Failed to get the asked node, linked list may be empty or corrupted"); + // If the current value is superior than the current node value, continue, or check the + // next_node. + if value > node.value { + if node.next_node.is_none() { + node.next_node = available_index; + // Update current node in list + // Allow expect, if available index is None, maybe there's no available space + // in the linked-list, and we shouldn't reach this point. + #[allow(clippy::expect_used)] + let check_available_index = available_index.expect("Failed to get the usize behind the Option<>. Maybe there's isn't available space in the linked-list."); + self.tail = check_available_index; + // Push new node to available index in list + // Allow unwrap, we check available index before + #[allow(clippy::unwrap_used)] + self.list[available_index.unwrap()] = Some(new_node); + break; + } + prev_node_ptr = Some(current_node); + // Allow expect, we check the next node before, if it's None, something wrong, we + // want to fail-fast + #[allow(clippy::expect_used)] + let node_next_node = node + .next_node + .expect("Failed to get the next_node behind the Option<>"); + current_node = node_next_node; + continue; + // Else if the current value is not superior, update the list to push the new_node + // before the current one. + } else { + // If there's no previous node, than we are at the head, so update the head to + // point to the new node. 
+ if prev_node_ptr.is_none() { + // Get the previous head + let prev_head = self.head; + // Update the head to point to the new node + // Allow expect, the available index should not be None + #[allow(clippy::expect_used)] + let check_available_index = available_index + .expect("Failed to get the available_index behind the Option<>"); + self.head = check_available_index; + // Update the new_node to point to the old head + new_node.next_node = Some(prev_head); + // Update list to push new_node to head + self.list[self.head] = Some(new_node); + break; + } + // If there's a previous node. + new_node.next_node = Some(current_node); + // Get the previous node + // Allow expect, if the previous_node index is not reachable or else, we want to + // fail-fast, the linked-list could be corrupted. + #[allow(clippy::expect_used)] + let prev_node: &mut IndexedLinkedListNode = self + .get_node(prev_node_ptr.expect("Failed to get the previous_node index behind Option<>, linked-list may be corrupted")) + .expect("Failed to get the asked node, linked list may be empty or corrupted"); + // Update previous node to point to the new node + prev_node.next_node = available_index; + // Push the new node to the list + // Allow expect, if the Available index is wrong, we want to fail fast + #[allow(clippy::expect_used)] + self.list[available_index.expect("Available index should not be None")] = + Some(new_node); + break; + } + } + self.count += 1; + } + + /// Remove the node at the head and return the node. + /// Update the linked list head to point to the next node. 
+ pub fn pop(&mut self) -> Option { + let head = self.head; + let head_next_node = { + // If we can't get the head node, return None + let head_node = self.get_node(head); + // if head_node.is_none() { + // return None; + // } + head_node.as_ref()?; + // Allow unwrap, we check the value before + #[allow(clippy::unwrap_used)] + head_node.unwrap().next_node + }; + if let Some(next_node) = head_next_node { + self.head = next_node; + } else { + self.head = 0; + } + self.count -= 1; + // Get the head node + self.take_node(head) + } + + pub fn get_head_node(&self) -> Option<&IndexedLinkedListNode> { + self.list[self.head].as_ref() + } + + pub fn get_node(&mut self, idx: usize) -> Option<&mut IndexedLinkedListNode> { + let node = self.list[idx].as_mut(); + if let Some(is_node) = node { + Some(is_node) + } else { + None + } + } + + /// Take the node from the given index, replace it with None in the list. + fn take_node(&mut self, idx: usize) -> Option { + let node = self.list[idx]; + if node.is_some() { + self.list[idx].take() + } else { + None + } + } + + pub fn size(&self) -> usize { + let mut output: usize = 0; + for i in 0..self.list.len() { + if self.list[i].is_none() { + output = i; + break; + } + } + output + } + + pub fn get_count(&self) -> usize { + self.count + } + + #[cfg(feature = "test")] + pub fn get_index(&self, idx: usize) -> IndexedLinkedListNode { + // Unwrap directly because it's in test env. + self.list[idx].unwrap() + } + + #[cfg(feature = "test")] + pub fn get_head(&self) -> usize { + self.head + } + + #[cfg(feature = "test")] + pub fn get_tail(&self) -> usize { + self.tail + } +} + +#[derive(Clone, Copy)] +pub struct IndexedLinkedListNode { + pub id: usize, + pub value: usize, + // The node before this one. + // If this is None, then this node is the tail. 
+ pub next_node: Option, +} + +impl IndexedLinkedListNode { + #[allow(clippy::new_without_default)] + pub const fn new() -> Self { + IndexedLinkedListNode { + id: 0, + value: 0, + next_node: None, + } + } +} diff --git a/src/primitives/mod.rs b/src/primitives/mod.rs index 0aa67ee..11a2a5f 100644 --- a/src/primitives/mod.rs +++ b/src/primitives/mod.rs @@ -1,2 +1,4 @@ +pub mod bitmap; +pub mod indexed_linked_list; pub mod ring_buff; pub mod stack; diff --git a/src/primitives/ring_buff.rs b/src/primitives/ring_buff.rs index aa93e73..d68dad9 100644 --- a/src/primitives/ring_buff.rs +++ b/src/primitives/ring_buff.rs @@ -23,7 +23,7 @@ References: use crate::{log, logs::LogLevel}; -#[derive(Debug)] +#[derive(Copy, Clone)] pub struct RingBuffer { buff: [Option; N], // Oldest element in the buffer @@ -34,7 +34,7 @@ pub struct RingBuffer { count: usize, } -impl RingBuffer { +impl RingBuffer { pub const fn init() -> Self { RingBuffer { buff: [None; N], diff --git a/src/scheduler/mod.rs b/src/scheduler/mod.rs index d2c7d73..68e1034 100644 --- a/src/scheduler/mod.rs +++ b/src/scheduler/mod.rs @@ -1,31 +1,53 @@ /* File info: Scheduler main file -Test coverage: ... +Test coverage: 0 Tested: Not tested: -Reasons: Not even really implemented so there's no need to test something that doesn't even consider finish +Reasons: Test framework don't handle correctly trap, so timer interrupts cannot work, hard to test a scheduler when a part of the kernel don't work in the test framework. 
Tests files: +- 'src/tests/scheduler/mod.rs' References: */ use crate::{ - BUFFER, - arch::scheduler::{SCHEDULER_CTX, SchedulerCtx, sched_ctx_restore}, + LogLevel, + arch::{ + helpers::current_cpu_core, + scheduler::{SCHEDULER_CTX, SchedulerCtx, sched_ctx_restore}, + }, + config::{BLOCK_QUEUE_MAX_SIZE, CPU_CORE_NUMBER, RUN_QUEUE_MAX_SIZE, TASK_MAX_PRIORITY}, log, - logs::LogLevel, + misc::{clear_reschedule, read_need_reschedule}, + primitives::{bitmap::Bitmap, indexed_linked_list::IndexedLinkedList, ring_buff::RingBuffer}, task::{ TASK_HANDLER, TaskState, - list::{task_list_get_task_by_pid, task_list_update_task_by_pid}, - task_context_switch, task_pid, + list::{task_list_get_idle_task, task_list_get_task_by_pid, task_list_update_task_by_pid}, + task_awake_block_control, task_awake_tick, task_context_switch, task_pid, task_priority, }, }; +// Reflect the run queue state +// Array of bitmaps, one bitmap per CPU core +pub static mut RUN_QUEUE_BITMAP: [Bitmap; CPU_CORE_NUMBER] = + [const { Bitmap::new() }; CPU_CORE_NUMBER]; +// Array of Array of run queue per priority per CPU core. +// Each index of this array is specific a CPU core, index 0 is for CPU core 0, etc. +// Each index of the inside array is another array, each index is a priority. And at each index, there's a ring buffer of all task +// with that priority. +// We use the RUN_QUEUE_BITMAP to easily find the buffer with the highest priority to look into. +pub static mut RUN_QUEUE: [[RingBuffer; TASK_MAX_PRIORITY]; + CPU_CORE_NUMBER] = [[const { RingBuffer::init() }; TASK_MAX_PRIORITY]; CPU_CORE_NUMBER]; +// Queue containing all blocked task. +// Same data structure as the RUN_QUEUE. +pub static mut BLOCKED_QUEUE: [IndexedLinkedList; CPU_CORE_NUMBER] = + [const { IndexedLinkedList::new() }; CPU_CORE_NUMBER]; + /// Temporary function use to test the context switch and context restore on multiple task. /// Will certainly be used later on the real scheduler. 
/// Pop oldest task from RingBuffer, save the task context, update it, and repush it to the @@ -33,25 +55,95 @@ use crate::{ /// Read on the RingBuffer to get the next task, update it, and update the RingBuffer. /// Not the best way to use the RingBuffer but it will do. #[unsafe(no_mangle)] -pub fn dispatch() { +pub fn scheduler() { + let core: usize = current_cpu_core(); + #[allow(static_mut_refs)] + let current_run_queue = unsafe { &mut RUN_QUEUE[core] }; + #[allow(static_mut_refs)] + let current_blocked_queue = unsafe { &mut BLOCKED_QUEUE[core] }; + #[allow(static_mut_refs)] + let current_run_queue_bitmap = unsafe { &mut RUN_QUEUE_BITMAP[core] }; + // Check the need_reschedule flag + // If a resched has been trigger, pop the head of the blocked queue, update the task and push + // it to the run queue. + // Don't check the awake tick or anything else, we consider that if the need_resched flag is + // true, then the task is available to wake up. + let resched = read_need_reschedule(); + if resched { + log!( + LogLevel::Debug, + "Reschedule needed, updating queues, clearing the need reschedule bit." + ); + // Pop from blocked queue and move the task to the run queue + let wake_up_task = current_blocked_queue.pop(); + let pid: u16 = if let Some(task) = wake_up_task { + task.id as u16 + } else { + log!( + LogLevel::Error, + "Error getting the wake up task from blocked queue, blocked queue or need_reschedule flag can be corrupted." + ); + // Trigger a context switch on current task to avoid to fail-fast + // TODO: + return; + }; + // Consider the `pid` as init, if wake_up_task.is_none(), we switch on the current task, so + // we cannot reach this point unless wake_up_task is some and `pid` is set. + // Allow expect use, we check if we can't get the pid before. + // If we can't get the task with the pid, we wan't to fail-fast, because the pid and the + // task should be ok. 
+ #[allow(clippy::expect_used)] + let task = task_list_get_task_by_pid(pid).expect("Failed to get the task by it's pid."); + let priority: u8 = task_priority(task); + task_awake_block_control(task); + task.state = TaskState::Ready; + task_list_update_task_by_pid(pid, *task); + current_run_queue[priority as usize].push(pid); + current_run_queue_bitmap.set_bit(priority as usize); + clear_reschedule(); + } // Current running task let mut current_task = unsafe { *TASK_HANDLER }; - current_task.state = TaskState::Ready; - let pid = task_pid(¤t_task); - task_list_update_task_by_pid(pid, current_task); - #[allow(static_mut_refs)] - unsafe { - BUFFER.push(pid) - }; + if current_task.state == TaskState::Blocked { + let pid = task_pid(¤t_task); + let priority = task_priority(¤t_task); + // Allow expect, if we can't get the awake tick of the blocked task, we want to fail-fast + #[allow(clippy::expect_used)] + let awake_tick = task_awake_tick(¤t_task).expect("Failed to get the task awake_tick"); + // Push the current task to the blocked queue + current_blocked_queue.push(pid as usize, awake_tick); + // Check the run queue from the current_task priority. + // If the run queue is empty, clean the run queue bitmap for this priority bit. + let is_run_queue_empty = current_run_queue[priority as usize].size(); + if is_run_queue_empty == 0 { + current_run_queue_bitmap.clear_bit(priority as usize); + } + } + + if current_task.state != TaskState::Blocked { + current_task.state = TaskState::Ready; + let pid = task_pid(¤t_task); + let priority: usize = task_priority(¤t_task).into(); + task_list_update_task_by_pid(pid, current_task); + // Push current task to the priority buffer + current_run_queue[priority as usize].push(pid); + // Update the bitmap priority bit. 
+ current_run_queue_bitmap.set_bit(priority as usize); + } // Update and load next task #[allow(static_mut_refs)] - let get_next_task = unsafe { BUFFER.pop() }; - if get_next_task.is_none() { + let is_no_task = current_run_queue_bitmap.is_bitmap_zero(); + if is_no_task { log!( - LogLevel::Error, - "Error getting the last task from RingBuffer" + LogLevel::Debug, + "No task available in the run queue, enter idle task." ); + let idle = task_list_get_idle_task(); + #[allow(clippy::expect_used)] + task_context_switch(idle.expect("ERROR: failed to get the idle task, invariant violated.")); } + let highest_priority: usize = current_run_queue_bitmap.find_leading_bit(); + let get_next_task = current_run_queue[highest_priority].pop(); // Allow unwrap because it's a temporary function #[allow(clippy::unwrap_used)] let next_task_pid = get_next_task.unwrap(); diff --git a/src/task/list.rs b/src/task/list.rs index 9219433..a874910 100644 --- a/src/task/list.rs +++ b/src/task/list.rs @@ -88,6 +88,18 @@ impl TaskList { pub fn get_last_pid(&self) -> u16 { self.last_pid } + + pub fn get_by_priority(&mut self, priority: u8) -> Option<&mut Task> { + for i in 0..TASK_LIST_MAX_SIZE { + let task = unsafe { (*self.list[i].get()).as_mut() }; + if let Some(is_task) = task + && is_task.priority == priority + { + return Some(is_task); + } + } + None + } } pub static mut TASK_LIST: TaskList = TaskList::init(); @@ -131,3 +143,11 @@ pub fn task_list_get_last_pid() -> u16 { TASK_LIST.get_last_pid() } } + +pub fn task_list_get_idle_task<'a>() -> Option<&'a mut Task> { + // Allow static mut refs for now, kernel only run in monocore + #[allow(static_mut_refs)] + unsafe { + TASK_LIST.get_by_priority(0) + } +} diff --git a/src/task/mod.rs b/src/task/mod.rs index 230c779..33705cf 100644 --- a/src/task/mod.rs +++ b/src/task/mod.rs @@ -19,9 +19,15 @@ Tests files: use list::task_list_add_task; -use crate::{arch::task::task_context::TaskContext, log, logs::LogLevel, mem::mem_task_alloc}; +use crate::{ + 
arch::{task::task_context::TaskContext, traps::interrupt::enable_and_halt}, + log, + logs::LogLevel, + mem::mem_task_alloc, +}; pub mod list; +pub mod primitives; // Mutable static to keep track of the current task // Only relevant on a monocore CPU. @@ -36,21 +42,30 @@ pub static mut TASK_HANDLER: *mut Task = core::ptr::null_mut(); #[repr(u8)] // Allow unused for now because this issue doesn't need to handle all task state #[allow(unused)] -#[derive(Copy, Clone)] +#[derive(Copy, Clone, PartialEq)] pub enum TaskState { New, Running, Ready, Waiting, + Blocked, Terminated, } +#[derive(Copy, Clone)] +pub enum TaskBlockControl { + AwakeTick(usize), + None, +} + #[derive(Copy, Clone)] #[repr(C)] pub struct Task { // Arch dependant context, don't handle this field in task, only use struct method when // interacting with it. pub context: TaskContext, + // Task block control, define the reason the task is blocked. + pub block_control: TaskBlockControl, // Fn ptr to task entry point, this must never return. pub func: fn() -> !, pid: u16, @@ -89,6 +104,7 @@ impl Task { mem_reg.expect("Error: failed to get the task memory region"), func, ), + block_control: TaskBlockControl::None, func, pid: 0, name: buf, @@ -159,3 +175,34 @@ pub fn task_context_save(task: &Task, ra: usize, sp: usize) { pub fn task_pid(task: &Task) -> u16 { task.pid } + +pub fn task_priority(task: &Task) -> u8 { + task.priority +} + +pub fn task_awake_tick(task: &Task) -> Option { + match task.block_control { + TaskBlockControl::AwakeTick(tick) => Some(tick), + TaskBlockControl::None => None, + } +} + +pub fn task_awake_block_control(task: &mut Task) { + task.block_control = TaskBlockControl::None; +} + +/// Create the idle task +pub fn task_idle_task() { + let task_name: &str = "Idle task"; + let func: fn() -> ! = idle_task_fn; + let priority: u8 = 0; + let size: usize = 0x100; + task_create(task_name, func, priority, size); +} + +fn idle_task_fn() -> ! 
{ + loop { + log!(LogLevel::Debug, "Idle task."); + unsafe { enable_and_halt() }; + } +} diff --git a/src/task/primitives.rs b/src/task/primitives.rs new file mode 100644 index 0000000..f821741 --- /dev/null +++ b/src/task/primitives.rs @@ -0,0 +1,136 @@ +/* +File info: Task primitives. + +Test coverage: yield and sleep. + +Tested: +- yield with two task. +- sleep with invariants from run queue and blocked queue. + +Not tested: +- delay + +Reasons: +- delay is hard to test, for now we test it by just checking it manually. + +Tests files: +- 'src/tests/task/primitives.rs' +*/ + +use crate::{ + arch::{helpers::current_cpu_core, traps::interrupt::enable_and_halt}, + ktime::{set_ktime_ms, tick::get_tick}, + log, + logs::LogLevel, + misc::need_reschedule, + scheduler::{BLOCKED_QUEUE, scheduler}, +}; + +use super::{TASK_HANDLER, Task, TaskBlockControl, TaskState, list::task_list_get_task_by_pid}; + +unsafe extern "C" { + // Put the current task to sleep until the number of tick given is passed + // tick: the number of tick the task need to sleep. + pub fn sleep(tick: usize); + // Yield function for cooperative scheduling + pub fn r#yield(); +} + +// Use no mangle because this function is called from an asm function +// Called from sleep primitive +#[unsafe(no_mangle)] +fn task_set_wake_tick(tick: usize) { + let current_tick = get_tick(); + let awake_tick = current_tick + tick; + // Call task primitive to update current task state + task_block_until(awake_tick); + // Call a re-schedule + scheduler(); +} + +/// Block the current task until the given tick is reach. +/// Update the current task to block it, but the task is still in the run queue, it'll be remove +/// from the run queue and saved in the blocked queue in the scheduler. +pub fn task_block_until(tick: usize) { + let current_task: *mut Task = unsafe { TASK_HANDLER }; + if current_task.is_null() { + log!( + LogLevel::Error, + "Error getting the current task, invariant violated. 
Sleep couldn't be used outside of a task." + ); + // See how to handle this, what to return or something else. + } + // Update task + // Update current state to block + // Update block control and pass the awake_tick to it + unsafe { + // Deref and cast current_task to &mut to update the Task behind the ptr. + let task: &mut Task = &mut *current_task; + task.state = TaskState::Blocked; + task.block_control = TaskBlockControl::AwakeTick(tick); + } +} + +/// Check all the blocked queue to find the task to awake. Just update the task that need to be +/// awake, make them ready. Don't handle the queue by itself. +/// TODO: Use a better data structure than a RingBuffer for the blocked queue. +pub fn task_awake_blocked(tick: usize) { + // Current CPU core + let core: usize = current_cpu_core(); + // Current blocked queue + let current_blocked_queue = unsafe { BLOCKED_QUEUE }[core]; + #[allow(static_mut_refs)] + let size = current_blocked_queue.get_count(); + if size == 0 { + return; + } + #[allow(static_mut_refs)] + let blocked_task = current_blocked_queue.get_head_node(); + let pid: u16 = if let Some(task) = blocked_task { + task.id as u16 + } else { + log!( + LogLevel::Error, + "Error getting the oldest task in run queue" + ); + #[allow(clippy::needless_return)] + return; + }; + // Allow expect, check the value before and if the pid become invalid we don't want to pursue + // run time. + #[allow(clippy::expect_used)] + let task = task_list_get_task_by_pid(pid); + if task.is_none() { + log!( + LogLevel::Error, + "Error getting the task by pid, the task may not exist" + ); + return; + } + // Allow expected, we check the value before, if it's some, there's shouldn't be any problem by + // unwrapping it. + // TODO: just need to correctly used the blocked queue to avoid getting the task from the + // task_list with pid, and matching on the block_control of the task to awake it. 
+ #[allow(clippy::expect_used)] + match task + .expect("Failed to get the task behind the Option<>. This shouldn't be possible") + .block_control + { + TaskBlockControl::AwakeTick(awake_tick) => { + if tick >= awake_tick { + // push to run queue + #[allow(static_mut_refs)] + // Set the need reschedule flag, the scheduler will check the block queue to + // awake correctly the task. + need_reschedule(); + } + } + TaskBlockControl::None => (), + } +} + +/// Interrupt all operation on the CPU for the given time. +pub fn delay(ms: usize) { + set_ktime_ms(ms as u64); + unsafe { enable_and_halt() }; +} diff --git a/src/tests/arch/riscv32/task/task_context.rs b/src/tests/arch/riscv32/task/task_context.rs index 6e63da1..1c9cbac 100644 --- a/src/tests/arch/riscv32/task/task_context.rs +++ b/src/tests/arch/riscv32/task/task_context.rs @@ -1,16 +1,13 @@ -use crate::{BUFFER, print}; +use crate::print; +use crate::scheduler::RUN_QUEUE; use core::{mem, ptr}; use crate::{ - arch::{ - scheduler::init_sched_ctx, - task::{task_context::TaskContext, r#yield}, - traps::interrupt::halt, - }, - scheduler::dispatch, + arch::{scheduler::init_sched_ctx, task::task_context::TaskContext, traps::interrupt::halt}, + scheduler::{RUN_QUEUE_BITMAP, scheduler}, task::{ - CURRENT_TASK_PID, TASK_HANDLER, list::task_list_get_task_by_pid, task_context_switch, - task_create, + CURRENT_TASK_PID, TASK_HANDLER, list::task_list_get_task_by_pid, primitives::r#yield, + task_context_switch, task_create, }, test_failed, test_info, tests::{TEST_MANAGER, TestBehavior, TestCase, TestSuite, TestSuiteBehavior}, @@ -76,15 +73,15 @@ pub fn test_task_context_offset() -> u8 { } let ra_off = mem::offset_of!(TaskContext, ra); if ra_off != 144 { - panic!("Task context ra offset must be 144, got: {sp_off}"); + panic!("Task context ra offset must be 144, got: {ra_off}"); } let flags_off = mem::offset_of!(TaskContext, flags); if flags_off != 148 { - panic!("Task context flags offset must be 144, got: {flags_off}"); + 
panic!("Task context flags offset must be 148, got: {flags_off}"); } let instruction_reg_off = mem::offset_of!(TaskContext, instruction_register); if instruction_reg_off != 151 { - panic!("Task context instruction_register offset must be 147, got: {instruction_reg_off}"); + panic!("Task context instruction_register offset must be 151, got: {instruction_reg_off}"); }; 0 } @@ -124,11 +121,16 @@ fn test_context_switch_b() -> ! { /// sp ? pub fn test_task_context_switch() -> u8 { // Temporary task creation and retrieving to test context switch. - task_create("A", test_context_switch_a, 0, 0x1000); - task_create("B", test_context_switch_b, 0, 0x1000); + // pid 2 + task_create("A", test_context_switch_a, 1, 0x1000); + // pid 3 + task_create("B", test_context_switch_b, 1, 0x1000); #[allow(static_mut_refs)] unsafe { - BUFFER.push(3) + // Access the queue and bitmap from CPU core 0 + // run queue priority 1 + RUN_QUEUE[0][1].push(3); + RUN_QUEUE_BITMAP[0].set_bit(1); }; unsafe { CURRENT_TASK_PID = 2 }; let mut task = task_list_get_task_by_pid(unsafe { CURRENT_TASK_PID }); @@ -136,7 +138,7 @@ pub fn test_task_context_switch() -> u8 { test_info!( "The next output should be the task A and B, which print alternately A, and B, with a digit. 
The final output must be: from A: 31, and from B: 28" ); - init_sched_ctx(dispatch); + init_sched_ctx(scheduler); task_context_switch(task.unwrap()); 0 } diff --git a/src/tests/arch/riscv32/traps/interrupt.rs b/src/tests/arch/riscv32/traps/interrupt.rs index c7dc1ef..f945af0 100644 --- a/src/tests/arch/riscv32/traps/interrupt.rs +++ b/src/tests/arch/riscv32/traps/interrupt.rs @@ -6,7 +6,7 @@ use crate::{ mtvec_switch_to_direct_mode, mtvec_switch_to_vectored_mode, read_mie_msie, read_mie_mtie, trap_entry, }, - trap_frame::{KERNEL_TRAP_FRAME, init_trap_frame}, + trap_frame::KERNEL_TRAP_FRAME, }, tests::{TEST_MANAGER, TestBehavior, TestCase, TestSuite, TestSuiteBehavior}, }; @@ -32,7 +32,7 @@ pub fn test_mtvec_set_vectored_mode() -> u8 { pub fn test_mtvec_trap_entry() -> u8 { let mtvec_trap_entry = mtvec_read_trap_entry(); - let trap_entry_addr = trap_entry as usize as u32; + let trap_entry_addr = trap_entry as *const () as usize as u32; mtvec_set_trap_entry(); let updated_mtvec_trap_entry = mtvec_read_trap_entry(); if mtvec_trap_entry != 0 { @@ -45,8 +45,6 @@ pub fn test_mtvec_trap_entry() -> u8 { } pub fn test_mscratch_trap_frame() -> u8 { - // Init trap_frame and declare ptr to it - init_trap_frame(); #[allow(static_mut_refs)] // Ptr to KERNEL_TRAP_FRAME static let ptr = unsafe { &mut KERNEL_TRAP_FRAME } as *mut _ as u32; diff --git a/src/tests/mod.rs b/src/tests/mod.rs index a7f3dc9..66e4a18 100644 --- a/src/tests/mod.rs +++ b/src/tests/mod.rs @@ -11,6 +11,7 @@ mod ktime; mod mem; mod platform; mod primitives; +mod scheduler; mod suites; mod task; diff --git a/src/tests/primitives/indexed_linked_list.rs b/src/tests/primitives/indexed_linked_list.rs new file mode 100644 index 0000000..7559cd2 --- /dev/null +++ b/src/tests/primitives/indexed_linked_list.rs @@ -0,0 +1,128 @@ +use crate::{ + primitives::indexed_linked_list::IndexedLinkedList, + test_failed, + tests::{TEST_MANAGER, TestBehavior, TestCase, TestSuite, TestSuiteBehavior}, +}; + +fn 
test_indexed_linked_list_push() -> u8 { + let mut list: IndexedLinkedList<10> = IndexedLinkedList::new(); + // Push some task + list.push(1, 70); + list.push(2, 80); + list.push(3, 75); + list.push(4, 50); + // Get head and tail task + let head = list.get_head(); + let tail = list.get_tail(); + let head_node = list.get_index(head); + let tail_node = list.get_index(tail); + if head_node.id != 4 { + test_failed!("head node should be the task 4, got: {}\n", head_node.id); + return 1; + } + if head_node.next_node.unwrap() != 0 { + test_failed!( + "head node.next_node should be the task 1, got: {}\n", + head_node.next_node.unwrap() + ); + return 1; + } + if tail_node.id != 2 { + test_failed!("tail node should be the task 2, got: {}\n", tail_node.id); + return 1; + } + if tail_node.next_node.is_some() { + test_failed!( + "tail node should not have a next ask, got: {}\n", + tail_node.next_node.unwrap() + ); + return 1; + } + // Get number of node in the list + let count = list.get_count(); + if count != 4 { + test_failed!("count should be 4, got: {}\n", count); + return 1; + } + // Check duplication security + list.push(4, 80); + let count = list.get_count(); + if count != 4 { + test_failed!("count should be 4, got: {}\n", count); + return 1; + } + 0 +} + +fn test_indexed_linked_list_get_head_node() -> u8 { + let mut list: IndexedLinkedList<10> = IndexedLinkedList::new(); + // Push some task + list.push(1, 70); + list.push(2, 80); + list.push(3, 75); + let head = list.get_head_node().unwrap(); + if head.id != 1 { + test_failed!( + "head node.next_node should be the task 1, got: {}\n", + head.next_node.unwrap() + ); + return 1; + } + list.push(4, 50); + let head = list.get_head_node().unwrap(); + if head.id != 4 { + test_failed!( + "head node.next_node should be the task 4, got: {}\n", + head.next_node.unwrap() + ); + return 1; + } + 0 +} + +fn test_indexed_linked_list_pop() -> u8 { + let mut list: IndexedLinkedList<10> = IndexedLinkedList::new(); + // Push some task + 
list.push(1, 70); + list.push(2, 80); + list.push(3, 75); + let head = list.pop().unwrap(); + if head.id != 1 { + test_failed!("head node should be the task 1, got: {}\n", head.id); + return 1; + } + let head = list.pop().unwrap(); + if head.id != 3 { + test_failed!("head node should be the task 3, got: {}\n", head.id); + return 1; + } + 0 +} + +pub fn indexed_linked_list_primitive_test_suite() { + const INDEXED_LINKED_LIST_TEST_SUITE: TestSuite = TestSuite { + tests: &[ + TestCase::init( + "IndexedLinkedList push", + test_indexed_linked_list_push, + TestBehavior::Default, + ), + TestCase::init( + "IndexedLinkedList get_head_node", + test_indexed_linked_list_get_head_node, + TestBehavior::Default, + ), + TestCase::init( + "IndexedLinkedList pop", + test_indexed_linked_list_pop, + TestBehavior::Default, + ), + ], + name: "IndexedLinkedList primitive type", + behavior: TestSuiteBehavior::Default, + }; + #[allow(static_mut_refs)] + unsafe { + TEST_MANAGER.add_suite(&INDEXED_LINKED_LIST_TEST_SUITE) + }; +} diff --git a/src/tests/primitives/mod.rs b/src/tests/primitives/mod.rs index 38dd7d7..fc5dea1 100644 --- a/src/tests/primitives/mod.rs +++ b/src/tests/primitives/mod.rs @@ -1 +1,2 @@ +pub mod indexed_linked_list; pub mod ring_buff; diff --git a/src/tests/scheduler/mod.rs b/src/tests/scheduler/mod.rs new file mode 100644 index 0000000..f9d968b --- /dev/null +++ b/src/tests/scheduler/mod.rs @@ -0,0 +1 @@ +pub fn scheduler_test_suite() {} diff --git a/src/tests/suites.rs b/src/tests/suites.rs index 86fb8ad..df1519f 100644 --- a/src/tests/suites.rs +++ b/src/tests/suites.rs @@ -14,8 +14,10 @@ use super::{ ktime::ktime_test_suite, mem::memory_test_suite, platform::platform_test_suite, + primitives::indexed_linked_list::indexed_linked_list_primitive_test_suite, primitives::ring_buff::ring_buff_primitive_test_suite, - task::{list::task_list_test_suite, task_test_suite}, + scheduler::scheduler_test_suite, + task::{list::task_list_test_suite, 
primitives::task_primitives_test_suite, task_test_suite}, }; // Call all test suite function to auto register all suites in test manager. @@ -24,6 +26,7 @@ pub fn test_suites() { platform_test_suite(); serial_subsystem_test_suite(); ring_buff_primitive_test_suite(); + indexed_linked_list_primitive_test_suite(); timer_subsystem_test_suite(); cpu_intc_subsystem_test_suite(); ktime_test_suite(); @@ -35,4 +38,6 @@ pub fn test_suites() { task_list_test_suite(); task_test_suite(); task_context_test_suite(); + task_primitives_test_suite(); + scheduler_test_suite(); } diff --git a/src/tests/task/mod.rs b/src/tests/task/mod.rs index a11cbe5..a476400 100644 --- a/src/tests/task/mod.rs +++ b/src/tests/task/mod.rs @@ -5,6 +5,7 @@ use crate::{ }; pub mod list; +pub mod primitives; /// This function is only used to create task for testing purpose. /// This must never be used in other cases diff --git a/src/tests/task/primitives.rs b/src/tests/task/primitives.rs new file mode 100644 index 0000000..0dfe2d5 --- /dev/null +++ b/src/tests/task/primitives.rs @@ -0,0 +1,153 @@ +use crate::{ + arch::{ + helpers::current_cpu_core, + traps::{enable_interrupts, handler::trap_handler, trap_frame::TrapFrame}, + }, + config::TICK_SAFETY_DURATION, + kprint, + ktime::set_ktime_seconds, + scheduler::{BLOCKED_QUEUE, RUN_QUEUE, RUN_QUEUE_BITMAP}, + task::{ + CURRENT_TASK_PID, TASK_HANDLER, + list::task_list_get_task_by_pid, + primitives::{delay, sleep}, + task_context_switch, task_create, + }, + test_failed, test_info, + tests::{TEST_MANAGER, TestBehavior, TestCase, TestSuite, TestSuiteBehavior}, +}; +use core::ptr; + +fn task_fn() -> ! { + let mut i: usize = 0; + loop { + kprint!("delay\n"); + delay(1000); + if i >= 8 { + // Exit Qemu + unsafe { ptr::write_volatile(0x100000 as *mut u32, 0x5555) }; + } + i += 1; + } +} + +fn task_sleep_fn() -> ! { + loop { + unsafe { + sleep(2); + } + } +} + +fn task_testing_sleep() -> ! 
{ + let cause: usize = 2147483655; + // Random mepc + // TODO: improve mepc security in trap handler + let mepc: usize = 125696; + let mut trap_frame = TrapFrame::init(); + unsafe { trap_handler(mepc, 0, cause, 0, 0, &mut trap_frame) }; + let core: usize = current_cpu_core(); + // Blocked queue should have been updated, checking it + #[allow(static_mut_refs)] + let current_blocked_queue = unsafe { &mut BLOCKED_QUEUE[core] }; + #[allow(static_mut_refs)] + let current_run_queue = unsafe { &mut RUN_QUEUE[core] }; + if current_run_queue[1].size() != 0 { + test_failed!( + "The run queue should be empty, got: {}", + current_run_queue[1].size() + ); + // Use infinite loop to make the CI crash from timeout. Can't return test failed from + // here. + loop {} + } + if current_blocked_queue.get_count() != 1 { + test_failed!( + "The block queue should have 1 task in it, got: {}", + current_blocked_queue.get_count() + ); + // Use infinite loop to make the CI crash from timeout. Can't return test failed from + // here. + loop {} + } + unsafe { trap_handler(mepc, 0, cause, 0, 0, &mut trap_frame) }; + if current_blocked_queue.get_count() != 0 { + test_failed!( + "The block queue should be empty, got: {}", + current_blocked_queue.get_count() + ); + // Use infinite loop to make the CI crash from timeout. Can't return test failed from + // here. + loop {} + } + if current_run_queue[1].size() != 1 { + test_failed!( + "The run queue should have 1 task in it, got: {}", + current_run_queue[1].size() + ); + // Use infinite loop to make the CI crash from timeout. Can't return test failed from + // here. + loop {} + } + test_info!("Invariant from sleep and blocked queue successfully respected. Exit qemu..."); + unsafe { ptr::write_volatile(0x100000 as *mut u32, 0x5555) }; + // Check with condition the invariant, blocked queue updated etc + // Recall trap handler, then check that the block queue is empty. 
+ loop {} +} + +fn test_task_primitives_delay() -> u8 { + task_create("Test delay", task_fn, 1, 0x1000); + unsafe { CURRENT_TASK_PID = 2 }; + let mut task = task_list_get_task_by_pid(unsafe { CURRENT_TASK_PID }); + unsafe { TASK_HANDLER = *task.as_mut().unwrap() }; + test_info!( + "The next output should be the task 'Test delay' printing an integer. The final output should be: 'delay: '" + ); + set_ktime_seconds(TICK_SAFETY_DURATION); + enable_interrupts(); + task_context_switch(task.unwrap()); + 0 +} + +fn test_task_primitives_sleep() -> u8 { + // pid 2 + task_create("Test sleep", task_sleep_fn, 1, 0x1000); + // pid 3 + task_create("Test sleep invariants", task_testing_sleep, 1, 0x1000); + unsafe { CURRENT_TASK_PID = 2 }; + let mut task = task_list_get_task_by_pid(unsafe { CURRENT_TASK_PID }); + unsafe { TASK_HANDLER = *task.as_mut().unwrap() }; + #[allow(static_mut_refs)] + unsafe { + // Access the queue and bitmap from CPU core 0 + // run queue priority 1 + RUN_QUEUE[0][1].push(3); + RUN_QUEUE_BITMAP[0].set_bit(1); + } + task_context_switch(task.unwrap()); + 0 +} + +pub fn task_primitives_test_suite() { + const TASK_PRIMITIVES_TEST_SUITE: TestSuite = TestSuite { + tests: &[ + TestCase::init( + "Task primitive delay", + test_task_primitives_delay, + TestBehavior::Skipped, + ), + TestCase::init( + "Task primitive sleep", + test_task_primitives_sleep, + TestBehavior::Skipped, + ), + ], + name: "Task primitives", + behavior: TestSuiteBehavior::Default, + }; + #[allow(static_mut_refs)] + unsafe { + TEST_MANAGER.add_suite(&TASK_PRIMITIVES_TEST_SUITE) + }; +}