diff --git a/arch/riscv/boot.c b/arch/riscv/boot.c index ef025de..a1866a1 100644 --- a/arch/riscv/boot.c +++ b/arch/riscv/boot.c @@ -14,6 +14,8 @@ extern uint32_t _gp, _stack, _end; extern uint32_t _sbss, _ebss; +#define STACK_SIZE_PER_HART 524288 + /* C entry points */ void main(void); void do_trap(uint32_t cause, uint32_t epc); @@ -29,6 +31,12 @@ __attribute__((naked, section(".text.prologue"))) void _entry(void) /* Initialize Global Pointer (gp) and Stack Pointer (sp) */ "la gp, _gp\n" "la sp, _stack\n" + /* Set up stack for each hart */ + "csrr t0, mhartid\n" /* t0 = hartid */ + "la t1, _stack_top\n" /* t1 = base address of full stack region (top) */ + "li t2, %2\n" /* t2 = per-hart stack size */ + "mul t0, t0, t2\n" /* t0 = hartid * STACK_SIZE_PER_HART */ + "sub sp, t1, t0\n" /* sp = _stack_top - hartid * stack_size */ /* Initialize Thread Pointer (tp). The ABI requires tp to point to * a 64-byte aligned memory region for thread-local storage. Here, we @@ -62,10 +70,6 @@ __attribute__((naked, section(".text.prologue"))) void _entry(void) "csrw mideleg, zero\n" /* No interrupt delegation to S-mode */ "csrw medeleg, zero\n" /* No exception delegation to S-mode */ - /* Park secondary harts (cores) - only hart 0 continues */ - "csrr t0, mhartid\n" - "bnez t0, .Lpark_hart\n" - /* Set the machine trap vector (mtvec) to point to our ISR */ "la t0, _isr\n" "csrw mtvec, t0\n" @@ -79,17 +83,14 @@ __attribute__((naked, section(".text.prologue"))) void _entry(void) "csrw mie, t0\n" /* Jump to the C-level main function */ + "csrr a0, mhartid\n" "call main\n" /* If main() ever returns, it is a fatal error */ "call hal_panic\n" - ".Lpark_hart:\n" - "wfi\n" - "j .Lpark_hart\n" - : /* no outputs */ - : "i"(MSTATUS_MPP_MACH), "i"(MIE_MEIE) + : /* no outputs */ : "i"(MSTATUS_MPP_MACH), "i"(MIE_MEIE), "i"(STACK_SIZE_PER_HART) : "memory"); } diff --git a/arch/riscv/build.mk b/arch/riscv/build.mk index 2b00dca..43c8617 100644 --- a/arch/riscv/build.mk +++ b/arch/riscv/build.mk @@ -17,10 +17,10 @@ DEFINES := -DF_CPU=$(F_CLK) \ -DF_TIMER=$(F_TICK) \ -include config.h -ASFLAGS = -march=rv32imzicsr -mabi=ilp32 +ASFLAGS = -march=rv32imazicsr -mabi=ilp32 CFLAGS += -Wall -Wextra -Wshadow -Wno-unused-parameter -Werror CFLAGS += -O2 -std=gnu99 -CFLAGS += -march=rv32imzicsr -mabi=ilp32 +CFLAGS += -march=rv32imazicsr -mabi=ilp32 CFLAGS += -mstrict-align -ffreestanding -nostdlib -fomit-frame-pointer CFLAGS += $(INC_DIRS) $(DEFINES) -fdata-sections -ffunction-sections ARFLAGS = r @@ -49,4 +49,4 @@ $(BUILD_KERNEL_DIR)/%.o: $(ARCH_DIR)/%.c | $(BUILD_DIR) run: @$(call notice, Ready to launch Linmo kernel + application.)
- $(Q)qemu-system-riscv32 -machine virt -nographic -bios none -kernel $(BUILD_DIR)/image.elf -nographic + $(Q)qemu-system-riscv32 -smp 4 -machine virt -nographic -bios none -kernel $(BUILD_DIR)/image.elf diff --git a/arch/riscv/hal.c b/arch/riscv/hal.c index 35703a8..410e575 100644 --- a/arch/riscv/hal.c +++ b/arch/riscv/hal.c @@ -136,11 +136,14 @@ static inline uint64_t mtime_r(void) /* Safely read the 64-bit 'mtimecmp' register */ static inline uint64_t mtimecmp_r(void) { - uint32_t hi, lo; + uint32_t hi, lo, hartid; + + hartid = read_csr(mhartid); + do { - hi = MTIMECMP_H; - lo = MTIMECMP_L; - } while (hi != MTIMECMP_H); + hi = * (volatile uint32_t *) (CLINT_BASE + 0x4004u + 8 * hartid); + lo = * (volatile uint32_t *) (CLINT_BASE + 0x4000u + 8 * hartid); + } while (hi != *(volatile uint32_t *) (CLINT_BASE + 0x4004u + 8 * hartid)); return CT64(hi, lo); } @@ -157,10 +160,11 @@ static inline void mtimecmp_w(uint64_t val) /* Disable timer interrupts during the critical section */ uint32_t old_mie = read_csr(mie); write_csr(mie, old_mie & ~MIE_MTIE); + uint32_t hartid = read_csr(mhartid); - MTIMECMP_L = 0xFFFFFFFF; /* Set to maximum to prevent spurious interrupt */ - MTIMECMP_H = (uint32_t) (val >> 32); /* Set high word */ - MTIMECMP_L = (uint32_t) val; /* Set low word to final value */ + * (volatile uint32_t *) (CLINT_BASE + 0x4000u + 8 * hartid) = 0xFFFFFFFF; /* Set to maximum to prevent spurious interrupt */ + * (volatile uint32_t *) (CLINT_BASE + 0x4004u + 8 * hartid) = (uint32_t) (val >> 32); /* Set high word */ + * (volatile uint32_t *) (CLINT_BASE + 0x4000u + 8 * hartid) = (uint32_t) val; /* Set low word to final value */ /* Re-enable timer interrupts if they were previously enabled */ write_csr(mie, old_mie); @@ -320,7 +324,7 @@ void hal_timer_disable(void) */ void hal_interrupt_tick(void) { - tcb_t *task = kcb->task_current->data; + tcb_t *task = get_task_current()->data; if (unlikely(!task)) hal_panic(); /* Fatal error - invalid task state */ diff --git a/arch/riscv/spinlock.h b/arch/riscv/spinlock.h new file mode 100644 index 0000000..e05ca5a --- /dev/null +++ b/arch/riscv/spinlock.h @@ -0,0 +1,74 @@ +#pragma once + +#include + +/* Spinlock structure */ +typedef struct { + volatile uint32_t lock; +} spinlock_t; + +#define SPINLOCK_INITIALIZER { 0 } + +/* Save and restore interrupt state */ +static inline uint32_t intr_save(void) +{ + uint32_t mstatus_val = read_csr(mstatus); + _di(); + return mstatus_val; +} + +static inline void intr_restore(uint32_t mstatus_val) +{ + write_csr(mstatus, mstatus_val); +} + +/* CPU relax */ +static inline void cpu_relax(void) +{ + asm volatile("nop"); +} + +/* Basic spinlock API */ +static inline void spin_lock(spinlock_t *lock) +{ + while (__sync_lock_test_and_set(&lock->lock, 1)) { + while (lock->lock) + cpu_relax(); + } +} + +static inline void spin_unlock(spinlock_t *lock) +{ + __sync_lock_release(&lock->lock); +} + +static inline int spin_trylock(spinlock_t *lock) +{ + return (__sync_lock_test_and_set(&lock->lock, 1) == 0); +} + +/* IRQ-safe spinlock (no state saving) */ +static inline void spin_lock_irq(spinlock_t *lock) +{ + _di(); + spin_lock(lock); +} + +static inline void spin_unlock_irq(spinlock_t *lock) +{ + spin_unlock(lock); + _ei(); +} + +/* IRQ-safe spinlock (with state saving) */ +static inline void spin_lock_irqsave(spinlock_t *lock, uint32_t *flags) +{ + *flags = intr_save(); + spin_lock(lock); +} + +static inline void spin_unlock_irqrestore(spinlock_t *lock, uint32_t flags) +{ + spin_unlock(lock); +
intr_restore(flags); +} diff --git a/include/sys/task.h b/include/sys/task.h index 33d0b60..a918393 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -13,6 +13,7 @@ */ #include +#include #include #include @@ -84,6 +85,8 @@ typedef struct tcb { void *rt_prio; /* Opaque pointer for custom real-time scheduler hook */ } tcb_t; +#define MAX_HARTS 8 + /* Kernel Control Block (KCB) * * Singleton structure holding global kernel state, including task lists, @@ -92,7 +95,7 @@ typedef struct tcb { typedef struct { /* Task Management */ list_t *tasks; /* Master list of all tasks (nodes contain tcb_t) */ - list_node_t *task_current; /* Node of currently running task */ + list_node_t *task_current[MAX_HARTS]; /* Node of currently running task */ jmp_buf context; /* Saved context of main kernel thread before scheduling */ uint16_t next_tid; /* Monotonically increasing ID for next new task */ uint16_t task_count; /* Cached count of active tasks for quick access */ @@ -104,6 +107,9 @@ typedef struct { /* Timer Management */ list_t *timer_list; /* List of active software timers */ volatile uint32_t ticks; /* Global system tick, incremented by timer */ + /* Timers */ + + spinlock_t kcb_lock; } kcb_t; /* Global pointer to the singleton Kernel Control Block */ @@ -117,49 +123,21 @@ extern kcb_t *kcb; #define TASK_CACHE_SIZE \ 4 /* Task lookup cache size for frequently accessed tasks */ -/* Critical Section Macros - * - * Two levels of protection are provided: - * 1. CRITICAL_* macros disable ALL maskable interrupts globally - * 2. NOSCHED_* macros disable ONLY the scheduler timer interrupt - */ +/* Core Kernel and Task Management API */ -/* Disable/enable ALL maskable interrupts globally. - * Provides strongest protection against concurrency from both other tasks - * and all ISRs. Use when modifying data shared with any ISR. - * WARNING: Increases interrupt latency - use NOSCHED macros if protection - * is only needed against task preemption. - */ -#define CRITICAL_ENTER() \ - do { \ - if (kcb->preemptive) \ - _di(); \ - } while (0) - -#define CRITICAL_LEAVE() \ - do { \ - if (kcb->preemptive) \ - _ei(); \ - } while (0) - -/* Disable/enable ONLY the scheduler timer interrupt. - * Lighter-weight critical section that prevents task preemption but allows - * other hardware interrupts (e.g., UART) to be serviced, minimizing latency. - * Use when protecting data shared between tasks. - */ -#define NOSCHED_ENTER() \ - do { \ - if (kcb->preemptive) \ - hal_timer_disable(); \ - } while (0) - -#define NOSCHED_LEAVE() \ - do { \ - if (kcb->preemptive) \ - hal_timer_enable(); \ - } while (0) +static inline list_node_t *get_task_current() +{ + const uint32_t mhartid = read_csr(mhartid); -/* Core Kernel and Task Management API */ + return kcb->task_current[mhartid]; +} + +static inline void set_task_current(list_node_t *task) +{ + const uint32_t mhartid = read_csr(mhartid); + + kcb->task_current[mhartid] = task; +} /* System Control Functions */ diff --git a/kernel/main.c b/kernel/main.c index efa46ff..63f3a39 100644 --- a/kernel/main.c +++ b/kernel/main.c @@ -1,9 +1,19 @@ #include +#include #include #include #include "private/error.h" +static void idle_task(void) +{ + while (1) + mo_task_wfi(); +} + +static volatile bool finish = false; +static spinlock_t finish_lock = SPINLOCK_INITIALIZER; + /* C-level entry point for the kernel. * * This function is called from the boot code ('_entry'). It is responsible for @@ -12,27 +22,44 @@ * * Under normal operation, this function never returns. 
*/ -int32_t main(void) +int32_t main(int32_t hartid) { /* Initialize hardware abstraction layer and memory heap. */ hal_hardware_init(); - printf("Linmo kernel is starting...\n"); + if (hartid == 0) { + printf("Linmo kernel is starting...\n"); + + mo_heap_init((void *) &_heap_start, (size_t) &_heap_size); + printf("Heap initialized, %u bytes available\n", + (unsigned int) (size_t) &_heap_size); - mo_heap_init((void *) &_heap_start, (size_t) &_heap_size); - printf("Heap initialized, %u bytes available\n", - (unsigned int) (size_t) &_heap_size); + /* Call the application's main entry point to create initial tasks. */ + kcb->preemptive = (bool) app_main(); + printf("Scheduler mode: %s\n", + kcb->preemptive ? "Preemptive" : "Cooperative"); - /* Call the application's main entry point to create initial tasks. */ - kcb->preemptive = (bool) app_main(); - printf("Scheduler mode: %s\n", - kcb->preemptive ? "Preemptive" : "Cooperative"); + spin_lock(&finish_lock); + finish = true; + spin_unlock(&finish_lock); + } + + /* Make sure hardware initialization is complete before running the first task. */ + while (1) { + spin_lock(&finish_lock); + if (finish) + break; + spin_unlock(&finish_lock); + } + spin_unlock(&finish_lock); + + mo_task_spawn(idle_task, DEFAULT_STACK_SIZE); /* Verify that the application created at least one task. - * If 'kcb->task_current' is still NULL, it means mo_task_spawn was never + * If 'get_task_current()' is still NULL, it means mo_task_spawn was never * successfully called. */ - if (!kcb->task_current) + if (!get_task_current()) panic(ERR_NO_TASKS); /* Save the kernel's context. This is a formality to establish a base @@ -40,14 +67,19 @@ int32_t main(void) */ setjmp(kcb->context); + spin_lock(&finish_lock); + /* Launch the first task. - * 'kcb->task_current' was set by the first call to mo_task_spawn. + * 'get_task_current()' was set by the first call to mo_task_spawn. * This function transfers control and does not return. */ - tcb_t *first_task = kcb->task_current->data; + + tcb_t *first_task = get_task_current()->data; if (!first_task) panic(ERR_NO_TASKS); + spin_unlock(&finish_lock); + hal_dispatch_init(first_task->context); /* This line should be unreachable.
*/ diff --git a/kernel/mqueue.c b/kernel/mqueue.c index afc1b7c..2ae391e 100644 --- a/kernel/mqueue.c +++ b/kernel/mqueue.c @@ -6,9 +6,14 @@ #include #include +#include + #include "private/error.h" #include "private/utils.h" +static spinlock_t queue_lock = SPINLOCK_INITIALIZER; +static uint32_t queue_flags = 0; + mq_t *mo_mq_create(uint16_t max_items) { mq_t *mq = malloc(sizeof *mq); @@ -31,15 +36,15 @@ int32_t mo_mq_destroy(mq_t *mq) if (unlikely(!mq->q)) return ERR_FAIL; /* Invalid mqueue state */ - CRITICAL_ENTER(); + spin_lock_irqsave(&queue_lock, &queue_flags); - if (unlikely(queue_count(mq->q) != 0)) { /* refuse to destroy non-empty q */ - CRITICAL_LEAVE(); + if (queue_count(mq->q) != 0) { /* refuse to destroy non-empty q */ + spin_unlock_irqrestore(&queue_lock, queue_flags); return ERR_MQ_NOTEMPTY; } /* Safe to destroy now - no need to hold critical section */ - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&queue_lock, queue_flags); queue_destroy(mq->q); free(mq); @@ -54,9 +59,9 @@ int32_t mo_mq_enqueue(mq_t *mq, message_t *msg) int32_t rc; - CRITICAL_ENTER(); + spin_lock_irqsave(&queue_lock, &queue_flags); rc = queue_enqueue(mq->q, msg); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&queue_lock, queue_flags); return rc; /* 0 on success, −1 on full */ } @@ -69,9 +74,9 @@ message_t *mo_mq_dequeue(mq_t *mq) message_t *msg; - CRITICAL_ENTER(); + spin_lock_irqsave(&queue_lock, &queue_flags); msg = queue_dequeue(mq->q); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&queue_lock, queue_flags); return msg; /* NULL when queue is empty */ } @@ -84,9 +89,9 @@ message_t *mo_mq_peek(mq_t *mq) message_t *msg; - CRITICAL_ENTER(); + spin_lock_irqsave(&queue_lock, &queue_flags); msg = queue_peek(mq->q); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&queue_lock, queue_flags); return msg; /* NULL when queue is empty */ } diff --git a/kernel/mutex.c b/kernel/mutex.c index 52a16a7..6554bfc 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -4,6 +4,7 @@ * that are independent of the semaphore module. 
*/ +#include #include #include #include @@ -11,6 +12,9 @@ #include "private/error.h" #include "private/utils.h" +static spinlock_t mutex_lock = SPINLOCK_INITIALIZER; +static uint32_t mutex_flags = 0; + /* Validate mutex pointer and structure integrity */ static inline bool mutex_is_valid(const mutex_t *m) { @@ -45,11 +49,11 @@ static inline void cond_invalidate(cond_t *c) */ static bool remove_self_from_waiters(list_t *waiters) { - if (unlikely(!waiters || !kcb || !kcb->task_current || - !kcb->task_current->data)) + if (unlikely(!waiters || !kcb || !get_task_current() || + !get_task_current()->data)) return false; - tcb_t *self = kcb->task_current->data; + tcb_t *self = get_task_current()->data; /* Search for and remove self from waiters list */ list_node_t *curr = waiters->head->next; @@ -67,11 +71,11 @@ static bool remove_self_from_waiters(list_t *waiters) /* Atomic block operation with enhanced error checking */ static void mutex_block_atomic(list_t *waiters) { - if (unlikely(!waiters || !kcb || !kcb->task_current || - !kcb->task_current->data)) + if (unlikely(!waiters || !kcb || !get_task_current() || + !get_task_current()->data)) panic(ERR_SEM_OPERATION); - tcb_t *self = kcb->task_current->data; + tcb_t *self = get_task_current()->data; /* Add to waiters list */ if (unlikely(!list_pushback(waiters, self))) @@ -112,17 +116,17 @@ int32_t mo_mutex_destroy(mutex_t *m) if (unlikely(!mutex_is_valid(m))) return ERR_FAIL; - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); /* Check if any tasks are waiting */ if (unlikely(!list_is_empty(m->waiters))) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return ERR_TASK_BUSY; } /* Check if mutex is still owned */ if (unlikely(m->owner_tid != 0)) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return ERR_TASK_BUSY; } @@ -132,7 +136,7 @@ int32_t mo_mutex_destroy(mutex_t *m) m->waiters = NULL; m->owner_tid = 0; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); /* Clean up resources outside critical section */ list_destroy(waiters); @@ -146,18 +150,18 @@ int32_t mo_mutex_lock(mutex_t *m) uint16_t self_tid = mo_task_id(); - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); /* Non-recursive: reject if caller already owns it */ if (unlikely(m->owner_tid == self_tid)) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return ERR_TASK_BUSY; } /* Fast path: mutex is free, acquire immediately */ if (likely(m->owner_tid == 0)) { m->owner_tid = self_tid; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return ERR_OK; } @@ -177,7 +181,7 @@ int32_t mo_mutex_trylock(mutex_t *m) uint16_t self_tid = mo_task_id(); int32_t result = ERR_TASK_BUSY; - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); if (unlikely(m->owner_tid == self_tid)) { /* Already owned by caller (non-recursive) */ @@ -189,7 +193,7 @@ int32_t mo_mutex_trylock(mutex_t *m) } /* else: owned by someone else, return ERR_TASK_BUSY */ - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return result; } @@ -203,25 +207,25 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks) uint16_t self_tid = mo_task_id(); - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); /* Non-recursive check */ if (unlikely(m->owner_tid == self_tid)) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return ERR_TASK_BUSY; } /* Fast path: mutex is free */ if (m->owner_tid == 0) { m->owner_tid = self_tid; - 
NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return ERR_OK; } /* Slow path: must block with timeout using delay mechanism */ - tcb_t *self = kcb->task_current->data; + tcb_t *self = get_task_current()->data; if (unlikely(!list_pushback(m->waiters, self))) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); panic(ERR_SEM_OPERATION); } @@ -229,7 +233,7 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks) self->delay = ticks; self->state = TASK_BLOCKED; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); /* Yield and let the scheduler handle timeout via delay mechanism */ mo_task_yield(); @@ -237,7 +241,8 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks) /* Check result after waking up */ int32_t result; - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); + if (self->state == TASK_BLOCKED) { /* We woke up due to timeout, not mutex unlock */ if (remove_self_from_waiters(m->waiters)) { @@ -252,7 +257,7 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks) /* We were woken by mutex unlock - check ownership */ result = (m->owner_tid == self_tid) ? ERR_OK : ERR_FAIL; } - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return result; } @@ -264,11 +269,11 @@ int32_t mo_mutex_unlock(mutex_t *m) uint16_t self_tid = mo_task_id(); - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); /* Verify caller owns the mutex */ if (unlikely(m->owner_tid != self_tid)) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return ERR_NOT_OWNER; } @@ -296,7 +301,7 @@ int32_t mo_mutex_unlock(mutex_t *m) } } - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return ERR_OK; } @@ -314,9 +319,9 @@ int32_t mo_mutex_waiting_count(mutex_t *m) return -1; int32_t count; - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); count = m->waiters ? 
(int32_t) m->waiters->length : 0; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return count; } @@ -348,11 +353,11 @@ int32_t mo_cond_destroy(cond_t *c) if (unlikely(!cond_is_valid(c))) return ERR_FAIL; - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); /* Check if any tasks are waiting */ if (unlikely(!list_is_empty(c->waiters))) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return ERR_TASK_BUSY; } @@ -361,7 +366,7 @@ int32_t mo_cond_destroy(cond_t *c) list_t *waiters = c->waiters; c->waiters = NULL; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); /* Clean up resources outside critical section */ list_destroy(waiters); @@ -379,25 +384,25 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m) if (unlikely(!mo_mutex_owned_by_current(m))) return ERR_NOT_OWNER; - tcb_t *self = kcb->task_current->data; + tcb_t *self = get_task_current()->data; /* Atomically add to wait list */ - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); if (unlikely(!list_pushback(c->waiters, self))) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); panic(ERR_SEM_OPERATION); } self->state = TASK_BLOCKED; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); /* Release mutex */ int32_t unlock_result = mo_mutex_unlock(m); if (unlikely(unlock_result != ERR_OK)) { /* Failed to unlock - remove from wait list and restore state */ - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); remove_self_from_waiters(c->waiters); self->state = TASK_READY; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return unlock_result; } @@ -421,27 +426,27 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks) return ERR_TIMEOUT; } - tcb_t *self = kcb->task_current->data; + tcb_t *self = get_task_current()->data; /* Atomically add to wait list with timeout */ - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); if (unlikely(!list_pushback(c->waiters, self))) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); panic(ERR_SEM_OPERATION); } self->delay = ticks; self->state = TASK_BLOCKED; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); /* Release mutex */ int32_t unlock_result = mo_mutex_unlock(m); if (unlikely(unlock_result != ERR_OK)) { /* Failed to unlock - cleanup and restore */ - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); remove_self_from_waiters(c->waiters); self->state = TASK_READY; self->delay = 0; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return unlock_result; } @@ -450,7 +455,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks) /* Determine why we woke up */ int32_t wait_status; - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); if (self->state == TASK_BLOCKED) { /* Timeout occurred - remove from wait list */ @@ -463,7 +468,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks) wait_status = ERR_OK; } - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); /* Re-acquire mutex regardless of timeout status */ int32_t lock_result = mo_mutex_lock(m); @@ -477,7 +482,7 @@ int32_t mo_cond_signal(cond_t *c) if (unlikely(!cond_is_valid(c))) return ERR_FAIL; - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); if (!list_is_empty(c->waiters)) { tcb_t *waiter = (tcb_t *) list_pop(c->waiters); @@ -494,7 +499,7 @@ int32_t mo_cond_signal(cond_t *c) } } - NOSCHED_LEAVE(); + 
spin_unlock_irqrestore(&mutex_lock, mutex_flags); return ERR_OK; } @@ -503,7 +508,7 @@ int32_t mo_cond_broadcast(cond_t *c) if (unlikely(!cond_is_valid(c))) return ERR_FAIL; - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); /* Wake all waiting tasks */ while (!list_is_empty(c->waiters)) { @@ -521,7 +526,7 @@ int32_t mo_cond_broadcast(cond_t *c) } } - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return ERR_OK; } @@ -531,9 +536,9 @@ int32_t mo_cond_waiting_count(cond_t *c) return -1; int32_t count; - NOSCHED_ENTER(); + spin_lock_irqsave(&mutex_lock, &mutex_flags); count = c->waiters ? (int32_t) c->waiters->length : 0; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&mutex_lock, mutex_flags); return count; } diff --git a/kernel/pipe.c b/kernel/pipe.c index 169c6f9..4eb9a8f 100644 --- a/kernel/pipe.c +++ b/kernel/pipe.c @@ -1,4 +1,6 @@ #include +#include +#include #include #include @@ -17,8 +19,11 @@ static inline bool pipe_is_valid(const pipe_t *p) p->head <= p->mask && p->tail <= p->mask; } +static spinlock_t pipe_lock = SPINLOCK_INITIALIZER; +static uint32_t pipe_flags = 0; + /* empty/full checks using the used counter */ -static inline bool pipe_is_empty(const pipe_t *p) +static inline int pipe_is_empty(const pipe_t *p) { return p->used == 0; } @@ -171,9 +176,9 @@ void mo_pipe_flush(pipe_t *p) if (unlikely(!pipe_is_valid(p))) return; - CRITICAL_ENTER(); + spin_lock_irqsave(&pipe_lock, &pipe_flags); p->head = p->tail = p->used = 0; - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&pipe_lock, pipe_flags); } int32_t mo_pipe_size(pipe_t *p) @@ -198,9 +203,9 @@ int32_t mo_pipe_free_space(pipe_t *p) if (unlikely(!pipe_is_valid(p))) return -1; - CRITICAL_ENTER(); + spin_lock_irqsave(&pipe_lock, &pipe_flags); int32_t free_space = (int32_t) pipe_free_space_internal(p); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&pipe_lock, pipe_flags); return free_space; } @@ -209,13 +214,13 @@ int32_t mo_pipe_free_space(pipe_t *p) static void pipe_wait_until_readable(pipe_t *p) { while (1) { - CRITICAL_ENTER(); + spin_lock_irqsave(&pipe_lock, &pipe_flags); if (!pipe_is_empty(p)) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&pipe_lock, pipe_flags); return; } /* Nothing to read – drop critical section and yield CPU */ - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&pipe_lock, pipe_flags); mo_task_wfi(); /* Yield CPU without blocking task state */ } } @@ -223,13 +228,13 @@ static void pipe_wait_until_readable(pipe_t *p) static void pipe_wait_until_writable(pipe_t *p) { while (1) { - CRITICAL_ENTER(); + spin_lock_irqsave(&pipe_lock, &pipe_flags); if (!pipe_is_full(p)) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&pipe_lock, pipe_flags); return; } /* Buffer full – yield until space is available */ - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&pipe_lock, pipe_flags); mo_task_wfi(); /* Yield CPU without blocking task state */ } } @@ -247,9 +252,9 @@ int32_t mo_pipe_read(pipe_t *p, char *dst, uint16_t len) pipe_wait_until_readable(p); /* Read as much as possible in one critical section */ - CRITICAL_ENTER(); + spin_lock_irqsave(&pipe_lock, &pipe_flags); uint16_t chunk = pipe_bulk_read(p, dst + bytes_read, len - bytes_read); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&pipe_lock, pipe_flags); bytes_read += chunk; @@ -272,10 +277,10 @@ int32_t mo_pipe_write(pipe_t *p, const char *src, uint16_t len) pipe_wait_until_writable(p); /* Write as much as possible in one critical section */ - CRITICAL_ENTER(); + spin_lock_irqsave(&pipe_lock, &pipe_flags); uint16_t chunk = pipe_bulk_write(p, src 
+ bytes_written, len - bytes_written); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&pipe_lock, pipe_flags); bytes_written += chunk; @@ -294,9 +299,9 @@ int32_t mo_pipe_nbread(pipe_t *p, char *dst, uint16_t len) uint16_t bytes_read; - CRITICAL_ENTER(); + spin_lock_irqsave(&pipe_lock, &pipe_flags); bytes_read = pipe_bulk_read(p, dst, len); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&pipe_lock, pipe_flags); return (int32_t) bytes_read; } @@ -309,9 +314,9 @@ int32_t mo_pipe_nbwrite(pipe_t *p, const char *src, uint16_t len) uint16_t bytes_written; - CRITICAL_ENTER(); + spin_lock_irqsave(&pipe_lock, &pipe_flags); bytes_written = pipe_bulk_write(p, src, len); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&pipe_lock, pipe_flags); return (int32_t) bytes_written; } diff --git a/kernel/semaphore.c b/kernel/semaphore.c index 640e372..87d8dc3 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c @@ -8,6 +8,7 @@ */ #include +#include #include #include @@ -25,6 +26,9 @@ struct sem_t { /* Magic number for semaphore validation */ #define SEM_MAGIC 0x53454D00 /* "SEM\0" */ +static spinlock_t semaphore_lock = SPINLOCK_INITIALIZER; +static uint32_t semaphore_flags = 0; + static inline bool sem_is_valid(const sem_t *s) { return s && s->magic == SEM_MAGIC && s->wait_q && s->max_waiters > 0 && @@ -80,11 +84,11 @@ int32_t mo_sem_destroy(sem_t *s) if (unlikely(!sem_is_valid(s))) return ERR_FAIL; - NOSCHED_ENTER(); + spin_lock_irqsave(&semaphore_lock, &semaphore_flags); /* Check if any tasks are waiting - unsafe to destroy if so */ if (unlikely(queue_count(s->wait_q) > 0)) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&semaphore_lock, semaphore_flags); return ERR_TASK_BUSY; } @@ -93,7 +97,7 @@ int32_t mo_sem_destroy(sem_t *s) queue_t *wait_q = s->wait_q; s->wait_q = NULL; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&semaphore_lock, semaphore_flags); /* Clean up resources outside critical section */ queue_destroy(wait_q); @@ -108,19 +112,19 @@ void mo_sem_wait(sem_t *s) panic(ERR_SEM_OPERATION); } - NOSCHED_ENTER(); + spin_lock_irqsave(&semaphore_lock, &semaphore_flags); /* Fast path: resource available and no waiters (preserves FIFO ordering) */ if (likely(s->count > 0 && queue_count(s->wait_q) == 0)) { s->count--; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&semaphore_lock, semaphore_flags); return; } /* Slow path: must wait for resource */ /* Verify wait queue has capacity before attempting to block */ if (unlikely(queue_count(s->wait_q) >= s->max_waiters)) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&semaphore_lock, semaphore_flags); panic(ERR_SEM_OPERATION); /* Queue overflow - system error */ } @@ -145,7 +149,7 @@ int32_t mo_sem_trywait(sem_t *s) int32_t result = ERR_FAIL; - NOSCHED_ENTER(); + spin_lock_irqsave(&semaphore_lock, &semaphore_flags); /* Only succeed if resource available AND no waiters (preserves FIFO) */ if (s->count > 0 && queue_count(s->wait_q) == 0) { @@ -153,7 +157,7 @@ int32_t mo_sem_trywait(sem_t *s) result = ERR_OK; } - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&semaphore_lock, semaphore_flags); return result; } @@ -167,7 +171,7 @@ void mo_sem_signal(sem_t *s) bool should_yield = false; tcb_t *awakened_task = NULL; - NOSCHED_ENTER(); + spin_lock_irqsave(&semaphore_lock, &semaphore_flags); /* Check if any tasks are waiting for resources */ if (queue_count(s->wait_q) > 0) { @@ -198,7 +202,7 @@ void mo_sem_signal(sem_t *s) */ } - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&semaphore_lock, semaphore_flags); /* Yield outside critical section if we awakened a task. 
* This improves system responsiveness by allowing the awakened task to run @@ -228,9 +232,9 @@ int32_t mo_sem_waiting_count(sem_t *s) int32_t count; - NOSCHED_ENTER(); + spin_lock_irqsave(&semaphore_lock, &semaphore_flags); count = queue_count(s->wait_q); - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&semaphore_lock, semaphore_flags); return count; } diff --git a/kernel/task.c b/kernel/task.c index 59ffdae..cff2391 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -6,6 +6,7 @@ */ #include +#include #include #include @@ -18,13 +19,14 @@ void _timer_tick_handler(void); /* Kernel-wide control block (KCB) */ static kcb_t kernel_state = { .tasks = NULL, - .task_current = NULL, + .task_current = {}, .rt_sched = noop_rtsched, .timer_list = NULL, /* Managed by timer.c, but stored here. */ .next_tid = 1, /* Start from 1 to avoid confusion with invalid ID 0 */ .task_count = 0, .ticks = 0, .preemptive = true, /* Default to preemptive mode */ + .kcb_lock = SPINLOCK_INITIALIZER, }; kcb_t *kcb = &kernel_state; @@ -106,6 +108,7 @@ static inline uint8_t extract_priority_level(uint16_t prio) return 4; /* Default to normal priority */ } } +static uint32_t task_flags = 0; static inline bool is_valid_task(tcb_t *task) { @@ -142,10 +145,10 @@ static void task_stack_check(void) if (!should_check) return; - if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) + if (unlikely(!kcb || !get_task_current() || !get_task_current()->data)) panic(ERR_STACK_CHECK); - tcb_t *self = kcb->task_current->data; + tcb_t *self = get_task_current()->data; if (unlikely(!is_valid_task(self))) panic(ERR_STACK_CHECK); @@ -363,10 +366,10 @@ void sched_dequeue_task(tcb_t *task) /* Handle time slice expiration for current task */ void sched_tick_current_task(void) { - if (unlikely(!kcb->task_current || !kcb->task_current->data)) + if (unlikely(!get_task_current() || !get_task_current()->data)) return; - tcb_t *current_task = kcb->task_current->data; + tcb_t *current_task = get_task_current()->data; /* Decrement time slice */ if (current_task->time_slice > 0) @@ -411,17 +414,17 @@ void sched_wakeup_task(tcb_t *task) */ uint16_t sched_select_next_task(void) { - if (unlikely(!kcb->task_current || !kcb->task_current->data)) + if (unlikely(!get_task_current() || !get_task_current()->data)) panic(ERR_NO_TASKS); - tcb_t *current_task = kcb->task_current->data; + tcb_t *current_task = get_task_current()->data; /* Mark current task as ready if it was running */ if (current_task->state == TASK_RUNNING) current_task->state = TASK_READY; /* Round-robin search: find next ready task in the master task list */ - list_node_t *start_node = kcb->task_current; + list_node_t *start_node = get_task_current(); list_node_t *node = start_node; int iterations = 0; /* Safety counter to prevent infinite loops */ @@ -438,7 +441,7 @@ uint16_t sched_select_next_task(void) continue; /* Found a ready task */ - kcb->task_current = node; + set_task_current(node); task->state = TASK_RUNNING; task->time_slice = get_priority_timeslice(task->prio_level); @@ -460,7 +463,11 @@ static int32_t noop_rtsched(void) /* The main entry point from the system tick interrupt. */ void dispatcher(void) { + uint32_t flag = 0; + + spin_lock_irqsave(&kcb->kcb_lock, &flag); kcb->ticks++; + spin_unlock_irqrestore(&kcb->kcb_lock, flag); /* Handle time slice for current task */ sched_tick_current_task(); @@ -475,14 +482,14 @@ void dispatcher(void) /* Top-level context-switch for preemptive scheduling. 
*/ void dispatch(void) { - if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) + if (unlikely(!kcb || !get_task_current() || !get_task_current()->data)) panic(ERR_NO_TASKS); /* Save current context using dedicated HAL routine that handles both * execution context and processor state for context switching. * Returns immediately if this is the restore path. */ - if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0) + if (hal_context_save(((tcb_t *) get_task_current()->data)->context) != 0) return; #if CONFIG_STACK_PROTECTION @@ -502,20 +509,22 @@ void dispatch(void) hal_interrupt_tick(); /* Restore next task context */ - hal_context_restore(((tcb_t *) kcb->task_current->data)->context, 1); + hal_context_restore(((tcb_t *) get_task_current()->data)->context, 1); } /* Cooperative context switch */ void yield(void) { - if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) + uint32_t flag = 0; + + if (unlikely(!kcb || !get_task_current() || !get_task_current()->data)) return; /* Process deferred timer work during yield */ process_deferred_timer_work(); /* HAL context switching is used for preemptive scheduling. */ - if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0) + if (hal_context_save(((tcb_t *) get_task_current()->data)->context) != 0) return; #if CONFIG_STACK_PROTECTION @@ -523,11 +532,13 @@ void yield(void) #endif /* In cooperative mode, delays are only processed on an explicit yield. */ + spin_lock_irqsave(&kcb->kcb_lock, &flag); + if (!kcb->preemptive) list_foreach(kcb->tasks, delay_update, NULL); sched_select_next_task(); /* Use O(1) priority scheduler */ - hal_context_restore(((tcb_t *) kcb->task_current->data)->context, 1); + hal_context_restore(((tcb_t *) get_task_current()->data)->context, 1); } /* Stack initialization with minimal overhead */ @@ -591,12 +602,12 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) } /* Minimize critical section duration */ - CRITICAL_ENTER(); + spin_lock_irqsave(&kcb->kcb_lock, &task_flags); if (!kcb->tasks) { kcb->tasks = list_create(); if (!kcb->tasks) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); free(tcb->stack); free(tcb); panic(ERR_KCB_ALLOC); @@ -605,7 +616,7 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) list_node_t *node = list_pushback(kcb->tasks, tcb); if (!node) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); free(tcb->stack); free(tcb); panic(ERR_TCB_ALLOC); @@ -615,10 +626,10 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) tcb->id = kcb->next_tid++; kcb->task_count++; /* Cached count of active tasks for quick access */ - if (!kcb->task_current) - kcb->task_current = node; + if (!get_task_current()) + set_task_current(node); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); /* Initialize execution context outside critical section. 
*/ hal_context_init(&tcb->context, (size_t) tcb->stack, new_stack_size, @@ -640,16 +651,16 @@ int32_t mo_task_cancel(uint16_t id) if (id == 0 || id == mo_task_id()) return ERR_TASK_CANT_REMOVE; - CRITICAL_ENTER(); + spin_lock_irqsave(&kcb->kcb_lock, &task_flags); list_node_t *node = find_task_node_by_id(id); if (!node) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_TASK_NOT_FOUND; } tcb_t *tcb = node->data; if (!tcb || tcb->state == TASK_RUNNING) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_TASK_CANT_REMOVE; } @@ -665,7 +676,7 @@ int32_t mo_task_cancel(uint16_t id) } } - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); /* Free memory outside critical section */ free(tcb->stack); @@ -687,18 +698,18 @@ void mo_task_delay(uint16_t ticks) if (!ticks) return; - NOSCHED_ENTER(); - if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) { - NOSCHED_LEAVE(); + spin_lock_irqsave(&kcb->kcb_lock, &task_flags); + if (unlikely(!kcb || !get_task_current() || !get_task_current()->data)) { + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return; } - tcb_t *self = kcb->task_current->data; + tcb_t *self = get_task_current()->data; /* Set delay and blocked state - scheduler will skip blocked tasks */ self->delay = ticks; self->state = TASK_BLOCKED; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); mo_task_yield(); } @@ -708,24 +719,24 @@ int32_t mo_task_suspend(uint16_t id) if (id == 0) return ERR_TASK_NOT_FOUND; - CRITICAL_ENTER(); + spin_lock_irqsave(&kcb->kcb_lock, &task_flags); list_node_t *node = find_task_node_by_id(id); if (!node) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_TASK_NOT_FOUND; } tcb_t *task = node->data; if (!task || (task->state != TASK_READY && task->state != TASK_RUNNING && task->state != TASK_BLOCKED)) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_TASK_CANT_SUSPEND; } task->state = TASK_SUSPENDED; - bool is_current = (kcb->task_current == node); + bool is_current = (get_task_current() == node); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); if (is_current) mo_task_yield(); @@ -738,23 +749,22 @@ int32_t mo_task_resume(uint16_t id) if (id == 0) return ERR_TASK_NOT_FOUND; - CRITICAL_ENTER(); + spin_lock_irqsave(&kcb->kcb_lock, &task_flags); list_node_t *node = find_task_node_by_id(id); if (!node) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_TASK_NOT_FOUND; } tcb_t *task = node->data; if (!task || task->state != TASK_SUSPENDED) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_TASK_CANT_RESUME; } /* mark as ready - scheduler will find it */ task->state = TASK_READY; - - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_OK; } @@ -763,16 +773,16 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority) if (id == 0 || !is_valid_priority(priority)) return ERR_TASK_INVALID_PRIO; - CRITICAL_ENTER(); + spin_lock_irqsave(&kcb->kcb_lock, &task_flags); list_node_t *node = find_task_node_by_id(id); if (!node) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_TASK_NOT_FOUND; } tcb_t *task = node->data; if (!task) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_TASK_NOT_FOUND; } @@ -781,7 +791,8 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority) 
task->prio_level = extract_priority_level(priority); task->time_slice = get_priority_timeslice(task->prio_level); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); + return ERR_OK; } @@ -790,39 +801,42 @@ int32_t mo_task_rt_priority(uint16_t id, void *priority) if (id == 0) return ERR_TASK_NOT_FOUND; - CRITICAL_ENTER(); + spin_lock_irqsave(&kcb->kcb_lock, &task_flags); list_node_t *node = find_task_node_by_id(id); if (!node) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_TASK_NOT_FOUND; } tcb_t *task = node->data; if (!task) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_TASK_NOT_FOUND; } task->rt_prio = priority; - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_OK; } uint16_t mo_task_id(void) { - if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) + if (unlikely(!kcb || !get_task_current() || !get_task_current()->data)) return 0; - return ((tcb_t *) kcb->task_current->data)->id; + return ((tcb_t *) get_task_current()->data)->id; } int32_t mo_task_idref(void *task_entry) { - if (!task_entry || !kcb->tasks) + spin_lock_irqsave(&kcb->kcb_lock, &task_flags); + + if (!task_entry || !kcb->tasks) { + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return ERR_TASK_NOT_FOUND; + } - CRITICAL_ENTER(); list_node_t *node = list_foreach(kcb->tasks, refcmp, task_entry); - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&kcb->kcb_lock, task_flags); return node ? ((tcb_t *) node->data)->id : ERR_TASK_NOT_FOUND; } @@ -831,23 +845,46 @@ void mo_task_wfi(void) { /* Process deferred timer work before waiting */ process_deferred_timer_work(); + uint32_t flag = 0; if (!kcb->preemptive) return; + spin_lock_irqsave(&kcb->kcb_lock, &flag); volatile uint32_t current_ticks = kcb->ticks; - while (current_ticks == kcb->ticks) + spin_unlock_irqrestore(&kcb->kcb_lock, flag); + + while (1) { + spin_lock_irqsave(&kcb->kcb_lock, &flag); + if (current_ticks != kcb->ticks) { + spin_unlock_irqrestore(&kcb->kcb_lock, flag); + break; + } + spin_unlock_irqrestore(&kcb->kcb_lock, flag); hal_cpu_idle(); + } } uint16_t mo_task_count(void) { - return kcb->task_count; + uint32_t task_count; + uint32_t flag; + + spin_lock_irqsave(&kcb->kcb_lock, &flag); + task_count = kcb->task_count; + spin_unlock_irqrestore(&kcb->kcb_lock, flag); + return task_count; } uint32_t mo_ticks(void) { - return kcb->ticks; + uint32_t ticks; + uint32_t flag; + + spin_lock_irqsave(&kcb->kcb_lock, &flag); + ticks = kcb->ticks; + spin_unlock_irqrestore(&kcb->kcb_lock, flag); + return ticks; } uint64_t mo_uptime(void) @@ -857,14 +894,14 @@ uint64_t mo_uptime(void) void _sched_block(queue_t *wait_q) { - if (unlikely(!wait_q || !kcb || !kcb->task_current || - !kcb->task_current->data)) + if (unlikely(!wait_q || !kcb || !get_task_current() || + !get_task_current()->data)) panic(ERR_SEM_OPERATION); /* Process deferred timer work before blocking */ process_deferred_timer_work(); - tcb_t *self = kcb->task_current->data; + tcb_t *self = get_task_current()->data; if (queue_enqueue(wait_q, self) != 0) panic(ERR_SEM_OPERATION); diff --git a/kernel/timer.c b/kernel/timer.c index 4cda708..df2d7f1 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -32,6 +33,9 @@ static struct { } timer_cache[4]; static uint8_t timer_cache_index = 0; +static spinlock_t timer_lock = SPINLOCK_INITIALIZER; +static uint32_t timer_flags = 0; + /* Get a node from the pool, fall 
back to malloc if pool is empty */ static list_node_t *get_timer_node(void) { @@ -83,9 +87,9 @@ static int32_t timer_subsystem_init(void) if (unlikely(timer_initialized)) return ERR_OK; - NOSCHED_ENTER(); + spin_lock_irqsave(&timer_lock, &timer_flags); if (timer_initialized) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); return ERR_OK; } @@ -101,7 +105,7 @@ static int32_t timer_subsystem_init(void) list_destroy(kcb->timer_list); kcb->timer_list = NULL; } - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); return ERR_FAIL; } @@ -112,7 +116,7 @@ static int32_t timer_subsystem_init(void) } timer_initialized = true; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); return ERR_OK; } @@ -294,7 +298,7 @@ int32_t mo_timer_create(void *(*callback)(void *arg), if (unlikely(!t)) return ERR_FAIL; - NOSCHED_ENTER(); + spin_lock_irqsave(&timer_lock, &timer_flags); /* Initialize timer */ t->id = next_id++; @@ -307,7 +311,7 @@ int32_t mo_timer_create(void *(*callback)(void *arg), /* Insert into sorted all_timers_list */ if (unlikely(timer_insert_sorted_by_id(t) != ERR_OK)) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); free(t); return ERR_FAIL; } @@ -315,7 +319,7 @@ int32_t mo_timer_create(void *(*callback)(void *arg), /* Add to cache */ cache_timer(t->id, t); - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); return t->id; } @@ -324,11 +328,11 @@ int32_t mo_timer_destroy(uint16_t id) if (unlikely(!timer_initialized)) return ERR_FAIL; - NOSCHED_ENTER(); + spin_lock_irqsave(&timer_lock, &timer_flags); list_node_t *node = timer_find_node_by_id(id); if (unlikely(!node)) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); return ERR_FAIL; } @@ -351,7 +355,7 @@ int32_t mo_timer_destroy(uint16_t id) free(t); return_timer_node(node); - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); return ERR_OK; } @@ -362,11 +366,11 @@ int32_t mo_timer_start(uint16_t id, uint8_t mode) if (unlikely(!timer_initialized)) return ERR_FAIL; - NOSCHED_ENTER(); + spin_lock_irqsave(&timer_lock, &timer_flags); timer_t *t = timer_find_by_id_fast(id); if (unlikely(!t)) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); return ERR_FAIL; } @@ -380,11 +384,11 @@ int32_t mo_timer_start(uint16_t id, uint8_t mode) if (unlikely(timer_sorted_insert(t) != ERR_OK)) { t->mode = TIMER_DISABLED; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); return ERR_FAIL; } - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); return ERR_OK; } @@ -393,17 +397,17 @@ int32_t mo_timer_cancel(uint16_t id) if (unlikely(!timer_initialized)) return ERR_FAIL; - NOSCHED_ENTER(); + spin_lock_irqsave(&timer_lock, &timer_flags); timer_t *t = timer_find_by_id_fast(id); if (unlikely(!t || t->mode == TIMER_DISABLED)) { - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); return ERR_FAIL; } timer_remove_item_by_data(kcb->timer_list, t); t->mode = TIMER_DISABLED; - NOSCHED_LEAVE(); + spin_unlock_irqrestore(&timer_lock, timer_flags); return ERR_OK; } diff --git a/lib/malloc.c b/lib/malloc.c index ac439ab..5446269 100644 --- a/lib/malloc.c +++ b/lib/malloc.c @@ -3,6 +3,7 @@ #include #include #include +#include #include "private/utils.h" @@ -26,6 +27,8 @@ typedef struct __memblock { static memblock_t *first_free; static void *heap_start, *heap_end; static uint32_t free_blocks_count; /* track fragmentation */ +static spinlock_t malloc_lock = 
SPINLOCK_INITIALIZER; +static uint32_t malloc_flags = 0; /* Block manipulation macros */ #define IS_USED(b) ((b)->size & 1L) @@ -64,13 +67,13 @@ void free(void *ptr) if (!ptr) return; - CRITICAL_ENTER(); + spin_lock_irqsave(&malloc_lock, &malloc_flags); memblock_t *p = ((memblock_t *) ptr) - 1; /* Validate the block being freed */ if (!validate_block(p) || !IS_USED(p)) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&malloc_lock, malloc_flags); return; /* Invalid or double-free */ } @@ -102,7 +105,7 @@ void free(void *ptr) free_blocks_count--; } - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&malloc_lock, malloc_flags); } /* Selective coalescing: only when fragmentation becomes significant */ @@ -138,7 +141,7 @@ void *malloc(uint32_t size) if (size < MALLOC_MIN_SIZE) size = MALLOC_MIN_SIZE; - CRITICAL_ENTER(); + spin_lock_irqsave(&malloc_lock, &malloc_flags); /* Trigger coalescing only when fragmentation is high */ if (free_blocks_count > COALESCE_THRESHOLD) @@ -147,7 +150,7 @@ void *malloc(uint32_t size) memblock_t *p = first_free; while (p) { if (!validate_block(p)) { - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&malloc_lock, malloc_flags); return NULL; /* Heap corruption detected */ } @@ -170,13 +173,13 @@ void *malloc(uint32_t size) if (free_blocks_count > 0) free_blocks_count--; - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&malloc_lock, malloc_flags); return (void *) (p + 1); } p = p->next; } - CRITICAL_LEAVE(); + spin_unlock_irqrestore(&malloc_lock, malloc_flags); return NULL; /* allocation failed */ } diff --git a/lib/stdio.c b/lib/stdio.c index a89cf8a..b12127b 100644 --- a/lib/stdio.c +++ b/lib/stdio.c @@ -7,9 +7,13 @@ #include #include +#include #include "private/stdio.h" +static spinlock_t printf_lock = SPINLOCK_INITIALIZER; +static uint32_t printf_flags = 0; + /* Ignores output character, returns 0 (success). */ static int stdout_null(int c) { @@ -261,9 +265,11 @@ int32_t printf(const char *fmt, ...) va_list args; int32_t v; + spin_lock_irqsave(&printf_lock, &printf_flags); va_start(args, fmt); v = vsprintf(0, fmt, args); va_end(args); + spin_unlock_irqrestore(&printf_lock, printf_flags); return v; }
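
A note on the spin_lock_irqsave()/spin_unlock_irqrestore() pair introduced in arch/riscv/spinlock.h: the flags argument carries the caller's saved mstatus value (intr_save() returns it, spin_lock_irqsave() stores it through the pointer), so it is normally a local variable in each calling function; keeping it on the caller's stack means two harts contending for the same lock cannot overwrite each other's saved interrupt state. A minimal sketch of that usage, assuming only the spinlock.h API added by this patch (sample_lock, sample_counter and sample_counter_add() are illustrative names, not part of the patch):

    /* Assumes <stdint.h> and the new spinlock.h are included. */
    static spinlock_t sample_lock = SPINLOCK_INITIALIZER;
    static uint32_t sample_counter;

    uint32_t sample_counter_add(uint32_t n)
    {
        uint32_t flags; /* per-caller saved interrupt state, lives on this hart's stack */
        uint32_t v;

        spin_lock_irqsave(&sample_lock, &flags);
        sample_counter += n;
        v = sample_counter;
        spin_unlock_irqrestore(&sample_lock, flags);

        return v;
    }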
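
The hal.c changes address each hart's mtimecmp register directly: on the QEMU virt machine the CLINT maps mtimecmp for hart N at CLINT_BASE + 0x4000 + 8 * N, low word first and high word at offset +4. The same arithmetic written out as C helpers, for illustration only (the helper names are not part of the patch; CLINT_BASE and CT64() are the constant and macro hal.c already uses):

    /* Illustrative helpers mirroring the pointer arithmetic in mtimecmp_r()/mtimecmp_w(). */
    static inline volatile uint32_t *mtimecmp_lo(uint32_t hartid)
    {
        return (volatile uint32_t *) (CLINT_BASE + 0x4000u + 8u * hartid);
    }

    static inline volatile uint32_t *mtimecmp_hi(uint32_t hartid)
    {
        return (volatile uint32_t *) (CLINT_BASE + 0x4004u + 8u * hartid);
    }

    static inline uint64_t mtimecmp_read(uint32_t hartid)
    {
        uint32_t hi, lo;

        /* Re-read the high word until it is stable across the low-word read. */
        do {
            hi = *mtimecmp_hi(hartid);
            lo = *mtimecmp_lo(hartid);
        } while (hi != *mtimecmp_hi(hartid));

        return CT64(hi, lo);
    }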
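
Similarly, the per-hart stack setup in _entry() computes sp = _stack_top - hartid * STACK_SIZE_PER_HART, so hart 0 takes the topmost 512 KiB slice and each subsequent hart the slice below it; the linker script is assumed to provide _stack_top and to reserve at least MAX_HARTS * STACK_SIZE_PER_HART bytes below it. The equivalent computation in C, as an illustration only, using the STACK_SIZE_PER_HART constant added in boot.c:

    /* Assumes <stdint.h>; _stack_top is expected from the linker script. */
    extern uint32_t _stack_top;

    /* Returns the initial stack pointer a given hart starts with in _entry(). */
    static inline void *hart_initial_sp(uint32_t hartid)
    {
        return (uint8_t *) &_stack_top - (uintptr_t) hartid * STACK_SIZE_PER_HART;
    }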