Added async and hashmap libraries to the stdlib, along with tests for them; improved compiler performance, improved the built-in doc tool, and fixed some other small issues.

This commit is contained in:
igor
2026-03-11 14:41:40 +01:00
parent 4e51e8803f
commit 0ac7f26a18
7 changed files with 3377 additions and 265 deletions

2524
main.py

File diff suppressed because it is too large Load Diff

447
stdlib/async.sl Normal file
View File

@@ -0,0 +1,447 @@
# Async — Cooperative coroutine scheduler
#
# Provides lightweight cooperative multitasking built on context switching.
# Each task has its own data stack; the scheduler round-robins between
# ready tasks whenever `yield` is called.
#
# Task layout at address `task`:
# [task + 0] status (qword) 0=ready, 1=running, 2=done
# [task + 8] data_sp (qword) saved data stack pointer (r12)
# [task + 16] ret_sp (qword) saved return address for resume
# [task + 24] stack_base (qword) base of allocated stack buffer
# [task + 32] stack_size (qword) size of allocated stack buffer
# [task + 40] entry_fn (qword) pointer to the word to execute
#
# Scheduler layout at address `sched`:
# [sched + 0] task_count (qword)
# [sched + 8] current_idx (qword)
# [sched + 16] tasks_ptr (qword) pointer to array of task pointers
# [sched + 24] main_sp (qword) saved main data stack pointer
# [sched + 32] main_ret (qword) saved main return address
#
# Usage:
# 16 sched_new # create scheduler with capacity 16
# &my_worker1 sched_spawn # spawn task running my_worker1
# &my_worker2 sched_spawn # spawn task running my_worker2
# sched_run # run all tasks to completion
# sched_free # clean up
#
# Inside a task word, call `yield` to yield to the next ready task.
import mem.sl
# ── Constants ─────────────────────────────────────────────────
# Default per-task stack size: 8 KiB
macro ASYNC_STACK_SIZE 0 8192 ;
# Task status values.
# NOTE(review): the :asm words below hard-code these as immediates (0/1/2)
# rather than expanding the macros — keep both in sync if values ever change.
macro TASK_READY 0 0 ;
macro TASK_RUNNING 0 1 ;
macro TASK_DONE 0 2 ;
# ── Task accessors ────────────────────────────────────────────
# Field getters/setters for the 48-byte task struct (layout table above).
# Getters consume the task pointer and leave the field value; setters
# consume both operands and leave nothing.
#task_status [* | task] -> [* | status]
word task_status @ end
#task_set_status [*, task | status] -> [*]
word task_set_status ! end
#task_data_sp [* | task] -> [* | sp]
word task_data_sp 8 + @ end
#task_set_data_sp [*, task | sp] -> [*]
word task_set_data_sp swap 8 + swap ! end
#task_ret_sp [* | task] -> [* | ret]
word task_ret_sp 16 + @ end
#task_set_ret_sp [*, task | ret] -> [*]
word task_set_ret_sp swap 16 + swap ! end
#task_stack_base [* | task] -> [* | base]
word task_stack_base 24 + @ end
#task_stack_size [* | task] -> [* | size]
word task_stack_size 32 + @ end
#task_entry_fn [* | task] -> [* | fn_ptr]
word task_entry_fn 40 + @ end
# ── Scheduler accessors ──────────────────────────────────────
# Getters for the 40-byte scheduler struct (layout table above).
# Only current_idx has an sl-level setter; the other fields are written
# either inline (sched_new/sched_spawn) or directly by the :asm words.
#sched_task_count [* | sched] -> [* | n]
word sched_task_count @ end
#sched_current_idx [* | sched] -> [* | idx]
word sched_current_idx 8 + @ end
#sched_set_current_idx [*, sched | idx] -> [*]
word sched_set_current_idx swap 8 + swap ! end
#sched_tasks_ptr [* | sched] -> [* | ptr]
word sched_tasks_ptr 16 + @ end
#sched_main_sp [* | sched] -> [* | sp]
word sched_main_sp 24 + @ end
#sched_main_ret [* | sched] -> [* | ret]
word sched_main_ret 32 + @ end
# ── Global scheduler pointer (one active at a time) ──────────
# We store the current scheduler pointer in a global cell
# accessible via `mem`. Offset 0 of persistent buffer = scheduler ptr.
# The :asm words (yield / sched_run) read and write this same cell
# directly via `[rel persistent]`, so it is the single source of truth
# for the currently active scheduler.
#__async_sched_ptr [*] -> [* | ptr]
# Get the global scheduler pointer
word __async_sched_ptr
mem @
end
#__async_set_sched_ptr [* | sched] -> [*]
# Set the global scheduler pointer
word __async_set_sched_ptr
mem swap !
end
# ── Task creation ─────────────────────────────────────────────
#task_new [* | fn_ptr] -> [* | task]
# Create a new task that will execute the given word.
# The task starts READY with ret_sp = 0; the scheduler asm treats a zero
# ret_sp as "never started" and enters via entry_fn instead of resuming.
word task_new
>r # save fn_ptr; R: [fn_ptr]; stack: [*]
# Allocate task struct (48 bytes)
48 alloc # stack: [* | task]
# Allocate task stack
ASYNC_STACK_SIZE alloc >r # R: [fn_ptr, stk_base]; stack: [* | task]
# status = READY (0)
dup 0 !
# stack_base = stk_base
r@ over 24 + swap !
# stack_size = ASYNC_STACK_SIZE
ASYNC_STACK_SIZE over 32 + swap !
# data_sp = stk_base + ASYNC_STACK_SIZE - 8 (top cell; data stack grows down)
r@ ASYNC_STACK_SIZE + 8 - over 8 + swap !
# ret_sp = 0 (not yet started)
dup 16 + 0 !
# entry_fn = fn_ptr (drop stk_base from R first, then pop fn_ptr)
rdrop r> over 40 + swap !
end
#task_free [* | task] -> [*]
# Release a task: first its stack buffer, then the 48-byte struct itself.
word task_free
dup >r
task_stack_base r@ task_stack_size free
r> 48 free
end
# ── Scheduler creation ───────────────────────────────────────
#sched_new [* | max_tasks] -> [* | sched]
# Create a new scheduler with room for max_tasks.
# NOTE(review): max_tasks is not stored in the struct, so sched_spawn
# cannot bounds-check it and sched_free cannot munmap the task array —
# confirm whether the layout should grow a capacity field.
word sched_new
# Allocate scheduler struct (40 bytes)
40 alloc # stack: [*, max_tasks | sched]
# task_count = 0
dup 0 !
# current_idx = 0
dup 8 + 0 !
# Allocate tasks pointer array (max_tasks * 8)
over 8 * alloc
over 16 + over ! drop # sched.tasks_ptr = array
# main_sp = 0 (set when run starts)
dup 24 + 0 !
# main_ret = 0
dup 32 + 0 !
nip
end
#sched_free [* | sched] -> [*]
# Free the scheduler and all its tasks.
# NOTE(review): the tasks pointer array allocated by sched_new is never
# munmapped here (its size, max_tasks * 8, is not recorded anywhere) —
# this leaks one mapping per scheduler. Confirm and fix with sched_new.
word sched_free
# Free each task: loop i = 0 .. task_count-1
dup sched_task_count
0
while 2dup > do
2 pick sched_tasks_ptr over 8 * + @
task_free
1 +
end
2drop
40 free
end
# ── Spawning tasks ────────────────────────────────────────────
#sched_spawn [*, sched | fn_ptr] -> [* | sched]
# Spawn a new task in the scheduler.
# NOTE(review): no capacity check — the caller must not spawn more tasks
# than the max_tasks passed to sched_new, or the pointer array overruns.
word sched_spawn
task_new >r # save task; R:[task]; stack: [* | sched]
# Store task at tasks_ptr[count]
dup sched_tasks_ptr over @ 8 * + # [sched, &tasks[count]]
r@ ! # tasks[count] = task
# Increment task_count
dup @ 1 + over swap !
rdrop
end
# ── Context switch (the core of async) ───────────────────────
#yield [*] -> [*]
# Yield execution to the next ready task.
# Saves current data stack pointer, restores the next task's.
#
# Mechanism: the caller's x86 return address is popped into task.ret_sp and
# the per-task data stack pointer (r12) into task.data_sp. Resuming a task
# is then just "restore r12, jump to saved ret_sp"; a zero ret_sp means the
# task has never run and is entered through entry_fn instead.
#
# NOTE(review): only r12 is switched — the native call stack (rsp) is shared
# by all tasks, so yields are only safe at a uniform call depth (the top
# level of a worker word, as in the tests). Yielding from inside a nested
# call would let another task's frames clobber ours — confirm intended.
:asm yield {
; Save current r12 (data stack pointer) into current task
; Load scheduler pointer from mem (persistent buffer)
lea rax, [rel persistent]
mov rax, [rax] ; sched ptr
; Get current_idx
mov rbx, [rax + 8] ; current_idx
mov rcx, [rax + 16] ; tasks_ptr
mov rdx, [rcx + rbx*8] ; current task ptr
; Save r12 into task.data_sp
mov [rdx + 8], r12
; Save return address: caller's return is on the x86 stack
; We pop it and save it in task.ret_sp
pop rsi ; return address
mov [rdx + 16], rsi
; Mark current task as READY (it was RUNNING)
mov qword [rdx], 0 ; TASK_READY
; Find next ready task (round-robin)
mov r8, [rax] ; task_count
mov r9, rbx ; start from current_idx
.find_next:
inc r9
cmp r9, r8
jl .no_wrap
xor r9, r9 ; wrap to 0
.no_wrap:
cmp r9, rbx
je .no_other ; looped back: only one task
mov r10, [rcx + r9*8] ; candidate task
mov r11, [r10] ; status
cmp r11, 0 ; TASK_READY?
je .found_task
jmp .find_next
.no_other:
; Only one ready task (self): re-schedule self
mov r10, rdx
mov r9, rbx
.found_task:
; Update scheduler current_idx
mov [rax + 8], r9
; Mark new task as RUNNING
mov qword [r10], 1
; Check if task has a saved return address (non-zero means resumed)
mov rsi, [r10 + 16]
cmp rsi, 0
je .first_run
; Resume: restore data stack and jump to saved return address
mov r12, [r10 + 8]
push rsi
ret
.first_run:
; First run: set up data stack and call entry function
mov r12, [r10 + 8] ; task's data stack
; Save our scheduler info so the task can find it
; The task entry function needs no args — it uses the stack.
; Get entry function pointer
mov rdi, [r10 + 40]
; When the entry returns, we need to mark it done and yield
; Push a return address that handles cleanup
lea rsi, [rel .task_done]
push rsi
jmp rdi ; tail-call into task entry
.task_done:
; Task finished: mark as DONE
lea rax, [rel persistent]
mov rax, [rax] ; sched ptr
mov rbx, [rax + 8] ; current_idx
mov rcx, [rax + 16] ; tasks_ptr
mov rdx, [rcx + rbx*8] ; current task
mov qword [rdx], 2 ; TASK_DONE
; Find next ready task
mov r8, [rax] ; task_count
mov r9, rbx
.find_next2:
inc r9
cmp r9, r8
jl .no_wrap2
xor r9, r9
.no_wrap2:
cmp r9, rbx
je .all_done ; no more tasks
mov r10, [rcx + r9*8]
mov r11, [r10]
cmp r11, 0 ; TASK_READY?
je .found_task2
cmp r11, 1 ; TASK_RUNNING? (shouldn't happen)
je .found_task2
jmp .find_next2
.all_done:
; All tasks done: restore main context
mov r12, [rax + 24] ; main_sp
mov rsi, [rax + 32] ; main_ret
push rsi
ret
.found_task2:
mov [rax + 8], r9
mov qword [r10], 1
mov rsi, [r10 + 16]
cmp rsi, 0
je .first_run2
mov r12, [r10 + 8]
push rsi
ret
.first_run2:
mov r12, [r10 + 8]
mov rdi, [r10 + 40]
lea rsi, [rel .task_done]
push rsi
jmp rdi
} ;
# ── Scheduler run ─────────────────────────────────────────────
#sched_run [* | sched] -> [* | sched]
# Run all spawned tasks to completion.
# Saves the main context and starts the first task.
# The sched cell is deliberately only peeked, not popped: main_sp is saved
# with sched still on the data stack, so it is back on top when the main
# context resumes after .all_done_run / .no_tasks.
:asm sched_run {
mov rax, [r12] ; sched ptr (peek, keep on data stack)
; Store as global scheduler
lea rbx, [rel persistent]
mov [rbx], rax
; Save main data stack pointer (sched still on stack)
mov [rax + 24], r12
; Save main return address (where to come back)
pop rsi
mov [rax + 32], rsi
; Find first ready task
mov r8, [rax] ; task_count
cmp r8, 0
je .no_tasks
mov rcx, [rax + 16] ; tasks_ptr
xor r9, r9 ; idx = 0
.scan:
cmp r9, r8
jge .no_tasks
mov r10, [rcx + r9*8]
mov r11, [r10]
cmp r11, 0 ; TASK_READY?
je .start
inc r9
jmp .scan
.start:
mov [rax + 8], r9 ; set current_idx
mov qword [r10], 1 ; TASK_RUNNING
mov r12, [r10 + 8] ; task's data stack
mov rdi, [r10 + 40] ; entry function
lea rsi, [rel .task_finished]
push rsi
jmp rdi
.task_finished:
; Task returned — mark done and find next
lea rax, [rel persistent]
mov rax, [rax]
mov rbx, [rax + 8]
mov rcx, [rax + 16]
mov rdx, [rcx + rbx*8]
mov qword [rdx], 2 ; TASK_DONE
mov r8, [rax]
mov r9, rbx
.find_next_run:
inc r9
cmp r9, r8
jl .no_wrap_run
xor r9, r9
.no_wrap_run:
cmp r9, rbx
je .all_done_run
mov r10, [rcx + r9*8]
mov r11, [r10]
cmp r11, 0
je .found_run
jmp .find_next_run
.all_done_run:
; Restore main context
mov r12, [rax + 24]
mov rsi, [rax + 32]
push rsi
ret
.found_run:
mov [rax + 8], r9
mov qword [r10], 1
mov rsi, [r10 + 16]
cmp rsi, 0
je .first_run_entry
mov r12, [r10 + 8]
push rsi
ret
.first_run_entry:
mov r12, [r10 + 8]
mov rdi, [r10 + 40]
lea rsi, [rel .task_finished]
push rsi
jmp rdi
.no_tasks:
; Nothing to run — restore and return
mov r12, [rax + 24]
mov rsi, [rax + 32]
push rsi
ret
} ;

457
stdlib/hashmap.sl Normal file
View File

@@ -0,0 +1,457 @@
# Hash Map (open-addressing, linear probing)
#
# Layout at address `hm`:
# [hm + 0] count (qword) — number of live entries
# [hm + 8] capacity (qword) — number of slots (always power of 2)
# [hm + 16] keys_ptr (qword) — pointer to keys array (cap * 8 bytes)
# [hm + 24] vals_ptr (qword) — pointer to values array (cap * 8 bytes)
# [hm + 32] flags_ptr (qword) — pointer to flags array (cap bytes, 0=empty 1=live 2=tombstone)
#
# Keys and values are 64-bit integers. For string keys, store
# a hash or pointer; the caller is responsible for hashing.
#
# Allocation: mmap; free: munmap.
# Growth: doubles capacity when load factor exceeds 70%.
import mem.sl
# ── Hash function ─────────────────────────────────────────────
#__hm_hash [* | key] -> [* | hash]
# Integer hash: the splitmix64 finalizer (xor-shift by 30/27/31 with the
# two standard multiplicative constants). Replaces TOS in place.
# Clobbers rax and rcx only. __hm_probe and __hm_rehash inline the same
# mixing sequence — keep all three copies in sync.
:asm __hm_hash {
mov rax, [r12]
mov rcx, rax
shr rcx, 30
xor rax, rcx
mov rcx, 0xbf58476d1ce4e5b9
imul rax, rcx
mov rcx, rax
shr rcx, 27
xor rax, rcx
mov rcx, 0x94d049bb133111eb
imul rax, rcx
mov rcx, rax
shr rcx, 31
xor rax, rcx
mov [r12], rax
} ;
# ── Accessors ─────────────────────────────────────────────────
# Read-only getters for the 40-byte header (layout table above).
# Each consumes the hm pointer and leaves the field value.
#hm_count [* | hm] -> [* | count]
word hm_count @ end
#hm_capacity [* | hm] -> [* | cap]
word hm_capacity 8 + @ end
#hm_keys [* | hm] -> [* | ptr]
word hm_keys 16 + @ end
#hm_vals [* | hm] -> [* | ptr]
word hm_vals 24 + @ end
#hm_flags [* | hm] -> [* | ptr]
word hm_flags 32 + @ end
# ── Constructor / Destructor ──────────────────────────────────
#hm_new [* | cap_hint] -> [* | hm]
# Create a new hash map. Capacity is rounded up to next power of 2 (min 8).
# Note: alloc uses mmap(MAP_ANONYMOUS) which returns zeroed pages, so the
# flags array starts out all-empty with no explicit initialization.
word hm_new
dup 8 < if drop 8 end
# Round up to power of 2: start p=1, double until p >= n, then keep p
1 while 2dup swap < do 2 * end nip
>r # r0 = cap
# Allocate header (40 bytes)
40 alloc # stack: [* | hm]
# count = 0
0 over swap !
# capacity
r@ over 8 + swap !
# keys array: cap * 8 (zeroed by mmap)
r@ 8 * alloc
over 16 + swap !
# vals array: cap * 8 (zeroed by mmap)
r@ 8 * alloc
over 24 + swap !
# flags array: cap bytes (zeroed by mmap)
r> alloc
over 32 + swap !
end
#hm_free [* | hm] -> [*]
# Release every buffer owned by the map, then the 40-byte header itself.
word hm_free
dup hm_keys over hm_capacity 8 * free
dup hm_vals over hm_capacity 8 * free
dup hm_flags over hm_capacity free
40 free
end
# ── Core probe: find slot in assembly ─────────────────────────
#__hm_probe [*, hm | key] -> [*, slot_idx | found_flag]
# Linear probe. Returns slot index and 1 if found, or an insertable slot
# (the first tombstone seen, else the first empty slot) and 0.
#
# Fix: the scan is now bounded by capacity (probe counter in rdx). The old
# loop only terminated on an empty (flag 0) slot, so a table whose non-live
# slots were all tombstones — reachable via repeated hm_del/hm_set churn,
# because only the live count triggers rehash — would spin forever when
# probing a missing key. Now such a probe falls back to the recorded
# tombstone after one full sweep.
:asm __hm_probe {
; TOS = key, NOS = hm
push r14 ; save callee-saved reg
mov rdi, [r12] ; key
mov rsi, [r12 + 8] ; hm ptr
; Hash the key (splitmix64 finalizer — keep in sync with __hm_hash)
mov rax, rdi
mov rcx, rax
shr rcx, 30
xor rax, rcx
mov rcx, 0xbf58476d1ce4e5b9
imul rax, rcx
mov rcx, rax
shr rcx, 27
xor rax, rcx
mov rcx, 0x94d049bb133111eb
imul rax, rcx
mov rcx, rax
shr rcx, 31
xor rax, rcx
; rax = hash
mov r8, [rsi + 8] ; capacity
mov r9, r8
dec r9 ; mask = cap - 1
and rax, r9 ; idx = hash & mask
mov r10, [rsi + 16] ; keys_ptr
mov r11, [rsi + 32] ; flags_ptr
; r14 = first tombstone slot (-1 = none)
mov r14, -1
xor rdx, rdx ; probe counter — bound the scan by capacity
.loop:
cmp rdx, r8 ; scanned every slot?
jge .empty ; yes: fall back to first tombstone (if any)
inc rdx
movzx ecx, byte [r11 + rax] ; flags[idx]
cmp ecx, 0 ; empty?
je .empty
cmp ecx, 2 ; tombstone?
je .tombstone
; live: check key match
cmp rdi, [r10 + rax*8]
je .found
; advance
inc rax
and rax, r9
jmp .loop
.tombstone:
; remember first tombstone
cmp r14, -1
jne .skip_save
mov r14, rax
.skip_save:
inc rax
and rax, r9
jmp .loop
.empty:
; Use first tombstone if available
cmp r14, -1
je .use_empty
mov rax, r14
.use_empty:
; Return: slot=rax, found=0
mov [r12 + 8], rax ; overwrite hm slot with idx
mov qword [r12], 0 ; found = 0
pop r14
ret
.found:
; Return: slot=rax, found=1
mov [r12 + 8], rax
mov qword [r12], 1
pop r14
} ;
# ── Internal: rehash ──────────────────────────────────────────
#__hm_rehash [* | hm] -> [* | hm]
# Double capacity and re-insert all live entries.
# Strategy: create new map, copy entries, swap internals, free old arrays.
# Dropping tombstones here is what keeps probe chains short after churn.
# All long-lived values are parked on the x86 stack because the mmap
# syscalls use r8-r10 as arguments and `syscall` clobbers rcx/r11; the
# bracketed [rsp + N] comments below track the current push depth.
:asm __hm_rehash {
push r14 ; save callee-saved regs
push r15
mov rbx, [r12] ; hm
; Load old state
mov r8, [rbx + 8] ; old_cap
mov r9, [rbx + 16] ; old_keys
mov r10, [rbx + 24] ; old_vals
mov r11, [rbx + 32] ; old_flags
; New capacity = old_cap * 2
mov rdi, r8
shl rdi, 1 ; new_cap
; Save hm, old_cap, old_keys, old_vals, old_flags, new_cap on x86 stack
push rbx
push r8
push r9
push r10
push r11
push rdi
; Allocate new_keys = alloc(new_cap * 8)
; mmap(0, size, PROT_READ|PROT_WRITE=3, MAP_PRIVATE|MAP_ANON=34, -1, 0)
mov rax, 9
xor rdi, rdi
mov rsi, [rsp] ; new_cap
shl rsi, 3 ; new_cap * 8
mov rdx, 3
mov r10, 34
push r8 ; save r8
mov r8, -1
xor r9, r9
syscall
pop r8
push rax ; save new_keys
; Allocate new_vals = alloc(new_cap * 8)
mov rax, 9
xor rdi, rdi
mov rsi, [rsp + 8] ; new_cap
shl rsi, 3
mov rdx, 3
mov r10, 34
push r8
mov r8, -1
xor r9, r9
syscall
pop r8
push rax ; save new_vals
; Allocate new_flags = alloc(new_cap)
mov rax, 9
xor rdi, rdi
mov rsi, [rsp + 16] ; new_cap
mov rdx, 3
mov r10, 34
push r8
mov r8, -1
xor r9, r9
syscall
pop r8
push rax ; save new_flags
; Stack: new_flags, new_vals, new_keys, new_cap, old_flags, old_vals, old_keys, old_cap, hm
; Offsets: [rsp]=new_flags, [rsp+8]=new_vals, [rsp+16]=new_keys
; [rsp+24]=new_cap, [rsp+32]=old_flags, [rsp+40]=old_vals
; [rsp+48]=old_keys, [rsp+56]=old_cap, [rsp+64]=hm
mov r14, [rsp + 24] ; new_cap
dec r14 ; new_mask
; Re-insert loop: for i in 0..old_cap
xor rcx, rcx ; i = 0
mov r8, [rsp + 56] ; old_cap
.rehash_loop:
cmp rcx, r8
jge .rehash_done
; Check old_flags[i]
mov rdi, [rsp + 32] ; old_flags
movzx eax, byte [rdi + rcx]
cmp eax, 1 ; live? (tombstones and empties are skipped)
jne .rehash_next
; Get key and val
mov rdi, [rsp + 48] ; old_keys
mov rsi, [rdi + rcx*8] ; key
mov rdi, [rsp + 40] ; old_vals
mov rdx, [rdi + rcx*8] ; val
; Hash key to find slot in new map
push rcx
push rsi
push rdx
; Hash rsi (key) — same splitmix64 finalizer as __hm_hash
mov rax, rsi
mov rbx, rax
shr rbx, 30
xor rax, rbx
mov rbx, 0xbf58476d1ce4e5b9
imul rax, rbx
mov rbx, rax
shr rbx, 27
xor rax, rbx
mov rbx, 0x94d049bb133111eb
imul rax, rbx
mov rbx, rax
shr rbx, 31
xor rax, rbx
and rax, r14 ; slot = hash & new_mask
; Linear probe (new map is all empty, so first empty slot is fine)
mov rdi, [rsp + 24] ; new_flags (3 pushes offset: +24)
.probe_new:
movzx ebx, byte [rdi + rax]
cmp ebx, 0
je .probe_found
inc rax
and rax, r14
jmp .probe_new
.probe_found:
; Store key, val, flag
pop rdx ; val
pop rsi ; key
mov rdi, [rsp + 16 + 8] ; new_keys (adjusted for 1 remaining push: rcx)
mov [rdi + rax*8], rsi
mov rdi, [rsp + 8 + 8] ; new_vals
mov [rdi + rax*8], rdx
mov rdi, [rsp + 0 + 8] ; new_flags
mov byte [rdi + rax], 1
pop rcx ; restore i
.rehash_next:
inc rcx
jmp .rehash_loop
.rehash_done:
; Free old arrays
; munmap(old_keys, old_cap * 8)
mov rax, 11
mov rdi, [rsp + 48] ; old_keys
mov rsi, [rsp + 56] ; old_cap
shl rsi, 3
syscall
; munmap(old_vals, old_cap * 8)
mov rax, 11
mov rdi, [rsp + 40] ; old_vals
mov rsi, [rsp + 56]
shl rsi, 3
syscall
; munmap(old_flags, old_cap)
mov rax, 11
mov rdi, [rsp + 32] ; old_flags
mov rsi, [rsp + 56] ; old_cap
syscall
; Update hm header (count is untouched: live-entry count is unchanged)
mov rbx, [rsp + 64] ; hm
mov rax, [rsp + 24] ; new_cap
mov [rbx + 8], rax
mov rax, [rsp + 16] ; new_keys
mov [rbx + 16], rax
mov rax, [rsp + 8] ; new_vals
mov [rbx + 24], rax
mov rax, [rsp] ; new_flags
mov [rbx + 32], rax
; Clean up x86 stack (9 pushes + 2 callee-saved)
add rsp, 72
pop r15
pop r14
; hm is still on r12 stack, unchanged
} ;
# ── Public API ────────────────────────────────────────────────
#hm_set [*, hm, key | val] -> [* | hm]
# Insert or update a key-value pair. Returns the (possibly moved) hm.
word hm_set
>r >r # val then key to R; key ends on top: 0 rpick=key, 1 rpick=val
# Check load: count * 10 >= capacity * 7 → rehash (70% load factor)
dup hm_count 10 * over hm_capacity 7 * >= if
__hm_rehash
end
# Probe for key (r@ = key, top of return stack)
dup r@ __hm_probe # stack: [*, hm | slot, found]
swap >r # push slot; R: [val, key, slot]
# Now: 0 rpick=slot, 1 rpick=key, 2 rpick=val
# Store key at keys[slot]
over hm_keys r@ 8 * + 1 rpick !
# Store val at vals[slot]
over hm_vals r@ 8 * + 2 rpick !
# Set flag = 1 (live); r> pops slot off R
over hm_flags r> + 1 c!
# If found=0 (new entry), increment count
0 == if
dup @ 1 + over swap !
end
rdrop rdrop # drop key, val
end
#hm_get [*, hm | key] -> [*, hm | val, found_flag]
# Look up a key. Returns (val 1) if found, (0 0) if not.
# hm itself is preserved beneath the two results.
word hm_get
over swap __hm_probe # stack: [*, hm | slot, found]
dup 0 == if
nip 0 swap # miss: drop slot; stack: [*, hm | 0, 0]
else
swap # [*, hm | found, slot]
2 pick hm_vals swap 8 * + @ # fetch vals[slot]
swap # stack: [*, hm | val, 1]
end
end
#hm_has [*, hm | key] -> [*, hm | bool]
# Membership test: probe via hm_get and keep only the found flag (1 or 0).
word hm_has
hm_get swap drop
end
#hm_del [*, hm | key] -> [*, hm | deleted_flag]
# Delete a key. Returns 1 if deleted, 0 if not found.
# Leaves a tombstone (flag=2) so probe chains that pass through this slot
# still reach entries stored beyond it; tombstones are swept at rehash.
word hm_del
over swap __hm_probe # stack: [*, hm | slot, found]
dup 0 == if
nip # stack: [*, hm | 0]
else
drop # drop found=1; stack: [*, hm | slot]
# Set flag to tombstone (2)
over hm_flags over + 2 c!
drop # drop slot
# Decrement count
dup @ 1 - over swap !
1 # stack: [*, hm | 1]
end
end
#__hm_bzero [*, len | addr] -> [*]
# Zero len bytes at addr. Pops both operands off the data stack.
# Clobbers rax/rcx/rdi; relies on the direction flag being clear
# (the ABI default), since rep stosb walks forward.
:asm __hm_bzero {
mov rdi, [r12] ; addr
mov rcx, [r12 + 8] ; len
add r12, 16 ; pop both cells
xor al, al
rep stosb
} ;
#hm_clear [* | hm] -> [* | hm]
# Remove all entries without freeing the map; hm stays on the stack so the
# caller can keep using it. (Signature comment fixed: the word only peeks
# hm — it was previously documented as consuming it.)
# Only count and the flags array are reset; stale keys/vals bytes remain
# but are unreachable because every flag byte is 0 (empty).
word hm_clear
dup 0 ! # count = 0
dup hm_capacity
over hm_flags __hm_bzero
end

27
tests/async.expected Normal file
View File

@@ -0,0 +1,27 @@
0
1
8192
1
1
0
0
1
1
2
1
2
3
4
5
6
99
42
3
1
2
42
3
4
5
6
100

79
tests/async.sl Normal file
View File

@@ -0,0 +1,79 @@
import ../stdlib/stdlib.sl
import ../stdlib/io.sl
import ../stdlib/mem.sl
import ../stdlib/async.sl
# ── Worker words for scheduler tests ─────────────────────────
# worker_a and worker_b print alternating milestones and yield between
# them, so round-robin scheduling interleaves their output 1..6
# (see tests/async.expected).
word worker_a
1 puti cr
yield
3 puti cr
yield
5 puti cr
end
word worker_b
2 puti cr
yield
4 puti cr
yield
6 puti cr
end
# A worker that never yields — exercises the run-to-completion path.
word worker_single
42 puti cr
end
word main
# End-to-end async tests: task accessors, scheduler lifecycle, interleaved
# round-robin execution, and clean return to the main context.
# Expected output is matched line-for-line by tests/async.expected.
# ── task_new / task_status / task_entry_fn / task_stack_base/size ──
&worker_single task_new
dup task_status puti cr # 0 (TASK_READY)
dup task_stack_base 0 != puti cr # 1 (non-null)
dup task_stack_size puti cr # 8192
dup task_entry_fn 0 != puti cr # 1 (non-null fn ptr)
dup task_data_sp 0 != puti cr # 1 (non-null)
dup task_ret_sp puti cr # 0 (not yet started)
task_free
# ── sched_new / sched_task_count ──
8 sched_new
dup sched_task_count puti cr # 0
dup sched_tasks_ptr 0 != puti cr # 1 (non-null)
# ── sched_spawn ──
&worker_a sched_spawn
dup sched_task_count puti cr # 1
&worker_b sched_spawn
dup sched_task_count puti cr # 2
# ── sched_run (interleaved output; sched stays on the stack) ──
sched_run # prints: 1 2 3 4 5 6
# ── post-run: verify we returned cleanly ──
99 puti cr # 99
sched_free
# ── single-task scheduler (no yield in worker) ──
4 sched_new
&worker_single sched_spawn
sched_run # prints: 42
sched_free
# ── three workers to test round-robin with more tasks ──
8 sched_new
&worker_a sched_spawn
&worker_b sched_spawn
&worker_single sched_spawn
dup sched_task_count puti cr # 3
sched_run # worker_a:1, worker_b:2, worker_single:42
# worker_a:3, worker_b:4
# worker_a:5, worker_b:6
sched_free
100 puti cr # 100 (clean exit)
end

28
tests/hashmap.expected Normal file
View File

@@ -0,0 +1,28 @@
0
8
3
100
200
300
00
1
0
111
3
1
0
0
2
999
3
1
1
1
0
0
1
7
10
40
70
77

80
tests/hashmap.sl Normal file
View File

@@ -0,0 +1,80 @@
import ../stdlib/stdlib.sl
import ../stdlib/io.sl
import ../stdlib/mem.sl
import ../stdlib/hashmap.sl
word main
# End-to-end hashmap tests: CRUD, misses, tombstone reuse, clear, growth.
# Expected output is matched line-for-line by tests/hashmap.expected.
# ── hm_new / hm_count / hm_capacity ──
8 hm_new
dup hm_count puti cr # 0
dup hm_capacity puti cr # 8
# ── hm_set / hm_get ──
dup 42 100 hm_set
dup 99 200 hm_set
dup 7 300 hm_set
dup hm_count puti cr # 3
dup 42 hm_get drop puti cr # 100
dup 99 hm_get drop puti cr # 200
dup 7 hm_get drop puti cr # 300
# ── hm_get miss ──
dup 999 hm_get # should be 0, 0
puti dup puti cr drop # 00
# ── hm_has ──
dup 42 hm_has puti cr # 1
dup 999 hm_has puti cr # 0
# ── hm_set overwrite ──
dup 42 111 hm_set
dup 42 hm_get drop puti cr # 111
dup hm_count puti cr # 3 (no new entry)
# ── hm_del ──
dup 99 hm_del puti cr # 1 (deleted)
dup 99 hm_del puti cr # 0 (already gone)
dup 99 hm_has puti cr # 0
dup hm_count puti cr # 2
# ── insert after delete (tombstone reuse) ──
dup 99 999 hm_set
dup 99 hm_get drop puti cr # 999
dup hm_count puti cr # 3
# ── hm_keys / hm_vals / hm_flags raw access ──
dup hm_keys 0 != puti cr # 1 (non-null pointer)
dup hm_vals 0 != puti cr # 1
dup hm_flags 0 != puti cr # 1
# ── hm_clear (note: hm stays on the stack) ──
hm_clear
dup hm_count puti cr # 0
dup 42 hm_has puti cr # 0 (cleared)
# ── rehash (force growth) ──
# insert enough to trigger rehash on the cleared map
dup 1 10 hm_set
dup 2 20 hm_set
dup 3 30 hm_set
dup 4 40 hm_set
dup 5 50 hm_set
dup 6 60 hm_set # count = 6 of 8 after this insert
dup 7 70 hm_set # 6*10 >= 8*7 → rehash to cap 16, then insert
dup hm_capacity 8 > puti cr # 1 (grew)
dup hm_count puti cr # 7
# verify all entries survived rehash
dup 1 hm_get drop puti cr # 10
dup 4 hm_get drop puti cr # 40
dup 7 hm_get drop puti cr # 70
# ── large key values ──
dup 1000000 77 hm_set
dup 1000000 hm_get drop puti cr # 77
hm_free
end