Add a searchable `--docs` TUI explorer, `--no-folding`/`--no-peephole` optimizer flags, and stack-effect doc comments across the stdlib
This commit is contained in:
2
SPEC.md
2
SPEC.md
@@ -9,7 +9,7 @@ This document reflects the implementation that ships in this repository today (`
|
||||
- **Unsafe by design** – Memory, syscalls, inline assembly, and FFI expose raw machine power. The standard library is intentionally thin and policy-free.
|
||||
|
||||
## 2. Toolchain and Repository Layout
|
||||
- **Driver (`main.py`)** – Supports `python main.py source.sl -o a.out`, `--emit-asm`, `--run`, `--dbg`, `--repl`, `--temp-dir`, `--clean`, repeated `-I/--include` paths, and repeated `-l` linker flags (either `-lfoo` or `-l libc.so.6`). Unknown `-l` flags are collected and forwarded to the linker. Pass `--ct-run-main` to run the program's `main` word on the compile-time VM before NASM/ld run, which surfaces discrepancies between compile-time and runtime semantics. Pass `--no-artifact` to stop after compilation/assembly emission without building an output file, or use `--script` as shorthand for `--no-artifact --ct-run-main`.
|
||||
- **Driver (`main.py`)** – Supports `python main.py source.sl -o a.out`, `--emit-asm`, `--run`, `--dbg`, `--repl`, `--temp-dir`, `--clean`, repeated `-I/--include` paths, and repeated `-l` linker flags (either `-lfoo` or `-l libc.so.6`). Unknown `-l` flags are collected and forwarded to the linker. Pass `--ct-run-main` to run the program's `main` word on the compile-time VM before NASM/ld run, which surfaces discrepancies between compile-time and runtime semantics. Pass `--no-artifact` to stop after compilation/assembly emission without building an output file, or use `--script` as shorthand for `--no-artifact --ct-run-main`. Pass `--docs` to open a searchable TUI that scans stack-effect comments and nearby docs from `.sl` files (`--docs-query` sets initial filter and `--docs-root` adds scan roots). `--no-folding` disables constant folding and `--no-peephole` disables peephole rewrites (for example `swap drop` → `nip`, `dup drop` removed, `swap over` → `tuck`, `nip drop` → `2drop`).
|
||||
- **REPL** – `--repl` launches a stateful session with commands such as `:help`, `:reset`, `:load`, `:call <word>`, `:edit`, and `:show`. The REPL still emits/links entire programs for each run; it simply manages the session source for you.
|
||||
- **Imports** – `import relative/or/absolute/path.sl` inserts the referenced file textually. Resolution order: (1) absolute path, (2) relative to the importing file, (3) each include path (defaults: project root and `./stdlib`). Each file is included at most once per compilation unit. Import lines leave blank placeholders so error spans stay meaningful.
|
||||
- **Workspace** – `stdlib/` holds library modules, `tests/` contains executable samples with `.expected` outputs, `extra_tests/` houses standalone integration demos, and `libs/` collects opt-in extensions such as `libs/fn.sl` and `libs/nob.sl`.
|
||||
|
||||
10
args.sl
10
args.sl
@@ -1,10 +0,0 @@
|
||||
import stdlib/stdlib.sl

# main: iterate indices 0..argc-1 and print each command-line argument on
# its own line, then push exit status 0.
# (argv@ appears to fetch the argument string for the index on the stack —
# verify against stdlib/stdlib.sl.)
word main
    0 argc for
        dup                  # keep the index for the next iteration
        argv@ dup strlen puts
        1 +                  # advance to the next argument index
    end
    0                        # exit status
end
|
||||
26
c_extern.sl
26
c_extern.sl
@@ -1,26 +0,0 @@
|
||||
import stdlib.sl
import float.sl

# Demo of C-style extern declarations with automatic ABI handling:
# calls into libc/libm and prints integer-scaled float results.

# C-style externs (auto ABI handling)
extern long labs(long n)
extern void exit(int status)
extern double atan2(double y, double x)

word main
    # Test C-style extern with implicit ABI handling
    -10 labs puti cr

    # Basic math (scaled to avoid libc printf dependency)
    1.5 2.5 f+                # 4.0
    1000000.0 f*
    float>int puti cr         # Prints 4000000 (6 decimal places of 4.0)

    # External math library (libm)
    10.0 10.0 atan2           # Result is pi/4
    4.0 f*                    # Approx pi
    1000000.0 f*
    float>int puti cr         # Prints scaled pi value

    # Test extern void
    0 exit
end
|
||||
@@ -31,7 +31,7 @@ end
|
||||
immediate
|
||||
compile-only
|
||||
|
||||
|
||||
#extend-syntax [*] -> [*]
|
||||
word extend-syntax
|
||||
"call-syntax-rewrite" set-token-hook
|
||||
end
|
||||
|
||||
495
main.py
495
main.py
@@ -14,6 +14,7 @@ import argparse
|
||||
import ctypes
|
||||
import mmap
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess
|
||||
import sys
|
||||
@@ -2860,6 +2861,7 @@ class Assembler:
|
||||
dictionary: Dictionary,
|
||||
*,
|
||||
enable_constant_folding: bool = True,
|
||||
enable_peephole_optimization: bool = True,
|
||||
loop_unroll_threshold: int = 8,
|
||||
) -> None:
|
||||
self.dictionary = dictionary
|
||||
@@ -2872,8 +2874,65 @@ class Assembler:
|
||||
self._emit_stack: List[str] = []
|
||||
self._export_all_defs: bool = False
|
||||
self.enable_constant_folding = enable_constant_folding
|
||||
self.enable_peephole_optimization = enable_peephole_optimization
|
||||
self.loop_unroll_threshold = loop_unroll_threshold
|
||||
|
||||
def _peephole_optimize_definition(self, definition: Definition) -> None:
    """Rewrite short stack-manipulation word sequences into canonical forms.

    Repeatedly scans ``definition.body`` and replaces known multi-word
    patterns (e.g. ``swap drop`` -> ``nip``) until a full pass makes no
    change, so rewrites can cascade (``dup swap drop`` -> ``dup nip`` -> ``).
    Only plain ``word`` ops participate; literals and control-flow ops act
    as barriers that no pattern may span. Mutates ``definition.body`` in
    place; returns None.
    """
    # Pattern -> replacement table. Keys are unique, so a dict lookup is
    # equivalent to the previous linear scan but O(1) per window.
    rules: Dict[Tuple[str, ...], Tuple[str, ...]] = {
        ("swap", "drop"): ("nip",),
        # Stack no-ops: these sequences leave the stack unchanged.
        ("dup", "drop"): (),
        ("swap", "swap"): (),
        ("over", "drop"): (),
        ("dup", "nip"): (),
        ("2dup", "2drop"): (),
        ("2swap", "2swap"): (),
        ("rot", "rot", "rot"): (),
        # Canonicalizations into a single cheaper word.
        ("swap", "over"): ("tuck",),
        ("swap", "nip"): ("drop",),
        ("nip", "drop"): ("2drop",),
        ("tuck", "drop"): ("swap",),
    }
    max_pat_len = max(len(pattern) for pattern in rules)

    nodes = definition.body
    changed = True
    while changed:  # iterate to a fixed point
        changed = False
        optimized: List[Op] = []
        idx = 0
        while idx < len(nodes):
            matched = False
            # Longest window first, so e.g. "rot rot rot" is not broken up
            # by a shorter overlapping rule.
            for window in range(min(max_pat_len, len(nodes) - idx), 1, -1):
                segment = nodes[idx:idx + window]
                if any(node.op != "word" for node in segment):
                    continue
                replacement = rules.get(tuple(str(node.data) for node in segment))
                if replacement is None:
                    continue
                # Attribute the rewritten word(s) to the first source span.
                base_loc = segment[0].loc
                optimized.extend(
                    Op(op="word", data=repl_name, loc=base_loc)
                    for repl_name in replacement
                )
                idx += window
                changed = True
                matched = True
                break
            if matched:
                continue
            optimized.append(nodes[idx])
            idx += 1
        nodes = optimized
    definition.body = nodes
|
||||
|
||||
def _fold_constants_in_definition(self, definition: Definition) -> None:
|
||||
optimized: List[Op] = []
|
||||
for node in definition.body:
|
||||
@@ -3072,6 +3131,10 @@ class Assembler:
|
||||
for defn in definitions:
|
||||
if isinstance(defn, Definition):
|
||||
self._unroll_constant_for_loops(defn)
|
||||
if self.enable_peephole_optimization:
|
||||
for defn in definitions:
|
||||
if isinstance(defn, Definition):
|
||||
self._peephole_optimize_definition(defn)
|
||||
if self.enable_constant_folding:
|
||||
for defn in definitions:
|
||||
if isinstance(defn, Definition):
|
||||
@@ -5332,6 +5395,401 @@ def _repl_build_source(
|
||||
return "\n".join(lines) + "\n"
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class DocEntry:
    """One documented symbol harvested from a ``.sl`` source file."""

    name: str          # symbol name as written after `word` / `:asm` / `:py`
    stack_effect: str  # stack-effect text from a `#name ... -> ...` comment; "" if none
    description: str   # leading comment lines joined into one string; "" if none
    kind: str          # definition kind: "word", "asm", or "py"
    path: Path         # file the definition was found in
    line: int          # 1-based line number of the definition
|
||||
|
||||
|
||||
# A stack-effect comment is a `#` comment whose first token is a symbol name,
# e.g. `#dup [* | x] -> [* | x, x]`.
_DOC_STACK_RE = re.compile(r"^\s*#\s*([^\s]+)\s*(.*)$")
# Definition openers for the three kinds of documented symbols.
_DOC_WORD_RE = re.compile(r"^\s*word\s+([^\s]+)\b")
_DOC_ASM_RE = re.compile(r"^\s*:asm\s+([^\s{]+)")
_DOC_PY_RE = re.compile(r"^\s*:py\s+([^\s{]+)")


def _extract_stack_comment(text: str) -> Optional[Tuple[str, str]]:
    """Return ``(name, effect)`` when *text* is a stack-effect comment.

    A line qualifies when it is a ``#`` comment whose first token is a
    symbol name and whose remainder contains an ``->`` arrow; otherwise
    ``None`` is returned.
    """
    found = _DOC_STACK_RE.match(text)
    if found is None:
        return None
    symbol, effect = (part.strip() for part in found.groups())
    if symbol and "->" in effect:
        return symbol, effect
    return None
|
||||
|
||||
|
||||
def _extract_definition_name(text: str) -> Optional[Tuple[str, str]]:
    """Return ``(kind, name)`` when *text* opens a definition, else ``None``.

    Recognizes the three definition openers in priority order:
    ``word <name>``, ``:asm <name>``, and ``:py <name>``.
    """
    for kind, pattern in (("word", _DOC_WORD_RE), ("asm", _DOC_ASM_RE), ("py", _DOC_PY_RE)):
        hit = pattern.match(text)
        if hit is not None:
            return kind, hit.group(1)
    return None
|
||||
|
||||
|
||||
def _is_doc_symbol_name(name: str, *, include_private: bool = False) -> bool:
    """Return True when *name* should appear in the docs index.

    Empty names are always rejected; dunder-prefixed (``__``) names are
    treated as private and skipped unless *include_private* is set.
    """
    if not name:
        return False
    return include_private or not name.startswith("__")
|
||||
|
||||
|
||||
def _collect_leading_doc_comments(lines: Sequence[str], def_index: int, name: str) -> Tuple[str, str]:
    """Gather the comment block directly above a definition line.

    Walks upward from ``lines[def_index - 1]`` until a blank or non-comment
    line terminates the block. Returns ``(stack_effect, description)`` where
    *stack_effect* is the effect text from a ``#<name> ... -> ...`` comment
    (closest one above the definition wins) and *description* is the
    remaining comment text joined top-to-bottom with single spaces. Either
    part may be "".
    """
    comments: List[str] = []
    stack_effect = ""

    idx = def_index - 1
    while idx >= 0:
        raw = lines[idx]
        stripped = raw.strip()
        if not stripped:
            break  # blank line ends the attached comment block
        if not stripped.startswith("#"):
            break  # any non-comment line ends the block

        parsed = _extract_stack_comment(raw)
        if parsed is not None:
            # Stack-effect comments are never folded into the description;
            # one naming a different symbol is skipped entirely.
            comment_name, effect = parsed
            if comment_name == name and not stack_effect:
                # "not stack_effect": scanning upward, keep the effect
                # closest to the definition.
                stack_effect = effect
            idx -= 1
            continue

        text = stripped[1:].strip()
        if text:
            comments.append(text)
        idx -= 1

    # Collected bottom-up; restore source order before joining.
    comments.reverse()
    return stack_effect, " ".join(comments)
|
||||
|
||||
|
||||
def _scan_doc_file(
    path: Path,
    *,
    include_undocumented: bool = False,
    include_private: bool = False,
) -> List[DocEntry]:
    """Extract :class:`DocEntry` records from one ``.sl`` file.

    Scans every line for a definition opener, filters names via
    ``_is_doc_symbol_name``, and attaches the leading comment block as
    stack effect / description. Entries with neither a stack effect nor a
    description are dropped unless *include_undocumented* is set. Returns
    an empty list when the file cannot be read — docs scanning is
    best-effort and must not abort the explorer.
    """
    try:
        text = path.read_text(encoding="utf-8", errors="ignore")
    except Exception:
        return []

    lines = text.splitlines()
    entries: List[DocEntry] = []
    # NOTE: a `defined_names` set was previously accumulated here but never
    # read anywhere; the dead local has been removed.

    for idx, line in enumerate(lines):
        parsed = _extract_definition_name(line)
        if parsed is None:
            continue
        kind, name = parsed
        if not _is_doc_symbol_name(name, include_private=include_private):
            continue
        stack_effect, description = _collect_leading_doc_comments(lines, idx, name)
        if not include_undocumented and not stack_effect and not description:
            continue
        entries.append(
            DocEntry(
                name=name,
                stack_effect=stack_effect,
                description=description,
                kind=kind,
                path=path,
                line=idx + 1,  # 1-based for display / editor jumps
            )
        )

    return entries
|
||||
|
||||
|
||||
def _iter_doc_files(roots: Sequence[Path], *, include_tests: bool = False) -> List[Path]:
    """Return the sorted, de-duplicated ``.sl`` files reachable from *roots*.

    Each root may be a single ``.sl`` file or a directory (searched
    recursively). Paths containing a skip component (build output, VCS
    metadata, vendored libs, and — unless *include_tests* — test trees)
    are excluded. Non-existent roots are silently ignored.
    """
    skip_parts = {"build", ".git", ".venv", "raylib-5.5_linux_amd64"}
    if not include_tests:
        skip_parts.update({"tests", "extra_tests"})

    def _should_skip(candidate: Path) -> bool:
        # NOTE(review): this tests every path component, so a skip name
        # anywhere in the absolute path (e.g. a parent directory named
        # "build") also excludes the file — confirm this is intended.
        return not skip_parts.isdisjoint(candidate.parts)

    seen: Set[Path] = set()
    files: List[Path] = []
    for root in roots:
        resolved = root.expanduser().resolve()
        if not resolved.exists():
            continue
        if resolved.is_file() and resolved.suffix == ".sl":
            if not _should_skip(resolved) and resolved not in seen:
                seen.add(resolved)
                files.append(resolved)
            continue
        if not resolved.is_dir():
            continue
        for found in resolved.rglob("*.sl"):
            if _should_skip(found):
                continue
            candidate = found.resolve()
            if candidate not in seen:
                seen.add(candidate)
                files.append(candidate)
    files.sort()
    return files
|
||||
|
||||
|
||||
def collect_docs(
    roots: Sequence[Path],
    *,
    include_undocumented: bool = False,
    include_private: bool = False,
    include_tests: bool = False,
) -> List[DocEntry]:
    """Build the full documentation index for *roots*.

    Scans every ``.sl`` file found by ``_iter_doc_files``, de-duplicates
    entries by symbol name (first occurrence wins; file order is
    stable-sorted upstream), and returns the result sorted by
    case-insensitive name, then path, then line.
    """
    harvested: List[DocEntry] = []
    for doc_file in _iter_doc_files(roots, include_tests=include_tests):
        harvested.extend(
            _scan_doc_file(
                doc_file,
                include_undocumented=include_undocumented,
                include_private=include_private,
            )
        )

    by_name: Dict[str, DocEntry] = {}
    for entry in harvested:
        if entry.name not in by_name:
            by_name[entry.name] = entry

    result = list(by_name.values())
    result.sort(key=lambda item: (item.name.lower(), str(item.path), item.line))
    return result
|
||||
|
||||
|
||||
def _filter_docs(entries: Sequence[DocEntry], query: str) -> List[DocEntry]:
    """Filter and rank *entries* against a search *query*.

    Query syntax: whitespace/quote-aware terms (shlex rules, falling back
    to plain split on parse errors); ``-term`` excludes entries matching
    anywhere; ``name:``/``effect:``/``desc:``/``path:``/``kind:`` prefixes
    restrict a term to one field; bare terms must each match somewhere
    (name > effect > description > path, scored in that order). Returns
    matches sorted by descending score, then name/path/line. An empty or
    unparsable-to-empty query returns all entries unranked.
    """
    q = query.strip().lower()
    if not q:
        return list(entries)

    try:
        # shlex honors quoting, so `desc:"two words"` stays one term.
        raw_terms = [term.lower() for term in shlex.split(q) if term]
    except Exception:
        # Unbalanced quotes etc. — degrade to naive whitespace split.
        raw_terms = [term.lower() for term in q.split() if term]
    terms = raw_terms
    if not terms:
        return list(entries)

    positive_terms: List[str] = []
    negative_terms: List[str] = []
    field_terms: Dict[str, List[str]] = {"name": [], "effect": [], "desc": [], "path": [], "kind": []}
    for term in terms:
        if term.startswith("-") and len(term) > 1:
            negative_terms.append(term[1:])
            continue
        if ":" in term:
            prefix, value = term.split(":", 1)
            if prefix in field_terms and value:
                field_terms[prefix].append(value)
                continue
            # Unknown prefix (or empty value) falls through as a bare term.
        positive_terms.append(term)

    ranked: List[Tuple[int, DocEntry]] = []
    for entry in entries:
        name = entry.name.lower()
        effect = entry.stack_effect.lower()
        desc = entry.description.lower()
        path_text = entry.path.as_posix().lower()
        kind = entry.kind.lower()
        all_text = " ".join([name, effect, desc, path_text, kind])

        # A negative term matching any field rejects the entry outright.
        if any(term in all_text for term in negative_terms):
            continue

        # Field-scoped terms must all match their own field.
        if any(term not in name for term in field_terms["name"]):
            continue
        if any(term not in effect for term in field_terms["effect"]):
            continue
        if any(term not in desc for term in field_terms["desc"]):
            continue
        if any(term not in path_text for term in field_terms["path"]):
            continue
        if any(term not in kind for term in field_terms["kind"]):
            continue

        # Score each bare term by its best field hit; all must hit somewhere.
        score = 0
        matches_all = True
        for term in positive_terms:
            term_score = 0
            if name == term:
                term_score = 400      # exact name match dominates
            elif name.startswith(term):
                term_score = 220
            elif term in name:
                term_score = 140
            elif term in effect:
                term_score = 100
            elif term in desc:
                term_score = 70
            elif term in path_text:
                term_score = 40
            if term_score == 0:
                matches_all = False
                break
            score += term_score

        if not matches_all:
            continue
        # Slightly demote a lone effect-only hit so name hits sort first.
        if len(positive_terms) == 1 and positive_terms[0] in effect and positive_terms[0] not in name:
            score -= 5
        # Reward explicitly-targeted queries.
        if field_terms["name"]:
            score += 60
        if field_terms["kind"]:
            score += 20
        ranked.append((score, entry))

    # Highest score first; ties broken deterministically by name/path/line.
    ranked.sort(key=lambda item: (-item[0], item[1].name.lower(), str(item[1].path), item[1].line))
    return [entry for _, entry in ranked]
|
||||
|
||||
|
||||
def _run_docs_tui(entries: Sequence[DocEntry], initial_query: str = "") -> int:
    """Browse *entries* interactively; returns a process exit code (0).

    When stdin/stdout is not a TTY (piped/redirected), falls back to a
    plain listing of up to 200 entries filtered by *initial_query*.
    Otherwise runs a curses UI: type to filter, Up/Down or j/k to move,
    PgUp/PgDn to page, Backspace to edit the query, q or Esc to quit.
    """
    if not entries:
        print("[info] no documentation entries found")
        return 0

    if not sys.stdin.isatty() or not sys.stdout.isatty():
        # Non-interactive mode: print a capped, filtered listing and exit.
        filtered = _filter_docs(entries, initial_query)
        print(f"[info] docs entries: {len(filtered)}/{len(entries)}")
        for entry in filtered[:200]:
            effect = entry.stack_effect if entry.stack_effect else "(no stack effect)"
            print(f"{entry.name:24} {effect} [{entry.path}:{entry.line}]")
        if len(filtered) > 200:
            print(f"[info] ... {len(filtered) - 200} more entries")
        return 0

    # Imported lazily so non-TTY / non-curses platforms never pay for it.
    import curses

    def _app(stdscr: Any) -> int:
        # Main curses loop: render header + list + detail line, then react
        # to one keypress per iteration.
        try:
            curses.curs_set(0)  # hide the cursor; not supported everywhere
        except Exception:
            pass
        stdscr.keypad(True)  # deliver arrow/page keys as KEY_* codes

        query = initial_query
        selected = 0  # index into the filtered list
        scroll = 0    # first visible row of the filtered list

        while True:
            # Re-filter every frame; the query may have just changed.
            filtered = _filter_docs(entries, query)
            if selected >= len(filtered):
                selected = max(0, len(filtered) - 1)

            height, width = stdscr.getmaxyx()
            list_height = max(1, height - 4)  # rows left after header/detail
            # Keep the selection inside the visible window.
            if selected < scroll:
                scroll = selected
            if selected >= scroll + list_height:
                scroll = selected - list_height + 1
            max_scroll = max(0, len(filtered) - list_height)
            if scroll > max_scroll:
                scroll = max_scroll

            stdscr.erase()
            header = f"L2 docs {len(filtered)}/{len(entries)} filter: {query}"
            stdscr.addnstr(0, 0, header, max(1, width - 1), curses.A_BOLD)
            stdscr.addnstr(
                1,
                0,
                "Type to search · name:/effect:/desc:/path:/kind: · -term excludes · Up/Down j/k · PgUp/PgDn · q/Esc",
                max(1, width - 1),
                0,
            )

            # Entry list, one row per visible entry; selection is reverse-video.
            for row in range(list_height):
                idx = scroll + row
                if idx >= len(filtered):
                    break
                entry = filtered[idx]
                effect = entry.stack_effect if entry.stack_effect else "(no stack effect)"
                line = f"{entry.name:24} {effect}"
                attr = curses.A_REVERSE if idx == selected else 0
                stdscr.addnstr(2 + row, 0, line, max(1, width - 1), attr)

            # Bottom status line: location + description of the selection.
            if filtered:
                current = filtered[selected]
                detail = f"{current.path}:{current.line} [{current.kind}]"
                if current.description:
                    detail += f" {current.description}"
                stdscr.addnstr(height - 1, 0, detail, max(1, width - 1), 0)
            else:
                stdscr.addnstr(height - 1, 0, "No matches", max(1, width - 1), 0)

            stdscr.refresh()
            key = stdscr.getch()

            if key in (27, ord("q")):  # Esc or q quits
                return 0
            if key in (curses.KEY_UP, ord("k")):
                if selected > 0:
                    selected -= 1
                continue
            if key in (curses.KEY_DOWN, ord("j")):
                if selected + 1 < len(filtered):
                    selected += 1
                continue
            if key == curses.KEY_PPAGE:
                selected = max(0, selected - list_height)
                continue
            if key == curses.KEY_NPAGE:
                selected = min(max(0, len(filtered) - 1), selected + list_height)
                continue
            if key in (curses.KEY_BACKSPACE, 127, 8):  # terminal-dependent codes
                query = query[:-1]
                selected = 0
                scroll = 0
                continue
            if 32 <= key <= 126:  # printable ASCII extends the query
                query += chr(key)
                selected = 0
                scroll = 0
                continue
            # Any other key (resize, function keys) just redraws.

        return 0  # unreachable; loop exits via the returns above

    return int(curses.wrapper(_app))
|
||||
|
||||
|
||||
def run_docs_explorer(
    *,
    source: Optional[Path],
    include_paths: Sequence[Path],
    explicit_roots: Sequence[Path],
    initial_query: str,
    include_undocumented: bool = False,
    include_private: bool = False,
    include_tests: bool = False,
) -> int:
    """Assemble scan roots, build the docs index, and launch the TUI.

    Roots always include the project root, ``./stdlib`` and ``./libs``,
    followed by compiler include paths, any ``--docs-root`` values, and
    (when given) the source file and its directory. Returns the TUI's
    exit code.
    """
    roots: List[Path] = [Path("."), Path("./stdlib"), Path("./libs")]
    for extra_group in (include_paths, explicit_roots):
        roots.extend(extra_group)
    if source is not None:
        roots += [source.parent, source]

    index = collect_docs(
        roots,
        include_undocumented=include_undocumented,
        include_private=include_private,
        include_tests=include_tests,
    )
    return _run_docs_tui(index, initial_query=initial_query)
|
||||
|
||||
|
||||
def cli(argv: Sequence[str]) -> int:
|
||||
parser = argparse.ArgumentParser(description="L2 compiler driver")
|
||||
parser.add_argument("source", type=Path, nargs="?", default=None, help="input .sl file (optional when --clean is used)")
|
||||
@@ -5355,8 +5813,32 @@ def cli(argv: Sequence[str]) -> int:
|
||||
parser.add_argument("--repl", action="store_true", help="interactive REPL; source file is optional")
|
||||
parser.add_argument("-l", dest="libs", action="append", default=[], help="pass library to linker (e.g. -l m or -l libc.so.6)")
|
||||
parser.add_argument("--no-folding", action="store_true", help="disable constant folding optimization")
|
||||
parser.add_argument("--no-peephole", action="store_true", help="disable peephole optimizations")
|
||||
parser.add_argument("--ct-run-main", action="store_true", help="execute 'main' via the compile-time VM after parsing")
|
||||
parser.add_argument("--no-artifact", action="store_true", help="compile source but skip producing final output artifact")
|
||||
parser.add_argument("--docs", action="store_true", help="open searchable TUI for word/function documentation")
|
||||
parser.add_argument(
|
||||
"--docs-root",
|
||||
action="append",
|
||||
default=[],
|
||||
type=Path,
|
||||
help="extra file/directory root to scan for docs (repeatable)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--docs-query",
|
||||
default="",
|
||||
help="initial filter query for --docs mode",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--docs-all",
|
||||
action="store_true",
|
||||
help="include undocumented and private symbols in docs index",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--docs-include-tests",
|
||||
action="store_true",
|
||||
help="include tests/extra_tests in docs index",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--script",
|
||||
action="store_true",
|
||||
@@ -5383,6 +5865,7 @@ def cli(argv: Sequence[str]) -> int:
|
||||
|
||||
artifact_kind = args.artifact
|
||||
folding_enabled = not args.no_folding
|
||||
peephole_enabled = not args.no_peephole
|
||||
|
||||
if args.ct_run_main and artifact_kind != "exe":
|
||||
parser.error("--ct-run-main requires --artifact exe")
|
||||
@@ -5405,6 +5888,17 @@ def cli(argv: Sequence[str]) -> int:
|
||||
return 1
|
||||
return 0
|
||||
|
||||
if args.docs:
|
||||
return run_docs_explorer(
|
||||
source=args.source,
|
||||
include_paths=args.include_paths,
|
||||
explicit_roots=args.docs_root,
|
||||
initial_query=str(args.docs_query or ""),
|
||||
include_undocumented=args.docs_all,
|
||||
include_private=args.docs_all,
|
||||
include_tests=args.docs_include_tests,
|
||||
)
|
||||
|
||||
if args.source is None and not args.repl:
|
||||
parser.error("the following arguments are required: source")
|
||||
|
||||
@@ -5423,6 +5917,7 @@ def cli(argv: Sequence[str]) -> int:
|
||||
|
||||
compiler = Compiler(include_paths=[Path("."), Path("./stdlib"), *args.include_paths])
|
||||
compiler.assembler.enable_constant_folding = folding_enabled
|
||||
compiler.assembler.enable_peephole_optimization = peephole_enabled
|
||||
try:
|
||||
if args.repl:
|
||||
return run_repl(compiler, args.temp_dir, args.libs, debug=args.debug, initial_source=args.source)
|
||||
|
||||
@@ -44,7 +44,8 @@ word arr_free
|
||||
dup arr_cap 8 * 24 + free
|
||||
end
|
||||
|
||||
# Helper: copy n qwords from src to dst [*, dst, src | n] -> [*]
|
||||
# Helper: copy n qwords from src to dst
|
||||
#arr_copy_elements [*, dst, src | n] -> [*]
|
||||
word arr_copy_elements
|
||||
while dup 0 > do
|
||||
over @ 3 pick swap ! # dst = *src
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
import stdlib.sl
|
||||
import io.sl
|
||||
|
||||
#dump [* | n] -> [*]
|
||||
|
||||
# dump takes the first element from the stack
|
||||
# and prints that much consequent elements
|
||||
# from the stack while not modifying it
|
||||
# all variations have the same stack effect
|
||||
|
||||
#dump [* | n] -> [*]
|
||||
word dump
|
||||
1 swap
|
||||
for
|
||||
@@ -18,6 +16,7 @@ word dump
|
||||
drop
|
||||
end
|
||||
|
||||
#rdump [* | n] -> [*]
|
||||
# dump return stack
|
||||
word rdump
|
||||
1 swap
|
||||
@@ -29,6 +28,8 @@ word rdump
|
||||
drop
|
||||
end
|
||||
|
||||
#fdump [* | n] -> [*]
|
||||
#dump the stack with additional formatting
|
||||
word fdump
|
||||
"[*, " write_buf
|
||||
1 swap 1 +
|
||||
@@ -44,6 +45,8 @@ word fdump
|
||||
"]\n" write_buf
|
||||
end
|
||||
|
||||
#frdump [* | n] -> [*]
|
||||
#dump the return stack with additional formatting
|
||||
word frdump
|
||||
"[*, " write_buf
|
||||
1 swap 1 -
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# L2 Floating Point Library (Double Precision)
|
||||
|
||||
# Arithmetic
|
||||
#f+ [*, x1 | x2] -> [* | x3]
|
||||
:asm f+ {
|
||||
movq xmm0, [r12]
|
||||
add r12, 8
|
||||
@@ -9,6 +10,7 @@
|
||||
movq [r12], xmm1
|
||||
} ;
|
||||
|
||||
#f- [*, x1 | x2] -> [* | x3]
|
||||
:asm f- {
|
||||
movq xmm0, [r12]
|
||||
add r12, 8
|
||||
@@ -17,6 +19,7 @@
|
||||
movq [r12], xmm1
|
||||
} ;
|
||||
|
||||
#f* [*, x1 | x2] -> [* | x3]
|
||||
:asm f* {
|
||||
movq xmm0, [r12]
|
||||
add r12, 8
|
||||
@@ -25,6 +28,7 @@
|
||||
movq [r12], xmm1
|
||||
} ;
|
||||
|
||||
#f/ [*, x1 | x2] -> [* | x3]
|
||||
:asm f/ {
|
||||
movq xmm0, [r12]
|
||||
add r12, 8
|
||||
@@ -33,6 +37,7 @@
|
||||
movq [r12], xmm1
|
||||
} ;
|
||||
|
||||
#fneg [* | x] -> [* | -x]
|
||||
:asm fneg {
|
||||
movq xmm0, [r12]
|
||||
mov rax, 0x8000000000000000
|
||||
@@ -42,6 +47,7 @@
|
||||
} ;
|
||||
|
||||
# Comparison
|
||||
#f== [*, x1 | x2] -> [* | flag]
|
||||
:asm f== {
|
||||
movq xmm0, [r12]
|
||||
add r12, 8
|
||||
@@ -52,6 +58,7 @@
|
||||
mov [r12], rax
|
||||
} ;
|
||||
|
||||
#f< [*, x1 | x2] -> [* | flag]
|
||||
:asm f< {
|
||||
movq xmm0, [r12] ; a
|
||||
add r12, 8
|
||||
@@ -62,6 +69,7 @@
|
||||
mov [r12], rax
|
||||
} ;
|
||||
|
||||
#f> [*, x1 | x2] -> [* | flag]
|
||||
:asm f> {
|
||||
movq xmm0, [r12] ; a
|
||||
add r12, 8
|
||||
@@ -73,25 +81,30 @@
|
||||
} ;
|
||||
|
||||
# Conversion
|
||||
#int>float [* | x] -> [* | xf]
|
||||
:asm int>float {
|
||||
cvtsi2sd xmm0, [r12]
|
||||
movq [r12], xmm0
|
||||
} ;
|
||||
|
||||
#float>int [* | xf] -> [* | x]
|
||||
:asm float>int {
|
||||
cvttsd2si rax, [r12]
|
||||
mov [r12], rax
|
||||
} ;
|
||||
|
||||
# Output
|
||||
# extern declarations are required for runtime linking
|
||||
extern int printf(char* fmt, double x)
|
||||
extern int fflush(void* stream)
|
||||
|
||||
#fput [* | xf] -> [*]
|
||||
word fput
|
||||
"%f" drop swap printf drop
|
||||
0 fflush drop
|
||||
end
|
||||
|
||||
#fputln [* | xf] -> [*]
|
||||
word fputln
|
||||
"%f\n" drop swap printf drop
|
||||
0 fflush drop
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
# L2 IO Primitives
|
||||
|
||||
#read_file [*, path_addr | path_len] -> [*, addr | len] || [*, tag | neg_errno]
|
||||
#Reads the file at the given path (pointer+length, not null-terminated),
|
||||
#returns (addr len) of mapped file, or (tag neg_errno) on error.
|
||||
|
||||
#read_file [*, path_addr | path_len] -> [*, addr | len] || [*, tag | neg_errno]
|
||||
:asm read_file {
|
||||
; stack: path_addr (NOS), path_len (TOS)
|
||||
mov rdx, [r12] ; path_len
|
||||
@@ -191,6 +190,7 @@
|
||||
}
|
||||
;
|
||||
|
||||
#print [* | x] -> [*]
|
||||
:asm print (effects string-io) {
|
||||
mov rax, [r12] ; len or int value
|
||||
mov rbx, [r12 + 8] ; possible address
|
||||
@@ -353,8 +353,11 @@
|
||||
}
|
||||
;
|
||||
|
||||
#cr [*] -> [*]
|
||||
inline word cr 10 putc end
|
||||
|
||||
#puts [* | x] -> [*]
|
||||
inline word puts write_buf cr end
|
||||
|
||||
#eputs [* | x] -> [*]
|
||||
inline word eputs ewrite_buf cr end
|
||||
|
||||
@@ -213,12 +213,11 @@ end
|
||||
# Multi-substitution format words
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#formats [*, sN_addr, sN_len, ..., s1_addr, s1_len, fmt_addr | fmt_len]
|
||||
# -> [*, result_addr | result_len]
|
||||
# Replaces each '%' in fmt with the corresponding string argument.
|
||||
# s1 (just below fmt) maps to the first '%', s2 to the second, etc.
|
||||
# The number of string-pair args must equal the number of '%' markers.
|
||||
# Returns a newly allocated string, or fmt as-is when there are no '%'.
|
||||
#formats [*, sN_addr, sN_len, ..., s1_addr, s1_len, fmt_addr | fmt_len] -> [*, result_addr | result_len]
|
||||
word formats
|
||||
2dup 37 count_char_in_str nip nip # count '%' -> n
|
||||
dup 0 == if
|
||||
@@ -236,12 +235,12 @@ word formats
|
||||
end
|
||||
end
|
||||
|
||||
#formati [*, iN, ..., i1, fmt_addr | fmt_len]
|
||||
# -> [*, result_addr | result_len]
|
||||
|
||||
# Replaces each '%' in fmt with the decimal string of the corresponding
|
||||
# integer argument. i1 (just below fmt) maps to the first '%', etc.
|
||||
# The number of int args must equal the number of '%' markers.
|
||||
# Returns a newly allocated string, or fmt as-is when there are no '%'.
|
||||
#formati [*, iN, ..., i1, fmt_addr | fmt_len] -> [*, result_addr | result_len]
|
||||
word formati
|
||||
2dup 37 count_char_in_str nip nip # count '%' -> n
|
||||
dup 0 == if
|
||||
|
||||
Reference in New Issue
Block a user