diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 22590f0..689af14 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -24,7 +24,9 @@ jobs: pip install mkdocs - name: Build MkDocs site - run: mkdocs build --clean + run: | + mkdocs build --clean + echo 'docs.metaprompt-lang.org' > ./site/CNAME - name: Deploy to GitHub Pages uses: peaceiris/actions-gh-pages@v3 diff --git a/README.md b/README.md index cc97410..4238983 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,6 @@ This is an early work-in-progress. Follow [me on twitter](https://x.com/klntsky) - [x] `[:if ... :then ... :else ...]` - [x] `[$ meta-prompt]` - [x] `[:use module :param1=value1]` - - [ ] `[:model model-id ...]` for dynamic model selection - [x] `[# comments]` - [ ] `[:status some-status]` - to show during prompt evaluation - [ ] `[:call ffi-function :param1=foo :param2=bar]` @@ -69,28 +68,53 @@ This is an early work-in-progress. Follow [me on twitter](https://x.com/klntsky) - [x] OpenAI - [ ] Anthropic - [ ] llama - - [ ] dynamic model switching - [ ] Runtime system - [x] Support variable definition at runtime + - [x] dynamic model switching (via `MODEL` variable - [example](./python/examples/model-change.metaprompt)) - [ ] exceptions - [ ] throwing exceptions - [ ] recovering from exceptions + - [ ] LLM output validation? + - [ ] via regexps? + - [ ] via parsing? - [ ] FFI - - [ ] syntax + - [ ] syntax - preferably via `[:use @ffi-function :param1=foo :param2=bar]` + - [ ] how to throw exceptions from FFI - [ ] API - [ ] standard library + - [ ] text processing + - [ ] shell access + - [ ] running executables + - [ ] file system access + - [ ] isolation? + - [ ] HTTP stack - Utils - - [x] Unbound variable auto discovery to turn metaprompts into interfaces + - [x] Unbound variable auto discovery + - [ ] Machinery to turn metaprompts into interfaces (parameters become form fields) + - [ ] static validation? 
- [ ] Add function definitions - [ ] enable function scopes - [ ] Add a module system - [x] syntax - - [x] runtime + - [x] module loading at runtime + - [ ] preload modules on startup - is it needed? + - [ ] module caching - [ ] tests - [ ] Add a package system - [ ] specify package format - [ ] create a package registry - - [ ] package installer + - [ ] on-the-fly package installer + +## Architecture decisions + +- functions, files, and modules are essentially the same - invoked with `[:use ...]` +- metaprompt parameters are just variables that are not bound before first use - this and the above decision allow us to get rid of function syntax entirely + +### To consider + +- dynamic module loading vs. static module loading: dynamic is lazy, so skips unneeded modules, but static loading guarantees absence of runtime errors due to module resolution failures (which saves costs) +- exception system: how to pass payloads with exceptions +- turning exceptions into continuations in the spirit of [hurl](https://hurl.wtf) ## Notable sources of inspiration diff --git a/python/src/loader.py b/python/src/loader.py index 7567c4b..e2d9863 100644 --- a/python/src/loader.py +++ b/python/src/loader.py @@ -4,6 +4,10 @@ def _discover_variables(ast): yield from _discover_variables(node) elif isinstance(ast, dict): if "type" in ast: + # TODO: evaluate both :if branches in parallel, to cover this case: + # [:if foo :then [:bar=baz] :else [:bar]] + # -- [:bar] should be unbound here, because it is unbound in the + # first branch if ast["type"] == "comment": return elif ast["type"] == "var": diff --git a/python/src/main.py b/python/src/main.py index b3b9929..7e2d668 100644 --- a/python/src/main.py +++ b/python/src/main.py @@ -37,10 +37,14 @@ def parse_arguments(): "--model", type=str, help="LLM id to use", - default="interactive" # TODO: use dynamic model selection + default="interactive", # TODO: use dynamic model selection ) - parser.add_argument("--list-models", action="store_true", help="List 
available LLMs for use with --model, based on the available LLM providers") + parser.add_argument( + "--list-models", + action="store_true", + help="List available LLMs for use with --model, based on the available LLM providers", + ) parser.add_argument( "--set", @@ -54,17 +58,22 @@ def parse_arguments(): return parser.parse_args() +def print_models(config): + print("Available models:") + print() + print("\n".join(["- " + key for key in sorted(config.providers)])) + print() + print("Use --model to specify the default model.") + + async def main(): args = parse_arguments() config = load_config() if args.list_models: - print("Available models:") - print() - print("\n".join(["- " + key for key in config.providers])) - print() - print("Use --model to specify") + print_models(config) return + config.parameters = dict(args.variables or {}) for file_path in args.INPUT_FILES: if os.path.isfile(file_path): diff --git a/python/src/providers/openai.py b/python/src/providers/openai.py index aabfdff..5957c1e 100644 --- a/python/src/providers/openai.py +++ b/python/src/providers/openai.py @@ -14,7 +14,7 @@ def __init__(self, api_key: str = None, models=None, *args, **kwargs): openai.api_key = api_key or os.getenv("OPENAI_API_KEY") models = models or [ model.id for model in openai.models.list().data - if "gpt" in model.id + if "gpt" in model.id or "o1" in model.id ] for model_name in models: self.add(