mirror of
https://github.com/thinking-machines-lab/tinker.git
synced 2026-04-19 12:58:01 +00:00
Sync contents
This commit is contained in:
parent
3e4e4e3560
commit
951d660110
32 changed files with 3895 additions and 635 deletions
|
|
@ -1,22 +0,0 @@
|
|||
#!/usr/bin/env bash
# Bootstrap the development environment: Homebrew packages (macOS only),
# the Python toolchain, and the locked Python dependencies.

set -e

# Always operate from the repository root, regardless of invocation dir.
cd "$(dirname "$0")/.."

# On macOS, install Homebrew packages from the Brewfile unless the caller
# opted out via SKIP_BREW=1 or the bundle is already satisfied.
if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ]; then
  if ! brew bundle check >/dev/null 2>&1; then
    echo "==> Installing Homebrew dependencies…"
    brew bundle
  fi
fi

echo "==> Installing Python…"
uv python install

echo "==> Installing Python dependencies…"
uv sync --all-extras

echo "==> Exporting Python dependencies…"
# note: `--no-hashes` is required because of https://github.com/pypa/pip/issues/4995
uv export -o requirements-dev.lock --no-hashes
|
@ -1,14 +0,0 @@
|
|||
#!/usr/bin/env bash
# Auto-format the codebase and the code blocks embedded in the docs.

set -e

# Run from the repository root.
cd "$(dirname "$0")/.."

echo "==> Running ruff"
uv run ruff format
uv run ruff check --fix .
# run formatting again to fix any inconsistencies when imports are stripped
uv run ruff format

echo "==> Formatting docs"
# Re-format the python code blocks embedded in the Markdown docs.
uv run python scripts/utils/ruffen-docs.py README.md api.md
231
scripts/generate_docs.py
Executable file
231
scripts/generate_docs.py
Executable file
|
|
@ -0,0 +1,231 @@
|
|||
#!/usr/bin/env python3
|
||||
# /// script
|
||||
# dependencies = [
|
||||
# "pydoc-markdown>=4.8.0",
|
||||
# "pyyaml>=6.0",
|
||||
# ]
|
||||
# ///
|
||||
|
||||
|
||||
import ast
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
def cd_to_project_root():
    """Make the project root (the parent of the scripts directory) the cwd.

    Resolves this file's real location first so the script behaves the same
    no matter which directory it is invoked from.
    """
    root = Path(__file__).resolve().parent.parent
    os.chdir(root)
    print(f"Changed to project root: {root}")
||||
|
||||
class ModuleAnalyzer:
    """Analyze Python modules to extract public API information."""

    def __init__(self, src_path: Path):
        # Root of the source tree (the directory containing the tinker package).
        self.src_path = src_path

    def get_module_exports(self, module_path: Path) -> List[str]:
        """Extract __all__ exports from a module.

        Returns the string entries of a literal ``__all__ = [...]`` assignment,
        or an empty list when the module has no parseable ``__all__``.
        """
        try:
            tree = ast.parse(module_path.read_text())

            for node in ast.walk(tree):
                if not isinstance(node, ast.Assign):
                    continue
                for target in node.targets:
                    if isinstance(target, ast.Name) and target.id == '__all__':
                        if isinstance(node.value, ast.List):
                            # ast.Str was removed in Python 3.12; string literals
                            # are ast.Constant nodes on every supported version.
                            return [
                                elt.value
                                for elt in node.value.elts
                                if isinstance(elt, ast.Constant) and isinstance(elt.value, str)
                            ]
        except Exception as e:
            print(f"Warning: Could not parse {module_path}: {e}")
        return []

    def find_all_modules(self) -> Dict[str, Path]:
        """Find all Python modules in the package.

        Returns a mapping of dotted module name -> source file, skipping test
        files and __pycache__. A package's __init__.py is keyed by the package
        name itself.
        """
        modules: Dict[str, Path] = {}
        tinker_path = self.src_path / "tinker"

        for py_file in tinker_path.rglob("*.py"):
            # Skip test files and private modules
            if any(part.startswith('test') or part.startswith('_test') for part in py_file.parts):
                continue
            if '__pycache__' in py_file.parts:
                continue

            # Calculate the dotted module name relative to src_path.
            relative_path = py_file.relative_to(self.src_path)
            module_parts = list(relative_path.parts[:-1])  # Remove .py file
            module_parts.append(relative_path.stem)

            # Skip __init__ files in module name
            if module_parts[-1] == '__init__':
                module_parts = module_parts[:-1]

            module_name = '.'.join(module_parts)
            if module_name:  # Skip empty module names
                modules[module_name] = py_file

        return modules
||||
|
||||
|
||||
class DocumentationGenerator:
    """Generate documentation using pydoc-markdown."""

    def __init__(self, config_path: Path, output_dir: Path):
        # pydoc-markdown YAML configuration used for every invocation.
        self.config_path = config_path
        self.output_dir = output_dir
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.analyzer = ModuleAnalyzer(Path('src'))

    def run_pydoc_markdown(self, modules: List[str], output_file: Path) -> bool:
        """Run pydoc-markdown for specific modules, writing to *output_file*.

        Returns True on success; failures are printed and return False.
        """
        try:
            # Build the command. Use the configured config file rather than a
            # hard-coded 'pydoc-markdown.yml' so the config_path passed to
            # __init__ is actually honored.
            cmd = ['pydoc-markdown', str(self.config_path), '-I', 'src']

            # Add modules
            for module in modules:
                cmd.extend(['-m', module])

            # Run pydoc-markdown; check=False because we handle the exit code.
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                check=False
            )

            if result.returncode == 0:
                # Write output to file
                output_file.parent.mkdir(parents=True, exist_ok=True)
                output_file.write_text(result.stdout)
                print(f"Generated: {output_file}")
                return True
            else:
                print(f"Error generating {output_file}: {result.stderr}")
                return False

        except Exception as e:
            print(f"Exception generating {output_file}: {e}")
            return False

    def generate_public_interfaces(self):
        """Generate documentation for public interface classes."""
        print("\n=== Generating Public Interfaces Documentation ===")

        # Generate individual pages for each client
        client_modules = [
            ('ServiceClient', 'tinker.lib.public_interfaces.service_client'),
            ('TrainingClient', 'tinker.lib.public_interfaces.training_client'),
            ('SamplingClient', 'tinker.lib.public_interfaces.sampling_client'),
            ('RestClient', 'tinker.lib.public_interfaces.rest_client'),
            ('APIFuture', 'tinker.lib.public_interfaces.api_future'),
        ]

        for class_name, module in client_modules:
            output_file = self.output_dir / f'{class_name.lower().replace("_", "-")}.md'
            self.run_pydoc_markdown([module], output_file)

    def generate_all_types(self):
        """Generate complete types reference."""
        print("\n=== Generating Complete Types Reference ===")

        # Document every module under tinker.types on a single page.
        all_modules = self.analyzer.find_all_modules()
        type_modules = [m for m in all_modules.keys() if m.startswith('tinker.types')]

        if type_modules:
            output_file = self.output_dir / 'types.md'
            self.run_pydoc_markdown(type_modules, output_file)

    def generate_exceptions(self):
        """Generate exception hierarchy documentation."""
        print("\n=== Generating Exception Documentation ===")

        output_file = self.output_dir / 'exceptions.md'
        self.run_pydoc_markdown(['tinker._exceptions'], output_file)

    def generate_nextra_meta(self):
        """Generate _meta.json for Nextra navigation."""
        print("\n=== Generating Nextra Navigation Metadata ===")

        # Keys are the output file stems; values are sidebar display titles.
        meta = {
            "serviceclient": "ServiceClient",
            "trainingclient": "TrainingClient",
            "samplingclient": "SamplingClient",
            "restclient": "RestClient",
            "apifuture": "APIFuture",
            "types": "Parameters",
            "exceptions": "Exceptions"
        }

        meta_file = self.output_dir / '_meta.json'
        meta_file.write_text(json.dumps(meta, indent=2))
        print(f"Generated: {meta_file}")

    def generate_all(self):
        """Generate all documentation."""
        print("Starting documentation generation...")
        print(f"Output directory: {self.output_dir}")

        # Generate documentation for each category
        self.generate_public_interfaces()
        self.generate_all_types()
        self.generate_exceptions()

        # Generate Nextra metadata
        self.generate_nextra_meta()

        print("\n=== Documentation Generation Complete ===")
        print(f"Markdown files generated in: {self.output_dir}")
        print("\nGenerated files:")
        for file in sorted(self.output_dir.rglob("*.md")):
            print(f" - {file.relative_to(self.output_dir)}")
||||
|
||||
|
||||
def main():
    """Main entry point."""
    # All paths below are relative to the project root, so move there first.
    cd_to_project_root()

    project_root = Path.cwd()
    config_path = project_root / 'pydoc-markdown.yml'
    output_dir = project_root / 'docs' / 'api'

    # Bail out early when the pydoc-markdown configuration is missing.
    if not config_path.exists():
        print(f"Error: Configuration file not found at {config_path}")
        print("Please run this script from the project root directory")
        sys.exit(1)

    # Build the generator and produce every documentation page.
    DocumentationGenerator(config_path, output_dir).generate_all()

    # Print usage instructions
    print("\n" + "=" * 50)
    print("To use these docs in your Nextra project:")
    print("1. Copy the docs/api directory to your Nextra project")
    print("2. The markdown files are ready to use with Nextra")
    print("3. Navigation structure is defined in _meta.json")
    print("\nTo regenerate docs after code changes:")
    print(" uv run scripts/generate_docs.py")


if __name__ == "__main__":
    main()
||||
17
scripts/lint
17
scripts/lint
|
|
@ -1,17 +0,0 @@
|
|||
#!/usr/bin/env bash
# Static checks: ruff lint, pyright, mypy, and an import smoke test.

set -e

# Run from the repository root.
cd "$(dirname "$0")/.."

echo "==> Running ruff"
uv run ruff check .

echo "==> Running pyright"
uv run pyright --threads 64

echo "==> Running mypy"
uv run mypy .

echo "==> Making sure it imports"
# Smoke test: the package must at least be importable.
uv run python -c 'import tinker'
||||
41
scripts/mock
41
scripts/mock
|
|
@ -1,41 +0,0 @@
|
|||
#!/usr/bin/env bash
# Start a Prism mock server for the OpenAPI spec, either in the foreground
# or (with --daemon) in the background with a readiness poll.

set -e

cd "$(dirname "$0")/.."

# Spec location: a first positional argument wins; otherwise read the
# openapi_spec_url entry out of .stats.yml.
if [[ -n "$1" && "$1" != '--'* ]]; then
  URL="$1"
  shift
else
  URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)"
fi

# Check if the URL is empty
if [ -z "$URL" ]; then
  echo "Error: No OpenAPI spec path/url provided or found in .stats.yml"
  exit 1
fi

echo "==> Starting mock server with URL ${URL}"

# Run prism mock on the given spec
if [ "$1" == "--daemon" ]; then
  # Background the server, capturing its output for the readiness poll below.
  npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &>.prism.log &

  # Wait for server to come online
  echo -n "Waiting for server"
  while ! grep -q "✖ fatal\|Prism is listening" ".prism.log"; do
    echo -n "."
    sleep 0.1
  done

  # A fatal line means prism died before listening: surface its log and fail.
  if grep -q "✖ fatal" ".prism.log"; then
    cat .prism.log
    exit 1
  fi

  echo
else
  npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL"
fi
||||
7
scripts/publish-pypi
Normal file
7
scripts/publish-pypi
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
#!/usr/bin/env bash
# Build and publish the package to PyPI. Requires PYPI_TOKEN in the env.

set -eux

# Start from a clean dist/ so stale artifacts are never uploaded.
rm -rf dist
mkdir -p dist

uv build

# Disable xtrace for the publish step so the token is not echoed into CI
# logs by `set -x`, and pass it via uv's environment variable instead of
# argv (argv is visible to other processes via `ps`).
set +x
UV_PUBLISH_TOKEN="$PYPI_TOKEN" uv publish
||||
77
scripts/test
77
scripts/test
|
|
@ -1,77 +0,0 @@
|
|||
#!/usr/bin/env bash
# Run the test suite against a mock Prism server (started on demand) or a
# real API when TEST_API_BASE_URL is set, across min/max Python versions.

set -e

cd "$(dirname "$0")/.."

# ANSI colors for status output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color

# True when a mock server already answers on the Prism default port.
function prism_is_running() {
  curl --silent "http://localhost:4010" >/dev/null 2>&1
}

# Kill whatever is listening on TCP port $1 (no-op when nothing is).
kill_server_on_port() {
  pids=$(lsof -t -i tcp:"$1" || echo "")
  if [ "$pids" != "" ]; then
    kill "$pids"
    echo "Stopped $pids."
  fi
}

# True when the caller points the tests at a real API instead of the mock.
function is_overriding_api_base_url() {
  [ -n "$TEST_API_BASE_URL" ]
}

if ! is_overriding_api_base_url && ! prism_is_running; then
  # When we exit this script, make sure to kill the background mock server process
  trap 'kill_server_on_port 4010' EXIT

  # Start the dev server
  ./scripts/mock --daemon
fi

if is_overriding_api_base_url; then
  echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
  echo
elif ! prism_is_running; then
  # No override and no mock server: explain how to start one and abort.
  echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
  echo -e "running against your OpenAPI spec."
  echo
  echo -e "To run the server, pass in the path or url of your OpenAPI"
  echo -e "spec to the prism command:"
  echo
  echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}"
  echo

  exit 1
else
  echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
  echo
fi

export DEFER_PYDANTIC_BUILD=false

# Run pytest twice: once with Pydantic v2, then again after downgrading to
# v1, so both supported major versions are exercised.
function run_tests() {
  echo "==> Running tests with Pydantic v2"
  uv run --all-extras --all-groups pytest "$@"

  echo "==> Running tests with Pydantic v1"
  uv pip install 'pydantic<2'
  uv run --all-extras --all-groups pytest "$@"
}

# If UV_PYTHON is already set in the environment, just run the command once
if [[ -n "$UV_PYTHON" ]]; then
  run_tests "$@"
else
  # If UV_PYTHON is not set, run the command for min and max versions

  echo "==> Running tests for Python 3.9"
  UV_PYTHON=3.9 run_tests "$@"

  echo "==> Running tests for Python 3.13"
  UV_PYTHON=3.13 run_tests "$@"
fi
||||
|
|
@ -1,167 +0,0 @@
|
|||
# fork of https://github.com/asottile/blacken-docs adapted for ruff
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import sys
|
||||
import argparse
|
||||
import textwrap
|
||||
import contextlib
|
||||
import subprocess
|
||||
from typing import Match, Optional, Sequence, Generator, NamedTuple, cast
|
||||
|
||||
# Fenced ```python blocks in Markdown: `before` is the opening fence (with
# its indent), `code` the block body, `after` the closing fence.
MD_RE = re.compile(
    r"(?P<before>^(?P<indent> *)```\s*python\n)" r"(?P<code>.*?)" r"(?P<after>^(?P=indent)```\s*$)",
    re.DOTALL | re.MULTILINE,
)
# Fenced ```pycon blocks (interactive ">>>" sessions).
MD_PYCON_RE = re.compile(
    r"(?P<before>^(?P<indent> *)```\s*pycon\n)" r"(?P<code>.*?)" r"(?P<after>^(?P=indent)```.*$)",
    re.DOTALL | re.MULTILINE,
)
# REPL prompt prefixes used when reassembling formatted pycon sessions.
PYCON_PREFIX = ">>> "
PYCON_CONTINUATION_PREFIX = "..."
PYCON_CONTINUATION_RE = re.compile(
    rf"^{re.escape(PYCON_CONTINUATION_PREFIX)}( |$)",
)
# Line length passed to `ruff format` for embedded code blocks.
DEFAULT_LINE_LENGTH = 100
||||
|
||||
class CodeBlockError(NamedTuple):
    """A formatting failure inside one embedded code block."""

    # Character offset of the failing block within the source document.
    offset: int
    # The exception raised while formatting that block.
    exc: Exception
||||
|
||||
def format_str(
    src: str,
) -> tuple[str, Sequence[CodeBlockError]]:
    """Format every ```python and ```pycon fenced block in *src*.

    Returns the rewritten document plus a list of per-block errors; a block
    that fails to format is left unchanged and reported, never fatal.
    """
    errors: list[CodeBlockError] = []

    @contextlib.contextmanager
    def _collect_error(match: Match[str]) -> Generator[None, None, None]:
        # Record (offset, exception) for the block instead of aborting the file.
        try:
            yield
        except Exception as e:
            errors.append(CodeBlockError(match.start(), e))

    def _md_match(match: Match[str]) -> str:
        # Plain ```python block: dedent, format, re-indent to the fence level.
        code = textwrap.dedent(match["code"])
        with _collect_error(match):
            code = format_code_block(code)
        code = textwrap.indent(code, match["indent"])
        return f"{match['before']}{code}{match['after']}"

    def _pycon_match(match: Match[str]) -> str:
        # Rebuild a REPL session: `fragment` accumulates one >>>/... statement
        # at a time; `code` accumulates the re-prompted output.
        code = ""
        fragment = cast(Optional[str], None)

        def finish_fragment() -> None:
            nonlocal code
            nonlocal fragment

            if fragment is not None:
                with _collect_error(match):
                    fragment = format_code_block(fragment)
                fragment_lines = fragment.splitlines()
                code += f"{PYCON_PREFIX}{fragment_lines[0]}\n"
                for line in fragment_lines[1:]:
                    # Skip blank lines to handle Black adding a blank above
                    # functions within blocks. A blank line would end the REPL
                    # continuation prompt.
                    #
                    # >>> if True:
                    # ... def f():
                    # ... pass
                    # ...
                    if line:
                        code += f"{PYCON_CONTINUATION_PREFIX} {line}\n"
                if fragment_lines[-1].startswith(" "):
                    code += f"{PYCON_CONTINUATION_PREFIX}\n"
                fragment = None

        # First non-blank line fixes the session's indentation; everything is
        # compared after stripping that indent.
        indentation = None
        for line in match["code"].splitlines():
            orig_line, line = line, line.lstrip()
            if indentation is None and line:
                indentation = len(orig_line) - len(line)
            continuation_match = PYCON_CONTINUATION_RE.match(line)
            if continuation_match and fragment is not None:
                # "..." line extends the current statement.
                fragment += line[continuation_match.end() :] + "\n"
            else:
                # Statement boundary: flush, then start a new fragment on ">>>"
                # or copy non-prompt (output) lines through untouched.
                finish_fragment()
                if line.startswith(PYCON_PREFIX):
                    fragment = line[len(PYCON_PREFIX) :] + "\n"
                else:
                    code += orig_line[indentation:] + "\n"
        finish_fragment()
        return code

    def _md_pycon_match(match: Match[str]) -> str:
        code = _pycon_match(match)
        code = textwrap.indent(code, match["indent"])
        return f"{match['before']}{code}{match['after']}"

    src = MD_RE.sub(_md_match, src)
    src = MD_PYCON_RE.sub(_md_pycon_match, src)
    return src, errors
|
||||
|
||||
def format_code_block(code: str) -> str:
    """Run ``ruff format`` over *code* (via stdin) and return the result."""
    cmd = [
        sys.executable,
        "-m",
        "ruff",
        "format",
        "--stdin-filename=script.py",
        f"--line-length={DEFAULT_LINE_LENGTH}",
    ]
    # ruff reads the snippet from stdin and writes the formatted code to
    # stdout; check_output raises on failure, which the caller collects.
    return subprocess.check_output(cmd, encoding="utf-8", input=code)
|
||||
|
||||
def format_file(
    filename: str,
    skip_errors: bool,
) -> int:
    """Format the code blocks of *filename* in place.

    Returns 1 when a code block failed to parse and *skip_errors* is false,
    otherwise 0. The file is only rewritten when its contents changed.
    """
    with open(filename, encoding="UTF-8") as f:
        contents = f.read()
    new_contents, errors = format_str(contents)
    for error in errors:
        lineno = contents[: error.offset].count("\n") + 1
        # Include the file name so the error is actionable; the previous
        # message printed a literal "(unknown)" placeholder (and used an
        # f-string with no placeholder at all for the rewrite message).
        print(f"{filename}:{lineno}: code block parse error {error.exc}")
    if errors and not skip_errors:
        return 1
    if contents != new_contents:
        print(f"{filename}: Rewriting...")
        with open(filename, "w", encoding="UTF-8") as f:
            f.write(new_contents)
        return 0
    else:
        return 0
|
||||
|
||||
def main(argv: Sequence[str] | None = None) -> int:
    """Command-line entry point: format each named file, return exit status."""
    parser = argparse.ArgumentParser()
    # -l and -S are accepted for blacken-docs CLI compatibility; neither is
    # read below (ruff is driven by DEFAULT_LINE_LENGTH in format_code_block).
    parser.add_argument("-l", "--line-length", type=int, default=DEFAULT_LINE_LENGTH)
    parser.add_argument("-S", "--skip-string-normalization", action="store_true")
    parser.add_argument("-E", "--skip-errors", action="store_true")
    parser.add_argument("filenames", nargs="*")
    args = parser.parse_args(argv)

    # OR the per-file statuses together: non-zero if any file failed.
    status = 0
    for name in args.filenames:
        status |= format_file(name, skip_errors=args.skip_errors)
    return status


if __name__ == "__main__":
    raise SystemExit(main())
Loading…
Add table
Add a link
Reference in a new issue