diff --git a/.github/workflows/docs-build.yml b/.github/workflows/docs-build.yml index 3c141546..3b0468f4 100644 --- a/.github/workflows/docs-build.yml +++ b/.github/workflows/docs-build.yml @@ -4,7 +4,7 @@ on: push: branches: [main] paths: - - "docs/**" + - "fern/**" workflow_dispatch: defaults: @@ -19,7 +19,7 @@ concurrency: cancel-in-progress: true jobs: - build: + publish: runs-on: build-arm64 container: image: ghcr.io/nvidia/openshell/ci:latest @@ -33,102 +33,10 @@ jobs: - name: Install tools run: mise install - - name: Build documentation - run: mise run docs:build:strict - - - name: Delete unnecessary files - run: | - find _build -name .doctrees -prune -exec rm -rf {} \; - find _build -name .buildinfo -exec rm {} \; - - - name: Upload HTML - uses: actions/upload-artifact@v4 - with: - name: html-build-artifact - path: _build/docs - if-no-files-found: error - retention-days: 1 + - name: Install Fern CLI + run: npm install -g fern-api - publish: - if: false # disabled until GitHub Pages is configured - needs: [build] - runs-on: build-arm64 - container: - image: ghcr.io/nvidia/openshell/ci:latest - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - steps: - - uses: actions/checkout@v4 - with: - ref: "gh-pages" - - - name: Initialize Git configuration - run: | - git config --global --add safe.directory "$GITHUB_WORKSPACE" - git config --global user.name "github-actions[bot]" - git config --global user.email "github-actions[bot]@users.noreply.github.com" - - - name: Download artifacts - uses: actions/download-artifact@v4 - with: - name: html-build-artifact - path: ${{ github.ref_name }} - - - name: Copy HTML directories - run: | - ls -asl - for i in `ls -d *` - do - echo "Git adding ${i}" - git add "${i}" - done - - name: Check or create dot-no-jekyll file - - run: | - if [ -f ".nojekyll" ]; then - echo "The dot-no-jekyll file already exists." 
- exit 0 - fi - touch .nojekyll - git add .nojekyll - - - name: Check or create redirect page + - name: Publish documentation env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - resp=$(grep 'http-equiv="refresh"' index.html 2>/dev/null) || true - if [ -n "${resp}" ]; then - echo "The redirect file already exists." - exit 0 - fi - def_branch=$(gh api "repos/${GITHUB_REPOSITORY}" --jq ".default_branch") - html_url=$(gh api "repos/${GITHUB_REPOSITORY}/pages" --jq ".html_url") - echo '' > index.html - echo '' >> index.html - echo ' ' >> index.html - echo ' Redirect to documentation' >> index.html - echo ' ' >> index.html - echo ' ' >> index.html - echo ' ' >> index.html - echo ' ' >> index.html - echo ' ' >> index.html - echo ' ' >> index.html - echo '

Please follow the link to the ' >> index.html - echo ${def_branch}' branch documentation.

' >> index.html - echo ' ' >> index.html - echo '' >> index.html - git add index.html - - - name: Commit changes to the GitHub Pages branch - run: | - git status - if git commit -m 'Pushing changes to GitHub Pages.'; then - git push -f - else - echo "Nothing changed." - fi + FERN_TOKEN: ${{ secrets.FERN_TOKEN }} + run: fern generate --docs diff --git a/.github/workflows/docs-preview-pr.yml b/.github/workflows/docs-preview-pr.yml index a18c7575..65c86685 100644 --- a/.github/workflows/docs-preview-pr.yml +++ b/.github/workflows/docs-preview-pr.yml @@ -3,16 +3,16 @@ name: docs-preview-pr on: pull_request: branches: [main] - types: [opened, reopened, synchronize, closed] + types: [opened, reopened, synchronize] paths: - - "docs/**" + - "fern/**" concurrency: group: preview-${{ github.ref }} cancel-in-progress: true permissions: - contents: write + contents: read pull-requests: write packages: read @@ -38,20 +38,21 @@ jobs: - name: Install tools run: mise install - - name: Build documentation - if: github.event.action != 'closed' - run: mise run docs:build:strict + - name: Install Fern CLI + run: npm install -g fern-api - - name: Delete unnecessary files - if: github.event.action != 'closed' + - name: Generate preview URL + id: generate-docs + env: + FERN_TOKEN: ${{ secrets.FERN_TOKEN }} run: | - find _build -name .doctrees -prune -exec rm -rf {} \; - find _build -name .buildinfo -exec rm {} \; - - - name: Deploy preview - uses: rossjrw/pr-preview-action@v1 + OUTPUT=$(fern generate --docs --preview 2>&1) || true + echo "$OUTPUT" + URL=$(echo "$OUTPUT" | grep -oP 'Published docs to \K.*(?= \()') + echo "Preview URL: $URL" + echo "Preview your docs: $URL" > preview_url.txt + + - name: Comment preview URL on PR + uses: thollander/actions-comment-pull-request@v2.4.3 with: - source-dir: ./_build/docs/ - preview-branch: gh-pages - umbrella-dir: pr-preview - action: auto + filePath: preview_url.txt diff --git a/fern/README.md b/fern/README.md new file mode 100644 index 
00000000..c856b42e --- /dev/null +++ b/fern/README.md @@ -0,0 +1,100 @@ +# NVIDIA OpenShell Fern Documentation + +This folder contains the Fern Docs configuration for NVIDIA OpenShell. + +## Installation + +```bash +npm install -g fern-api +# Or: npx fern-api --version +``` + +## Local Preview + +```bash +cd fern/ +fern docs dev +# Or from project root: fern docs dev --project ./fern +``` + +Docs available at `http://localhost:3000`. + +## Folder Structure + +``` +fern/ +├── docs.yml # Global config (title, colors, versions) +├── fern.config.json # Fern CLI config +├── versions/ +│ └── v0.0.1.yml # Navigation for v0.0.1 +├── v0.0.1/ +│ └── pages/ # MDX content for v0.0.1 +├── scripts/ # Migration and conversion scripts +├── components/ # Custom React components (footer) +└── assets/ # Favicon, logos, images +``` + +## Migration Workflow + +To migrate or update docs from `docs/` to Fern: + +```bash +# 1. Copy docs to fern (run from repo root) +python3 fern/scripts/copy_docs_to_fern.py v0.0.1 + +# 2. Expand {include} directives (index) +python3 fern/scripts/expand_includes.py fern/v0.0.1/pages + +# 3. Convert OpenShell-specific syntax ({doc} roles, {ref} roles) +python3 fern/scripts/convert_openshell_specific.py fern/v0.0.1/pages + +# 4. Convert MyST to Fern MDX +python3 fern/scripts/convert_myst_to_fern.py fern/v0.0.1/pages + +# 5. Add frontmatter +python3 fern/scripts/add_frontmatter.py fern/v0.0.1/pages + +# 6. Update internal links +python3 fern/scripts/update_links.py fern/v0.0.1/pages + +# 7. Remove duplicate H1s (when title matches frontmatter) +python3 fern/scripts/remove_duplicate_h1.py fern/v0.0.1/pages + +# 8. Fix MyST frontmatter for Fern compatibility +python3 fern/scripts/fix_frontmatter.py fern/v0.0.1/pages + +# 9. 
Validate +./fern/scripts/check_unconverted.sh fern/v0.0.1/pages +``` + +## MDX Components + +```mdx +Informational note +Helpful tip +Warning message +Info callout + + + Description + + + + ```python\ncode\n``` + + +Collapsible content +``` + +## Deploying + +```bash +fern generate --docs +fern docs deploy +``` + +## Useful Links + +- [Fern Docs](https://buildwithfern.com/learn/docs) +- [MDX Components](https://buildwithfern.com/learn/docs/components) +- [Versioning Guide](https://buildwithfern.com/learn/docs/configuration/versions) diff --git a/fern/assets/NVIDIA_dark.svg b/fern/assets/NVIDIA_dark.svg new file mode 100644 index 00000000..04850d9d --- /dev/null +++ b/fern/assets/NVIDIA_dark.svg @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + + + + + + diff --git a/fern/assets/NVIDIA_light.svg b/fern/assets/NVIDIA_light.svg new file mode 100644 index 00000000..9ee045c3 --- /dev/null +++ b/fern/assets/NVIDIA_light.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + diff --git a/fern/assets/NVIDIA_symbol.svg b/fern/assets/NVIDIA_symbol.svg new file mode 100644 index 00000000..c0507afe --- /dev/null +++ b/fern/assets/NVIDIA_symbol.svg @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + diff --git a/fern/assets/images/architecture.svg b/fern/assets/images/architecture.svg new file mode 100644 index 00000000..b0bcd4d5 --- /dev/null +++ b/fern/assets/images/architecture.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/fern/assets/images/openshell-terminal.png b/fern/assets/images/openshell-terminal.png new file mode 100644 index 00000000..09fe2b76 Binary files /dev/null and b/fern/assets/images/openshell-terminal.png differ diff --git a/fern/assets/openshell-terminal.png b/fern/assets/openshell-terminal.png new file mode 100644 index 00000000..09fe2b76 Binary files /dev/null and b/fern/assets/openshell-terminal.png differ diff --git a/fern/components/BadgeLinks.tsx b/fern/components/BadgeLinks.tsx new file mode 100644 index 00000000..a5635b7e --- /dev/null +++ 
b/fern/components/BadgeLinks.tsx @@ -0,0 +1,42 @@ +/** + * Badge links for GitHub, License, PyPI, etc. + * Uses a custom wrapper to avoid Fern's external-link icon stacking under badges. + */ +export type BadgeItem = { + href: string; + src: string; + alt: string; +}; + +const DEFAULT_BADGES: BadgeItem[] = [ + { + href: "https://github.com/NVIDIA/OpenShell", + src: "https://img.shields.io/badge/github-repo-green?logo=github", + alt: "GitHub", + }, + { + href: "https://github.com/NVIDIA/OpenShell/blob/main/LICENSE", + src: "https://img.shields.io/badge/License-Apache_2.0-blue", + alt: "License", + }, + { + href: "https://pypi.org/project/openshell/", + src: "https://img.shields.io/badge/PyPI-openshell-orange?logo=pypi", + alt: "PyPI", + }, +]; + +export function BadgeLinks({ badges = DEFAULT_BADGES }: { badges?: BadgeItem[] }) { + return ( +
+ {badges.map((b) => ( + + {b.alt} + + ))} +
+ ); +} diff --git a/fern/components/CustomFooter.tsx b/fern/components/CustomFooter.tsx new file mode 100644 index 00000000..fab392c4 --- /dev/null +++ b/fern/components/CustomFooter.tsx @@ -0,0 +1,91 @@ +/** + * Custom footer for NVIDIA docs (Fern native header/footer). + * Markup and class names match the original custom-app footer 1:1 so that + * fern/main.css (footer + Built with Fern styles) applies correctly: + * dark mode logo, responsive layout, and Built with Fern tooltip. + */ +export default function CustomFooter() { + const currentYear = new Date().getFullYear(); + const logoUrl = + "https://fern-image-hosting.s3.us-east-1.amazonaws.com/nvidia/NVIDIA_Logo_0.svg"; + + return ( + + ); +} diff --git a/fern/components/GetStartedTerminal.tsx b/fern/components/GetStartedTerminal.tsx new file mode 100644 index 00000000..54ddb020 --- /dev/null +++ b/fern/components/GetStartedTerminal.tsx @@ -0,0 +1,86 @@ +/** + * Animated terminal block for the Get Started section. + * Renders install + sandbox create commands with cycling agent options. + */ +export function GetStartedTerminal() { + return ( + <> + +
+
+ + + +
+
+
+ $ uv pip install openshell +
+
+ $ openshell sandbox create{" "} + + -- claude + --from openclaw + -- opencode + -- codex + + +
+
+
+ + ); +} diff --git a/fern/docs.yml b/fern/docs.yml new file mode 100644 index 00000000..ec2990db --- /dev/null +++ b/fern/docs.yml @@ -0,0 +1,54 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +instances: + - url: https://openshell.docs.buildwithfern.com + +title: NVIDIA OpenShell + +versions: + - display-name: v0.0.1 + path: versions/v0.0.1.yml + slug: v0.0.1 + +footer: ./components/CustomFooter.tsx + +layout: + searchbar-placement: header + page-width: 1376px + sidebar-width: 248px + content-width: 812px + tabs-placement: header + hide-feedback: true + +colors: + accentPrimary: + dark: "#76B900" + light: "#76B900" + background: + light: "#FFFFFF" + dark: "#000000" + +theme: + page-actions: toolbar + footer-nav: minimal + +logo: + dark: ./assets/NVIDIA_dark.svg + light: ./assets/NVIDIA_light.svg + height: 20 + href: / + right-text: OpenShell + +favicon: ./assets/NVIDIA_symbol.svg + +css: + - ./main.css + +navbar-links: + - type: github + value: https://github.com/NVIDIA/OpenShell + +experimental: + mdx-components: + - ./components diff --git a/fern/fern.config.json b/fern/fern.config.json new file mode 100644 index 00000000..560f831c --- /dev/null +++ b/fern/fern.config.json @@ -0,0 +1 @@ +{"organization":"nvidia","version":"latest"} diff --git a/fern/main.css b/fern/main.css new file mode 100644 index 00000000..936c895a --- /dev/null +++ b/fern/main.css @@ -0,0 +1,872 @@ +/*! + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. 
Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +/* Color themes for light and dark modes */ +:root { + /* Brand Colors */ + --nv-color-green: #76B900; + --nv-color-green-2: #004B31; + --nv-color-black: #000000; + --nv-color-white: #FFFFFF; + + /* Grey Scale - Light */ + --nv-light-grey-1: #f7f7f7; + --nv-light-grey-2: #EEEEEE; + --nv-light-grey-3: #DDDDDD; + --nv-light-grey-4: #CCCCCC; + --nv-light-grey-5: #999999; + + /* Grey Scale - Dark */ + --nv-dark-grey-1: #111111; + --nv-dark-grey-2: #1A1A1A; + --nv-dark-grey-3: #222222; + --nv-dark-grey-4: #333333; + --nv-dark-grey-5: #666666; + + /* Colors by Usage */ + --nv-color-text: #000000; + --nv-color-bg-default: #FFFFFF; + --nv-color-bg-alt: #f7f7f7; + --nv-color-success: #76B900; + --nv-color-error: #f44336; + + /* Theme-independent settings */ + --rounded: 999px; +} +main { + min-height: calc(100vh - 200px); + } +/* Typography - Headers */ +h1 { + font-size: 36px; + font-weight: 700; + line-height: 1.25em; /* 45px */ +} + +h2 { + font-size: 28px; + font-weight: 700; + line-height: 1.25em; /* 35px */ +} + +h3 { + font-size: 24px; + font-weight: 700; + line-height: 1.25em; /* 30px */ +} + +h4 { + font-size: 20px; + font-weight: 700; + line-height: 1.25em; /* 25px */ +} + +/* Typography - Paragraphs */ +.prose{ + color: var(--nv-dark-grey-2) !important; +} +.dark .prose{ + color: var(--nv-light-grey-2) !important; +} +p { + text-decoration-thickness: 3px; +} +.fern-mdx-link { + color: var(--tw-prose-body); + text-decoration-color: var(--accent); + font-weight: var(--font-weight-normal); +} + +/* Badge links: hide redundant external-link icon (badges already indicate links) */ +.badge-links .fern-mdx-link svg { + display: none; +} + +/* Light theme (default) */ +html:not([data-theme]),html[data-theme=light] { + --pst-color-background: #fff; + 
 --pst-color-on-background: #fff; + --pst-color-shadow: #ccc; + --pst-color-heading: #000; + --pst-color-text-base: #1a1a1a; + --pst-color-text-muted: #666; + --pst-color-surface: #f7f7f7; + --pst-color-on-surface: #333; + --pst-color-primary: var(--nv-color-green-2); + --pst-color-table-row-hover-bg: var(--nv-color-green); + --pst-color-link: var(--pst-color-text-base); + --pst-color-link-hover: var(--pst-color-text-base); + --pst-color-inline-code: var(--pst-color-primary); + --pst-color-inline-code-links: var(--pst-color-primary); + --pst-color-secondary: var(--pst-color-primary); + --pst-color-secondary-bg: var(--nv-color-green); + --pst-color-accent: var(--nv-color-green); +} + +/* Dark theme */ +html[data-theme=dark] { + --pst-color-background: #111; + --pst-color-on-background: #000; + --pst-color-shadow: #000; + --pst-color-heading: #fff; + --pst-color-text-base: #eee; + --pst-color-text-muted: #999; + --pst-color-surface: #1a1a1a; + --pst-color-on-surface: #ddd; + --pst-color-primary: var(--nv-color-green); + --pst-color-table-row-hover-bg: var(--nv-color-green-2); + --pst-color-link: var(--pst-color-text-base); + --pst-color-link-hover: var(--pst-color-text-base); + --pst-color-inline-code: var(--pst-color-primary); + --pst-color-inline-code-links: var(--pst-color-primary); + --pst-color-secondary: var(--pst-color-primary); + --pst-color-secondary-bg: var(--nv-color-green-2); + --pst-color-accent: var(--nv-color-green); +} + +/* Product and version selector styling */ + +.fern-product-selector { + border-radius: 8px; + pointer-events: none !important; + padding-right: 2px; +} + +.product-dropdown-trigger svg{ + display: none !important; +} + +.fern-product-selector .product-dropdown-trigger p{ + font-weight: bold !important; +} +.fern-product-selector-radio-group { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 8px; + max-width: 1000px; +} + +@media (max-width: 768px) { + .fern-product-selector-radio-group { + grid-template-columns: 
repeat(2, 1fr); + } +} +.fern-version-selector { + transform: translateY(-1px); +} + +.fern-version-selector .version-dropdown-trigger{ + outline: 1px solid var(--border, var(--grayscale-a5)) !important; + border-radius: 5px; + transition: box-shadow 0.3s ease, outline 0.3s ease; +} +.product-dropdown-trigger{ + padding-left: 0px !important; +} + +.product-dropdown-trigger, .version-dropdown-trigger{ + background-color: transparent !important; +} +.product-dropdown-trigger svg:hover{ + stroke: var(--nv-color-green) !important; +} +.version-dropdown-trigger:hover{ + box-shadow: 0 0 0 1px var(--nv-color-green) !important; +} +.version-dropdown-trigger svg:hover{ + stroke: var(--nv-color-green) !important; +} +/* Sidebar styling */ +#fern-sidebar { + border-right: 1px solid var(--border, var(--grayscale-a5)) !important; + height: 100vh !important; +} +.fern-sidebar-link:not(:hover){ + background-color: transparent !important; +} +.fern-sidebar-link { + padding-left: 1rem !important; + padding-right: 1rem !important; + padding-top: 0.5rem !important; + padding-bottom: 0.5rem !important; + border-radius: 0px !important; + &.nested { + padding-left: 1rem !important; + } +} +/* Section-level sidebar links (pages that have children) should match sidebar heading padding */ +.fern-sidebar-group > li > .fern-sidebar-link:has(+ .fern-sidebar-group) { + padding-left: 0.25rem !important; +} +.fern-sidebar-group{ + padding: 0 !important +} +#fern-sidebar-scroll-area{ + padding-right: 0 !important +} + +/* header styling */ +.fern-header-content{ + padding-left: 18.5px; + margin-top: -5px; + margin-bottom: -5px; +} +#fern-header { + border-color: var(--border, var(--grayscale-a5)) !important; +} +@keyframes header-background-fade { + 0% { + background-color: transparent; + } + 100% { + background-color: var(--header-background); + } + } + +[data-theme=default]#fern-header { +animation: header-background-fade linear; +animation-timeline: scroll(); +animation-range: 0 50px; +} 
+.fern-header-navbar-links .fern-button{ + background-color: transparent !important; +} +.fern-header-navbar-links > button{ + background-color: transparent !important; +} +.fern-header-logo-container > div > div > a > img{ + padding-right: 0.5rem; +} +.fern-header-logo-container .font-heading{ + font-size: 16px !important; + font-weight: bold !important; + color: var(--grayscale-a12) !important; + border-inline: 1px solid var(--border, var(--grayscale-a5)); + padding: 15px 1rem; + margin: -20px 0.5rem; +} +@media (max-width: 1024px) { + .fern-header-logo-container .font-heading{ + display: none !important; + } +} +/* Search bar styling */ +#fern-search-button{ + background-color: transparent !important; + border-radius: var(--rounded); + transition: box-shadow 0.3s ease, outline 0.3s ease; +} +#fern-search-button:hover{ + box-shadow: 0 0 0 1px var(--nv-color-green) !important; +} +#fern-search-button .fern-kbd{ + display: none; +} + +.fern-layout-footer-toolbar button{ + background-color: transparent !important; + border-color: transparent !important; + padding-inline: 0px !important; +} + +/* ========== Custom footer (native React component) – 1:1 with original ========== */ +.bd-footer { + border-top: 1px solid var(--border, var(--grayscale-a5)) !important; + font-family: NVIDIA, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif !important; + font-size: 0.875rem; + padding: 2rem 0; + width: 100%; +} +.bd-footer * { + font-family: inherit; +} +.bd-footer__inner { + padding: 0 2rem; +} +.footer-items__start { + display: flex; + flex-direction: column; + gap: 1.5rem; +} +.footer-logos-container { + display: flex; + align-items: center; + justify-content: space-between; + width: 100%; + gap: 1rem; +} +.footer-brand { + display: inline-block; + text-decoration: none; +} +.footer-brand .logo__image { + height: 24px; + width: auto; + transition: opacity 0.2s ease; +} +.footer-brand:hover .logo__image { + opacity: 0.8; +} +.footer-brand-fern { + display: 
flex; + align-items: center; + margin-left: auto; +} +/* Logo theme visibility – .dark is on ancestor in Fern */ +.only-light { + display: block; + filter: invert(1); +} +.only-dark { + display: none; +} +.dark .only-light { + display: none; +} +.dark .only-dark { + display: block; + filter: none; +} +.footer-links { + display: flex; + flex-wrap: wrap; + gap: 0.25rem 0.5rem; + line-height: 1.65; + margin: 0; + padding: 0; +} +.footer-links a { + color: var(--grayscale-a11); + text-decoration: none; + transition: color 0.2s ease; + white-space: nowrap; +} +.pipe-separator { + color: var(--grayscale-a11); + white-space: nowrap; +} +.copyright { + color: var(--grayscale-a11); + font-size: 0.875rem; + line-height: 1.65; + margin: 0; +} +@media (max-width: 768px) { + .bd-footer { padding: 1.5rem 0; } + .bd-footer__inner { padding: 0 1.5rem; } + .footer-items__start { gap: 1rem; } + .footer-links { flex-direction: row; gap: 0.5rem 0.75rem; } + .footer-links a { white-space: normal; word-break: break-word; } +} +@media (max-width: 480px) { + .footer-links { gap: 0.5rem; } + .footer-links a { font-size: 0.8125rem; } + .copyright { font-size: 0.8125rem; } +} +/* Built with Fern link + tooltip */ +.built-with-fern-link { + display: flex; + align-items: baseline; + gap: 0.25rem; + text-decoration: none; + position: relative; +} +.built-with-fern-logo { + height: 1rem; + margin: 0; + transition: filter 150ms ease; +} +.built-with-fern-logo path { fill: var(--grayscale-a12); } +.built-with-fern-link:hover .built-with-fern-logo { filter: saturate(1) opacity(1); } +.built-with-fern-link:hover .built-with-fern-logo path:nth-child(2) { fill: #51C233; } +.built-with-fern-tooltip { + position: absolute; + top: 50%; + right: calc(100%); + bottom: auto; + left: auto; + transform: translateY(-50%); + margin: 0; + margin-right: 0.5rem; + padding: 0.5rem 0.75rem; + background-color: #FFFFFF; + color: #000000; + font-size: 0.85rem; + border-radius: 0.375rem; + border: 1px solid 
var(--grayscale-a5); + white-space: nowrap; + pointer-events: none; + opacity: 0; + transition: opacity 150ms ease; + transition-delay: 0s; + z-index: 50; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15); + width: max-content; +} +.built-with-fern-link:hover .built-with-fern-tooltip { + opacity: 1; + transition-delay: 0.75s; +} +.dark .built-with-fern-tooltip { + background-color: #000000; + color: #FFFFFF; +} +.built-with-fern-logo-dark { display: none; } +.dark .built-with-fern-logo-light { display: none; } +.dark .built-with-fern-logo-dark { display: block; } +@media (prefers-color-scheme: dark) { + .built-with-fern-logo-light { display: none; } + .built-with-fern-logo-dark { display: block; } +} + +/* Footer styling */ +.fern-footer-nav{ + border-radius: var(--rounded); + background-color: transparent !important; + transition: box-shadow 0.3s ease, outline 0.3s ease; +} +/* Hide line numbers */ +.code-block-line-gutter { + display: none !important; +} +.fern-footer-prev h4, .fern-footer-next h4{ + font-size: inherit !important; +} +.fern-sidebar-link.nested[data-state="active"]:before { + left: -0px !important; + bottom: -0px !important; + top: -0px !important; + width: 2px !important; +} +.fern-sidebar-link[data-state="active"] { + color: unset !important; +} + +.fern-selection-item .fern-selection-item-icon{ + border-color: transparent !important; +} +/* Button styling */ +.fern-button{ + border-radius: var(--rounded); + font-weight: bold; +} +.fern-button.filled.primary{ + color: var(--nv-color-black); +} +.dark .fern-button.filled.primary{ + background-color: var(--nv-color-white); +} +.dark .fern-button.filled.primary:hover{ + background-color: var(--nv-light-grey-2); +} +.fern-button.outlined.normal{ + background-color: transparent; + --tw-ring-color: transparent; + color: var(--nv-color-black); +} +.fern-button.outlined.normal:hover{ + color: var(--nv-color-green) +} +.dark .fern-button.outlined.normal{ + color: var(--nv-color-white); +} +.dark 
.fern-button.outlined.normal:hover{ + color: var(--nv-color-green); +} +/* Card styling */ +.fern-card{ + transition: box-shadow 0.3s ease, outline 0.3s ease; +} +svg.card-icon{ + height: 24px !important; + width: 24px !important; +} +.card-icon{ + background-color: transparent !important; +} +.fern-card:hover{ + box-shadow: 0 0 0 1px var(--nv-color-green) !important; +} +.fern-docs-badge{ + border-radius: var(--rounded); +} +.fern-page-actions button:hover{ + background-color: transparent !important; +} +.fern-page-actions a:hover{ + background-color: transparent !important; +} +/* Moving logo to footer */ +#builtwithfern, #builtwithfern * { + display: none !important; +} + +/* Landing Page Gradients */ +/* Top: Simple radial gradient (no mask, responsive) */ +.landing-gradient-top { + position: absolute; + top: 0; + left: 0; + right: 0; + height: 800px; + background: radial-gradient(ellipse 100% 100% at 50% 10%, + rgba(191, 242, 48, 0.15) 0%, + rgba(158, 228, 179, 0.12) 30%, + rgba(124, 215, 254, 0.12) 50%, + rgba(124, 215, 254, 0.06) 75%, + transparent 100%); + pointer-events: none; + z-index: 0; +} + +/* Bottom: Masked gradient for organic transition */ +.landing-gradient-bottom { + position: absolute; + bottom: -282px; + left: 0; + right: 0; + height: 1232px; + background: linear-gradient(85deg, #BFF230 41.98%, #7CD7FE 99.52%); + opacity: 0.05; + pointer-events: none; + z-index: 5; + mask-image: url('https://www.figma.com/api/mcp/asset/27509afa-9c16-46bb-8415-4395e2e5a347'); + mask-repeat: no-repeat; + mask-position: 0% -17px; + mask-size: 100% auto; + -webkit-mask-image: url('https://www.figma.com/api/mcp/asset/27509afa-9c16-46bb-8415-4395e2e5a347'); + -webkit-mask-repeat: no-repeat; + -webkit-mask-position: 0% -17px; + -webkit-mask-size: 100% auto; +} + +/* Landing Page Gradients Wrapper */ +.landing-page-gradients { + position: relative; + width: 100%; + margin-top: -100px; + padding-top: 100px; + overflow: visible; + background: #181818; +} + +/* Hero 
Section (Landing page only) */ +.hero-section { + position: relative; + width: 100%; + padding: 3rem 6rem; + margin: 0 auto; + overflow: visible; + display: flex; + flex-direction: column; + align-items: center; + z-index: 10; +} + +/* Hero Section Content - constrain width */ +.hero-section > * { + position: relative; + z-index: 100; + max-width: 1440px; + width: 100%; +} + +/* Tablet and Mobile: fix spacing and layout */ +@media (max-width: 1024px) { + /* Extend dark background behind header */ + .landing-page body, .landing-page html, .landing-page main { + background: #181818 !important; + } + + .landing-page-gradients { + margin-top: -100px; + padding-top: 100px; + } + + .hero-section { + padding: 2rem 2rem; + } + + .hero-section > * { + max-width: none; + } + + .hero-content-grid { + grid-template-columns: 1fr; + gap: 2rem; + } + + .hero-heading { + font-size: 36px; + } + + .hero-subtitle { + font-size: 16px; + } + + .hero-title-section { + margin-bottom: 2rem; + } +} + +/* Small mobile only */ +@media (max-width: 600px) { + .hero-heading { + font-size: 28px; + } + + .hero-section { + padding: 1.5rem 1.5rem; + } +} + +.hero-section h1, +.hero-section h2, +.hero-section h3, +.hero-section h4, +.hero-section h5, +.hero-section h6 { + pointer-events: none !important; +} +/* Hero Title Section */ +.hero-title-section { + text-align: center; + margin-bottom: 4rem; + position: relative; + z-index: 100; +} + +.hero-heading { + font-size: 48px; + font-weight: 700; + line-height: 1.2; + margin: 0 0 1rem 0; + color: var(--nv-color-white); +} + +.hero-subtitle { + font-size: 18px; + line-height: 1.5; + margin: 0; + color: var(--nv-color-white); +} + +/* Hero Content Grid */ +.hero-content-grid { + display: grid; + grid-template-columns: repeat(2, 1fr); + gap: 3rem; + align-items: start; + position: relative; + z-index: 100; +} + +.hero-column { + display: flex; + flex-direction: column; + gap: 1rem; +} + +.hero-column-title { + font-size: 24px; + font-weight: 700; + 
margin: 0; + color: var(--nv-color-white); +} + +.hero-column-subtitle { + font-size: 16px; + margin: 0 0 1rem 0; + color: var(--nv-color-white); +} + +/* Hero Card Container (Left Column) */ +.hero-card-container { + display: flex; + flex-direction: column; + border-radius: 8px; + overflow: hidden; + border: 1px solid var(--border, var(--grayscale-a5)); + margin-top: 1.5rem !important; + background: rgba(26, 26, 26, 0.2); + backdrop-filter: blur(6px); +} + +.hero-card-image { + width: 100%; + height: auto; + display: block; +} + +.hero-card-content { + padding: 1.5rem; + display: flex; + flex-direction: row; + gap: 1rem; + align-items: center; + justify-content: space-between; + background: rgba(26, 26, 26, 0.2); + backdrop-filter: blur(6px); +} + +.hero-card-text-wrapper { + flex: 1; +} + +.hero-card-text { + margin: 0; + font-size: 14px; + line-height: 1.5; + color: var(--nv-color-white); +} + +.hero-card-button-wrapper { + flex-shrink: 0; +} +.hero-card-button-wrapper .fern-mdx-link{ + text-decoration: none !important; +} + +.hero-card-button { + white-space: nowrap; +} + +/* Hero Cards */ + +.hero-column .fern-card { + padding: 9px 17px; + background-color: rgba(26, 26, 26, 0.2) !important; + backdrop-filter: blur(6px); +} + +.hero-section .fern-card{ + color: white !important; +} + +.hero-column .card-icon { + font-size: 64px !important; + width: 64px !important; + height: 64px !important; +} + +.hero-column .card-icon svg, +.hero-column .card-icon i { + font-size: 64px !important; + width: 64px !important; + height: 64px !important; +} + +.hero-column .fern-card-title { + font-size: 16px; + font-weight: 500; + line-height: 24px; +} + +.hero-column .fern-card p { + font-size: 14px; + line-height: 20px; + color: white !important; +} + +/* Body Section */ +.body-section { + display: flex; + padding: 4rem 16rem; + flex-direction: column; + justify-content: center; + align-items: center; + gap: 4rem; + align-self: stretch; + position: relative; + z-index: 1; + 
background: #181818; +} + +/* Body Section Content - constrain width */ +.body-section > * { + max-width: 1440px; + width: 100%; + position: relative; + z-index: 10; +} + +.code-block .fern-code-link{ + text-decoration: underline !important; + text-decoration-color: var(--accent) !important; + text-underline-offset: 1px !important; + text-decoration-style: underline !important; +} + +/* Mobile Styles */ +@media (max-width: 768px) { + .hero-section { + padding: 2rem 1.5rem; + } + + .hero-title-section { + margin-bottom: 2rem; + } + + .hero-heading { + font-size: 32px; + } + + .hero-subtitle { + font-size: 16px; + } + + .hero-content-grid { + grid-template-columns: 1fr; + gap: 2rem; + } + + .hero-column-title { + font-size: 20px; + } + + .hero-column-subtitle { + font-size: 14px; + } + + .hero-card-content { + flex-direction: column; + align-items: flex-start; + } + + .hero-card-button-wrapper { + align-self: flex-start; + } + + .hero-column .card-icon, + .hero-column .card-icon svg, + .hero-column .card-icon i { + font-size: 40px !important; + width: 40px !important; + height: 40px !important; + } + + .hero-column .fern-card-title { + font-size: 14px; + } + + .hero-column .fern-card p { + font-size: 11px; + } + + .body-section { + padding: 2rem 1.5rem; + } + + .fern-selection-item-icon.use-icon { + display: none !important; + } +} \ No newline at end of file diff --git a/fern/scripts/add_frontmatter.py b/fern/scripts/add_frontmatter.py new file mode 100644 index 00000000..6fc78fb0 --- /dev/null +++ b/fern/scripts/add_frontmatter.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +"""Add frontmatter (title, description) to MDX files derived from first H1.""" + +import argparse +import re +from pathlib import Path + + +def derive_title(content: str) -> str: + """Extract title from first # Heading.""" + match = re.search(r"^#\s+(.+)$", content, re.MULTILINE) + if match: + title = match.group(1).strip() + title = re.sub(r"\{[^}]+\}`[^`]*`", "", title).strip() + return title or "Untitled" + return "Untitled" + + +def add_frontmatter(filepath: Path) -> bool: + """Add frontmatter if missing. Returns True if changes were made.""" + content = filepath.read_text() + + if content.strip().startswith("---"): + return False + + title = derive_title(content) + title_escaped = title.replace('"', '\\"') + frontmatter = f'---\ntitle: "{title_escaped}"\ndescription: ""\n---\n\n' + body = content.lstrip() + + # Remove duplicate H1 that matches title (Fern uses frontmatter title) + body = re.sub(r"^#\s+" + re.escape(title) + r"\s*\n+", "", body, count=1) + + new_content = frontmatter + body + filepath.write_text(new_content) + return True + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Add frontmatter to MDX files" + ) + parser.add_argument( + "pages_dir", + type=Path, + help="Path to pages directory (e.g. 
fern/v0.2.0/pages)", + ) + args = parser.parse_args() + + pages_dir = args.pages_dir.resolve() + if not pages_dir.exists(): + raise SystemExit(f"Error: pages directory not found at {pages_dir}") + + changed = [] + for mdx_file in sorted(pages_dir.rglob("*.mdx")): + if add_frontmatter(mdx_file): + changed.append(mdx_file.relative_to(pages_dir)) + print(f" Added frontmatter: {mdx_file.relative_to(pages_dir)}") + + print(f"\nAdded frontmatter to {len(changed)} files") + + +if __name__ == "__main__": + main() diff --git a/fern/scripts/check_unconverted.sh b/fern/scripts/check_unconverted.sh new file mode 100755 index 00000000..50790a6e --- /dev/null +++ b/fern/scripts/check_unconverted.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Check for unconverted MyST syntax in Fern docs + +set -e + +PAGES_DIR="${1:-fern/v0.0.1/pages}" + +echo "=== Checking for unconverted MyST syntax in $PAGES_DIR ===" +echo "" + +ISSUES_FOUND=0 + +echo "Checking for MyST directives (:::)..." +if grep -r ':::' "$PAGES_DIR" 2>/dev/null; then + echo "⚠️ Found unconverted MyST directives (see above)" + ISSUES_FOUND=1 +else + echo "✓ No MyST directives found" +fi +echo "" + +echo "Checking for {ref} references (Sphinx cross-refs, not LaTeX \\text{ref})..." +if grep -rE '\{ref\}`' "$PAGES_DIR" 2>/dev/null || grep -rE '\{ref\} ' "$PAGES_DIR" 2>/dev/null; then + echo "⚠️ Found unconverted {ref} references" + ISSUES_FOUND=1 +else + echo "✓ No {ref} references found" +fi +echo "" + +echo "Checking for {octicon} icons..." +if grep -r '{octicon}' "$PAGES_DIR" 2>/dev/null; then + echo "⚠️ Found unconverted {octicon} icons" + ISSUES_FOUND=1 +else + echo "✓ No {octicon} icons found" +fi +echo "" + +echo "Checking for {py:class} / {py:meth} / {py:mod} / {py:attr} / {py:func} / {doc}..." 
+if grep -rE '\{py:(class|meth|mod|attr|func)\}' "$PAGES_DIR" 2>/dev/null || grep -rE '\{doc\}`' "$PAGES_DIR" 2>/dev/null; then + echo "⚠️ Found unconverted py: or doc: roles" + ISSUES_FOUND=1 +else + echo "✓ No py:/doc roles found" +fi +echo "" + +echo "Checking for sphinx-design badges..." +if grep -r '{bdg-' "$PAGES_DIR" 2>/dev/null; then + echo "⚠️ Found unconverted badges" + ISSUES_FOUND=1 +else + echo "✓ No badges found" +fi +echo "" + +echo "Checking for MyST mermaid syntax..." +if grep -r '```{mermaid}' "$PAGES_DIR" 2>/dev/null; then + echo "⚠️ Found unconverted mermaid blocks (should be \`\`\`mermaid)" + ISSUES_FOUND=1 +else + echo "✓ No MyST mermaid syntax found" +fi +echo "" + +echo "=== Summary ===" +if [ $ISSUES_FOUND -eq 0 ]; then + echo "✓ All checks passed" + exit 0 +else + echo "⚠️ Some issues found - review and fix above" + exit 1 +fi diff --git a/fern/scripts/convert_myst_to_fern.py b/fern/scripts/convert_myst_to_fern.py new file mode 100644 index 00000000..bf68f034 --- /dev/null +++ b/fern/scripts/convert_myst_to_fern.py @@ -0,0 +1,388 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +"""Convert MyST Markdown syntax to Fern MDX components. + +Handles: admonitions, dropdowns, tab sets, grid cards, toctree removal, +HTML comments, plus: {image}, {contents}, {literalinclude}, {admonition}, +{code-block}, {doctest}. +Also converts and to Markdown links so MDX doesn't parse them as JSX. +Run convert_automodel_specific.py first if needed. 
+""" + +import argparse +import re +from pathlib import Path + +API_DOCS_BASE = "https://docs.nvidia.com/openshell/latest" + + +def convert_admonitions(content: str) -> str: + """Convert MyST admonitions to Fern components.""" + admonition_map = { + "note": "Note", + "warning": "Warning", + "tip": "Tip", + "important": "Info", + "seealso": "Note", + "caution": "Warning", + "danger": "Warning", + "attention": "Warning", + "hint": "Tip", + } + + for myst_type, fern_component in admonition_map.items(): + pattern = rf"```\{{{myst_type}\}}\s*\n(.*?)```" + replacement = rf"<{fern_component}>\n\1" + content = re.sub(pattern, replacement, content, flags=re.DOTALL | re.IGNORECASE) + + # :::, ::::, ::::: with optional space before { + for colons in [r":::", r"::::", r":::::"]: + pattern = rf"{colons}\s*\{{{myst_type}\}}\s*\n(.*?){colons}" + content = re.sub(pattern, replacement, content, flags=re.DOTALL | re.IGNORECASE) + pattern = rf"{colons}\s+\{{{myst_type}\}}\s*\n(.*?){colons}" + content = re.sub(pattern, replacement, content, flags=re.DOTALL | re.IGNORECASE) + + # Shorthand :::note (no braces) + pattern = rf":::\s*{myst_type}\s*\n(.*?):::" + content = re.sub(pattern, replacement, content, flags=re.DOTALL | re.IGNORECASE) + for colons in [r"::::", r":::::"]: + pattern = rf"{colons}\s*{myst_type}\s*\n(.*?){colons}" + content = re.sub(pattern, replacement, content, flags=re.DOTALL | re.IGNORECASE) + + return content + + +def convert_admonition_directive(content: str) -> str: + """Convert {admonition} Title :class: dropdown to Accordion.""" + pattern = r"```\{admonition\}\s+([^\n]+)(?:\s*\n(?::[^\n]+\n)*)?\n(.*?)```" + def replace(match: re.Match[str]) -> str: + title = match.group(1).strip().replace('"', "'") + body = match.group(2).strip() + return f'\n{body}\n' + return re.sub(pattern, replace, content, flags=re.DOTALL) + + +def convert_dropdowns(content: str) -> str: + """Convert MyST dropdowns to Fern Accordion components.""" + pattern = 
r"```\{dropdown\}\s+([^\n]+)\s*\n(.*?)```" + def replace_dropdown(match: re.Match[str]) -> str: + title = match.group(1).strip() + body = match.group(2).strip() + if '"' in title: + title = title.replace('"', "'") + return f'\n{body}\n' + return re.sub(pattern, replace_dropdown, content, flags=re.DOTALL) + + +def convert_tab_sets(content: str) -> str: + """Convert MyST tab sets to Fern Tabs components.""" + content = re.sub(r"::::+\s*\{tab-set\}\s*", "\n", content) + content = re.sub(r"```\{tab-set\}\s*", "\n", content) + + def replace_tab_item(match: re.Match[str]) -> str: + title = match.group(1).strip() + return f'' + + content = re.sub(r"::::*\s*\{tab-item\}\s+([^\n]+)", replace_tab_item, content) + content = re.sub(r":::*\s*\{tab-item\}\s+([^\n]+)", replace_tab_item, content) + + lines = content.split("\n") + result = [] + in_tab = False + + for line in lines: + if '\n") + in_tab = True + result.append(line) + elif line.strip() in [":::::", "::::", ":::", ""]: + if in_tab and line.strip() != "": + result.append("") + in_tab = False + if line.strip() in [":::::", "::::"]: + result.append("") + else: + result.append(line) + else: + result.append(line) + + content = "\n".join(result) + content = re.sub(r"\n::::+\n", "\n", content) + content = re.sub(r"\n:::+\n", "\n", content) + return content + + +def convert_grid_cards(content: str) -> str: + """Convert MyST grid cards to Fern Cards components.""" + content = re.sub(r"::::+\s*\{grid\}[^\n]*\n", "\n", content) + content = re.sub(r"```\{grid\}[^\n]*\n", "\n", content) + + def replace_card(match: re.Match[str]) -> str: + full_match = match.group(0) + title_match = re.search(r"\{grid-item-card\}\s+(.+?)(?:\n|$)", full_match) + title = title_match.group(1).strip() if title_match else "Card" + link_match = re.search(r":link:\s*(\S+)", full_match) + href = link_match.group(1) if link_match else "" + if href and href != "apidocs/index": + if not href.startswith("http"): + href = "/" + href.replace(".md", 
"").replace(".mdx", "") + return f'' + if href == "apidocs/index": + return f'' + return f'' + + content = re.sub( + r"::::*\s*\{grid-item-card\}[^\n]*(?:\n:link:[^\n]*)?(?:\n:link-type:[^\n]*)?", + replace_card, + content, + ) + content = re.sub( + r":::*\s*\{grid-item-card\}[^\n]*(?:\n:link:[^\n]*)?(?:\n:link-type:[^\n]*)?", + replace_card, + content, + ) + + lines = content.split("\n") + result = [] + in_card = False + + for line in lines: + if '\n") + in_card = True + result.append(line) + elif line.strip() in [":::::", "::::", ":::", ""]: + if in_card and line.strip() != "": + result.append("\n") + in_card = False + if line.strip() in [":::::", "::::"]: + result.append("\n") + else: + result.append(line) + + return "\n".join(result) + + +def remove_toctree(content: str) -> str: + """Remove toctree blocks entirely.""" + content = re.sub(r"```\{toctree\}.*?```", "", content, flags=re.DOTALL) + content = re.sub(r":::\{toctree\}.*?:::", "", content, flags=re.DOTALL) + return content + + +def remove_contents(content: str) -> str: + """Remove {contents} directive (Fern has its own nav).""" + content = re.sub(r"```\{contents\}.*?```", "", content, flags=re.DOTALL) + content = re.sub(r":::\{contents\}.*?:::", "", content, flags=re.DOTALL) + return content + + +def convert_figure(content: str, filepath: Path) -> str: + """Convert {figure} directive to markdown image.""" + # ::: or :::: or :::::{figure} ./path.png with optional :alt: :name: and caption + + def replace(match: re.Match[str]) -> str: + img_path = match.group(1).strip() + full_match = match.group(0) + alt_match = re.search(r":alt:\s*(.+)", full_match) + alt = alt_match.group(1).strip() if alt_match else img_path.split("/")[-1] + if img_path.startswith("./"): + img_name = img_path[2:] + else: + img_name = img_path + return f"![{alt}](/assets/training/images/{img_name})" + + for colons in [r"::::+", r":::"]: + pattern = rf"{colons}\s*\{{figure\}}\s+([^\s\n]+)[\s\S]*?{colons}" + content = re.sub(pattern, 
replace, content) + return content + + +def convert_raw_html(content: str) -> str: + """Convert {raw} html directive - extract and pass through HTML content.""" + pattern = r":::\s*\{raw\}\s+html\s*\n(.*?):::" + def replace(match: re.Match[str]) -> str: + return match.group(1).strip() + return re.sub(pattern, replace, content, flags=re.DOTALL) + + +def convert_image(content: str, filepath: Path, repo_root: Path) -> str: + """Convert {image} path to markdown image. Path relative to current file.""" + pattern = r"```\{image\}\s+([^\s\n]+)(?:\s*\n(?::[^\n]+\n)*)?```" + def replace(match: re.Match[str]) -> str: + img_path = match.group(1).strip() + img_name = img_path.split("images/")[-1] if "images/" in img_path else img_path.split("/")[-1] + return f"![{img_name}](/assets/training/images/{img_name})" + return re.sub(pattern, replace, content) + + +def convert_literalinclude(content: str, filepath: Path, repo_root: Path) -> str: + """Convert {literalinclude} to fenced code block. Inlines full file.""" + pattern = r"```\{literalinclude\}\s+([^\s\n]+)(?:\s*\n(?::[^\n]+\n)*)?\s*```" + def replace(match: re.Match[str]) -> str: + inc_path = match.group(1).strip() + resolved = (repo_root / "docs" / inc_path).resolve() + if not resolved.exists(): + resolved = (repo_root / inc_path.replace("../", "")).resolve() + if not resolved.exists(): + return f"" + lang = "python" if resolved.suffix == ".py" else "" + try: + body = resolved.read_text() + except Exception: + return f"" + return f"```{lang}\n{body}\n```" + return re.sub(pattern, replace, content) + + +def convert_code_block(content: str) -> str: + """Convert {code-block} lang to standard ```lang.""" + pattern = r"```\{code-block\}\s+(\w+)(?:\s*\n(?::[^\n]+\n)*)?\n(.*?)```" + def replace(match: re.Match[str]) -> str: + lang = match.group(1) + body = match.group(2).rstrip() + return f"```{lang}\n{body}\n```" + return re.sub(pattern, replace, content, flags=re.DOTALL) + + +def convert_doctest(content: str) -> str: + 
"""Convert {doctest} to standard code block.""" + pattern = r"```\{doctest\}\s*\n(.*?)```" + def replace(match: re.Match[str]) -> str: + body = match.group(1).strip() + return f"```python\n{body}\n```" + return re.sub(pattern, replace, content, flags=re.DOTALL) + + +def escape_sphinx_doc_refs(content: str) -> str: + """Escape Sphinx doc refs like that MDX parses as JSX.""" + content = re.sub( + r"", + f"[API Documentation]({API_DOCS_BASE}/)", + content, + ) + return content + + +def convert_picture_to_img(content: str) -> str: + """Convert to for MDX compatibility.""" + pattern = r"[\s\S]*?]*)/?>[\s\S]*?" + def replace(match: re.Match[str]) -> str: + img_attrs = match.group(1).strip() + return f"" + return re.sub(pattern, replace, content, flags=re.IGNORECASE) + + +def convert_angle_bracket_urls_and_emails(content: str) -> str: + """Convert and to Markdown links so MDX doesn't parse them as JSX tags.""" + content = re.sub( + r"<(https?://[^>]+)>", + r"[\1](\1)", + content, + ) + content = re.sub( + r"<([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,})>", + r"[\1](mailto:\1)", + content, + ) + return content + + +def convert_html_comments(content: str) -> str: + """Convert HTML comments to JSX comments.""" + return re.sub(r"", r"{/* \1 */}", content, flags=re.DOTALL) + + +def remove_directive_options(content: str) -> str: + """Remove MyST directive options.""" + for opt in [ + ":icon:", ":class:", ":columns:", ":gutter:", ":margin:", ":padding:", + ":link-type:", ":maxdepth:", ":titlesonly:", ":hidden:", ":link:", + ":caption:", ":language:", ":pyobject:", ":linenos:", ":emphasize-lines:", + ":width:", ":align:", ":relative-docs:", + ]: + content = re.sub(rf"\n{re.escape(opt)}[^\n]*", "", content) + return content + + +def fix_malformed_tags(content: str) -> str: + """Fix common malformed tag issues.""" + content = re.sub(r'title=""', 'title="Details"', content) + content = re.sub( + r"<(Note|Warning|Tip|Info)([^>]*)/>\s*\n([^<]+)", + r"<\1\2>\n\3", + content, + ) + 
return content + + +def clean_multiple_newlines(content: str) -> str: + """Clean up excessive newlines.""" + content = re.sub(r"\n{3,}", "\n\n", content) + return content.strip() + "\n" + + +def convert_file(filepath: Path, repo_root: Path) -> bool: + """Convert a single file. Returns True if changes were made.""" + content = filepath.read_text() + original = content + + content = convert_figure(content, filepath) + content = convert_raw_html(content) + content = convert_admonitions(content) + content = convert_admonition_directive(content) + content = convert_dropdowns(content) + content = convert_grid_cards(content) + content = convert_tab_sets(content) + content = remove_toctree(content) + content = remove_contents(content) + content = convert_image(content, filepath, repo_root) + content = convert_literalinclude(content, filepath, repo_root) + content = convert_code_block(content) + content = convert_doctest(content) + content = escape_sphinx_doc_refs(content) + content = convert_picture_to_img(content) + content = convert_angle_bracket_urls_and_emails(content) + content = convert_html_comments(content) + content = remove_directive_options(content) + content = fix_malformed_tags(content) + content = clean_multiple_newlines(content) + + if content != original: + filepath.write_text(content) + return True + return False + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Convert MyST syntax to Fern MDX in pages directory" + ) + parser.add_argument( + "pages_dir", + type=Path, + help="Path to pages directory (e.g. 
fern/v0.0.1/pages)", + ) + args = parser.parse_args() + + pages_dir = args.pages_dir.resolve() + if not pages_dir.exists(): + raise SystemExit(f"Error: pages directory not found at {pages_dir}") + + repo_root = pages_dir.parent.parent.parent + + changed = [] + for mdx_file in sorted(pages_dir.rglob("*.mdx")): + if convert_file(mdx_file, repo_root): + changed.append(mdx_file.relative_to(pages_dir)) + print(f" Converted: {mdx_file.relative_to(pages_dir)}") + + print(f"\nConverted {len(changed)} files") + + +if __name__ == "__main__": + main() diff --git a/fern/scripts/convert_openshell_specific.py b/fern/scripts/convert_openshell_specific.py new file mode 100644 index 00000000..750d2e0c --- /dev/null +++ b/fern/scripts/convert_openshell_specific.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +"""Convert OpenShell-specific syntax for Fern MDX compatibility. + +Handles: {doc} roles (internal doc links), escaping {variable} in code blocks, +and OpenShell-specific directives like policy_table. 
+""" + +import argparse +import re +from pathlib import Path + + +def resolve_doc_path(path: str, file_dir: Path | None) -> str: + """Resolve doc path to Fern URL.""" + path = path.replace("../", "").replace(".md", "").replace(".mdx", "").strip() + if "/" not in path and file_dir: + rel_parts = file_dir.parts + path = "/".join(rel_parts) + "/" + path + if not path.startswith("/"): + path = "/" + path + return path + + +def convert_doc_roles(content: str, filepath: Path | None = None) -> str: + """Convert {doc}`display ` and {doc}`path` to internal links.""" + file_dir = None + if filepath: + try: + pages_idx = filepath.parts.index("pages") + file_dir = Path(*filepath.parts[pages_idx + 1 : filepath.parts.index(filepath.name)]) + except (ValueError, IndexError): + pass + + def replace_doc_with_path(match: re.Match[str]) -> str: + display = match.group(1).strip() + path = match.group(2).strip() + clean = resolve_doc_path(path, file_dir) + return f"[{display}]({clean})" + + def replace_doc_path_only(match: re.Match[str]) -> str: + path = match.group(1).strip() + clean = resolve_doc_path(path, file_dir) + display = path.split("/")[-1].replace("-", " ").replace("_", " ").title() + return f"[{display}]({clean})" + + content = re.sub(r"\{doc\}`([^`]+?)\s*<([^>]+)>`", replace_doc_with_path, content) + content = re.sub(r"\{doc\}`([^`]+)`", replace_doc_path_only, content) + return content + + +def convert_ref_roles(content: str) -> str: + """Convert {ref}`display ` and {ref}`target` to links or bold text.""" + def replace_ref_with_display(match: re.Match[str]) -> str: + display = match.group(1).strip() + return f"**{display}**" + + def replace_ref_only(match: re.Match[str]) -> str: + target = match.group(1).strip() + display = target.replace("-", " ").replace("_", " ").title() + return f"**{display}**" + + content = re.sub(r"\{ref\}`([^`]+?)\s*<([^>]+)>`", replace_ref_with_display, content) + content = re.sub(r"\{ref\}`([^`]+)`", replace_ref_only, content) + return content + 
+ +def remove_policy_table_directive(content: str) -> str: + """Remove {policy_table} directives (custom Sphinx extension).""" + content = re.sub(r"```\{policy_table\}.*?```", "", content, flags=re.DOTALL) + content = re.sub(r":::\{policy_table\}.*?:::", "", content, flags=re.DOTALL) + return content + + +def escape_mdx_curly_braces_in_code(content: str) -> str: + """Escape {variable} patterns in code blocks so MDX doesn't parse as JSX.""" + def escape_in_code_block(match: re.Match[str]) -> str: + lang = match.group(1) or "" + code = match.group(2) + code = re.sub(r"\{(\w+)\}", r"\\{\1\\}", code) + return f"```{lang}\n{code}```" + + return re.sub(r"```(\w*)\n(.*?)```", escape_in_code_block, content, flags=re.DOTALL) + + +def convert_file(filepath: Path) -> bool: + """Convert a single file. Returns True if changes were made.""" + content = filepath.read_text() + original = content + + content = convert_doc_roles(content, filepath) + content = convert_ref_roles(content) + content = remove_policy_table_directive(content) + + if content != original: + filepath.write_text(content) + return True + return False + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Convert OpenShell-specific syntax for Fern MDX" + ) + parser.add_argument( + "pages_dir", + type=Path, + help="Path to pages directory (e.g. 
fern/v0.0.1/pages)", + ) + args = parser.parse_args() + + pages_dir = args.pages_dir.resolve() + if not pages_dir.exists(): + raise SystemExit(f"Error: pages directory not found at {pages_dir}") + + changed = [] + for mdx_file in sorted(pages_dir.rglob("*.mdx")): + if convert_file(mdx_file): + changed.append(mdx_file.relative_to(pages_dir)) + print(f" Converted: {mdx_file.relative_to(pages_dir)}") + + print(f"\nConverted {len(changed)} files") + + +if __name__ == "__main__": + main() diff --git a/fern/scripts/copy_docs_to_fern.py b/fern/scripts/copy_docs_to_fern.py new file mode 100644 index 00000000..99a60735 --- /dev/null +++ b/fern/scripts/copy_docs_to_fern.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +"""Copy docs/*.md to fern//pages/*.mdx preserving directory structure.""" + +import argparse +import shutil +from pathlib import Path + +SKIP_FILES = { + "conf.py", + "Makefile", + "helpers.py", + "versions1.json", + "project.json", +} +SKIP_DIRS = {"_templates", "_build", "_ext", ".venv", ".git", "__pycache__"} + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Copy docs/*.md to fern//pages/*.mdx" + ) + parser.add_argument( + "version", + help="Version folder name (e.g. 
v0.0.1)", + ) + parser.add_argument( + "--docs-dir", + default="docs", + help="Source docs directory (default: docs)", + ) + parser.add_argument( + "--fern-dir", + default="fern", + help="Fern root directory (default: fern)", + ) + args = parser.parse_args() + + repo_root = Path(__file__).resolve().parent.parent.parent + docs_dir = repo_root / args.docs_dir + fern_dir = repo_root / args.fern_dir + pages_dir = fern_dir / args.version / "pages" + + if not docs_dir.exists(): + raise SystemExit(f"Error: docs directory not found at {docs_dir}") + + pages_dir.mkdir(parents=True, exist_ok=True) + + # Copy docs/assets to fern/assets if they exist + docs_assets = docs_dir / "assets" + fern_assets = fern_dir / "assets" + if docs_assets.exists(): + for asset in docs_assets.rglob("*"): + if asset.is_file(): + rel = asset.relative_to(docs_assets) + dst = fern_assets / rel + dst.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(asset, dst) + print(f"Copied assets from {docs_assets} to {fern_assets}") + + # Copy docs/images to fern/assets/images if they exist + fern_images = fern_assets / "images" + fern_images.mkdir(parents=True, exist_ok=True) + + docs_images = docs_dir / "images" + if docs_images.exists(): + for img in docs_images.iterdir(): + if img.is_file(): + shutil.copy2(img, fern_images / img.name) + print(f"Copied docs/images to {fern_images}") + + # Copy images from docs subdirs (e.g. 
docs/sandboxes/*.png) + for ext in ["*.png", "*.jpg", "*.jpeg", "*.gif", "*.svg"]: + for img_file in docs_dir.rglob(ext): + if img_file.is_file() and not any(part in SKIP_DIRS for part in img_file.parts): + shutil.copy2(img_file, fern_images / img_file.name) + print(f"Copied {img_file.relative_to(docs_dir)} to {fern_images}") + + copied = 0 + for md_file in docs_dir.rglob("*.md"): + rel = md_file.relative_to(docs_dir) + + if rel.name in SKIP_FILES: + continue + if any(part in SKIP_DIRS or part.startswith(".") for part in rel.parts): + continue + + mdx_path = pages_dir / rel.with_suffix(".mdx") + mdx_path.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(md_file, mdx_path) + copied += 1 + print(f" {rel} -> {args.version}/pages/{rel.with_suffix('.mdx')}") + + print(f"\nCopied {copied} files to {pages_dir}") + + +if __name__ == "__main__": + main() diff --git a/fern/scripts/expand_includes.py b/fern/scripts/expand_includes.py new file mode 100644 index 00000000..0bf198e9 --- /dev/null +++ b/fern/scripts/expand_includes.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +"""Expand {include} directives in MDX files. Run after copy_docs_to_fern.py. + +Processes index.mdx, replacing {include} blocks with the actual content +of the referenced files (README.md). +""" + +import argparse +import re +from pathlib import Path + + +def expand_include_in_content( + content: str, file_path: Path, pages_dir: Path, docs_dir: Path +) -> str: + """Replace {include} directives with file content. Paths are relative to the source doc.""" + # Match ```{include} path with optional options (e.g. 
:relative-docs:) + pattern = r"```\{include\}\s+([^\s\n]+)(?:\s*\n(?::[^\n]+\n)*)?```" + + def replace_include(match: re.Match[str]) -> str: + include_path_str = match.group(1).strip() + # Include paths are relative to the source doc's directory in docs/ + # e.g. docs/index.md has ../README.md -> repo_root/README.md + rel = file_path.relative_to(pages_dir) + source_dir = docs_dir / rel.parent + if rel.name == "index.mdx": + source_dir = docs_dir + resolved = (source_dir / include_path_str).resolve() + + if not resolved.exists(): + return f"" + return resolved.read_text() + + return re.sub(pattern, replace_include, content) + + +def expand_file(filepath: Path, pages_dir: Path, docs_dir: Path) -> bool: + """Expand includes in a single file. Returns True if changes were made.""" + content = filepath.read_text() + if "{include}" not in content: + return False + + new_content = expand_include_in_content(content, filepath, pages_dir, docs_dir) + if new_content != content: + filepath.write_text(new_content) + return True + return False + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Expand {include} directives in MDX files" + ) + parser.add_argument( + "pages_dir", + type=Path, + help="Path to pages directory (e.g. 
fern/v0.0.1/pages)", + ) + parser.add_argument( + "--docs-dir", + type=Path, + default=None, + help="Path to docs directory (default: repo_root/docs)", + ) + args = parser.parse_args() + + pages_dir = args.pages_dir.resolve() + if not pages_dir.exists(): + raise SystemExit(f"Error: pages directory not found at {pages_dir}") + + repo_root = pages_dir.parent.parent.parent + docs_dir = args.docs_dir.resolve() if args.docs_dir else repo_root / "docs" + + expanded = [] + for pattern in ["index.mdx"]: + filepath = pages_dir / pattern + if filepath.exists() and expand_file(filepath, pages_dir, docs_dir): + expanded.append(filepath.relative_to(pages_dir)) + print(f" Expanded: {filepath.relative_to(pages_dir)}") + + print(f"\nExpanded {len(expanded)} files") + + +if __name__ == "__main__": + main() diff --git a/fern/scripts/fix_frontmatter.py b/fern/scripts/fix_frontmatter.py new file mode 100644 index 00000000..9de1fbc4 --- /dev/null +++ b/fern/scripts/fix_frontmatter.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +"""Fix MyST-style frontmatter for Fern compatibility. + +Converts nested title: {page:, nav:} to simple title: string. +Removes MyST-specific frontmatter keys (topics, tags, content). +Keeps title and description only. +""" + +import argparse +import re +from pathlib import Path + + +def fix_frontmatter(filepath: Path) -> bool: + """Fix frontmatter in a single file. 
Returns True if changes were made.""" + content = filepath.read_text() + if not content.strip().startswith("---"): + return False + + fm_match = re.match(r"^---\s*\n(.*?)---\s*\n", content, re.DOTALL) + if not fm_match: + return False + + fm_block = fm_match.group(1) + rest = content[fm_match.end():] + + title_match = re.search(r"^\s+page:\s*(.+)$", fm_block, re.MULTILINE) + if not title_match: + title_match = re.search(r"^title:\s*(.+)$", fm_block, re.MULTILINE) + if title_match: + title = title_match.group(1).strip().strip('"\'') + else: + return False + else: + title = title_match.group(1).strip().strip('"\'') + + desc_match = re.search(r"^description:\s*(.+)$", fm_block, re.MULTILINE) + description = desc_match.group(1).strip() if desc_match else "" + + title_escaped = title.replace('"', '\\"') + desc_escaped = description.replace('"', '\\"') if description else "" + + new_fm = f'---\ntitle: "{title_escaped}"\ndescription: "{desc_escaped}"\n---\n' + new_content = new_fm + rest + + if new_content != content: + filepath.write_text(new_content) + return True + return False + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Fix MyST frontmatter for Fern compatibility" + ) + parser.add_argument( + "pages_dir", + type=Path, + help="Path to pages directory", + ) + args = parser.parse_args() + + pages_dir = args.pages_dir.resolve() + if not pages_dir.exists(): + raise SystemExit(f"Error: pages directory not found at {pages_dir}") + + changed = [] + for mdx_file in sorted(pages_dir.rglob("*.mdx")): + if fix_frontmatter(mdx_file): + changed.append(mdx_file.relative_to(pages_dir)) + print(f" Fixed: {mdx_file.relative_to(pages_dir)}") + + print(f"\nFixed frontmatter in {len(changed)} files") + + +if __name__ == "__main__": + main() diff --git a/fern/scripts/remove_duplicate_h1.py b/fern/scripts/remove_duplicate_h1.py new file mode 100644 index 00000000..1488122f --- /dev/null +++ b/fern/scripts/remove_duplicate_h1.py @@ -0,0 +1,59 @@ 
+#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +"""Remove duplicate H1 that matches frontmatter title.""" + +import argparse +import re +from pathlib import Path + + +def remove_duplicate_h1(filepath: Path) -> bool: + """Remove H1 after frontmatter if it duplicates the title. Returns True if changed.""" + content = filepath.read_text() + + if not content.strip().startswith("---"): + return False + + # Extract title from frontmatter + match = re.search(r"^---\s*\ntitle:\s*(.+?)\n", content, re.MULTILINE) + if not match: + return False + + title = match.group(1).strip().strip('"\'') + pattern = rf"(---\s*\n.*?---\s*\n\n)#\s+{re.escape(title)}\s*\n+" + new_content = re.sub(pattern, r"\1", content, count=1, flags=re.DOTALL) + + if new_content != content: + filepath.write_text(new_content) + return True + return False + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Remove duplicate H1 that matches frontmatter title" + ) + parser.add_argument( + "pages_dir", + type=Path, + help="Path to pages directory", + ) + args = parser.parse_args() + + pages_dir = args.pages_dir.resolve() + if not pages_dir.exists(): + raise SystemExit(f"Error: pages directory not found at {pages_dir}") + + changed = [] + for mdx_file in sorted(pages_dir.rglob("*.mdx")): + if remove_duplicate_h1(mdx_file): + changed.append(mdx_file.relative_to(pages_dir)) + print(f" Removed H1: {mdx_file.relative_to(pages_dir)}") + + print(f"\nRemoved duplicate H1 from {len(changed)} files") + + +if __name__ == "__main__": + main() diff --git a/fern/scripts/update_links.py b/fern/scripts/update_links.py new file mode 100644 index 00000000..214a1166 --- /dev/null +++ b/fern/scripts/update_links.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
#!/usr/bin/env python3
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

"""Update internal links: .md -> Fern paths, relative paths -> absolute."""

import argparse
import re
from pathlib import Path


def normalize_url(url: str) -> str:
    """Normalize a single link target to Fern path format.

    External links, in-page anchors, and mailto links are returned unchanged.
    For internal links, any ``.md``/``.mdx`` extension is stripped, asset and
    image paths are rebased onto ``/assets/``, and other relative paths are
    made absolute.
    """
    # Leave external URLs and in-page anchors untouched.
    if url.startswith(("http://", "https://", "#", "mailto:")):
        return url

    # Strip .mdx before .md: replacing ".md" first would corrupt ".mdx"
    # links (the ".md" substring inside ".mdx" is removed, leaving a stray
    # "x", e.g. "page.mdx" -> "pagex").
    clean = url.replace(".mdx", "").replace(".md", "")

    if "assets/" in clean or clean.startswith("./assets") or clean.startswith("../assets"):
        # Rebase any assets path onto the top-level /assets/ directory.
        clean = "/assets/" + clean.split("assets/")[-1]
    elif "images/" in clean or clean.startswith("./images") or clean.startswith("../images"):
        # Normalize images/ paths to /assets/images/.
        img_name = clean.split("images/")[-1] if "images/" in clean else clean.split("/")[-1]
        clean = "/assets/images/" + img_name
    elif clean.endswith(".png") and "/" not in clean:
        # Bare repo-root image filenames live under /assets/.
        clean = "/assets/" + clean
    elif not clean.startswith("/"):
        # All remaining relative paths become absolute Fern paths.
        clean = "/" + clean
    return clean


def update_links_in_content(content: str, file_dir: Path, pages_root: Path) -> str:
    """Update markdown links and image paths: .md/.mdx -> Fern paths.

    ``file_dir`` and ``pages_root`` are currently unused; they are kept so the
    signature stays stable for callers that pass them.
    """

    def replace_link(match: re.Match[str]) -> str:
        text, url = match.group(1), match.group(2)
        return f"[{text}]({normalize_url(url)})"

    def replace_image(match: re.Match[str]) -> str:
        alt, url = match.group(1), match.group(2)
        return f"![{alt}]({normalize_url(url)})"

    # Process images first, then links; the negative lookbehind keeps the
    # link pattern from re-matching the ![alt](...) images just rewritten.
    content = re.sub(r"!\[([^\]]*)\]\(([^)]+)\)", replace_image, content)
    # NOTE(review): the original link pattern was garbled during extraction;
    # this reconstruction follows the stated intent (skip images via a
    # negative lookbehind on "!") — confirm against the original source.
    content = re.sub(r"(?<!!)\[([^\]]+)\]\(([^)]+)\)", replace_link, content)
    return content


def update_file(filepath: Path, pages_root: Path) -> bool:
    """Update links in a single file. Returns True if changes were made."""
    content = filepath.read_text()
    new_content = update_links_in_content(content, filepath.parent, pages_root)

    if new_content != content:
        filepath.write_text(new_content)
        return True
    return False


def main() -> None:
    """CLI entry point: rewrite internal links across a pages tree."""
    parser = argparse.ArgumentParser(
        description="Update internal links in MDX files"
    )
    parser.add_argument(
        "pages_dir",
        type=Path,
        help="Path to pages directory (e.g. fern/v0.2.0/pages)",
    )
    args = parser.parse_args()

    pages_dir = args.pages_dir.resolve()
    if not pages_dir.exists():
        raise SystemExit(f"Error: pages directory not found at {pages_dir}")

    changed = []
    for mdx_file in sorted(pages_dir.rglob("*.mdx")):
        if update_file(mdx_file, pages_dir):
            changed.append(mdx_file.relative_to(pages_dir))
            print(f"  Updated: {mdx_file.relative_to(pages_dir)}")

    print(f"\nUpdated {len(changed)} files")


if __name__ == "__main__":
    main()
+ +![OpenShell architecture diagram showing the component layout](/assets/images/architecture.svg) + +## Components + +The following table describes each component and its role in the system: + +| Component | Role | +|---|---| +| **Gateway** | Control-plane API that coordinates sandbox lifecycle and state, acts as the auth boundary, and brokers requests across the platform. | +| **Sandbox** | Isolated runtime that includes container supervision and policy-enforced egress routing. | +| **Policy Engine** | Policy definition and enforcement layer for filesystem, network, and process constraints. Defense in depth enforces policies from the application layer down to infrastructure and kernel layers. | +| **Privacy Router** | Privacy-aware LLM routing layer that keeps sensitive context on sandbox compute and routes based on cost and privacy policy. | + +## How a Request Flows + +Every outbound connection from agent code passes through the same decision path: + +1. The agent process opens an outbound connection (API call, package install, git clone, and so on). +2. The proxy inside the sandbox intercepts the connection and identifies which binary opened it. +3. The proxy queries the policy engine with the destination, port, and calling binary. +4. The policy engine returns one of three decisions: + - **Allow** — the destination and binary match a policy block. Traffic flows directly to the external service. + - **Route for inference** — no policy block matched, but inference routing is configured. The privacy router intercepts the request, strips the original credentials, injects the configured backend credentials, and forwards to the managed model endpoint. + - **Deny** — no match and no inference route. The connection is blocked and logged. + +For REST endpoints with TLS termination enabled, the proxy also decrypts TLS and checks each HTTP request against per-method, per-path rules before allowing it through. 
+ +## Deployment Modes + +OpenShell can run locally or on a remote host. The architecture is identical in both cases — only the Docker container location changes. + +- **Local**: the k3s cluster runs inside Docker on your workstation. The CLI provisions it automatically on first use. +- **Remote**: the cluster runs on a remote host. Deploy with `openshell gateway start --remote user@host`. For example, connect to your DGX Spark. + ```console + $ openshell gateway start --remote @.local + $ openshell status + ``` + +## Next Steps + +Continue with one of the following: + +- To create your first sandbox, refer to the [Quickstart](/get-started/quickstart). +- To learn how OpenShell enforces isolation across all protection layers, refer to [Sandboxes](/sandboxes/index). diff --git a/fern/v0.0.1/pages/about/overview.mdx b/fern/v0.0.1/pages/about/overview.mdx new file mode 100644 index 00000000..e6a9623f --- /dev/null +++ b/fern/v0.0.1/pages/about/overview.mdx @@ -0,0 +1,57 @@ +--- +title: "Overview of NVIDIA OpenShell" +description: "OpenShell is the safe, private runtime for autonomous AI agents. Run agents in sandboxed environments that protect your data, credentials, and infrastructure." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +NVIDIA OpenShell is an open-source runtime for executing autonomous AI agents in sandboxed environments with kernel-level isolation. It combines sandbox runtime controls and a declarative YAML policy so teams can run agents without giving them unrestricted access to local files, credentials, and external networks. + +## Why OpenShell Exists + +AI agents are most useful when they can read files, install packages, call APIs, and use credentials. That same access can create material risk. OpenShell is designed for this tradeoff: preserve agent capability while enforcing explicit controls over what the agent can access. 
+ +## Common Risks and Controls + +The table below summarizes common failure modes and how OpenShell mitigates them. + +| Threat | Without controls | With OpenShell | +|---|---|---| +| Data exfiltration | Agent uploads source code or internal files to unauthorized endpoints. | Network policies allow only approved destinations; other outbound traffic is denied. | +| Credential theft | Agent reads local secrets such as SSH keys or cloud credentials. | Filesystem restrictions (Landlock) confine access to declared paths only. | +| Unauthorized API usage | Agent sends prompts or data to unapproved model providers. | Privacy routing and network policies control where inference traffic can go. | +| Privilege escalation | Agent attempts `sudo`, setuid paths, or dangerous syscall behavior. | Unprivileged process identity and seccomp restrictions block escalation paths. | + +## Protection Layers at a Glance + +OpenShell applies defense in depth across the following policy domains. + +| Layer | What it protects | When it applies | +|---|---|---| +| Filesystem | Prevents reads/writes outside allowed paths. | Locked at sandbox creation. | +| Network | Blocks unauthorized outbound connections. | Hot-reloadable at runtime. | +| Process | Blocks privilege escalation and dangerous syscalls. | Locked at sandbox creation. | +| Inference | Reroutes model API calls to controlled backends. | Hot-reloadable at runtime. | + +For details, refer to [Built-in Default Policy](/sandboxes/index#built-in-default-policy) and [Customize Sandbox Policies](/sandboxes/policies). + +## Common Use Cases + +OpenShell supports a range of agent deployment patterns. + +| Use Case | Description | +|-----------------------------|----------------------------------------------------------------------------------------------------------| +| Secure coding agents | Run Claude Code, OpenCode, or OpenClaw with constrained file and network access. 
| +| Private enterprise development | Route inference to self-hosted or private backends while keeping sensitive context under your control. | +| Compliance and audit | Treat policy YAML as version-controlled security controls that can be reviewed and audited. | +| Reusable environments | Use community sandbox images or bring your own containerized runtime. | + +--- + +## Next Steps + +Explore these topics to go deeper: + +- To understand the components that make up the OpenShell runtime, refer to the [Architecture Overview](/about/architecture). +- To install the CLI and create your first sandbox, refer to the [Quickstart](/get-started/quickstart). +- To learn how OpenShell enforces isolation across all protection layers, refer to [Sandboxes](/sandboxes/index). diff --git a/fern/v0.0.1/pages/about/release-notes.mdx b/fern/v0.0.1/pages/about/release-notes.mdx new file mode 100644 index 00000000..ea307144 --- /dev/null +++ b/fern/v0.0.1/pages/about/release-notes.mdx @@ -0,0 +1,21 @@ +--- +title: "NVIDIA OpenShell Release Notes" +description: "Track the latest changes and improvements to NVIDIA OpenShell." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +Track the latest changes and improvements to NVIDIA OpenShell. +This page covers the highlights of the release. +For more details, refer to the [OpenShell GitHub Releases](https://github.com/NVIDIA/OpenShell/releases). + +## 0.0.1 + +This is the first release of NVIDIA OpenShell. It introduces sandboxed AI agent execution with kernel-level isolation, policy enforcement, and credential management. + +### Highlights + +- Introduces sandboxed AI agent execution with kernel-level isolation, policy enforcement, and credential management. +- Introduces the `openshell` CLI for creating, managing, and customizing sandboxes. +- Introduces the `openshell-gateway` service for managing the gateway and sandboxes. 
+- Introduces the `openshell-sandbox` service for running the sandboxed agent. diff --git a/fern/v0.0.1/pages/get-started/github-sandbox.mdx b/fern/v0.0.1/pages/get-started/github-sandbox.mdx new file mode 100644 index 00000000..d9225113 --- /dev/null +++ b/fern/v0.0.1/pages/get-started/github-sandbox.mdx @@ -0,0 +1,354 @@ +--- +title: "Set Up a Sandbox of Claude Code with a Custom GitHub Policy" +description: "Learn the iterative policy workflow by launching a sandbox, diagnosing a GitHub access denial, and applying a custom policy to fix it." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +This tutorial walks through an iterative sandbox policy workflow. You launch a sandbox, ask Claude Code to push code to GitHub, and observe the default network policy denying the request. +You then diagnose the denial from your machine and from inside the sandbox, apply a policy update, and verify that the policy update to the sandbox takes effect. + +After completing this tutorial, you will have: + +- A running sandbox with Claude Code that can push to a GitHub repository. +- A custom network policy that grants GitHub access for a specific repository. +- Experience with the policy iteration workflow: fail, diagnose, update, verify. + + +This tutorial shows example prompts and responses from Claude Code. The exact wording you see might vary between sessions. Use the examples as a guide for the type of interaction, not as expected output. + + +## Prerequisites + +This tutorial requires the following: + +- A working OpenShell installation. Complete the [Quickstart](/get-started/quickstart) before proceeding. +- A GitHub personal access token (PAT) with `repo` scope. Generate one from the [GitHub personal access token settings page](https://github.com/settings/tokens) by selecting **Generate new token (classic)** and enabling the `repo` scope. 
+- An [Anthropic account](https://console.anthropic.com/) with access to Claude Code. OpenShell provides the sandbox runtime, not the agent. You must authenticate with your own account. +- A GitHub repository you own to use as the push target. A scratch repository is sufficient. You can [create one](https://github.com/new) with a README if needed. + +This tutorial uses two terminals to demonstrate the iterative policy workflow: + +- **Terminal 1**: The sandbox terminal. You create the sandbox in this terminal by running `openshell sandbox create` and interact with Claude Code inside it. +- **Terminal 2**: A terminal outside the sandbox on your machine. You use this terminal for viewing the sandbox logs with `openshell term` and applying an updated policy with `openshell policy set`. + +Each section below indicates which terminal to use. + +## Set Up a Sandbox with Your GitHub Token + +Depending on whether you start a new sandbox or use an existing sandbox, choose the appropriate tab and follow the instructions. + + + + +In terminal 2, create a new sandbox with Claude Code. The [default policy](/reference/default-policy) is applied automatically, which allows read-only access to GitHub. + +Create a [credential provider](/sandboxes/providers) that injects your GitHub token into the sandbox automatically. The provider reads `GITHUB_TOKEN` from your host environment and sets it as an environment variable inside the sandbox: + +```console +$ GITHUB_TOKEN= +$ openshell provider create --name my-github --type github --from-existing +$ openshell sandbox create --provider my-github -- claude +``` + +`openshell sandbox create` keeps the sandbox running after Claude Code exits, so you can apply policy updates later without recreating the environment. Add `--no-keep` if you want the sandbox deleted automatically instead. + +Claude Code starts inside the sandbox. It prints an authentication link. 
Open it in your browser, sign in to your Anthropic account, and return to the terminal. When prompted, trust the `/sandbox` workspace to allow Claude Code to read and write files. + + + + + +In terminal 1, connect to a sandbox that is already running and set your GitHub token as an environment variable: + +```console +$ openshell sandbox connect +$ export GITHUB_TOKEN= +``` + +To find the name of running sandboxes, run `openshell sandbox list` in terminal 2. + + + + +## Push Code to GitHub + +In terminal 1, ask Claude Code to write a simple script and push it to your repository. Replace `` with your GitHub organization or username and `` with your repository name. + + + +Write a `hello_world.py` script and push it to `https://github.com//`. + + + +Claude recognizes that it needs GitHub credentials. It asks how you want to authenticate. Provide your GitHub personal access token by pasting it into the conversation. Claude configures authentication and attempts the push. + +The push fails. Claude reports an error, but the failure is not an authentication problem. The default sandbox policy permits read-only access to GitHub and blocks write operations, so the proxy denies the push before the request reaches the GitHub server. + +## Diagnose the Denial + +In this section, you diagnose the denial from your machine and from inside the sandbox. + +### View the Logs from Your Machine + +In terminal 2, launch the OpenShell terminal: + +```console +$ openshell term +``` + +The dashboard shows sandbox status and a live stream of policy decisions. Look for entries with `l7_decision=deny`. 
Select a deny entry to see the full detail: + +```text +l7_action: PUT +l7_target: /repos///contents/hello_world.py +l7_decision: deny +dst_host: api.github.com +dst_port: 443 +l7_protocol: rest +policy: github_rest_api +l7_deny_reason: PUT /repos///contents/hello_world.py not permitted by policy +``` + +The log shows that the sandbox proxy intercepted an outbound `PUT` request to `api.github.com` and denied it. The `github_rest_api` policy allows read operations (GET) but blocks write operations (PUT, POST, DELETE) to the GitHub API. A similar denial appears for `github.com` if Claude attempted a git push over HTTPS. + +### Ask Claude Code to Check the Sandbox Logs + +In terminal 1, ask Claude Code to check the sandbox logs for denied requests: + + + +Check the sandbox logs for any denied network requests. What is blocking the push? + + + +Claude reads the deny entries and identifies the root cause. It explains that the failure is a sandbox network policy restriction, not a token permissions issue. For example, the following is a possible response: + + + +The sandbox runs a proxy that enforces policies on outbound traffic. +The `github_rest_api` policy allows GET requests (used to read the file) +but blocks PUT/write requests to GitHub. This is a sandbox-level restriction, +not a token issue. No matter what token you provide, pushes through the API +will be blocked until the policy is updated. + + + +Both perspectives confirm the same thing: the proxy is doing its job. The default policy is designed to be restrictive. To allow GitHub pushes, you need to update the network policy. + +Copy the deny reason from Claude's response. You paste it into an agent running on your machine in the next step. + +## Update the Policy from Your Machine + +In terminal 2, paste the deny reason from the previous step into your coding agent on your machine, such as Claude Code or Cursor, and ask it to recommend a policy update. 
The deny reason gives the agent the context it needs to generate the correct policy rules. After pasting the following prompt sample, properly provide the GitHub organization and repository names of the repository you are pushing to. + + + +Based on the following deny reasons, recommend a sandbox policy update that allows GitHub pushes to `https://github.com//`, and save to `/tmp/sandbox-policy-update.yaml`: + +The `filesystem_policy`, `landlock`, and `process` sections are static. They are read once at sandbox creation and cannot be changed by a hot-reload. They are included here for completeness so the file is self-contained, but only the `network_policies` section takes effect when you apply this to a running sandbox. + + + +The following steps outline the expected process done by the agent: + +1. Inspects the deny reasons. +2. Writes an updated policy that adds `github_git` and `github_api` blocks that grant write access to your repository. +3. Saves the policy to `/tmp/sandbox-policy-update.yaml`. + +## Review the Generated Policy + +Refer to the following policy example to compare with the generated policy before applying it. Confirm that the policy grants only the access you expect. In this case, `git push` operations and GitHub REST API access scoped to a single repository. + + + +The following YAML shows a complete policy that extends the [default policy](/reference/default-policy) with GitHub access for a single repository. Replace `` with your GitHub organization or username and `` with your repository name. + +The `filesystem_policy`, `landlock`, and `process` sections are static. They are read once at sandbox creation and cannot be changed by a hot-reload. They are included here for completeness so the file is self-contained, but only the `network_policies` section takes effect when you apply this to a running sandbox. 
+ +```yaml +version: 1 + +# ── Static (locked at sandbox creation) ────────────────────────── + +filesystem_policy: + include_workdir: true + read_only: + - /usr + - /lib + - /proc + - /dev/urandom + - /app + - /etc + - /var/log + read_write: + - /sandbox + - /tmp + - /dev/null + +landlock: + compatibility: best_effort + +process: + run_as_user: sandbox + run_as_group: sandbox + +# ── Dynamic (hot-reloadable) ───────────────────────────────────── + +network_policies: + + # Claude Code ↔ Anthropic API + claude_code: + name: claude-code + endpoints: + - { host: api.anthropic.com, port: 443, protocol: rest, enforcement: enforce, access: full, tls: terminate } + - { host: statsig.anthropic.com, port: 443 } + - { host: sentry.io, port: 443 } + - { host: raw.githubusercontent.com, port: 443 } + - { host: platform.claude.com, port: 443 } + binaries: + - { path: /usr/local/bin/claude } + - { path: /usr/bin/node } + + # NVIDIA inference endpoint + nvidia_inference: + name: nvidia-inference + endpoints: + - { host: integrate.api.nvidia.com, port: 443 } + binaries: + - { path: /usr/bin/curl } + - { path: /bin/bash } + - { path: /usr/local/bin/opencode } + + # ── GitHub: git operations (clone, fetch, push) ────────────── + + github_git: + name: github-git + endpoints: + - host: github.com + port: 443 + protocol: rest + tls: terminate + enforcement: enforce + rules: + - allow: + method: GET + path: "//.git/info/refs*" + - allow: + method: POST + path: "//.git/git-upload-pack" + - allow: + method: POST + path: "//.git/git-receive-pack" + binaries: + - { path: /usr/bin/git } + + # ── GitHub: REST API ───────────────────────────────────────── + + github_api: + name: github-api + endpoints: + - host: api.github.com + port: 443 + protocol: rest + tls: terminate + enforcement: enforce + rules: + # GraphQL API (used by gh CLI) + - allow: + method: POST + path: "/graphql" + # Full read-write access to the repository + - allow: + method: "*" + path: "/repos///**" + binaries: + - { path: 
/usr/local/bin/claude } + - { path: /usr/local/bin/opencode } + - { path: /usr/bin/gh } + - { path: /usr/bin/curl } + + # ── Package managers ───────────────────────────────────────── + + pypi: + name: pypi + endpoints: + - { host: pypi.org, port: 443 } + - { host: files.pythonhosted.org, port: 443 } + - { host: github.com, port: 443 } + - { host: objects.githubusercontent.com, port: 443 } + - { host: api.github.com, port: 443 } + - { host: downloads.python.org, port: 443 } + binaries: + - { path: /sandbox/.venv/bin/python } + - { path: /sandbox/.venv/bin/python3 } + - { path: /sandbox/.venv/bin/pip } + - { path: "/sandbox/.uv/python/**/python*" } + - { path: /usr/local/bin/uv } + - { path: "/sandbox/.uv/python/**" } + + # ── VS Code Remote ────────────────────────────────────────── + + vscode: + name: vscode + endpoints: + - { host: update.code.visualstudio.com, port: 443 } + - { host: "*.vo.msecnd.net", port: 443 } + - { host: vscode.download.prss.microsoft.com, port: 443 } + - { host: marketplace.visualstudio.com, port: 443 } + - { host: "*.gallerycdn.vsassets.io", port: 443 } + binaries: + - { path: /usr/bin/curl } + - { path: /usr/bin/wget } + - { path: "/sandbox/.vscode-server/**" } + - { path: "/sandbox/.vscode-remote-containers/**" } +``` + +The following table summarizes the two GitHub-specific blocks: + +| Block | Endpoint | Behavior | +|---|---|---| +| `github_git` | `github.com:443` | Git Smart HTTP protocol with TLS termination. Permits `info/refs` (clone/fetch), `git-upload-pack` (fetch data), and `git-receive-pack` (push) for the specified repository. Denies all operations on unlisted repositories. | +| `github_api` | `api.github.com:443` | REST API with TLS termination. Permits all HTTP methods for the specified repository and GraphQL queries. Denies API access to unlisted repositories. | + +The remaining blocks (`claude_code`, `nvidia_inference`, `pypi`, `vscode`) are identical to the [default policy](/reference/default-policy). 
The default policy's `github_ssh_over_https` and `github_rest_api` blocks are replaced by the `github_git` and `github_api` blocks above, which grant write access to the specified repository. Sandbox behavior outside of GitHub operations is unchanged. + +For details on policy block structure, refer to [Network Access Rules](/sandboxes/index#network-access-rules). + + + +## Apply the Policy + +After you have reviewed the generated policy, apply it to the running sandbox: + +```console +$ openshell policy set --policy /tmp/sandbox-policy-update.yaml --wait +``` + +Network policies are hot-reloadable. The `--wait` flag blocks until the policy engine confirms the new revision loaded, and the update takes effect immediately without restarting the sandbox or reconnecting Claude Code. + +## Retry the Push + +In terminal 1, ask Claude Code to retry the push: + +```text +The sandbox policy has been updated. Try pushing to the repository again. +``` + +The push completes successfully. The `openshell term` dashboard now shows `l7_decision=allow` entries for `api.github.com` and `github.com` where it previously showed denials. + +## Clean Up + +When you are finished, delete the sandbox to free cluster resources: + +```console +$ openshell sandbox delete +``` + +## Next Steps + +The following resources cover related topics in greater depth: + +- To add per-repository access levels (read-write vs read-only) or restrict to specific API methods, refer to the [Policy Schema Reference](/reference/policy-schema). +- To learn the full policy iteration workflow (pull, edit, push, verify), refer to [Policies](/sandboxes/policies). +- To inject credentials automatically instead of pasting tokens, refer to [Providers](/sandboxes/providers). 
diff --git a/fern/v0.0.1/pages/get-started/quickstart.mdx b/fern/v0.0.1/pages/get-started/quickstart.mdx new file mode 100644 index 00000000..28638d91 --- /dev/null +++ b/fern/v0.0.1/pages/get-started/quickstart.mdx @@ -0,0 +1,137 @@ +--- +title: "Quickstart" +description: "Install the OpenShell CLI and create your first sandboxed AI agent in two commands." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +This page gets you from zero to a running, policy-enforced sandbox in two commands. + +## Prerequisites + +Before you begin, make sure you have: + +- Python 3.12 or later +- [uv](https://docs.astral.sh/uv/) installed +- Docker Desktop running on your machine + +## Install the OpenShell CLI + +Install the `openshell` package into a virtual environment. + +Activate your virtual environment: + +```bash +uv venv && source .venv/bin/activate +``` + +Install the CLI: + +```bash +uv pip install openshell +``` + +## Connect to a Remote Gateway (Optional) + +If you're running locally, skip this step. The OpenShell CLI creates a gateway automatically when you create your first sandbox. + + + + + +Deploy an OpenShell gateway on Brev by hitting **Deploy** on the [OpenShell Launchable](https://brev.nvidia.com/launchable/deploy/now?launchableID=env-3AaK9NmCzWp3pVyUDNNFBt805FT). + + +After the instance is running, find the gateway URL in the Brev console under **Using Secure Links**. Copy the shareable URL for **port 8080** — this is the gateway endpoint. + +```console +$ openshell gateway add https://.brevlab.com +$ openshell status +``` + + + + + + +Set up your Spark with NVIDIA Sync first, or make sure SSH access is configured (such as SSH keys added to the host). 
+ + +Deploy to a DGX Spark machine over SSH: + +```console +$ openshell gateway start --remote @.local +$ openshell status +``` + +After `openshell status` shows the gateway as healthy, all subsequent commands route through the SSH tunnel. + + + + + +## Create Your First OpenShell Sandbox + +Choose the tab that matches your agent: + + + + + +Run the following command to create a sandbox with Claude Code: + +```console +$ openshell sandbox create -- claude +``` + +The CLI prompts you to create a provider from local credentials — type `yes` to continue. If `ANTHROPIC_API_KEY` is set in your environment, it is picked up automatically. If not, you can configure it from inside the sandbox after it launches. + + + + + +Run the following command to create a sandbox with OpenCode: + +```console +$ openshell sandbox create -- opencode +``` + +The CLI prompts you to create a provider from local credentials. Type `yes` to continue. If `OPENAI_API_KEY` or `OPENROUTER_API_KEY` is set in your environment, it is picked up automatically. If not, you can configure it from inside the sandbox after it launches. + + + + + +Run the following command to create a sandbox with Codex: + +```console +$ openshell sandbox create -- codex +``` + +The CLI prompts you to create a provider from local credentials. Type `yes` to continue. If `OPENAI_API_KEY` is set in your environment, it is picked up automatically. If not, you can configure it from inside the sandbox after it launches. + + + + + +Run the following command to create a sandbox with OpenClaw: + +```console +$ openshell sandbox create --from openclaw +``` + +The `--from` flag pulls a pre-built sandbox definition from the [OpenShell Community](https://github.com/NVIDIA/OpenShell-Community) catalog. Each definition bundles a container image, a tailored policy, and optional skills into a single package. 
+ + + + + +You can use the `--from` flag to pull other OpenShell sandbox images from the [NVIDIA Container Registry](https://registry.nvidia.com/). For example, to pull the `base` image, run the following command: + +```console +$ openshell sandbox create --from base +``` + + + + diff --git a/fern/v0.0.1/pages/index.mdx b/fern/v0.0.1/pages/index.mdx new file mode 100644 index 00000000..e4370644 --- /dev/null +++ b/fern/v0.0.1/pages/index.mdx @@ -0,0 +1,80 @@ +--- +title: "NVIDIA OpenShell Developer Guide" +description: "OpenShell is the safe, private runtime for autonomous AI agents. Run agents in sandboxed environments that protect your data, credentials, and infrastructure." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +import { BadgeLinks } from "@/components/BadgeLinks"; +import { GetStartedTerminal } from "@/components/GetStartedTerminal"; + + + +NVIDIA OpenShell is the safe, private runtime for autonomous AI agents. It provides sandboxed execution environments +that protect your data, credentials, and infrastructure. Agents run with exactly the permissions they need and +nothing more, governed by declarative policies that prevent unauthorized file access, data exfiltration, and +uncontrolled network activity. + +## Get Started + +Install the CLI and create your first sandbox in two commands. + + + +Refer to the [Quickstart](/get-started/quickstart) for more details. + +--- + +## Explore + + + + + +Learn about OpenShell and its capabilities. + +`Concept` + + + + + +Install the CLI and create your first sandbox in two commands. + +`Tutorial` + + + + + +End-to-end guides for GitHub repo access, custom policies, and more. + +`Tutorial` + + + + + +Create, manage, and customize sandboxes. Configure policies, providers, and community images for your AI agents. 
+ +`Concept` + + + + + +Keep inference traffic private by routing API calls to local or self-hosted backends. + +`Concept` + + + + + +Policy schema, environment variables, and system architecture. + +`Reference` + + + + diff --git a/fern/v0.0.1/pages/inference/configure.mdx b/fern/v0.0.1/pages/inference/configure.mdx new file mode 100644 index 00000000..0426b5c5 --- /dev/null +++ b/fern/v0.0.1/pages/inference/configure.mdx @@ -0,0 +1,140 @@ +--- +title: "Configure Inference Routing" +description: "Set up the managed local inference endpoint with provider credentials and model configuration." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +This page covers the managed local inference endpoint (`https://inference.local`). External inference endpoints go through sandbox `network_policies` — refer to [Network Access Rules](/sandboxes/index#network-access-rules) for details. + +The configuration consists of two values: + +| Value | Description | +|---|---| +| Provider record | The credential backend OpenShell uses to authenticate with the upstream model host. | +| Model ID | The model to use for generation requests. | + +## Step 1: Create a Provider + +Create a provider that holds the backend credentials you want OpenShell to use. + + + + +```console +$ openshell provider create --name nvidia-prod --type nvidia --from-existing +``` + +This reads `NVIDIA_API_KEY` from your environment. + + + + + +```console +$ openshell provider create \ + --name my-local-model \ + --type openai \ + --credential OPENAI_API_KEY=empty-if-not-required \ + --config OPENAI_BASE_URL=http://192.168.10.15/v1 +``` + +Use `--config OPENAI_BASE_URL` to point to any OpenAI-compatible server running on your network. Set `OPENAI_API_KEY` to a dummy value if the server does not require authentication. 
+
+
+
+
+```console
+$ openshell provider create --name anthropic-prod --type anthropic --from-existing
+```
+
+This reads `ANTHROPIC_API_KEY` from your environment.
+
+
+
+
+
+## Step 2: Set Inference Routing
+
+Point `inference.local` at that provider and choose the model to use:
+
+```console
+$ openshell inference set \
+  --provider nvidia-prod \
+  --model nvidia/nemotron-3-nano-30b-a3b
+```
+
+## Step 3: Verify the Active Config
+
+Confirm that the provider and model are set correctly:
+
+```console
+$ openshell inference get
+Gateway inference:
+
+  Provider: nvidia-prod
+  Model: nvidia/nemotron-3-nano-30b-a3b
+  Version: 1
+```
+
+## Step 4: Update Part of the Config
+
+Use `update` when you want to change only one field:
+
+```console
+$ openshell inference update --model nvidia/nemotron-3-nano-30b-a3b
+```
+
+Or switch providers without repeating the current model:
+
+```console
+$ openshell inference update --provider anthropic-prod
+```
+
+## Use It from a Sandbox
+
+After inference is configured, code inside any sandbox can call `https://inference.local` directly:
+
+```python
+from openai import OpenAI
+
+client = OpenAI(base_url="https://inference.local/v1", api_key="dummy")
+
+response = client.chat.completions.create(
+    model="anything",
+    messages=[{"role": "user", "content": "Hello"}],
+)
+```
+
+The client-supplied model is ignored for generation requests. OpenShell rewrites it to the configured model before forwarding upstream.
+
+Use this endpoint when inference should stay local to the host for privacy and security reasons. External providers that should be reached directly belong in `network_policies` instead.
+
+### Verify the Endpoint from a Sandbox
+
+`openshell inference get` confirms the configuration was saved, but does not verify the upstream endpoint is reachable.
To confirm end-to-end connectivity, connect to a sandbox and run: + +```bash +curl https://inference.local/v1/responses \ + -H "Content-Type: application/json" \ + -d '{ + "instructions": "You are a helpful assistant.", + "input": "Hello!" + }' +``` + +A successful response confirms the privacy router can reach the configured backend and the model is serving requests. + + +- **Gateway-scoped** — every sandbox on the active gateway sees the same `inference.local` backend. +- **HTTPS only** — `inference.local` is intercepted only for HTTPS traffic. + + +## Next Steps + +Explore related topics: + +- To understand the inference routing flow and supported API patterns, refer to [About Inference Routing](/inference/index). +- To control external endpoints, refer to [Network Access Rules](/sandboxes/index#network-access-rules). +- To manage provider records, refer to [Providers](/sandboxes/providers). diff --git a/fern/v0.0.1/pages/inference/index.mdx b/fern/v0.0.1/pages/inference/index.mdx new file mode 100644 index 00000000..da2b3019 --- /dev/null +++ b/fern/v0.0.1/pages/inference/index.mdx @@ -0,0 +1,63 @@ +--- +title: "About Inference Routing" +description: "Understand how OpenShell routes inference traffic through external endpoints and the local privacy router." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +NVIDIA OpenShell handles inference traffic through two endpoints: `inference.local` and external endpoints. +The following table summarizes how OpenShell handles inference traffic. + +| Path | How It Works | +|---|---| +| **External endpoints** | Traffic to hosts like `api.openai.com` or `api.anthropic.com` is treated like any other outbound request — allowed or denied by `network_policies`. Refer to [Network Access Rules](/sandboxes/index#network-access-rules). 
| +| **`inference.local`** | A special endpoint exposed inside every sandbox for inference that should stay local to the host for privacy and security. The [privacy router](/about/architecture) strips the original credentials, injects the configured backend credentials, and forwards to the managed model endpoint. | + +## How `inference.local` Works + +When code inside a sandbox calls `https://inference.local`, the privacy router routes the request to the configured backend for that gateway. The configured model is applied to generation requests, and provider credentials are supplied by OpenShell rather than by code inside the sandbox. + +If code calls an external inference host directly, that traffic is evaluated only by `network_policies`. + +| Property | Detail | +|---|---| +| Credentials | No sandbox API keys needed — credentials come from the configured provider record. | +| Configuration | One provider and one model define sandbox inference. | +| Provider support | OpenAI, Anthropic, and NVIDIA providers all work through the same endpoint. | +| Hot-refresh | Provider credential changes and inference updates are picked up without recreating sandboxes. | + +## Supported API Patterns + +Supported request patterns depend on the provider configured for `inference.local`. + + + + +| Pattern | Method | Path | +|---|---|---| +| Chat Completions | `POST` | `/v1/chat/completions` | +| Completions | `POST` | `/v1/completions` | +| Responses | `POST` | `/v1/responses` | +| Model Discovery | `GET` | `/v1/models` | +| Model Discovery | `GET` | `/v1/models/*` | + + + + + +| Pattern | Method | Path | +|---|---|---| +| Messages | `POST` | `/v1/messages` | + + + + + +Requests to `inference.local` that do not match the configured provider's supported patterns are denied. + +## Next Steps + +Continue with one of the following: + +- To set up the backend behind `inference.local`, refer to [Configure](/inference/configure). 
+- To control external endpoints, refer to [Network Access Rules](/sandboxes/index#network-access-rules). diff --git a/fern/v0.0.1/pages/reference/default-policy.mdx b/fern/v0.0.1/pages/reference/default-policy.mdx new file mode 100644 index 00000000..d1716c0c --- /dev/null +++ b/fern/v0.0.1/pages/reference/default-policy.mdx @@ -0,0 +1,26 @@ +--- +title: "Default Policy Reference" +description: "Breakdown of the built-in default policy applied when you create an OpenShell sandbox without a custom policy." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +The default policy is the policy applied when you create an OpenShell sandbox without `--policy`. It is baked into the community base image ([`ghcr.io/nvidia/openshell-community/sandboxes/base`](https://github.com/nvidia/openshell-community)) and defined in the community repo's `dev-sandbox-policy.yaml`. + +## Agent Compatibility + +The following table shows the coverage of the default policy for common agents. + +| Agent | Coverage | Action Required | +|---|---|---| +| Claude Code | Full | None. Works out of the box. | +| OpenCode | Partial | Add `opencode.ai` endpoint and OpenCode binary paths. | +| Codex | None | Provide a complete custom policy with OpenAI endpoints and Codex binary paths. | + + +If you run a non-Claude agent without a custom policy, the agent's API calls are denied by the proxy. You must provide a policy that declares the agent's endpoints and binaries. + + +## Default Policy Blocks + +The default policy blocks are defined in the community base image. See the [openshell-community repository](https://github.com/nvidia/openshell-community) for the full `dev-sandbox-policy.yaml` source. 
diff --git a/fern/v0.0.1/pages/reference/policy-schema.mdx b/fern/v0.0.1/pages/reference/policy-schema.mdx
new file mode 100644
index 00000000..c775f0d6
--- /dev/null
+++ b/fern/v0.0.1/pages/reference/policy-schema.mdx
@@ -0,0 +1,215 @@
+---
+title: "Policy Schema Reference"
+description: "Complete field reference for the sandbox policy YAML including static and dynamic sections."
+---
+{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+   SPDX-License-Identifier: Apache-2.0 */}
+
+Complete field reference for the sandbox policy YAML. Each field is documented with its type, whether it is required, and whether it is static (locked at sandbox creation) or dynamic (hot-reloadable on a running sandbox).
+
+## Top-Level Structure
+
+A policy YAML file contains the following top-level fields:
+
+```yaml
+version: 1
+filesystem_policy: { ... }
+landlock: { ... }
+process: { ... }
+network_policies: { ... }
+inference: { ... }
+```
+
+| Field | Type | Required | Category | Description |
+|---|---|---|---|---|
+| `version` | integer | Yes | -- | Policy schema version. Must be `1`. |
+| `filesystem_policy` | object | No | Static | Controls which directories the agent can read and write. |
+| `landlock` | object | No | Static | Configures Landlock LSM enforcement behavior. |
+| `process` | object | No | Static | Sets the user and group the agent process runs as. |
+| `network_policies` | map | No | Dynamic | Declares which binaries can reach which network endpoints. |
+| `inference` | object | No | Dynamic | Declares the inference routing hints the sandbox can use (for example, `allowed_routes: [local]`). Refer to [About Sandboxes](/sandboxes/index#policy-structure) for an example. |
+
+Static fields are set at sandbox creation time. Changing them requires destroying and recreating the sandbox. Dynamic fields can be updated on a running sandbox with `openshell policy set` and take effect without restarting.
+
+## Version
+
+The version field identifies which schema the policy uses:
+
+| Field | Type | Required | Description |
+|---|---|---|---|
+| `version` | integer | Yes | Schema version number. Currently must be `1`. 
| + +## Filesystem Policy + +**Category:** Static + +Controls filesystem access inside the sandbox. Paths not listed in either `read_only` or `read_write` are inaccessible. + +| Field | Type | Required | Description | +|---|---|---|---| +| `include_workdir` | bool | No | When `true`, automatically adds the agent's working directory to `read_write`. | +| `read_only` | list of strings | No | Paths the agent can read but not modify. Typically system directories like `/usr`, `/lib`, `/etc`. | +| `read_write` | list of strings | No | Paths the agent can read and write. Typically `/sandbox` (working directory) and `/tmp`. | + +**Validation constraints:** + +- Every path must be absolute (start with `/`). +- Paths must not contain `..` traversal components. The server normalizes paths before storage, but rejects policies where traversal would escape the intended scope. +- Read-write paths must not be overly broad (for example, `/` alone is rejected). +- Each individual path must not exceed 4096 characters. +- The combined total of `read_only` and `read_write` paths must not exceed 256. + +Policies that violate these constraints are rejected with `INVALID_ARGUMENT` at creation or update time. Disk-loaded YAML policies that fail validation fall back to a restrictive default. + +Example: + +```yaml +filesystem_policy: + include_workdir: true + read_only: + - /usr + - /lib + - /proc + - /dev/urandom + - /etc + read_write: + - /sandbox + - /tmp + - /dev/null +``` + +## Landlock + +**Category:** Static + +Configures [Landlock LSM](https://docs.kernel.org/security/landlock.html) enforcement at the kernel level. Landlock provides mandatory filesystem access control below what UNIX permissions allow. + +| Field | Type | Required | Values | Description | +|---|---|---|---|---| +| `compatibility` | string | No | `best_effort`, `hard_requirement` | How OpenShell handles kernel ABI differences. `best_effort` uses the highest Landlock ABI the host kernel supports. 
`hard_requirement` fails if the required ABI is unavailable. | + +Example: + +```yaml +landlock: + compatibility: best_effort +``` + +## Process + +**Category:** Static + +Sets the OS-level identity for the agent process inside the sandbox. + +| Field | Type | Required | Description | +|---|---|---|---| +| `run_as_user` | string | No | The user name or UID the agent process runs as. Default: `sandbox`. | +| `run_as_group` | string | No | The group name or GID the agent process runs as. Default: `sandbox`. | + +**Validation constraint:** Neither `run_as_user` nor `run_as_group` may be set to `root` or `0`. Policies that request root process identity are rejected at creation or update time. + +Example: + +```yaml +process: + run_as_user: sandbox + run_as_group: sandbox +``` + +## Network Policies + +**Category:** Dynamic + +A map of named network policy entries. Each entry declares a set of endpoints and a set of binaries. Only the listed binaries are permitted to connect to the listed endpoints. The map key is a logical identifier. The `name` field inside the entry is the display name used in logs. + +### Network Policy Entry + +Each entry in the `network_policies` map has the following fields: + +| Field | Type | Required | Description | +|---|---|---|---| +| `name` | string | No | Display name for the policy entry. Used in log output. Defaults to the map key. | +| `endpoints` | list of endpoint objects | Yes | Hosts and ports this entry permits. | +| `binaries` | list of binary objects | Yes | Executables allowed to connect to these endpoints. | + +### Endpoint Object + +Each endpoint defines a reachable destination and optional inspection rules. + +| Field | Type | Required | Description | +|---|---|---|---| +| `host` | string | Yes | Hostname or IP address. Supports wildcards: `*.example.com` matches any subdomain. | +| `port` | integer | Yes | TCP port number. | +| `protocol` | string | No | Set to `rest` to enable HTTP request inspection. 
Omit for TCP passthrough. | +| `tls` | string | No | TLS handling mode. `terminate` decrypts TLS at the proxy for inspection. `passthrough` forwards encrypted traffic without inspection. Only relevant when `protocol` is `rest`. | +| `enforcement` | string | No | `enforce` actively blocks disallowed requests. `audit` logs violations but allows traffic through. | +| `access` | string | No | HTTP access level. One of `read-only`, `read-write`, or `full`. Mutually exclusive with `rules`. | +| `rules` | list of rule objects | No | Fine-grained per-method, per-path allow rules. Mutually exclusive with `access`. | + +#### Access Levels + +The `access` field accepts one of the following values: + +| Value | Allowed HTTP Methods | +|---|---| +| `full` | All methods and paths. | +| `read-only` | `GET`, `HEAD`, `OPTIONS`. | +| `read-write` | `GET`, `HEAD`, `OPTIONS`, `POST`, `PUT`, `PATCH`. | + +#### Rule Object + +Used when `access` is not set. Each rule explicitly allows a method and path combination. + +| Field | Type | Required | Description | +|---|---|---|---| +| `allow.method` | string | Yes | HTTP method to allow (for example, `GET`, `POST`). | +| `allow.path` | string | Yes | URL path pattern. Supports `*` and `**` glob syntax. | + +Example with rules: + +```yaml +rules: + - allow: + method: GET + path: /**/info/refs* + - allow: + method: POST + path: /**/git-upload-pack +``` + +### Binary Object + +Identifies an executable that is permitted to use the associated endpoints. + +| Field | Type | Required | Description | +|---|---|---|---| +| `path` | string | Yes | Filesystem path to the executable. Supports glob patterns with `*` and `**`. For example, `/sandbox/.vscode-server/**` matches any executable under that directory tree. 
| + +### Full Example + +The following policy grants read-only GitHub API access and npm registry access: + +```yaml +network_policies: + github_rest_api: + name: github-rest-api + endpoints: + - host: api.github.com + port: 443 + protocol: rest + tls: terminate + enforcement: enforce + access: read-only + binaries: + - path: /usr/local/bin/claude + - path: /usr/bin/node + - path: /usr/bin/gh + npm_registry: + name: npm-registry + endpoints: + - host: registry.npmjs.org + port: 443 + binaries: + - path: /usr/bin/npm + - path: /usr/bin/node +``` diff --git a/fern/v0.0.1/pages/reference/support-matrix.mdx b/fern/v0.0.1/pages/reference/support-matrix.mdx new file mode 100644 index 00000000..683bc88e --- /dev/null +++ b/fern/v0.0.1/pages/reference/support-matrix.mdx @@ -0,0 +1,66 @@ +--- +title: "Support Matrix" +description: "Platform, software, runtime, and kernel requirements for running OpenShell." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +This page lists the platform, software, runtime, and kernel requirements for running OpenShell. + +## Supported Platforms + +OpenShell publishes multi-architecture container images for `linux/amd64` and `linux/arm64`. 
The CLI is supported on the following host platforms: + +| Platform | Architecture | Status | +| -------------------------------- | --------------------- | --------- | +| Linux (Debian/Ubuntu) | x86_64 (amd64) | Supported | +| Linux (Debian/Ubuntu) | aarch64 (arm64) | Supported | +| macOS (Docker Desktop) | Apple Silicon (arm64) | Supported | +| Windows (WSL 2 + Docker Desktop) | x86_64 | Untested | + +## Software Prerequisites + +The following software must be installed on the host before using the OpenShell CLI: + +| Component | Minimum Version | Notes | +| ------------------------------- | --------------- | ----------------------------------------------- | +| Docker Desktop or Docker Engine | 28.04 | Must be running before any `openshell` command. | + +## Sandbox Runtime Versions + +Sandbox container images are maintained in the [openshell-community](https://github.com/nvidia/openshell-community) repository. Refer to that repository for the current list of installed components and their versions. + +## Container Images + +OpenShell publishes two container images. Both are published for `linux/amd64` and `linux/arm64`. + +| Image | Reference | Pulled When | +| ------- | ----------------------------------------- | -------------------------------- | +| Cluster | `ghcr.io/nvidia/openshell/cluster:latest` | `openshell gateway start` | +| Gateway | `ghcr.io/nvidia/openshell/gateway:latest` | Cluster startup (via Helm chart) | + +The cluster image bundles the Helm charts, Kubernetes manifests, and the `openshell-sandbox` supervisor binary required to bootstrap the control plane. The supervisor binary is side-loaded into sandbox pods at runtime via a read-only host volume mount. The gateway image is pulled at cluster startup and runs the API server. + +Sandbox images are maintained separately in the [openshell-community](https://github.com/nvidia/openshell-community) repository. 
+ +To override the default image references, set the following environment variables: + +| Variable | Purpose | +| ------------------------------ | --------------------------------------------------- | +| `OPENSHELL_CLUSTER_IMAGE` | Override the cluster image reference. | +| `OPENSHELL_COMMUNITY_REGISTRY` | Override the registry for community sandbox images. | + +## Kernel Requirements + +OpenShell enforces sandbox isolation through two Linux kernel security modules: + +| Module | Requirement | Details | +| -------------------------------------------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| [Landlock LSM](https://docs.kernel.org/security/landlock.html) | Recommended | Enforces filesystem access restrictions at the kernel level. The `best_effort` compatibility mode uses the highest Landlock ABI the host kernel supports. The `hard_requirement` mode fails sandbox creation if the required ABI is unavailable. | +| seccomp | Required | Filters dangerous system calls. Available on all modern Linux kernels (3.17+). | + +On macOS, these kernel modules run inside the Docker Desktop Linux VM, not on the host kernel. + +## Agent Compatibility + +For the full list of supported agents and their default policy coverage, refer to the [Supported Agents](/sandboxes/index#supported-agents) table. diff --git a/fern/v0.0.1/pages/resources/eula.mdx b/fern/v0.0.1/pages/resources/eula.mdx new file mode 100644 index 00000000..d6e6fe93 --- /dev/null +++ b/fern/v0.0.1/pages/resources/eula.mdx @@ -0,0 +1,6 @@ +--- +title: "License" +description: "NVIDIA OpenShell license agreement and terms of use." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ SPDX-License-Identifier: Apache-2.0 */} diff --git a/fern/v0.0.1/pages/sandboxes/community-sandboxes.mdx b/fern/v0.0.1/pages/sandboxes/community-sandboxes.mdx new file mode 100644 index 00000000..e0d69938 --- /dev/null +++ b/fern/v0.0.1/pages/sandboxes/community-sandboxes.mdx @@ -0,0 +1,96 @@ +--- +title: "Community Sandboxes" +description: "Use pre-built sandboxes from the OpenShell Community catalog or contribute your own." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +Use pre-built sandboxes from the OpenShell Community catalog, or contribute your +own. + +## What Are Community Sandboxes + +Community sandboxes are ready-to-use environments published in the +[OpenShell Community](https://github.com/NVIDIA/OpenShell-Community) repository. +Each sandbox bundles a Dockerfile, policy, optional skills, and startup scripts +into a single package that you can launch with one command. + +## Current Catalog + +The following community sandboxes are available in the catalog. + +| Sandbox | Description | +|---|---| +| `base` | Foundational image with system tools and dev environment | +| `openclaw` | Open agent manipulation and control | +| `sdg` | Synthetic data generation workflows | +| `simulation` | General-purpose simulation sandboxes | + +## Use a Community Sandbox + +Launch a community sandbox by name with the `--from` flag: + +```console +$ openshell sandbox create --from openclaw +``` + +When you pass `--from` with a community sandbox name, the CLI: + +1. Resolves the name against the + [OpenShell Community](https://github.com/NVIDIA/OpenShell-Community) repository. +2. Pulls the Dockerfile, policy, skills, and any startup scripts. +3. Builds the container image locally. +4. Creates the sandbox with the bundled configuration applied. + +You end up with a running sandbox whose image, policy, and tooling are all +preconfigured by the community package. 
+ +### Other Sources + +The `--from` flag also accepts: + +- Local directory paths: Point to a directory on disk that contains a + Dockerfile and optional policy/skills: + + ```console + $ openshell sandbox create --from ./my-sandbox-dir + ``` + +- Container image references: Use an existing container image directly: + + ```console + $ openshell sandbox create --from my-registry.example.com/my-image:latest + ``` + +## Contribute a Community Sandbox + +Each community sandbox is a directory under `sandboxes/` in the +[OpenShell Community](https://github.com/NVIDIA/OpenShell-Community) repository. +At minimum, a sandbox directory must contain the following files: + +- `Dockerfile` that defines the container image. +- `README.md` that describes the sandbox and how to use it. + +You can also include the following optional files: + +- `policy.yaml` that defines the default policy applied when the sandbox launches. +- `skills/` that contains agent skill definitions bundled with the sandbox. +- Startup scripts that are any scripts the Dockerfile or entrypoint invokes. + +To contribute, fork the repository, add your sandbox directory, and open a pull +request. Refer to the repository's +[CONTRIBUTING.md](https://github.com/NVIDIA/OpenShell-Community/blob/main/CONTRIBUTING.md) +for submission guidelines. + + +The community catalog is designed to grow. If you have built a sandbox that +supports a particular workflow (data processing, simulation, code review, +or anything else), consider contributing it back so others can use it. + + +## Next Steps + +Explore related topics: + +- **Need to supply API keys or tokens?** Set up [Providers](/sandboxes/providers) for credential management. +- **Want to customize the sandbox policy?** Write custom rules in [Policies](/sandboxes/policies). 
diff --git a/fern/v0.0.1/pages/sandboxes/create-and-manage.mdx b/fern/v0.0.1/pages/sandboxes/create-and-manage.mdx new file mode 100644 index 00000000..bd7ba2ff --- /dev/null +++ b/fern/v0.0.1/pages/sandboxes/create-and-manage.mdx @@ -0,0 +1,189 @@ +--- +title: "Create and Manage Sandboxes" +description: "Create, inspect, connect to, monitor, transfer files, and delete OpenShell sandboxes." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +This page walks you through the full sandbox lifecycle: creating, inspecting, connecting to, monitoring, and deleting sandboxes. For background on what sandboxes are and how the runtime works, refer to [About Sandboxes](/sandboxes/index). + + +Docker must be running before you create a sandbox. If it is not, the CLI +returns a connection-refused error (`os error 61`) without explaining +the cause. Start Docker and try again. + + +## Create a Sandbox + +Run a single command to create a sandbox and launch your agent: + +```console +$ openshell sandbox create -- claude +``` + +If you have an existing gateway, the sandbox is created in it. Otherwise, a gateway is created automatically. + +To request GPU resources explicitly, add `--gpu`: + +```console +$ openshell sandbox create --gpu -- claude +``` + +If no gateway is running, the auto-bootstrap path starts a GPU-enabled gateway first. + +A fully specified creation command might look like: + +```console +$ openshell sandbox create \ + --name dev \ + --provider my-claude \ + --policy policy.yaml \ + --upload \ + -- claude +``` + + +Sandboxes created with `openshell sandbox create` stay running by default after +the initial command or shell exits. Use `--no-keep` when you want the sandbox +deleted automatically instead. 
+ + +## Create from a Community Sandbox or Custom Image + +Use `--from` to create a sandbox from a pre-built community package, a local directory, or a container image: + +```console +$ openshell sandbox create --from openclaw +``` + +The CLI resolves the name against the [OpenShell Community](https://github.com/NVIDIA/OpenShell-Community) catalog, pulls the bundled Dockerfile and policy, builds the image locally, and creates the sandbox. For the full catalog and how to contribute your own, refer to [Community Sandboxes](/sandboxes/community-sandboxes). + +You can also point `--from` at a local directory or a container image reference: + +```console +$ openshell sandbox create --from ./my-sandbox-dir +$ openshell sandbox create --from my-registry.example.com/my-image:latest +``` + +Images whose final name component contains `gpu` also trigger GPU sandbox requests automatically. For example, `--from nvidia-gpu` behaves like a GPU sandbox request even without `--gpu`. + +## List and Inspect Sandboxes + +Check the status of your sandboxes and retrieve detailed information about individual ones. + +List all sandboxes: + +```console +$ openshell sandbox list +``` + +Get detailed information about a specific sandbox: + +```console +$ openshell sandbox get my-sandbox +``` + +## Connect to a Sandbox + +Access a running sandbox through an interactive SSH session or VS Code Remote-SSH. + +### Interactive SSH + +Open an SSH session into a running sandbox: + +```console +$ openshell sandbox connect my-sandbox +``` + +### Open in a remote editor + +Launch VS Code or Cursor directly into the sandbox workspace: + +```console +$ openshell sandbox create --editor vscode --name my-sandbox +$ openshell sandbox connect my-sandbox --editor cursor +``` + +When `--editor` is used, OpenShell keeps the sandbox alive and installs an +OpenShell-managed SSH include file instead of cluttering your main +`~/.ssh/config` with generated host blocks. 
+ +## View Logs + +Stream and filter sandbox logs to monitor agent activity and diagnose policy decisions. + +Stream sandbox logs: + +```console +$ openshell logs my-sandbox +``` + +Use flags to filter and follow output: + +| Flag | Purpose | Example | +|---|---|---| +| `--tail` | Stream logs in real time | `openshell logs my-sandbox --tail` | +| `--source` | Filter by log source | `--source sandbox` | +| `--level` | Filter by severity | `--level warn` | +| `--since` | Show logs from a time window | `--since 5m` | + +## Monitor Your Sandbox + +OpenShell Terminal is a real-time dashboard that combines sandbox status and live logs in a single view. + +```console +$ openshell term +``` + +The dashboard shows the following information. + +- **Sandbox status**: Name, phase, image, attached providers, age, and active port forwards. +- **Live log stream**: Outbound connections, policy decisions, and inference interceptions as they happen. Logs are labeled by source: `sandbox` (proxy and policy events) or `gateway` (lifecycle events). + +Use the terminal to spot blocked connections (`action=deny` entries) and inference interceptions (`action=inspect_for_inference` entries). If a connection is blocked unexpectedly, add the host to your network policy — refer to [Policies](/sandboxes/policies) for the workflow. + +## Transfer Files + +Transfer files between your host machine and a running sandbox. + +Upload files from your host into the sandbox: + +```console +$ openshell sandbox upload my-sandbox ./src /sandbox/src +``` + +Download files from the sandbox to your host: + +```console +$ openshell sandbox download my-sandbox /sandbox/output ./local +``` + + +You can also upload files at creation time with the `--upload` flag on +`openshell sandbox create`. + + +## Delete Sandboxes + +Remove sandboxes when they are no longer needed. Deleting a sandbox stops all processes, releases cluster resources, and purges injected credentials. 
+ +Delete a sandbox by name: + +```console +$ openshell sandbox delete my-sandbox +``` + +Delete all sandboxes in the active gateway: + +```console +$ openshell sandbox delete --all +``` + +## Next Steps + +Explore related topics: + +- To follow a complete end-to-end example, refer to the [GitHub Sandbox](/get-started/github-sandbox) tutorial. +- To supply API keys or tokens, refer to [Providers](/sandboxes/providers). +- To control what the agent can access, refer to [Policies](/sandboxes/policies). +- To use a pre-built environment, refer to the [Community Sandboxes](/sandboxes/community-sandboxes) catalog. diff --git a/fern/v0.0.1/pages/sandboxes/index.mdx b/fern/v0.0.1/pages/sandboxes/index.mdx new file mode 100644 index 00000000..411b8434 --- /dev/null +++ b/fern/v0.0.1/pages/sandboxes/index.mdx @@ -0,0 +1,107 @@ +--- +title: "About Sandboxes" +description: "Understand sandbox lifecycle, supported agents, built-in default policy, and network access rules in OpenShell." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +An OpenShell sandbox is a safe, private execution environment for an AI agent. Each sandbox runs with multiple layers of protection that prevent unauthorized data access, credential exposure, and network exfiltration. Protection layers include filesystem restrictions (Landlock), system call filtering (seccomp), network namespace isolation, and a privacy-enforcing HTTP CONNECT proxy. + +## Sandbox Lifecycle + +Every sandbox moves through a defined set of phases: + +| Phase | Description | +|---|---| +| Provisioning | The runtime is setting up the sandbox environment, injecting credentials, and applying your policy. | +| Ready | The sandbox is running. The agent process is active and all isolation layers are enforced. You can connect, sync files, and view logs. | +| Error | Something went wrong during provisioning or execution. 
Check logs with `openshell logs` for details. | +| Deleting | The sandbox is being torn down. The system releases resources and purges credentials. | + +## Supported Agents + +The following table summarizes the agents that run in OpenShell sandboxes. All agent sandbox images are maintained in the [OpenShell Community](https://github.com/NVIDIA/OpenShell-Community) repository. Agents in the base image are auto-configured when passed as the trailing command to `openshell sandbox create`. More community agent sandboxes are available in the [Community Sandboxes](/sandboxes/community-sandboxes) catalog. + +| Agent | Source | Default Policy | Notes | +|---|---|---|---| +| [Claude Code](https://docs.anthropic.com/en/docs/claude-code) | [`base`](https://github.com/NVIDIA/OpenShell-Community/tree/main/sandboxes/base) | Full coverage | Works out of the box. Requires `ANTHROPIC_API_KEY`. | +| [OpenCode](https://opencode.ai/) | [`base`](https://github.com/NVIDIA/OpenShell-Community/tree/main/sandboxes/base) | Partial coverage | Pre-installed. Add `opencode.ai` endpoint and OpenCode binary paths to the policy for full functionality. | +| [Codex](https://developers.openai.com/codex) | [`base`](https://github.com/NVIDIA/OpenShell-Community/tree/main/sandboxes/base) | No coverage | Pre-installed. Requires a custom policy with OpenAI endpoints and Codex binary paths. Requires `OPENAI_API_KEY`. | +| [OpenClaw](https://openclaw.ai/) | [`openclaw`](https://github.com/NVIDIA/OpenShell-Community/tree/main/sandboxes/openclaw) | Bundled | Agent orchestration layer. Launch with `openshell sandbox create --from openclaw`. | + +{/* | [NemoClaw](https://github.com/NVIDIA/OpenShell-Community) | [OpenShell Community](https://github.com/NVIDIA/OpenShell-Community/tree/main/sandboxes/nemoclaw) | Bundled | OpenClaw with NVIDIA DevX UI extension. Launch with `openshell sandbox create --from nemoclaw`. 
| */} + +## Built-in Default Policy + +OpenShell ships a built-in policy that covers common agent workflows out of the box. +When you create a sandbox without `--policy`, the default policy is applied. It controls three things: + +| Layer | What It Controls | How It Works | +|---|---|---| +| Filesystem | What the agent can access on disk | Paths are split into read-only and read-write sets. [Landlock LSM](https://docs.kernel.org/security/landlock.html) enforces these restrictions at the kernel level. | +| Network | What the agent can reach on the network | Each policy block pairs allowed destinations (host and port) with allowed binaries (executable paths). The proxy matches every outbound connection to the binary that opened it. Both must match or the connection is denied. | +| Process | What privileges the agent has | The agent runs as an unprivileged user with seccomp filters that block dangerous system calls. No `sudo`, no `setuid`, no path to elevated privileges. | + +For the full breakdown of each default policy block and agent compatibility details, refer to [Default Policy](/reference/default-policy). + +## Policy Structure + +A policy has static sections (`filesystem_policy`, `landlock`, `process`) that are locked at sandbox creation, and dynamic sections (`network_policies`, `inference`) that are hot-reloadable on a running sandbox. + +```yaml +version: 1 + +# Static: locked at sandbox creation. Paths the agent can read vs read/write. +filesystem_policy: + read_only: [/usr, /lib, /etc] + read_write: [/sandbox, /tmp] + +# Static: Landlock LSM kernel enforcement. best_effort uses highest ABI the host supports. +landlock: + compatibility: best_effort + +# Static: Unprivileged user/group the agent process runs as. +process: + run_as_user: sandbox + run_as_group: sandbox + +# Dynamic: hot-reloadable. Named blocks of endpoints + binaries allowed to reach them. 
+network_policies: + my_api: + name: my-api + endpoints: + - host: api.example.com + port: 443 + protocol: rest + tls: terminate + enforcement: enforce + access: full + binaries: + - path: /usr/bin/curl + +# Dynamic: hot-reloadable. Routing hints this sandbox can use for inference (e.g. local, nvidia). +inference: + allowed_routes: [local] +``` + +For the complete structure and every field, refer to the [Policy Schema Reference](/reference/policy-schema). + +## Network Access Rules + +Network access is controlled by policy blocks under `network_policies`. Each block has a name, a list of endpoints (host, port, protocol, and optional rules), and a list of binaries that are allowed to use those endpoints. + +Every outbound connection from the sandbox goes through the proxy: + +- The proxy queries the [policy engine](/about/architecture) with the destination (host and port) and the calling binary. A connection is allowed only when both match an entry in the same policy block. +- For endpoints with `protocol: rest` and `tls: terminate`, each HTTP request is checked against that endpoint's `rules` (method and path). +- If no endpoint matches and inference routes are configured, the request may be rerouted for inference. +- Otherwise the connection is denied. Endpoints without `protocol` or `tls` allow the TCP stream through without inspecting payloads. + +## Next Steps + +Continue with one of the following: + +- To create your first sandbox, refer to [Create and Manage](/sandboxes/create-and-manage). +- To supply API keys or tokens, refer to [Providers](/sandboxes/providers). +- To control what the agent can access, refer to [Policies](/sandboxes/policies). +- To use a pre-built environment, refer to the [Community Sandboxes](/sandboxes/community-sandboxes) catalog. 
diff --git a/fern/v0.0.1/pages/sandboxes/policies.mdx b/fern/v0.0.1/pages/sandboxes/policies.mdx new file mode 100644 index 00000000..74fbf9eb --- /dev/null +++ b/fern/v0.0.1/pages/sandboxes/policies.mdx @@ -0,0 +1,175 @@ +--- +title: "Customize Sandbox Policies" +description: "Apply, iterate, and debug sandbox network policies with hot-reload on running OpenShell sandboxes." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +Use this page to apply and iterate policy changes on running sandboxes. For a full field-by-field YAML definition, use the [Policy Schema Reference](/reference/policy-schema). + +## Quick Start: Apply a Custom Policy + +Pass a policy YAML file when creating the sandbox: + +```console +$ openshell sandbox create --policy ./my-policy.yaml -- claude +``` + +`openshell sandbox create` keeps the sandbox running after the initial command exits, which is useful when you plan to iterate on the policy. Add `--no-keep` if you want the sandbox deleted automatically instead. + +To avoid passing `--policy` every time, set a default policy with an environment variable: + +```console +$ export OPENSHELL_SANDBOX_POLICY=./my-policy.yaml +$ openshell sandbox create -- claude +``` + +The CLI uses the policy from `OPENSHELL_SANDBOX_POLICY` whenever `--policy` is not explicitly provided. + +## Iterate on a Running Sandbox + +To change what the sandbox can access, pull the current policy, edit the YAML, and push the update. The workflow is iterative: create the sandbox, monitor logs for denied actions, pull the policy, modify it, push, and verify. + +```mermaid +flowchart TD + A["1. Create sandbox with initial policy"] --> B["2. Monitor logs for denied actions"] + B --> C["3. Pull current policy"] + C --> D["4. Modify the policy YAML"] + D --> E["5. Push updated policy"] + E --> F["6. 
Verify the new revision loaded"] + F --> B + + style A fill:#76b900,stroke:#000000,color:#000000 + style B fill:#76b900,stroke:#000000,color:#000000 + style C fill:#76b900,stroke:#000000,color:#000000 + style D fill:#ffffff,stroke:#000000,color:#000000 + style E fill:#76b900,stroke:#000000,color:#000000 + style F fill:#76b900,stroke:#000000,color:#000000 + + linkStyle default stroke:#76b900,stroke-width:2px +``` + +The following steps outline the hot-reload policy update workflow. + +1. Create the sandbox with your initial policy by following [Quick Start: Apply a Custom Policy](#quick-start-apply-a-custom-policy) above (or set `OPENSHELL_SANDBOX_POLICY`). + +2. Monitor denials. Each log entry shows host, port, binary, and reason. Alternatively, use `openshell term` for a live dashboard. + + ```console + $ openshell logs --tail --source sandbox + ``` + +3. Pull the current policy. Strip the metadata header (Version, Hash, Status) before reusing the file. + + ```console + $ openshell policy get --full > current-policy.yaml + ``` + +4. Edit the YAML: add or adjust `network_policies` entries, binaries, `access` or `rules`, or `inference.allowed_routes`. + +5. Push the updated policy. Exit codes: 0 = loaded, 1 = validation failed, 124 = timeout. + + ```console + $ openshell policy set --policy current-policy.yaml --wait + ``` + +6. Verify the new revision. If status is `loaded`, repeat from step 2 as needed; if `failed`, fix the policy and repeat from step 4. + + ```console + $ openshell policy list + ``` + +## Debug Denied Requests + +Check `openshell logs --tail --source sandbox` for the denied host, path, and binary. + +When triaging denied requests, check: + +- Destination host and port to confirm which endpoint is missing. +- Calling binary path to confirm which `binaries` entry needs to be added or adjusted. +- HTTP method and path (for REST endpoints) to confirm which `rules` entry needs to be added or adjusted. 
+
+Then push the updated policy as described above.
+
+## Examples
+
+Add these blocks to the `network_policies` section of your sandbox policy. Apply with `openshell policy set --policy <file> --wait`.
+Use **Simple endpoint** for host-level allowlists and **Granular rules** for method/path control.
+
+
+
+Allow `pip install` and `uv pip install` to reach PyPI:
+
+```yaml
+  pypi:
+    name: pypi
+    endpoints:
+      - host: pypi.org
+        port: 443
+      - host: files.pythonhosted.org
+        port: 443
+    binaries:
+      - { path: /usr/bin/pip }
+      - { path: /usr/local/bin/uv }
+```
+
+Endpoints without `protocol` or `tls` use TCP passthrough — the proxy allows the stream without inspecting payloads.
+
+
+
+
+Allow Claude and the GitHub CLI to reach `api.github.com` with per-path rules: read-only (GET, HEAD, OPTIONS) and GraphQL (POST) for all paths; full write access for `alpha-repo`; and create/edit issues only for `bravo-repo`. Replace `<org>` with your GitHub org or username.
+
+
+For an end-to-end walkthrough that combines this policy with a GitHub credential provider and sandbox creation, refer to [GitHub Sandbox](/get-started/github-sandbox).
+
+
+```yaml
+  github_repos:
+    name: github_repos
+    endpoints:
+      - host: api.github.com
+        port: 443
+        protocol: rest
+        tls: terminate
+        enforcement: enforce
+        rules:
+          - allow:
+              method: GET
+              path: "/**"
+          - allow:
+              method: HEAD
+              path: "/**"
+          - allow:
+              method: OPTIONS
+              path: "/**"
+          - allow:
+              method: POST
+              path: "/graphql"
+          - allow:
+              method: "*"
+              path: "/repos/<org>/alpha-repo/**"
+          - allow:
+              method: POST
+              path: "/repos/<org>/bravo-repo/issues"
+          - allow:
+              method: PATCH
+              path: "/repos/<org>/bravo-repo/issues/*"
+    binaries:
+      - { path: /usr/local/bin/claude }
+      - { path: /usr/bin/gh }
+```
+
+Endpoints with `protocol: rest` and `tls: terminate` enable HTTP request inspection — the proxy decrypts TLS and checks each HTTP request against the `rules` list. 
+ + + + + +## Next Steps + +Explore related topics: + +- To learn about policy structure and network access rules, refer to [About Sandboxes](/sandboxes/index). +- To view the full field-by-field YAML definition, refer to the [Policy Schema Reference](/reference/policy-schema). +- To review the default policy breakdown, refer to [Default Policy](/reference/default-policy). diff --git a/fern/v0.0.1/pages/sandboxes/providers.mdx b/fern/v0.0.1/pages/sandboxes/providers.mdx new file mode 100644 index 00000000..7df6f7a0 --- /dev/null +++ b/fern/v0.0.1/pages/sandboxes/providers.mdx @@ -0,0 +1,131 @@ +--- +title: "Providers" +description: "Create and manage credential providers that inject API keys and tokens into OpenShell sandboxes." +--- +{/* SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 */} + +AI agents typically need credentials to access external services: an API key for the AI model provider, a token for GitHub or GitLab, and so on. OpenShell manages these credentials as first-class entities called *providers*. + +Create and manage providers that supply credentials to sandboxes. + +## Create a Provider + +Providers can be created from local environment variables or with explicit credential values. + +### From Local Credentials + +The fastest way to create a provider is to let the CLI discover credentials from +your shell environment: + +```console +$ openshell provider create --name my-claude --type claude --from-existing +``` + +This reads `ANTHROPIC_API_KEY` or `CLAUDE_API_KEY` from your current environment +and stores them in the provider. 
+
+### With Explicit Credentials
+
+Supply a credential value directly:
+
+```console
+$ openshell provider create --name my-api --type generic --credential API_KEY=sk-abc123
+```
+
+### Bare Key Form
+
+Pass a key name without a value to read the value from the environment variable
+of that name:
+
+```console
+$ openshell provider create --name my-api --type generic --credential API_KEY
+```
+
+This looks up the current value of `$API_KEY` in your shell and stores it.
+
+## Manage Providers
+
+List, inspect, update, and delete providers from the active gateway.
+
+List all providers:
+
+```console
+$ openshell provider list
+```
+
+Inspect a provider:
+
+```console
+$ openshell provider get my-claude
+```
+
+Update a provider's credentials:
+
+```console
+$ openshell provider update my-claude --type claude --from-existing
+```
+
+Delete a provider:
+
+```console
+$ openshell provider delete my-claude
+```
+
+## Attach Providers to Sandboxes
+
+Pass one or more `--provider` flags when creating a sandbox:
+
+```console
+$ openshell sandbox create --provider my-claude --provider my-github -- claude
+```
+
+Each `--provider` flag attaches one provider. The sandbox receives all
+credentials from every attached provider at runtime.
+
+
+Providers cannot be added to a running sandbox. If you need to attach an
+additional provider, delete the sandbox and recreate it with all required
+providers specified.
+
+
+### Auto-Discovery Shortcut
+
+When the trailing command in `openshell sandbox create` is a recognized tool name (`claude`, `codex`, or `opencode`), the CLI auto-creates the required
+provider from your local credentials if one does not already exist. You do not
+need to create the provider separately:
+
+```console
+$ openshell sandbox create -- claude
+```
+
+This detects `claude` as a known tool, finds your `ANTHROPIC_API_KEY`, creates
+a provider, attaches it to the sandbox, and launches Claude Code. 
+ +## Supported Provider Types + +The following provider types are supported. + +| Type | Environment Variables Injected | Typical Use | +|---|---|---| +| `claude` | `ANTHROPIC_API_KEY`, `CLAUDE_API_KEY` | Claude Code, Anthropic API | +| `codex` | `OPENAI_API_KEY` | OpenAI Codex | +| `opencode` | `OPENCODE_API_KEY`, `OPENROUTER_API_KEY`, `OPENAI_API_KEY` | opencode tool | +| `github` | `GITHUB_TOKEN`, `GH_TOKEN` | GitHub API, `gh` CLI — refer to [GitHub Sandbox](/get-started/github-sandbox) | +| `gitlab` | `GITLAB_TOKEN`, `GLAB_TOKEN`, `CI_JOB_TOKEN` | GitLab API, `glab` CLI | +| `nvidia` | `NVIDIA_API_KEY` | NVIDIA API Catalog | +| `generic` | User-defined | Any service with custom credentials | +| `outlook` | *(none: no auto-discovery)* | Microsoft Outlook integration | + + +Use the `generic` type for any service not listed above. You define the +environment variable names and values yourself with `--credential`. + + +## Next Steps + +Explore related topics: + +- To control what the agent can access, refer to [Policies](/sandboxes/policies). +- To use a pre-built environment, refer to the [Community Sandboxes](/sandboxes/community-sandboxes) catalog. +- To view the complete field reference for the policy YAML, refer to the [Policy Schema Reference](/reference/policy-schema). diff --git a/fern/versions/v0.0.1.yml b/fern/versions/v0.0.1.yml new file mode 100644 index 00000000..09c990f8 --- /dev/null +++ b/fern/versions/v0.0.1.yml @@ -0,0 +1,52 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +navigation: + - section: Home + contents: + - page: Welcome + path: ../v0.0.1/pages/index.mdx + - section: About NVIDIA OpenShell + contents: + - page: Overview + path: ../v0.0.1/pages/about/overview.mdx + - page: How It Works + path: ../v0.0.1/pages/about/architecture.mdx + - page: Release Notes + path: ../v0.0.1/pages/about/release-notes.mdx + - section: Get Started + contents: + - page: Quickstart + path: ../v0.0.1/pages/get-started/quickstart.mdx + - page: GitHub Policy Tutorial + path: ../v0.0.1/pages/get-started/github-sandbox.mdx + - section: Sandboxes + contents: + - page: About Sandboxes + path: ../v0.0.1/pages/sandboxes/index.mdx + - page: Create and Manage Sandboxes + path: ../v0.0.1/pages/sandboxes/create-and-manage.mdx + - page: Providers + path: ../v0.0.1/pages/sandboxes/providers.mdx + - page: Customize Sandbox Policies + path: ../v0.0.1/pages/sandboxes/policies.mdx + - page: Community Sandboxes + path: ../v0.0.1/pages/sandboxes/community-sandboxes.mdx + - section: Inference Routing + contents: + - page: About Inference Routing + path: ../v0.0.1/pages/inference/index.mdx + - page: Configure Inference Routing + path: ../v0.0.1/pages/inference/configure.mdx + - section: Reference + contents: + - page: Default Policy Reference + path: ../v0.0.1/pages/reference/default-policy.mdx + - page: Policy Schema Reference + path: ../v0.0.1/pages/reference/policy-schema.mdx + - page: Support Matrix + path: ../v0.0.1/pages/reference/support-matrix.mdx + - section: Resources + contents: + - page: License + path: ../v0.0.1/pages/resources/eula.mdx diff --git a/mise.toml b/mise.toml index 5b6ce835..600fb2a2 100644 --- a/mise.toml +++ b/mise.toml @@ -12,6 +12,7 @@ redactions = ["*_TOKEN", "*_PASSWORD"] experimental = true [tools] +node = "22" python = "3.13.12" rust = "stable" kubectl = "1.35.1" diff --git a/node_modules/.package-lock.json b/node_modules/.package-lock.json new file mode 100644 index 00000000..f69239b6 --- 
/dev/null +++ b/node_modules/.package-lock.json @@ -0,0 +1,22 @@ +{ + "name": "OpenShell", + "lockfileVersion": 3, + "requires": true, + "packages": { + "node_modules/@boundaryml/baml-darwin-arm64": { + "version": "0.220.0", + "resolved": "https://registry.npmjs.org/@boundaryml/baml-darwin-arm64/-/baml-darwin-arm64-0.220.0.tgz", + "integrity": "sha512-HhiSDwVKcqfA+RU7HeJJMdQSZsMw/sSml90ekSwu9KHvrgu8PXrtuLxlsDQbIHcwTXKHdOGUhHSnpT8wj76RzQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + } + } +} diff --git a/node_modules/@boundaryml/baml-darwin-arm64/README.md b/node_modules/@boundaryml/baml-darwin-arm64/README.md new file mode 100644 index 00000000..292aec5d --- /dev/null +++ b/node_modules/@boundaryml/baml-darwin-arm64/README.md @@ -0,0 +1,3 @@ +# `@boundaryml/baml-darwin-arm64` + +This is the **aarch64-apple-darwin** binary for `@boundaryml/baml` diff --git a/node_modules/@boundaryml/baml-darwin-arm64/baml.darwin-arm64.node b/node_modules/@boundaryml/baml-darwin-arm64/baml.darwin-arm64.node new file mode 100644 index 00000000..e3f947c7 Binary files /dev/null and b/node_modules/@boundaryml/baml-darwin-arm64/baml.darwin-arm64.node differ diff --git a/node_modules/@boundaryml/baml-darwin-arm64/package.json b/node_modules/@boundaryml/baml-darwin-arm64/package.json new file mode 100644 index 00000000..734ba4cc --- /dev/null +++ b/node_modules/@boundaryml/baml-darwin-arm64/package.json @@ -0,0 +1,41 @@ +{ + "name": "@boundaryml/baml-darwin-arm64", + "version": "0.220.0", + "cpu": [ + "arm64" + ], + "main": "baml.darwin-arm64.node", + "files": [ + "baml.darwin-arm64.node" + ], + "description": "BAML typescript bindings (package.json)", + "keywords": [ + "napi-rs", + "NAPI", + "N-API", + "Rust", + "node-addon", + "node-addon-api" + ], + "author": "", + "homepage": "https://github.com/BoundaryML/baml#readme", + "license": "MIT", + "engines": { + "node": ">= 10" + }, + "repository": { + "type": "git", + 
"url": "git+https://github.com/BoundaryML/baml.git", + "directory": "engine/language_client_typescript" + }, + "bugs": { + "url": "https://github.com/BoundaryML/baml/issues" + }, + "publishConfig": { + "registry": "https://registry.npmjs.org/", + "access": "public" + }, + "os": [ + "darwin" + ] +} \ No newline at end of file diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 00000000..451f64b6 --- /dev/null +++ b/package-lock.json @@ -0,0 +1,27 @@ +{ + "name": "OpenShell", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "dependencies": { + "@boundaryml/baml-darwin-arm64": "^0.220.0" + } + }, + "node_modules/@boundaryml/baml-darwin-arm64": { + "version": "0.220.0", + "resolved": "https://registry.npmjs.org/@boundaryml/baml-darwin-arm64/-/baml-darwin-arm64-0.220.0.tgz", + "integrity": "sha512-HhiSDwVKcqfA+RU7HeJJMdQSZsMw/sSml90ekSwu9KHvrgu8PXrtuLxlsDQbIHcwTXKHdOGUhHSnpT8wj76RzQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 00000000..fcbef020 --- /dev/null +++ b/package.json @@ -0,0 +1,5 @@ +{ + "dependencies": { + "@boundaryml/baml-darwin-arm64": "^0.220.0" + } +} diff --git a/pyproject.toml b/pyproject.toml index 731a8033..48adab7a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,15 +38,6 @@ dev = [ "setuptools-scm>=8", "grpcio-tools>=1.60", ] -docs = [ - "sphinx<=7.5", - "myst-parser<=5", - "sphinx-copybutton<=0.6", - "sphinx-design", - "sphinx-autobuild", - "sphinxcontrib-mermaid", - "nvidia-sphinx-theme", -] [tool.uv] # Don't try to install the root package with uv sync - use uv pip install . instead diff --git a/tasks/docs.toml b/tasks/docs.toml index 5dd1897d..d8179eb4 100644 --- a/tasks/docs.toml +++ b/tasks/docs.toml @@ -1,35 +1,29 @@ # SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 -# Documentation build, serve, and check tasks +# Documentation build and preview tasks (Fern) +# Requires Node.js (provided by mise). Fern CLI runs via npx. ["docs"] -description = "Build HTML documentation" -run = """ -if ! uv run sphinx-build -b html docs _build/docs; then - echo "" - echo "Build failed. If this is a dependency issue, try: mise run docs:deps" - exit 1 -fi -echo "" -echo "Documentation built successfully." -echo "file://$(pwd)/_build/docs/index.html" -""" +description = "Publish Fern documentation to hosted site" +run = "npx fern-api generate --docs" -["docs:deps"] -description = "Install documentation dependencies" -run = "uv sync --group docs" +["docs:build"] +description = "Alias for docs" +depends = ["docs"] ["docs:build:strict"] -description = "Build HTML documentation with warnings as errors" -depends = ["docs:deps"] -run = "uv run sphinx-build -b html -W --keep-going docs _build/docs" +description = "Alias for docs (Fern builds are always strict)" +depends = ["docs"] -["docs:serve"] -description = "Serve documentation with live reload on port 8000" -depends = ["docs:deps"] -run = "uv run sphinx-autobuild docs _build/docs --port 8000 --open-browser" +["docs:preview"] +description = "Generate a Fern preview URL for the current docs" +run = "npx fern-api generate --docs --preview" + +["docs:dev"] +description = "Run Fern docs preview server at localhost:3000" +run = "npx fern-api docs dev" ["docs:clean"] -description = "Remove built documentation" -run = "rm -rf _build/docs" +description = "Remove Fern build cache" +run = "rm -rf .fern/" diff --git a/tasks/fern.toml b/tasks/fern.toml new file mode 100644 index 00000000..3627c0ac --- /dev/null +++ b/tasks/fern.toml @@ -0,0 +1,13 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +# Fern documentation build and preview tasks +# Requires Node.js (mise installs it). Fern CLI runs via npx fern-api. + +["fern:build"] +description = "Build and publish Fern documentation" +run = "npx fern-api generate --docs" + +["fern:dev"] +description = "Run Fern docs preview server at localhost:3000" +run = "npx fern-api docs dev"