diff --git a/client/main.js b/client/main.js
index a9e6339..66a171e 100644
--- a/client/main.js
+++ b/client/main.js
@@ -54,6 +54,18 @@ function createMenu() {
       label: "Electron",
       submenu: [{ role: "toggleDevTools" }, { role: "quit" }],
     },
+    {
+      label: "Edit",
+      submenu: [
+        { role: "undo" },
+        { role: "redo" },
+        { type: "separator" },
+        { role: "cut" },
+        { role: "copy" },
+        { role: "paste" },
+        { role: "selectAll" },
+      ],
+    },
     {
       label: "Views",
       submenu: [
diff --git a/client/src/components/FilePickerModal.js b/client/src/components/FilePickerModal.js
index 05b2400..4ed2ff3 100644
--- a/client/src/components/FilePickerModal.js
+++ b/client/src/components/FilePickerModal.js
@@ -36,8 +36,7 @@ const FilePickerModal = ({
   const [loading, setLoading] = useState(false);
   const [previewStatus, setPreviewStatus] = useState({});
   const [onlyImages, setOnlyImages] = useState(false);
-  const previewBaseUrl =
-    apiClient.defaults.baseURL || "http://localhost:4242";
+  const previewBaseUrl = apiClient.defaults.baseURL || "http://localhost:4242";
 
   // Refactored fetch to get all files once
   const [allData, setAllData] = useState([]);
@@ -46,6 +45,7 @@
     if (visible) {
       setCurrentPath("root");
       setOnlyImages(false);
+      setPreviewStatus({});
       loadAllData();
     }
   }, [visible]);
@@ -180,12 +180,13 @@ const FilePickerModal = ({
   const isImageFile = (item) => {
     if (!item || item.is_folder) return false;
     if (item.type && item.type.startsWith("image/")) return true;
-    const ext = `.${String(item.name || "").split(".").pop()}`.toLowerCase();
+    const ext = `.${String(item.name || "")
+      .split(".")
+      .pop()}`.toLowerCase();
     return IMAGE_EXTENSIONS.has(ext);
   };
 
-  const getPreviewUrl = (item) =>
-    `${previewBaseUrl}/files/preview/${item.id}`;
+  const getPreviewUrl = (item) => `${previewBaseUrl}/files/preview/${item.id}`;
 
   const markPreviewLoaded = (id) => {
     setPreviewStatus((prev) => ({ ...prev, [id]: "loaded" }));
diff --git a/client/src/views/FilesManager.js b/client/src/views/FilesManager.js
index 4a6ad8d..c83231f 100644
--- a/client/src/views/FilesManager.js
+++ b/client/src/views/FilesManager.js
@@ -95,8 +95,7 @@ function FilesManager() {
   const containerRef = useRef(null);
   const itemRefs = useRef({});
   const isDragSelecting = useRef(false);
-  const previewBaseUrl =
-    apiClient.defaults.baseURL || "http://localhost:4242";
+  const previewBaseUrl = apiClient.defaults.baseURL || "http://localhost:4242";
 
   // Sidebar Resize Logic
   const [sidebarWidth, setSidebarWidth] = useState(250);
@@ -215,7 +214,9 @@ function FilesManager() {
   const isImageFile = (file) => {
     if (!file || file.is_folder) return false;
     if (file.type && file.type.startsWith("image/")) return true;
-    const ext = `.${String(file.name || "").split(".").pop()}`.toLowerCase();
+    const ext = `.${String(file.name || "")
+      .split(".")
+      .pop()}`.toLowerCase();
     return IMAGE_EXTENSIONS.has(ext);
   };
 
diff --git a/scripts/dev.sh b/scripts/dev.sh
index 9d161b1..1f6332b 100755
--- a/scripts/dev.sh
+++ b/scripts/dev.sh
@@ -8,31 +8,31 @@ DEFAULT_OLLAMA_BASE_URL="http://localhost:11434"
 DEFAULT_OLLAMA_MODEL="llama3.1:8b"
 
 if ! command -v uv >/dev/null 2>&1; then
-    echo "uv is required. Run scripts/bootstrap.sh first." >&2
-    exit 1
+  echo "uv is required. Run scripts/bootstrap.sh first." >&2
+  exit 1
 fi
 
 if ! command -v npm >/dev/null 2>&1; then
-    echo "npm is required to launch the Electron client." >&2
-    exit 1
+  echo "npm is required to launch the Electron client." >&2
+  exit 1
 fi
 
 cleanup() {
-    local exit_code=$?
-    if [[ -n "${API_PID:-}" ]] && ps -p "${API_PID}" >/dev/null 2>&1; then
-        kill "${API_PID}" >/dev/null 2>&1 || true
-    fi
-    if [[ -n "${PYTC_PID:-}" ]] && ps -p "${PYTC_PID}" >/dev/null 2>&1; then
-        kill "${PYTC_PID}" >/dev/null 2>&1 || true
-    fi
-    if [[ -n "${DATA_SERVER_PID:-}" ]] && ps -p "${DATA_SERVER_PID}" >/dev/null 2>&1; then
-        kill "${DATA_SERVER_PID}" >/dev/null 2>&1 || true
-    fi
-    if [[ -n "${REACT_PID:-}" ]] && ps -p "${REACT_PID}" >/dev/null 2>&1; then
-        kill "${REACT_PID}" >/dev/null 2>&1 || true
-    fi
-    wait || true
-    exit "${exit_code}"
+  local exit_code=$?
+  if [[ -n "${API_PID:-}" ]] && ps -p "${API_PID}" >/dev/null 2>&1; then
+    kill "${API_PID}" >/dev/null 2>&1 || true
+  fi
+  if [[ -n "${PYTC_PID:-}" ]] && ps -p "${PYTC_PID}" >/dev/null 2>&1; then
+    kill "${PYTC_PID}" >/dev/null 2>&1 || true
+  fi
+  if [[ -n "${DATA_SERVER_PID:-}" ]] && ps -p "${DATA_SERVER_PID}" >/dev/null 2>&1; then
+    kill "${DATA_SERVER_PID}" >/dev/null 2>&1 || true
+  fi
+  if [[ -n "${REACT_PID:-}" ]] && ps -p "${REACT_PID}" >/dev/null 2>&1; then
+    kill "${REACT_PID}" >/dev/null 2>&1 || true
+  fi
+  wait || true
+  exit "${exit_code}"
 }
 
 trap cleanup EXIT INT TERM
@@ -43,8 +43,8 @@ DATA_SERVER_PID=$!
 
 echo "Starting API server (port 4242)..."
 OLLAMA_BASE_URL="${OLLAMA_BASE_URL:-${DEFAULT_OLLAMA_BASE_URL}}" \
-OLLAMA_MODEL="${OLLAMA_MODEL:-${DEFAULT_OLLAMA_MODEL}}" \
-PYTHONDONTWRITEBYTECODE=1 uv run --directory "${ROOT_DIR}" python -m server_api.main &
+  OLLAMA_MODEL="${OLLAMA_MODEL:-${DEFAULT_OLLAMA_MODEL}}" \
+  PYTHONDONTWRITEBYTECODE=1 uv run --directory "${ROOT_DIR}" python -m server_api.main &
 API_PID=$!
 
 echo "Starting PyTC server (port 4243)..."
@@ -53,33 +53,33 @@ PYTC_PID=$!
 
 echo "Starting React dev server (port 3000)..."
 pushd "${CLIENT_DIR}" >/dev/null
-PORT=3000 BROWSER=none npm start > react.log 2>&1 &
+PORT=3000 BROWSER=none npm start >react.log 2>&1 &
 REACT_PID=$!
 
 # Robust readiness check with progress feedback
 wait_for_react() {
-    local max_attempts=60
-    local attempt=1
-    while [[ ${attempt} -le ${max_attempts} ]]; do
-        if curl -sf http://localhost:3000 >/dev/null 2>&1; then
-            echo "React dev server is ready!"
-            return 0
-        fi
-        echo "Waiting for React (attempt ${attempt}/${max_attempts})..."
-        attempt=$((attempt + 1))
-        sleep 1
-    done
-    echo "ERROR: React dev server failed to start within ${max_attempts} seconds" >&2
-    echo "Check client/react.log for details." >&2
-    return 1
+  local max_attempts=60
+  local attempt=1
+  while [[ ${attempt} -le ${max_attempts} ]]; do
+    if curl -sf http://localhost:3000 >/dev/null 2>&1; then
+      echo "React dev server is ready!"
+      return 0
+    fi
+    echo "Waiting for React (attempt ${attempt}/${max_attempts})..."
+    attempt=$((attempt + 1))
+    sleep 1
+  done
+  echo "ERROR: React dev server failed to start within ${max_attempts} seconds" >&2
+  echo "Check client/react.log for details." >&2
+  return 1
 }
 
 if wait_for_react; then
-    echo "Launching Electron client..."
-    ENVIRONMENT=development npm run electron
+  echo "Launching Electron client..."
+  ENVIRONMENT=development npm run electron
 else
-    echo "Failed to start React dev server" >&2
-    exit 1
+  echo "Failed to start React dev server" >&2
+  exit 1
 fi
 
 popd >/dev/null
diff --git a/server_api/chatbot/chatbot.py b/server_api/chatbot/chatbot.py
index 96ecd2f..03bc671 100644
--- a/server_api/chatbot/chatbot.py
+++ b/server_api/chatbot/chatbot.py
@@ -1,13 +1,13 @@
-"""Multi-agent chatbot system for PyTorch Connectomics.
+# Multi-agent chatbot system for PyTorch Connectomics.
 
-Architecture:
-- Supervisor Agent: Routes tasks to appropriate sub-agents
-- Training Agent: Handles config selection and training command generation
-- Inference Agent: Handles checkpoint listing and inference command generation
-- RAG: Documentation search via FAISS vector store
-"""
+# Architecture:
+# - Supervisor Agent: Routes tasks to appropriate sub-agents
+# - Training Agent: Handles config selection and training command generation
+# - Inference Agent: Handles checkpoint listing and inference command generation
+# - RAG: Documentation search via FAISS vector store
 
 import os
+from pathlib import Path
 
 from langchain_ollama import OllamaEmbeddings, ChatOllama
 from langchain_community.vectorstores import FAISS
@@ -24,6 +24,11 @@
 
 You help users set up and configure training jobs for biomedical image segmentation.
 
+CRITICAL RULES:
+1. **Only report values that your tools return.** Do NOT invent hyperparameter values, config names, or file paths.
+2. **Always use tools before answering.** Call list_training_configs or read_config first — never guess.
+3. **Be concise.** Report the facts, generate the command, and stop.
+
 Tools:
 - list_training_configs: List available config files with descriptions
 - read_config: Read a config file to see its hyperparameters
@@ -36,21 +41,15 @@
 
 Command Format:
 ```
-cd /path/to/pytorch_connectomics
 python scripts/main.py --config <config_path> [OVERRIDES]
 ```
 
-Override Format (append to command):
-- SOLVER.BASE_LR=0.001  # Learning rate
-- SOLVER.SAMPLES_PER_BATCH=8  # Batch size
-- SOLVER.ITERATION_TOTAL=50000  # Total iterations
-- SYSTEM.NUM_GPUS=2  # Number of GPUs
-- MODEL.INPUT_SIZE=[32,256,256]  # Input dimensions
-
+Overrides use YAML key paths appended to the command: SECTION.KEY=value
 
 Example:
 ```
 python scripts/main.py --config configs/Lucchi-Mitochondria.yaml SOLVER.BASE_LR=0.001 SOLVER.SAMPLES_PER_BATCH=16
 ```
+Use read_config output to determine the correct key paths for any parameter.
 
 Always generate commands for the user to run - never execute directly."""
@@ -59,6 +58,11 @@
 
 You help users run inference and evaluation with trained segmentation models.
 
+CRITICAL RULES:
+1. **Only report values that your tools return.** Do NOT invent checkpoint paths, config names, or settings.
+2. **Always use tools before answering.** Call list_checkpoints or read_config first — never guess.
+3. **Be concise.** Report the facts, generate the command, and stop.
+
 Tools:
 - list_checkpoints: Find available trained model checkpoints
 - read_config: Read config to find default inference settings
@@ -70,55 +74,52 @@
 
 Command Format:
 ```
-cd /path/to/pytorch_connectomics
 python scripts/main.py --config <config_path> --checkpoint <checkpoint_path> --inference [OVERRIDES]
 ```
 
-Override Format (append to command):
-- INFERENCE.IMAGE_NAME=/path/to/test_image.h5  # Test volume
-- INFERENCE.OUTPUT_PATH=/path/to/output  # Output directory
-- INFERENCE.AUG_MODE=mean  # Enable test-time augmentation
-
+Overrides use YAML key paths appended to the command: SECTION.KEY=value
 
 Example:
 ```
-python scripts/main.py --config configs/Lucchi-Mitochondria.yaml --checkpoint outputs/Lucchi/checkpoint_100000.pth --inference
+python scripts/main.py --config configs/Lucchi-Mitochondria.yaml --checkpoint outputs/Lucchi/checkpoint_100000.pth --inference INFERENCE.OUTPUT_PATH=/path/to/output
 ```
+Use read_config output to determine the correct key paths for any parameter.
 
 Always generate commands for the user to run - never execute directly."""
 
 
-SUPERVISOR_PROMPT = """You are the **Supervisor Agent** for PyTorch Connectomics.
+SUPERVISOR_PROMPT = """You are the **Supervisor Agent** for PyTorch Connectomics (PyTC Client).
 
-You coordinate between specialized sub-agents to help users with biomedical image segmentation tasks.
+You help end users navigate and use the PyTC Client application.
 
-Sub-agents available:
-1. **Training Agent**: Config selection, training job setup, hyperparameter overrides
-2. **Inference Agent**: Checkpoint management, inference/evaluation commands
+ROUTING — decide which tool to use BEFORE calling anything:
+- **UI, navigation, features, shortcuts, workflows** → search_documentation
+- **Training config, hyperparameters, training commands** → delegate_to_training_agent
+- **Inference, checkpoints, evaluation commands** → delegate_to_inference_agent
+- **General/greeting/off-topic** → answer directly, no tool needed
 
-Your responsibilities:
-- Understand user requests and delegate to the right agent
-- For documentation/UI questions, use search_documentation directly
-- Pass context between agents when needed (e.g., after discussing training, help with inference)
-- Synthesize responses from sub-agents into clear user-friendly answers
+CRITICAL RULES:
+1. **For application questions, ground answers in retrieved documentation.** Call search_documentation and base your answer on the returned text. Do NOT invent features, shortcuts, buttons, or workflows.
+2. **Do not fabricate specifics.** Never make up keyboard shortcuts, button labels, or step-by-step instructions unless they come from retrieved docs or a sub-agent response.
+3. **Answer every part of the user's question.** If they ask about two things, address both.
+4. **Use retrieved content even if wording differs.** If the documentation describes relevant features or workflows, use that information to answer the question. Don't claim something isn't documented just because it uses different terminology than the user's question.
+5. **HARD LIMIT: You may call search_documentation EXACTLY 2 times per user question.** After the second call, you MUST answer with the information already retrieved. Do NOT attempt a third search. If the tool returns "Search limit reached", immediately stop and answer based on what you already have.
+6. **Delegate, don't search, for training/inference tasks.** If the user asks for a training command or inference command, use the appropriate sub-agent directly.
+
+Sub-agents:
+- **Training Agent**: Config selection, training job setup, hyperparameter overrides
+- **Inference Agent**: Checkpoint management, inference/evaluation commands
 
 Tools:
-- search_documentation: Search PyTC docs for UI guides and feature explanations
+- search_documentation: Search PyTC docs for UI guides and feature explanations. Use ONLY for questions about the application interface, pages, buttons, or workflows.
 - delegate_to_training_agent: Send training-related tasks to training agent
-- delegate_to_inference_agent: Send inference-related tasks to inference agent
-
-Routing guidelines:
-- "train", "training", "config", "hyperparameters", "learning rate", "batch size" → Training Agent
-- "inference", "predict", "evaluate", "checkpoint", "test", "model output" → Inference Agent
-- "how to", "where is", "what does", "UI", "interface" → search_documentation
-
-Provide clear, user-friendly responses. Focus on what users can do, not technical details."""
+- delegate_to_inference_agent: Send inference-related tasks to inference agent"""
 
 
 def build_chain():
     """Build the multi-agent system with supervisor, training, and inference agents."""
     ollama_base_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
     ollama_model = os.getenv("OLLAMA_MODEL", "mistral:latest")
-    ollama_embed_model = os.getenv("OLLAMA_EMBED_MODEL", "mistral:latest")
+    ollama_embed_model = os.getenv("OLLAMA_EMBED_MODEL", "qwen3-embedding:8b")
 
     llm = ChatOllama(model=ollama_model, base_url=ollama_base_url, temperature=0)
     embeddings = OllamaEmbeddings(model=ollama_embed_model, base_url=ollama_base_url)
     faiss_path = process_path("server_api/chatbot/faiss_index")
@@ -127,7 +128,22 @@ def build_chain():
         embeddings,
         allow_dangerous_deserialization=True,
     )
-    retriever = vectorstore.as_retriever()
+    retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
+
+    # Load all docs from markdown files for reliable keyword search
+    summaries_dir = Path(process_path("server_api/chatbot/file_summaries"))
+    _all_docs = {}
+    for md_file in summaries_dir.rglob("*.md"):
+        _all_docs[md_file.name] = md_file.read_text(encoding="utf-8")
+    print(
+        f"[SEARCH] Loaded {len(_all_docs)} docs for keyword search: {list(_all_docs.keys())}"
+    )
+
+    # Call counter to prevent infinite search loops (reset before each user message)
+    _search_call_count = [0]
+
+    def reset_search_counter():
+        _search_call_count[0] = 0
 
     training_agent = create_agent(
         model=llm,
@@ -153,10 +169,42 @@ def search_documentation(query: str) -> str:
 
         Returns:
             Relevant documentation content
         """
+        _search_call_count[0] += 1
+        print(
+            f"[TOOL] search_documentation(query={query!r}) [call {_search_call_count[0]}]"
+        )
+        if _search_call_count[0] > 2:
+            print("[TOOL] search limit reached (max 2 per question)")
+            return "Search limit reached. Please answer based on the documentation already retrieved."
+
+        # Primary: FAISS semantic search (chunked embeddings)
         docs = retriever.invoke(query)
-        if not docs:
-            return "No relevant documentation found."
-        return "\n\n".join([doc.page_content for doc in docs])
+        if docs:
+            sources = [d.metadata.get("source", "?") for d in docs]
+            print(f"[TOOL] RAG → {len(docs)} chunks: {sources}")
+            return "\n\n".join([doc.page_content for doc in docs])
+
+        # Fallback: keyword scoring against full docs
+        print("[TOOL] RAG returned nothing, trying keyword fallback")
+        query_lower = query.lower()
+        query_words = [w for w in query_lower.split() if len(w) > 2]
+        scored = []
+        for filename, content in _all_docs.items():
+            content_lower = content.lower()
+            name_lower = filename.replace(".md", "").lower()
+            word_hits = sum(1 for w in query_words if w in content_lower)
+            name_hits = sum(3 for w in query_words if w in name_lower)
+            score = word_hits + name_hits
+            if score > 0:
+                scored.append((score, filename, content))
+        scored.sort(key=lambda x: x[0], reverse=True)
+        if scored:
+            top = scored[:3]
+            print(f"[TOOL] keyword fallback → {len(top)} docs: {[s[1] for s in top]}")
+            return "\n\n".join([s[2] for s in top])
+
+        print("[TOOL] search_documentation → no results")
+        return "No relevant documentation found."
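+
+    # Worked example of the keyword-fallback scoring above, using a
+    # hypothetical query (illustration only; not part of the code path):
+    #   query = "upload files"  ->  query_words = ["upload", "files"]
+    #   FileManager.md content contains both words    -> word_hits = 2
+    #   its name "filemanager" contains neither word  -> name_hits = 0
+    #   total score = 2, so FileManager.md is returned if FAISS finds nothing.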
 
     @tool
     def delegate_to_training_agent(task: str) -> str:
@@ -170,11 +218,16 @@ def delegate_to_training_agent(task: str) -> str:
 
         Returns:
             Response from the training agent
         """
+        print(f"[TOOL] delegate_to_training_agent(task={task!r})")
         result = training_agent.invoke(
             {"messages": [{"role": "user", "content": task}]}
         )
         messages = result.get("messages", [])
-        return messages[-1].content if messages else "Training agent did not respond."
+        response = (
+            messages[-1].content if messages else "Training agent did not respond."
+        )
+        print(f"[TOOL] training_agent responded ({len(response)} chars)")
+        return response
 
     @tool
     def delegate_to_inference_agent(task: str) -> str:
@@ -188,11 +241,16 @@ def delegate_to_inference_agent(task: str) -> str:
 
         Returns:
             Response from the inference agent
         """
+        print(f"[TOOL] delegate_to_inference_agent(task={task!r})")
         result = inference_agent.invoke(
             {"messages": [{"role": "user", "content": task}]}
         )
         messages = result.get("messages", [])
-        return messages[-1].content if messages else "Inference agent did not respond."
+        response = (
+            messages[-1].content if messages else "Inference agent did not respond."
+        )
+        print(f"[TOOL] inference_agent responded ({len(response)} chars)")
+        return response
 
     supervisor_tools = [
         search_documentation,
@@ -206,4 +264,4 @@ def delegate_to_inference_agent(task: str) -> str:
         system_prompt=SUPERVISOR_PROMPT,
     )
 
-    return supervisor, None
+    return supervisor, reset_search_counter
diff --git a/server_api/chatbot/faiss_index/index.faiss b/server_api/chatbot/faiss_index/index.faiss
index bbc0f1e..ff53351 100644
Binary files a/server_api/chatbot/faiss_index/index.faiss and b/server_api/chatbot/faiss_index/index.faiss differ
diff --git a/server_api/chatbot/faiss_index/index.pkl b/server_api/chatbot/faiss_index/index.pkl
index efb059b..318d8b4 100644
Binary files a/server_api/chatbot/faiss_index/index.pkl and b/server_api/chatbot/faiss_index/index.pkl differ
diff --git a/server_api/chatbot/file_summaries/App.md b/server_api/chatbot/file_summaries/App.md
deleted file mode 100644
index 6203548..0000000
--- a/server_api/chatbot/file_summaries/App.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# client/src/App.js
-
-Root component of the PyTC Client application. Wraps the app in context providers and performs cache clearing on boot.
-
-## Structure
-
-```
-App
- ├── ContextWrapper (GlobalContext)
- │   └── YamlContextWrapper (YamlContext)
- │       └── CacheBootstrapper
- │           └── MainContent (Views)
-```
-
-## Components
-
-### `CacheBootstrapper`
-
-- Runs `resetFileState()` from `AppContext` on mount
-- Clears local cache (files, fileList, etc.) before rendering children
-- Renders nothing until cache is cleared; then renders main content
-
-### `MainContent`
-
-- Renders `Views` component (main application layout with tabs)
-
-## Contexts
-
-- **AppContext** — Global state (files, configs, paths, etc.)
-- **YamlContext** — YAML-specific state (GPUs, batch size, etc.)
-
-## Usage
-
-Used as the root in `index.js` via `ReactDOM.createRoot`.
diff --git a/server_api/chatbot/file_summaries/ErrorHandlingTool.md b/server_api/chatbot/file_summaries/ErrorHandlingTool.md
new file mode 100644
index 0000000..46d3323
--- /dev/null
+++ b/server_api/chatbot/file_summaries/ErrorHandlingTool.md
@@ -0,0 +1,132 @@
+# Error Handling Tool (EHT)
+
+The Error Handling Tool lets you detect and classify errors in image stacks. It is used for quality control of segmentation results — you load a dataset of image layers, review them visually, classify each layer as correct, incorrect, or unsure, and optionally edit masks to fix errors.
+
+## Getting Started: Loading a Dataset
+
+When you first open the Error Handling Tool tab, you see the **Load Dataset** form:
+
+1. **Project Name** — Enter a name for your project (defaults to "My Project").
+2. **Dataset Path** — Path to your image data on the server. Supports:
+   - Single TIFF file (2D or 3D stack)
+   - Directory of images (PNG, JPG, TIFF)
+   - Glob pattern (e.g., `/path/to/images/*.tif`)
+3. **Mask Path (Optional)** — Path to a corresponding mask file or directory, if available.
+4. Click **Load Dataset** to begin.
+
+After loading, the main detection interface appears.
+
+## Main Detection Interface Layout
+
+The interface has three panels:
+
+### Left Panel: Progress Tracker
+
+- **Project Info** — Shows the project name and total number of layers.
+- **Progress** — A progress bar showing how many layers have been reviewed out of the total, with a percentage.
+- **Classification Summary** — Four counters showing how many layers are classified as:
+  - Correct (green checkmark)
+  - Incorrect (red X)
+  - Unsure (yellow question mark)
+  - Unreviewed (gray exclamation mark)
+- **Proofread Incorrect Layers** button — Appears when there are layers marked as "incorrect." Clicking it opens the first incorrect layer in the image editor for detailed inspection and mask editing.
+- **Load New Dataset** button — Starts a new session by returning to the dataset loader form.
+
+### Center Panel: Layer Grid
+
+Layers are displayed as a paginated grid of thumbnail cards (12 per page, in a 3×4 layout). Each card shows:
+
+- A **thumbnail image** of the layer, with the mask overlaid semi-transparently if a mask exists.
+- A **classification ribbon** in the top-right corner showing the current status (Correct, Incorrect, Unsure, or Unreviewed) with a color-coded badge.
+- A **checkbox** in the top-left corner for selecting the layer (click the checkbox without opening the editor).
+- The **layer name** and **layer number** at the bottom of the card.
+
+**Interactions:**
+
+- **Click a card** to open the Image Inspection modal for detailed viewing and mask editing.
+- **Click the checkbox** to select/deselect a layer for bulk classification.
+- **Use the pagination controls** at the bottom to navigate between pages.
+
+### Right Panel: Classification Panel
+
+- **Selected count** — A tag showing how many layers are currently selected (e.g., "3 layers selected").
+- **Classification buttons:**
+  - **Correct (C)** — Green button. Classify selected layers as correct.
+  - **Incorrect (X)** — Red button. Classify selected layers as incorrect.
+  - **Unsure (U)** — Yellow button. Classify selected layers as unsure.
+- **Selection buttons:**
+  - **Select All (Ctrl+A)** — Select all layers on the current page.
+  - **Clear Selection** — Deselect all layers.
+- **Keyboard Shortcuts** reference card at the bottom.
+
+## Keyboard Shortcuts (Main Grid)
+
+These shortcuts work when the main grid is visible (not when the image editor modal is open) and you are not typing in an input field:
+
+| Shortcut | Action                                |
+| -------- | ------------------------------------- |
+| C        | Classify selected layers as Correct   |
+| X        | Classify selected layers as Incorrect |
+| U        | Classify selected layers as Unsure    |
+| Ctrl+A   | Select all layers on the current page |
+
+## Image Inspection Modal
+
+Click on any layer card to open a full-screen modal for detailed inspection. The modal title shows the layer name and number (e.g., "Image Inspection: layer_042.tif (Layer 43)").
+
+### Modal Header Controls
+
+- **Classification radio buttons** — Toggle between Correct (C), Incorrect (X), and Unsure (U) to set the classification for this individual layer.
+- **Save (S)** button — Saves the current mask edits and classification, then closes the modal.
+
+### Modal Layout
+
+The modal contains two areas:
+
+**Left Panel: Tools**
+
+- **Minimap** — A small overview of the full image. Click anywhere on the minimap to jump the main canvas to that location. A red rectangle shows the current viewport.
+- **Mode** — Three tool buttons:
+  - **Paint (P)** — Draw on the mask to add regions.
+  - **Erase (E)** — Remove regions from the mask.
+  - **Hand (H)** — Pan the canvas without drawing.
+- **Paint/Erase Size** — A slider (1–64) and number input to adjust the brush size. Shown only when Paint or Erase mode is active.
+- **History:**
+  - **Undo (Ctrl+Z)** — Undo the last brush stroke.
+  - **Redo (Ctrl+Shift+Z or Ctrl+Y)** — Redo an undone stroke.
+- **Hide/Show Mask** — Toggle the mask overlay visibility.
+- **Zoom** — Shows the current zoom percentage. Buttons to zoom in, zoom out, or reset to 100%.
+
+**Center: Canvas**
+
+The main editing area displays the image with the mask overlay. Interactions:
+
+- **Scroll wheel** — Zoom in/out (zooms toward the cursor position).
+- **Click and drag** with Paint or Erase tool to draw or erase mask regions. A circular cursor preview follows the mouse showing the brush size.
+- **Ctrl+click and drag** or use the **Hand tool** to pan the canvas.
+
+### Image Editor Keyboard Shortcuts
+
+| Shortcut                   | Action                          |
+| -------------------------- | ------------------------------- |
+| P                          | Switch to Paint mode            |
+| E                          | Switch to Erase mode            |
+| H                          | Switch to Hand (pan) mode       |
+| C                          | Set classification to Correct   |
+| X                          | Set classification to Incorrect |
+| U                          | Set classification to Unsure    |
+| Ctrl+Z / Cmd+Z             | Undo                            |
+| Ctrl+Shift+Z / Cmd+Shift+Z | Redo                            |
+| Ctrl+Y / Cmd+Y             | Redo (alternative)              |
+| Ctrl+S / Cmd+S             | Save mask and classification    |
+| Escape                     | Close the modal                 |
+
+## Typical Workflow
+
+1. Load a dataset using the **Load Dataset** form.
+2. Review layers in the grid. Use the checkboxes to select batches of obviously correct or incorrect layers.
+3. Press **C**, **X**, or **U** to classify selected layers in bulk.
+4. Click on questionable layers to open the Image Inspection modal for closer examination.
+5. In the modal, use the Paint/Erase tools to correct the mask if needed, set the classification, and press **Save**.
+6. Use the **Proofread Incorrect Layers** button to revisit layers you marked as incorrect.
+7. Monitor your progress in the Progress Tracker on the left.
diff --git a/server_api/chatbot/file_summaries/FileManager.md b/server_api/chatbot/file_summaries/FileManager.md new file mode 100644 index 0000000..8fe1dc1 --- /dev/null +++ b/server_api/chatbot/file_summaries/FileManager.md @@ -0,0 +1,93 @@ +# File Manager + +The File Manager lets you browse, upload, organize, and manage files and folders on the server. It is the central hub for working with your data before visualization, training, or inference. + +## Layout + +The File Manager page has three main areas: + +1. **Sidebar (left)** — A collapsible folder tree showing your mounted project directories. Click a folder to navigate into it. You can drag the right edge of the sidebar to resize it, or click the collapse button to hide it entirely. + +2. **Toolbar (top of main area)** — Contains action buttons and a breadcrumb trail showing your current path. Click any segment in the breadcrumb to jump back to that folder. + +3. **File/Folder Grid or List (center)** — Displays the contents of the current folder. You can switch between **Grid View** (icon cards) and **List View** (table with name, size, modified date) using the view toggle buttons in the toolbar. + +## Mounting and Unmounting Projects + +Before you can browse files, you need to mount a project directory: + +1. Click the **Mount Project** button in the toolbar. +2. Enter the server path to the directory you want to mount (e.g., `/data/my_project`). +3. The directory will appear in the sidebar as a top-level folder. + +To unmount a project, right-click it in the sidebar and select **Unmount**, or use the **Unmount Project** option in the toolbar. + +## Browsing Files + +- **Navigate into a folder**: Double-click a folder in the main area, or click it in the sidebar tree. +- **Go back**: Click a parent folder in the breadcrumb trail, or click the **Up** button. +- **Refresh**: Click the **Refresh** button in the toolbar to reload the current folder's contents. + +## Creating Files and Folders + +- Click the **New Folder** button in the toolbar and enter a name to create a new folder. +- Click the **Upload** button in the toolbar to upload files from your local computer to the current server directory. + +## Selecting Items + +- **Single select**: Click a file or folder to select it. +- **Multi-select**: Hold **Ctrl** (or **Cmd** on Mac) and click multiple items, or hold **Shift** and click to select a range. +- **Select All**: Press **Ctrl+A** (or **Cmd+A**) to select all items in the current view. + +## Context Menu (Right-Click) + +Right-click on a file or folder to see available actions: + +- **Open** — Open a folder or preview a file +- **Rename** — Change the name of the file or folder +- **Copy** — Copy the item to the clipboard +- **Cut** — Cut the item (move it when pasted) +- **Paste** — Paste copied/cut items into the current folder +- **Delete** — Permanently delete the selected item(s) +- **Properties** — View details such as file size, path, and modification date + +You can also right-click on empty space in the main area to access folder-level actions like **New Folder** and **Paste**. + +## Drag and Drop + +- **Move files/folders**: Drag items from the main area and drop them onto a folder in the sidebar or main area to move them. +- **Upload from desktop**: Drag files from your desktop or file explorer and drop them into the main area to upload them to the current server directory. + +## File Preview + +Click on a file (single click or via the context menu **Open** action) to preview it. 
Supported preview types include images (PNG, JPG, TIFF) and text files. + +## Toolbar Actions Summary + +| Button | Action | +| --------------------- | -------------------------------------------- | +| Mount Project | Add a server directory to the sidebar | +| Unmount Project | Remove a mounted directory from the sidebar | +| New Folder | Create a new folder in the current directory | +| Upload | Upload files from your local machine | +| Refresh | Reload the current folder contents | +| Grid View / List View | Toggle between icon grid and detail table | + +## Keyboard Shortcuts + +| Shortcut | Action | +| ------------------ | -------------------------------- | +| Ctrl+A / Cmd+A | Select all items | +| Delete / Backspace | Delete selected items | +| Ctrl+C / Cmd+C | Copy selected items | +| Ctrl+X / Cmd+X | Cut selected items | +| Ctrl+V / Cmd+V | Paste items | +| Enter | Open/navigate into selected item | + +## File Input Fields (Used Across the App) + +Throughout the application (e.g., when selecting image paths for training or visualization), file path inputs support three ways to choose a file or folder: + +1. **Type a path** — Manually type or paste a server path into the text field. +2. **Browse** — Click the folder icon on the left side of the input to open a file picker dialog. The picker shows your mounted server directories and lets you navigate and select files or folders. +3. **Drag and drop** — Drag a file from your desktop onto the input field to set its path. diff --git a/server_api/chatbot/file_summaries/GettingStarted.md b/server_api/chatbot/file_summaries/GettingStarted.md new file mode 100644 index 0000000..d215ce5 --- /dev/null +++ b/server_api/chatbot/file_summaries/GettingStarted.md @@ -0,0 +1,51 @@ +# Getting Started with PyTC Client + +PyTC Client (PyTorch Connectomics Client) is a desktop application for biomedical image segmentation. It provides tools for managing files, visualizing data in Neuroglancer, training and running inference with deep learning models, proofreading synapse annotations, and detecting errors in image stacks. + +## Launching the Application + +When you first open PyTC Client, a **Change Views** dialog appears. This lets you choose which workflow tabs to enable. The available workflows are: + +- **File Management** — Browse, upload, and organize files on the server +- **Visualization** — View image and label data in Neuroglancer +- **Model Training** — Configure and launch training jobs +- **Model Inference** — Run inference with trained models +- **Tensorboard** — Monitor training metrics in real time +- **SynAnno** — Proofread synapse annotations +- **Worm Error Handling** — Detect and classify errors in worm image stacks + +Check the workflows you want, then click **Launch Selected**. You can change your selection later by clicking **Change Views** in the top navigation bar. + +## Application Layout + +The application has three main areas: + +1. **Top Navigation Bar** — Displays the PyTC logo, application title, and a row of tabs for each enabled workflow. Click a tab to switch between pages. The bar also includes a **Change Views** button and an **AI Chat** toggle button. + +2. **Main Content Area** — Shows the currently selected workflow page (e.g., File Manager, Visualization, Model Training). + +3. **AI Chat Drawer** — A collapsible panel on the right side of the screen. Click the **AI Chat** button in the top bar to open or close it. You can drag the left edge of the chat drawer to resize it. 
Use the chat to ask questions about the application, get help with workflows, or request training/inference commands. + +## Using the AI Chat + +The chat panel appears as a sliding drawer on the right. To use it: + +1. Click the **AI Chat** button in the top navigation bar to open the drawer. +2. Type your question in the text input at the bottom and press **Enter** or click **Send**. +3. The assistant will respond with guidance based on the application's documentation. +4. Click **Clear Chat** to start a new conversation. + +The chat supports markdown formatting, including tables, code blocks, and lists. + +## Keyboard Shortcuts (Global) + +These standard editing shortcuts work throughout the application: + +| Shortcut | Action | +| ----------- | ---------- | +| Cmd+C | Copy | +| Cmd+V | Paste | +| Cmd+X | Cut | +| Cmd+A | Select All | +| Cmd+Z | Undo | +| Cmd+Shift+Z | Redo | diff --git a/server_api/chatbot/file_summaries/ModelInference.md b/server_api/chatbot/file_summaries/ModelInference.md new file mode 100644 index 0000000..e6a4fe6 --- /dev/null +++ b/server_api/chatbot/file_summaries/ModelInference.md @@ -0,0 +1,66 @@ +# Model Inference Page + +The Model Inference page lets you run inference (prediction) using a trained segmentation model. It uses the same 3-step configuration wizard as Model Training, with slight differences in the required inputs and available settings. + +## 3-Step Configuration Wizard + +### Step 1: Set Inputs + +Specify the file paths for your inference data: + +- **Input Image** — Path to the image data you want to run inference on (file or directory on the server). Click the folder icon to browse, type a path, or drag and drop. +- **Input Label** — Path to ground-truth labels (optional, used if you want to evaluate accuracy). +- **Output Path** — Directory where inference results will be saved. +- **Checkpoint Path** — Path to the trained model checkpoint file (e.g., `/path/to/checkpoint_100000.pth.tar`). This is the model that will be used for prediction. + +All fields except Input Label are required before you can proceed. + +### Step 2: Base Configuration + +Choose a YAML configuration file, just like in training: + +- **Upload YAML File** — Upload a config from your local machine. +- **Choose a preset config** — Select a preset from the server dropdown. + +Once loaded, you will see: + +- **Loaded** indicator and **Revert to preset** option (if modified). +- **Effective dataset paths** summary. +- **Model architecture** dropdown. +- **Sliders** for quick parameter adjustment: + - **Batch size** (1–32) + - **Augmentations** (1–16) — Number of test-time augmentations to average over. + +### Step 3: Advanced Configuration + +Fine-tune inference-specific parameters: + +**Common inference knobs:** + +- Batch size +- Augmentations (AUG_NUM) +- Blending mode (gaussian or constant) +- Eval mode (on/off — whether to compute evaluation metrics) + +**Inference (advanced):** + +- Run singly (process volumes one at a time) +- Unpad output (remove padding from output) +- Augment mode (mean or max — how to combine augmented predictions) +- Test count (number of test volumes) + +Each setting is displayed as a dropdown, number input, or toggle switch. + +**Open raw YAML** — Opens a full-screen YAML text editor modal for direct editing. Includes **Format YAML** and **Copy** buttons. + +Click **Done** to save the configuration. + +## Starting and Stopping Inference + +After completing the wizard: + +1. Click **Start Inference** to launch the inference job on the server. +2. 
The page shows the current inference status. +3. Click **Stop Inference** at any time to terminate the job. + +The inference status is polled automatically so you can monitor progress without refreshing. diff --git a/server_api/chatbot/file_summaries/ModelTraining.md b/server_api/chatbot/file_summaries/ModelTraining.md new file mode 100644 index 0000000..b872af1 --- /dev/null +++ b/server_api/chatbot/file_summaries/ModelTraining.md @@ -0,0 +1,98 @@ +# Model Training Page + +The Model Training page lets you configure and launch deep learning training jobs for biomedical image segmentation using PyTorch Connectomics. Setup follows a guided 3-step process, after which you can start and monitor your training run. + +## 3-Step Configuration Wizard + +The configuration wizard (stepper) walks you through three steps. You must complete each step before advancing to the next. Click **Next** to advance and **Previous** to go back. + +### Step 1: Set Inputs + +Specify the file paths for your training data: + +- **Input Image** — Path to your training image data (file or directory on the server). Click the folder icon to browse, type a path, or drag and drop. +- **Input Label** — Path to the corresponding ground-truth labels. +- **Output Path** — Directory where training outputs (checkpoints, logs) will be saved. +- **Log Path** — Directory for training logs (used by TensorBoard for monitoring). + +All four fields are required before you can proceed. The application will show a warning listing any missing fields if you try to advance without filling them in. + +### Step 2: Base Configuration + +Choose a starting YAML configuration file for your training job: + +- **Upload YAML File** — Click the **Upload YAML File** button to upload a configuration file from your local machine. +- **Choose a preset config** — Select from a dropdown of preset configurations available on the server (e.g., `Lucchi-Mitochondria.yaml`, `CREMI-Synapse.yaml`). + +Once a config is loaded, you will see: + +- **Loaded** indicator showing which file or preset is active. If you modify settings, a **Modified** label appears with a **Revert to preset** link to restore the original values. +- **Effective dataset paths** — A summary box showing the common folder, image name, label name, and output path that will be written into the YAML config. +- **Model architecture** dropdown — Select the neural network architecture (e.g., `unet_super`, `fpn`). The available options are fetched from the server. 
+- **Sliders** for quick adjustment of common parameters: + - **Batch size** (1–32) + - **GPUs** (0–8) + - **CPUs** (1–16) + +### Step 3: Advanced Configuration + +Fine-tune detailed training parameters using structured controls organized into sections: + +**Common training knobs:** + +- Optimizer (SGD, Adam, AdamW) +- LR scheduler (MultiStepLR, CosineAnnealingLR, StepLR) +- Learning rate +- Batch size +- Total iterations +- Save interval (how often to save checkpoints) +- Validation interval + +**System:** + +- Distributed training (on/off) +- Parallel mode (DP or DDP) +- Debug mode (on/off) + +**Model:** + +- Block type (residual, plain) +- Backbone (resnet, repvgg, botnet) +- Normalization (bn, sync_bn, in, gn, none) +- Activation (relu, elu, leaky) +- Pooling layer (on/off) +- Mixed precision (on/off) +- Aux output (on/off) + +**Dataset:** + +- 2D dataset (on/off) +- Load 2D slices (on/off) +- Isotropic data (on/off) +- Drop channels (on/off) +- Reduce labels (on/off) +- Ensure min size (on/off) +- Pad mode (reflect, constant, symmetric) + +**Solver (advanced):** + +- Weight decay +- Momentum +- Clip gradients (on/off) +- Clip value + +Each setting is displayed as a dropdown, number input, or toggle switch. + +**Open raw YAML** — Click this button at the bottom to open a full-screen YAML text editor modal where you can directly edit the raw YAML configuration. The modal includes a **Format YAML** button to auto-format the text and a **Copy** button to copy the YAML to your clipboard. If there is a syntax error, a red warning appears. + +When finished, click **Done** to save the configuration. + +## Starting and Stopping Training + +After completing the 3-step wizard: + +1. Click **Start Training** to launch the training job on the server. +2. The page displays the current training status (e.g., "Training in progress…"). +3. Click **Stop Training** at any time to terminate the running job. + +The training status is polled automatically so you can see updates without refreshing. You can switch to the **Tensorboard** tab to monitor training metrics like loss curves in real time. diff --git a/server_api/chatbot/file_summaries/Monitoring.md b/server_api/chatbot/file_summaries/Monitoring.md new file mode 100644 index 0000000..8e96888 --- /dev/null +++ b/server_api/chatbot/file_summaries/Monitoring.md @@ -0,0 +1,20 @@ +# TensorBoard Monitoring Page + +The Monitoring page displays a live TensorBoard dashboard embedded directly inside the application. TensorBoard is used to track and visualize training metrics such as loss curves, learning rate schedules, and validation scores. + +## How It Works + +1. Navigate to the **Tensorboard** tab in the top navigation bar. +2. If a training job is running (or has been run), TensorBoard will load automatically and display inside the page. +3. The dashboard shows the standard TensorBoard interface with all its features — scalar plots, image samples, histograms, etc. + +## Requirements + +- TensorBoard monitoring requires an active or completed training run with logs saved to the output directory. +- The server automatically starts a TensorBoard instance pointed at your training output directory when you launch training from the Model Training page. + +## Tips + +- If the TensorBoard panel appears blank, make sure a training job has been started at least once and that the output path was set correctly in the Model Training configuration. 
+- TensorBoard updates in real time as new training data is written, so you can watch loss curves evolve during training. +- You can interact with TensorBoard normally — zoom into charts, toggle runs on/off, and switch between the Scalars, Images, and other TensorBoard tabs within the embedded view. diff --git a/server_api/chatbot/file_summaries/Proofreading.md b/server_api/chatbot/file_summaries/Proofreading.md new file mode 100644 index 0000000..a970404 --- /dev/null +++ b/server_api/chatbot/file_summaries/Proofreading.md @@ -0,0 +1,59 @@ +# SynAnno Proofreading Page + +The SynAnno (Synapse Annotation) proofreading page lets you review and classify predicted synapse detections. It displays a Neuroglancer 3D viewer alongside a list of synapses and classification controls, enabling efficient annotation workflows. + +## Layout + +The proofreading page is divided into three panels: + +1. **Synapse List (left panel)** — A scrollable list of all synapses in the current project. Each entry shows: + - **Synapse ID** (e.g., "Synapse #1") + - **Position** coordinates (x, y, z) + - **Confidence** score (percentage) + - **Status icon** — A colored icon indicating the current classification: + - Green checkmark = Correct + - Red X = Incorrect + - Yellow question mark = Unsure + - No icon = Unreviewed + - A **progress bar** at the top showing how many synapses have been reviewed out of the total. + +2. **Neuroglancer Viewer (center panel)** — A 3D viewer displaying the image volume. When you select a synapse, the viewer centers on that synapse's location. A **Refresh** button appears in the top-right corner to reload the viewer. The current synapse ID is displayed next to the refresh button. + +3. **Proofreading Controls (right panel)** — Controls for classifying the selected synapse and editing metadata. + +## Reviewing Synapses + +1. Click on any synapse in the **Synapse List** to select it. The list highlights the selected synapse with a blue background and left border. The Neuroglancer viewer navigates to that synapse's 3D position. + +2. In the **Proofreading Controls** panel, you will see: + - **Synapse info** — The synapse ID, position, and confidence score. + - **Status Classification** buttons: + - **Correct (C)** — Green button. Mark the synapse as a true positive. + - **Incorrect (X)** — Red button. Mark the synapse as a false positive. + - **Unsure (U)** — Yellow button. Mark the synapse as uncertain. + - **Pre-synaptic Neuron ID** — A text input to enter or edit the pre-synaptic neuron ID number. + - **Post-synaptic Neuron ID** — A text input to enter or edit the post-synaptic neuron ID number. + +3. After setting the classification and optionally entering neuron IDs, save your work: + - Click **Save (S)** to save the current synapse's classification and neuron IDs. + - Click **Save & Next (→)** to save and automatically advance to the next synapse in the list. + +## Keyboard Shortcuts + +These shortcuts work when the proofreading page is active and you are not typing in an input field: + +| Shortcut | Action | +| --------------- | --------------------------------- | +| C | Mark current synapse as Correct | +| X | Mark current synapse as Incorrect | +| U | Mark current synapse as Unsure | +| S | Save current synapse | +| Arrow Right (→) | Move to the next synapse | +| Arrow Left (←) | Move to the previous synapse | + +## Workflow Tips + +- Use **Save & Next** (or press **S** then **→**) for rapid sequential review. 
+- The progress bar at the top of the synapse list helps you track how many synapses you have reviewed. +- You can click any synapse in the list at any time to jump to it — you do not have to review them in order. +- Neuron ID fields are optional and can be filled in during a second pass. diff --git a/server_api/chatbot/file_summaries/Visualization.md b/server_api/chatbot/file_summaries/Visualization.md new file mode 100644 index 0000000..1a0bf69 --- /dev/null +++ b/server_api/chatbot/file_summaries/Visualization.md @@ -0,0 +1,31 @@ +# Visualization Page + +The Visualization page lets you view your image and label data in Neuroglancer, a web-based 3D viewer for volumetric data. You can open multiple viewers in separate tabs to compare different datasets side by side. + +## How to Visualize Data + +1. Navigate to the **Visualization** tab in the top navigation bar. +2. Fill in the following fields: + - **Image** — Path to your image data file or directory on the server. You can type a path, click the folder icon to browse server files, or drag and drop a file onto the field. + - **Label** — (Optional) Path to the corresponding label/segmentation data. + - **Scales (z,y,x)** — The voxel resolution of your data, entered as three comma-separated numbers (e.g., `1,1,1` or `30,8,8`). This tells Neuroglancer how to scale the data for correct 3D rendering. +3. Click the **Visualize** button. + +A new Neuroglancer viewer tab will open below the input fields, displaying your data. + +## Managing Viewer Tabs + +- Each time you click **Visualize** with a new set of inputs, a new viewer tab is created. +- Click on a viewer tab to switch between open viewers. +- Each viewer tab has a **Refresh** button (circular arrow icon) in the top-right corner to reload the viewer if needed. +- Close a viewer tab by clicking the close (×) button on the tab. + +## Empty State + +When no viewers are open, the page shows a message prompting you to enter image and label paths and click **Visualize** to get started. + +## Tips + +- Make sure your image and label files are accessible on the server. Use the File Manager to upload or verify file paths. +- The **Scales** field defaults to `1,1,1`. Adjust it to match your dataset's actual voxel resolution for correct spatial rendering. +- Neuroglancer supports common volumetric formats including HDF5, TIFF stacks, and Zarr. diff --git a/server_api/chatbot/file_summaries/WormErrorHandling.md b/server_api/chatbot/file_summaries/WormErrorHandling.md new file mode 100644 index 0000000..c0788bf --- /dev/null +++ b/server_api/chatbot/file_summaries/WormErrorHandling.md @@ -0,0 +1,41 @@ +# Worm Error Handling Page + +The Worm Error Handling page provides the same Error Handling Tool interface for detecting and classifying errors, specifically tailored for worm segmentation image stacks. + +## How It Works + +The Worm Error Handling tab uses the same full Error Handling Tool workflow described in the Error Handling Tool documentation. This includes: + +- **Loading a dataset** — Enter a project name, dataset path, and optional mask path, then click **Load Dataset**. +- **Layer grid** — Browse paginated layer thumbnails, select layers with checkboxes, and classify them in bulk. +- **Classification** — Use the **Correct (C)**, **Incorrect (X)**, and **Unsure (U)** buttons or keyboard shortcuts to classify layers. +- **Image Inspection modal** — Click any layer to open a full-screen editor with Paint, Erase, and Hand tools for mask correction, plus undo/redo, zoom, and a minimap. 
+- **Progress tracking** — Monitor how many layers have been reviewed with the Progress Tracker panel. + +## Keyboard Shortcuts + +All keyboard shortcuts are identical to the Error Handling Tool: + +**Main Grid:** +| Shortcut | Action | +|----------|--------| +| C | Classify selected layers as Correct | +| X | Classify selected layers as Incorrect | +| U | Classify selected layers as Unsure | +| Ctrl+A | Select all layers on the current page | + +**Image Editor Modal:** +| Shortcut | Action | +|----------|--------| +| P | Paint mode | +| E | Erase mode | +| H | Hand (pan) mode | +| C | Set classification to Correct | +| X | Set classification to Incorrect | +| U | Set classification to Unsure | +| Ctrl+Z / Cmd+Z | Undo | +| Ctrl+Shift+Z / Cmd+Shift+Z | Redo | +| Ctrl+S / Cmd+S | Save | +| Escape | Close modal | + +For the complete workflow guide, see the **Error Handling Tool** documentation. diff --git a/server_api/chatbot/file_summaries/api.md b/server_api/chatbot/file_summaries/api.md deleted file mode 100644 index d1df7ba..0000000 --- a/server_api/chatbot/file_summaries/api.md +++ /dev/null @@ -1,55 +0,0 @@ -# client/src/api.js - -Central API client for the PyTC Client frontend. Exposes functions for all backend communication. - -## API Base URL - -`${REACT_APP_SERVER_PROTOCOL || "http"}://${REACT_APP_SERVER_URL || "localhost:4242"}` - -## Exports - -### `apiClient` - -Axios instance with base URL, `withCredentials: true`. Used for general requests (e.g. `/files`, `/files/upload`). - -### Visualization - -- **`getNeuroglancerViewer(image, label, scales)`** — Launches Neuroglancer viewer. Accepts file objects or path strings. Uses FormData for browser uploads. - -### File Checks - -- **`checkFile(file)`** — POST to `/check_files` to detect if file is likely a label (heuristic). - -### Generic - -- **`makeApiRequest(url, method, data)`** — HTTP request helper with JSON Content-Type. - -### Model Training - -- **`startModelTraining(trainingConfig, logPath, outputPath)`** — Injects OUTPUT_PATH into YAML config and POSTs to `/start_model_training` -- **`stopModelTraining()`** -- **`getTrainingStatus()`** -- **`getTensorboardURL()`** - -### Model Inference - -- **`startModelInference(inferenceConfig, outputPath, checkpointPath)`** — Injects OUTPUT_PATH into YAML, sets NUM_GPUS=1, POSTs to `/start_model_inference` -- **`stopModelInference()`** -- **`getInferenceStatus()`** - -### Chatbot - -- **`queryChatBot(query)`** — POST to `/chat/query` -- **`clearChat()`** — POST to `/chat/clear` - -### Config Presets - -- **`getConfigPresets()`** — GET `/pytc/configs` -- **`getConfigPresetContent(path)`** — GET `/pytc/config` -- **`getModelArchitectures()`** — GET `/pytc/architectures` - -## Helper Functions - -- `buildFilePath(file)` — Extracts path from various file object shapes ( Ant Design Upload, folderPath+name, etc.) -- `hasBrowserFile(file)` — Checks if file is a browser File object -- `handleError(error)` — Throws errors with response detail when available diff --git a/server_api/chatbot/file_summaries/components/Chatbot.md b/server_api/chatbot/file_summaries/components/Chatbot.md deleted file mode 100644 index c4cf73b..0000000 --- a/server_api/chatbot/file_summaries/components/Chatbot.md +++ /dev/null @@ -1,25 +0,0 @@ -# client/src/components/Chatbot.js - -AI assistant panel for helping users navigate PyTC Client. Renders a chat UI with message history, Markdown rendering, and server-backed responses. - -## Props - -- **`onClose`** — Callback when user closes the chat (e.g. 
drawer) - -## Features - -- **Message persistence** — Saves messages to `localStorage` under `chatMessages` -- **Markdown rendering** — Uses `react-markdown` with `remarkGfm` for lists, tables, code blocks -- **Keyboard shortcut** — Enter (without Shift) sends message -- **Clear chat** — Popconfirm to clear; calls `clearChat()` API and resets to initial greeting - -## API Calls - -- `queryChatBot(query)` — Sends user message, displays response -- `clearChat()` — Clears server-side history and local state - -## UI Layout - -- Header: "AI Assistant" title, Clear button, Close button -- Scrollable message list (user messages right-aligned, blue; bot messages left-aligned, gray) -- TextArea + Send button at bottom diff --git a/server_api/chatbot/file_summaries/components/Configurator.md b/server_api/chatbot/file_summaries/components/Configurator.md deleted file mode 100644 index 8557fe2..0000000 --- a/server_api/chatbot/file_summaries/components/Configurator.md +++ /dev/null @@ -1,30 +0,0 @@ -# client/src/components/Configurator.js - -Stepper-based configuration workflow for **training** or **inference**. Drives users through setting inputs, base config, and advanced config. - -## Props - -- **`fileList`** — File list (from context or props) -- **`type`** — `"training"` or `"inference"` - -## Steps - -1. **Set Inputs** — `InputSelector` (image, label, output path, log/checkpoint path) -2. **Base Configuration** — `YamlFileUploader` (preset or upload) -3. **Advanced Configuration** — `YamlFileEditor` (tweak YAML controls) - -## Validation - -- Prevents advancing if required inputs are missing -- Shows warning alert: "Before you continue, add: ..." -- Missing inputs: input image, input label, output path; log path (training) or checkpoint path (inference) -- Missing base config: "base configuration (preset or upload)" - -## Persistence - -- Step index saved to `localStorage` under `configStep:${type}` -- On "Done": saves `trainingConfig` or `inferenceConfig` to `localStorage` - -## Dependencies - -- `AppContext` — inputImage, inputLabel, outputPath, logPath, checkpointPath, trainingConfig, inferenceConfig diff --git a/server_api/chatbot/file_summaries/components/FilePickerModal.md b/server_api/chatbot/file_summaries/components/FilePickerModal.md deleted file mode 100644 index 1c6f45c..0000000 --- a/server_api/chatbot/file_summaries/components/FilePickerModal.md +++ /dev/null @@ -1,27 +0,0 @@ -# client/src/components/FilePickerModal.js - -Modal for browsing and selecting files or directories from server storage. Uses the `/files` API to list items. 
- -## Props - -- **`visible`** — Whether modal is open -- **`onCancel`** — Close callback -- **`onSelect`** — Callback with selected item `{ ...item, logical_path }` -- **`title`** — Modal title (default: "Select File") -- **`selectionType`** — `"file"`, `"directory"`, or `"fileOrDirectory"` - -## Behavior - -- **Breadcrumb navigation** — Click to jump to parent folders -- **Up button** — Navigate to parent -- **Select Current Directory** — Visible when `selectionType` is `directory` or `fileOrDirectory`; selects current folder -- **File/Directory selection** — Click file or folder; for files, can use "Select" action or double-click -- **Sorting** — Folders first, then files by name - -## API - -- `GET /files` — Loads full file tree; filters client-side by current path - -## Note - -- Uses `physical_path` or constructs path from `path` + `name` for backend; `logical_path` for display diff --git a/server_api/chatbot/file_summaries/components/FileTreeSidebar.md b/server_api/chatbot/file_summaries/components/FileTreeSidebar.md deleted file mode 100644 index 2a31991..0000000 --- a/server_api/chatbot/file_summaries/components/FileTreeSidebar.md +++ /dev/null @@ -1,20 +0,0 @@ -# client/src/components/FileTreeSidebar.js - -Collapsible sidebar showing a tree of folders and files. Used by `FilesManager` for navigation. - -## Props - -- **`folders`** — Array of `{ key, title, parent }` -- **`files`** — Object mapping parent key to array of `{ key, name, size, type }` -- **`currentFolder`** — Currently selected folder key -- **`onSelect`** — Callback when folder is selected (receives key without `folder-` prefix) -- **`onDrop`** — Optional callback for drag-and-drop -- **`onContextMenu`** — Optional callback for right-click -- **`width`** — Sidebar width (default 250); 0 hides it - -## UI - -- "Explorer" header -- Ant Design `DirectoryTree` with folders (expandable) and files (leaf nodes) -- Icons: FolderFilled, FolderOpenFilled, FileOutlined -- Draggable nodes; `blockNode` layout diff --git a/server_api/chatbot/file_summaries/components/InputSelector.md b/server_api/chatbot/file_summaries/components/InputSelector.md deleted file mode 100644 index 87d7d5e..0000000 --- a/server_api/chatbot/file_summaries/components/InputSelector.md +++ /dev/null @@ -1,19 +0,0 @@ -# client/src/components/InputSelector.js - -Form for selecting input image, label, output path, and (depending on type) log path or checkpoint path. Used inside `Configurator` step 1. - -## Props - -- **`type`** — `"training"` or `"inference"` - -## Form Items - -- **Input Image** — `UnifiedFileInput` (file or directory for training/inference) -- **Input Label** — Same -- **Output Path** — Directory -- **Log Path** (training) — Directory for training logs -- **Checkpoint Path** (inference) — Path to model checkpoint (.pth.tar) - -## Context - -- Reads/writes `AppContext`: inputImage, inputLabel, outputPath, logPath, checkpointPath diff --git a/server_api/chatbot/file_summaries/components/NeuroglancerViewer.md b/server_api/chatbot/file_summaries/components/NeuroglancerViewer.md deleted file mode 100644 index f782dc9..0000000 --- a/server_api/chatbot/file_summaries/components/NeuroglancerViewer.md +++ /dev/null @@ -1,24 +0,0 @@ -# client/src/components/NeuroglancerViewer.js - -Loads and displays a Neuroglancer viewer in an iframe for a given project. Used in SynAnno / proofreading flow. 
-
-## Props
-
-- **`projectId`** — Project ID (default 1)
-- **`currentSynapse`** — Optional; shows synapse ID in UI for reference
-
-## API
-
-- `GET /api/synanno/ng-url/${projectId}` — Returns `{ url }` or `{ message }`
-
-## States
-
-- **Loading** — Spinner while fetching
-- **Error** — Alert + "Try Again" button
-- **Setup in Progress** — When URL not yet available; shows "Converting data to NIfTI format..."
-- **Ready** — iframe with Neuroglancer
-
-## UI
-
-- Refresh button (top-right)
-- Optional synapse ID display
diff --git a/server_api/chatbot/file_summaries/components/ProofreadingControls.md b/server_api/chatbot/file_summaries/components/ProofreadingControls.md
deleted file mode 100644
index 7a44921..0000000
--- a/server_api/chatbot/file_summaries/components/ProofreadingControls.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# client/src/components/ProofreadingControls.js
-
-Controls for classifying and editing a single synapse during proofreading. Displays status buttons, neuron ID inputs, and save actions.
-
-## Props
-
-- **`currentSynapse`** — Selected synapse object
-- **`onSave`** — Callback with `{ status, pre_neuron_id, post_neuron_id }`
-- **`onNext`** — Navigate to next synapse
-
-## UI Sections
-
-1. **Synapse info** — ID, position (x,y,z), confidence
-2. **Status Classification** — Correct (C), Incorrect (X), Unsure (U) buttons
-3. **Pre-synaptic Neuron ID** — Input
-4. **Post-synaptic Neuron ID** — Input
-5. **Actions** — Save (S), Save & Next (→)
-
-## Empty State
-
-Shows "No synapse selected" when `currentSynapse` is null.
diff --git a/server_api/chatbot/file_summaries/components/SynapseList.md b/server_api/chatbot/file_summaries/components/SynapseList.md
deleted file mode 100644
index e5072b2..0000000
--- a/server_api/chatbot/file_summaries/components/SynapseList.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# client/src/components/SynapseList.js
-
-Scrollable list of synapses with status icons and progress. Highlights the currently selected synapse.
-
-## Props
-
-- **`synapses`** — Array of synapse objects
-- **`currentIndex`** — Index of selected synapse
-- **`onSelectSynapse`** — Callback when user clicks a synapse
-- **`reviewedCount`** — Number of reviewed synapses (for progress bar)
-
-## Status Icons
-
-- `correct` — Green check
-- `incorrect` — Red X
-- `unsure` — Yellow question
-- `error` / default — No icon
-
-## Progress
-
-- Progress bar: `reviewedCount / totalErrors` where `totalErrors` = synapses with status `"error"`
-- Text: "X / Y reviewed"
-
-## Display
-
-- Each item: Synapse ID, position (x,y,z), confidence
-- Selected item: blue background, left border highlight
diff --git a/server_api/chatbot/file_summaries/components/UnifiedFileInput.md b/server_api/chatbot/file_summaries/components/UnifiedFileInput.md
deleted file mode 100644
index c042b0a..0000000
--- a/server_api/chatbot/file_summaries/components/UnifiedFileInput.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# client/src/components/UnifiedFileInput.js
-
-Unified file/directory input supporting text entry, drag-and-drop, and a file picker for local or server storage.
-
-## Props
-
-- **`value`** — Current value: string path or `{ path, display }`
-- **`onChange`** — Callback with `{ path, display }`
-- **`placeholder`**
-- **`style`**, **`disabled`**
-- **`selectionType`** — `"file"`, `"directory"`, or `"fileOrDirectory"` (default: `"file"`)
-
-## Input Methods
-
-1. **Text input** — Type path directly
-2. **Browse (folder icon)** — Opens "Select Source" modal:
-   - **Local Machine** — Electron `ipcRenderer.invoke("open-local-file", …)` for native dialog
-   - **Server Storage** — Opens `FilePickerModal`
-3. **Drag and drop** — Drop file/folder; uses `file.path` (Electron)
-
-## Features
-
-- Drag-over visual feedback
-- Display value can differ from `path` (e.g. logical vs physical path)
-
-## Dependencies
-
-- Electron for local file picker
-- `FilePickerModal` for server selection
diff --git a/server_api/chatbot/file_summaries/components/WorkflowSelector.md b/server_api/chatbot/file_summaries/components/WorkflowSelector.md
deleted file mode 100644
index 0b20bb0..0000000
--- a/server_api/chatbot/file_summaries/components/WorkflowSelector.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# client/src/components/WorkflowSelector.js
-
-Modal for selecting which workflows/tabs to show in the main app. Used on first launch and via "Change Views".
-
-## Props
-
-- **`visible`** — Whether modal is open
-- **`onSelect`** — Callback with array of selected mode keys
-- **`onCancel`** — Close callback
-
-## Options
-
-- File Management (`files`)
-- Visualization (`visualization`)
-- Model Training (`training`)
-- Model Inference (`inference`)
-- Tensorboard (`monitoring`)
-- SynAnno (`synanno`)
-- Worm Error Handling (`worm-error-handling`)
-
-## Default
-
-"files" is selected by default. User can select multiple; "Launch Selected" submits.
diff --git a/server_api/chatbot/file_summaries/components/YamlFileEditor.md b/server_api/chatbot/file_summaries/components/YamlFileEditor.md
deleted file mode 100644
index cba37ae..0000000
--- a/server_api/chatbot/file_summaries/components/YamlFileEditor.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# client/src/components/YamlFileEditor.js
-
-Advanced YAML configuration editor with structured controls (switches, selects, numbers) and a raw YAML modal.
-
-## Props
-
-- **`type`** — `"training"` or `"inference"`
-
-## Sections (CONTROL_SECTIONS)
-
-### Training
-
-- Common training knobs: Optimizer, LR scheduler, learning rate, batch size, iterations, save/validation intervals
-- System: Distributed, Parallel mode, Debug mode
-- Model: Block type, Backbone, Normalization, Activation, Pooling, Mixed precision, Aux output
-- Dataset: 2D dataset, Load 2D slices, Isotropic, Drop channels, Reduce labels, Ensure min size, Pad mode
-- Solver (advanced): Weight decay, Momentum, Clip gradients
-
-### Inference
-
-- Common: Batch size, Augmentations, Blending, Eval mode
-- Advanced: Run singly, Unpad output, Augment mode, Test count
-
-## Features
-
-- **Structured controls** — Switch, Select, InputNumber per YAML path
-- **Raw YAML modal** — "Open raw YAML" for full editing; Format, Copy buttons
-- **Validation** — Shows "YAML has a syntax error" when invalid
-- **Context sync** — Updates `AppContext` trainingConfig or inferenceConfig on change
-
-## Dependency
-
-- `AppContext` — trainingConfig, inferenceConfig, uploadedYamlFile, selectedYamlPreset
diff --git a/server_api/chatbot/file_summaries/components/YamlFileUploader.md b/server_api/chatbot/file_summaries/components/YamlFileUploader.md
deleted file mode 100644
index 9828dda..0000000
--- a/server_api/chatbot/file_summaries/components/YamlFileUploader.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# client/src/components/YamlFileUploader.js
-
-Base configuration step: upload YAML file or choose preset, set model architecture, and adjust sliders (batch size, GPUs, etc.).
-
-## Props
-
-- **`type`** — `"training"` or `"inference"`
-
-## Features
-
-- **Upload** — FileReader to parse YAML
-- **Preset select** — Fetches via `getConfigPresetContent(path)`
-- **Revert to preset** — Restores original preset when modified
-- **Effective dataset paths** — Shows common folder, image name, label name, output path (from AppContext)
-- **Model architecture** — Select from `getModelArchitectures()`
-- **Sliders** — Training: batch size, GPUs, CPUs; Inference: batch size, augmentations
-
-## Context Sync
-
-- Injects `DATASET.INPUT_PATH`, `IMAGE_NAME`, `LABEL_NAME`, `OUTPUT_PATH` from AppContext into loaded YAML
-- Syncs `YamlContext` (numGPUs, numCPUs, solverSamplesPerBatch, etc.) from parsed YAML
-
-## Dependencies
-
-- `AppContext`, `YamlContext`
-- `findCommonPartOfString` from utils
-- API: getConfigPresets, getConfigPresetContent, getModelArchitectures
diff --git a/server_api/chatbot/file_summaries/contexts/GlobalContext.md b/server_api/chatbot/file_summaries/contexts/GlobalContext.md
deleted file mode 100644
index 5236c22..0000000
--- a/server_api/chatbot/file_summaries/contexts/GlobalContext.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# client/src/contexts/GlobalContext.js
-
-Global application state via React Context. Persists file-related and config state to IndexedDB via `localforage`.
-
-## State Keys (FILE_CACHE_KEYS)
-
-- `files`, `fileList`, `imageFileList`, `labelFileList`
-- `currentImage`, `currentLabel`
-- `inputImage`, `inputLabel`
-- `outputPath`, `logPath`, `checkpointPath`
-- `trainingConfig`, `inferenceConfig`
-- `uploadedYamlFile`, `selectedYamlPreset` (selectedYamlPreset not persisted)
-- `viewer`, `tensorBoardURL`
-
-## Persistence
-
-- `usePersistedState(key, defaultValue)` — Loads from localforage on mount, saves on change
-- `sanitizePersistedState` — Removes volatile fields from file objects before save
-- `resetFileState` — Clears all file-related keys from localforage and resets to defaults
-
-## ContextWrapper
-
-Provides `AppContext.Provider` with all state and setters. Used at app root in App.js.
diff --git a/server_api/chatbot/file_summaries/contexts/YamlContext.md b/server_api/chatbot/file_summaries/contexts/YamlContext.md
deleted file mode 100644
index 54f18c1..0000000
--- a/server_api/chatbot/file_summaries/contexts/YamlContext.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# client/src/contexts/YamlContext.js
-
-Context for YAML-related UI state (GPUs, batch size, etc.). Used by `YamlFileUploader` to drive sliders and sync with loaded config.
-
-## State
-
-### Training
-
-- `numGPUs`, `numCPUs`
-- `solverSamplesPerBatch`, `learningRate`
-
-### Inference
-
-- `inferenceSamplesPerBatch`, `augNum`
-
-## YamlContextWrapper
-
-Provides `YamlContext.Provider`. Wraps children inside `AppContext` in App.js.
diff --git a/server_api/chatbot/file_summaries/main.md b/server_api/chatbot/file_summaries/main.md
deleted file mode 100644
index bed9130..0000000
--- a/server_api/chatbot/file_summaries/main.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# server_api/main.py
-
-FastAPI server that powers the PyTC Client application. Provides API endpoints for model training, inference, Neuroglancer visualization, file management, authentication, and an optional RAG chatbot.
-
-## Overview
-
-- **Framework**: FastAPI with CORS enabled
-- **Port**: 4242 (default)
-- **Database**: SQLAlchemy models (auth) created on startup
-- **Static Files**: `/uploads` directory mounted for file storage
-
-## Routers
-
-- **Auth** (`/`): User authentication
-- **SynAnno** (`/`): Synapse annotation endpoints
-- **EHTool** (`/eh`): Error handling tool (detection, classification, layers)
-
-## Key Endpoints
-
-### Health & Config
-
-- `GET /health` — Server status check
-- `GET /pytc/configs` — List PyTorch Connectomics config presets (YAML)
-- `GET /pytc/config?path=...` — Get config file content
-- `GET /pytc/architectures` — List model architectures from build.py
-
-### Model Training & Inference
-
-- `POST /start_model_training` — Proxy to PyTC server (localhost:4243)
-- `POST /stop_model_training` — Stop training
-- `GET /training_status` — Training status polling
-- `POST /start_model_inference` — Run inference
-- `POST /stop_model_inference` — Stop inference
-- `GET /get_tensorboard_url` — Returns TensorBoard URL (default: localhost:6006)
-
-### Visualization
-
-- `POST /neuroglancer` — Create Neuroglancer viewer for image/label volumes. Accepts JSON or multipart/form-data with image, label, scales.
-
-### Data & Files
-
-- `POST /check_files` — Heuristic check if file is a label (integer type, low unique values, or binary)
-
-### Chatbot (RAG)
-
-- `POST /chat/query` — Chat query (requires chatbot configured)
-- `POST /chat/clear` — Clear chat history
-- `GET /chat/status` — Check if chatbot is configured
-
-## Chatbot Notes
-
-- Chatbot is optional; server continues if dependencies fail
-- Lazy initialization via `_ensure_chatbot()` on first request
-- Returns 503 if chatbot not configured
-
-## Environment
-
-- `REACT_APP_SERVER_PROTOCOL = "http"`
-- `REACT_APP_SERVER_URL = "localhost:4243"` (PyTC server)
diff --git a/server_api/chatbot/file_summaries/views/EHTool.md b/server_api/chatbot/file_summaries/views/EHTool.md
deleted file mode 100644
index 253fbe0..0000000
--- a/server_api/chatbot/file_summaries/views/EHTool.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# client/src/views/EHTool.js
-
-Error Handling Tool wrapper. Renders header and `DetectionWorkflow` for detecting and classifying errors in image stacks.
-
-## Props
-
-- **`onStartProofreading`** — Callback when user starts proofreading
-- **`onSessionChange`** — Callback when session ID changes
-- **`refreshTrigger`** — Incremented to force refresh
-- **`savedSessionId`** — Persisted session ID to restore
-
-## Layout
-
-- Header: "Error Handling Tool" with description
-- Content: `DetectionWorkflow` with session management
diff --git a/server_api/chatbot/file_summaries/views/FilesManager.md b/server_api/chatbot/file_summaries/views/FilesManager.md
deleted file mode 100644
index 99fb799..0000000
--- a/server_api/chatbot/file_summaries/views/FilesManager.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# client/src/views/FilesManager.js
-
-File manager: tree sidebar, breadcrumb navigation, grid/list view, and full CRUD on files/folders.
-
-## Features
-
-- **Sidebar** — `FileTreeSidebar`; resizable; toggle visibility
-- **Breadcrumb** — Navigate up; click to jump
-- **View modes** — Grid or list
-- **Selection** — Single/multi (Ctrl, Shift); drag selection box
-- **Context menu** — New folder, Upload, Rename, Copy, Delete, Preview, Properties
-- **Drag & drop** — Internal move; external OS files → upload to current folder
-- **Keyboard** — Delete, Ctrl+C/X/V, Ctrl+A
-
-## API Endpoints
-
-- `GET /files` — List files/folders
-- `POST /files/folder` — Create folder
-- `PUT /files/:id` — Rename or move
-- `DELETE /files/:id` — Delete
-- `POST /files/upload` — Upload file
-- `POST /files/copy` — Copy file
-
-## State
-
-- folders, files (transformed from API), currentFolder
-- selectedItems, clipboard, editingItem, newItemType, tempName
-- contextMenu, previewFile, propertiesData, selectionBox
-- sidebarWidth, isSidebarVisible
diff --git a/server_api/chatbot/file_summaries/views/ModelInference.md b/server_api/chatbot/file_summaries/views/ModelInference.md
deleted file mode 100644
index 3d42aa2..0000000
--- a/server_api/chatbot/file_summaries/views/ModelInference.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# client/src/views/ModelInference.js
-
-Model inference view: Configurator + Start/Stop inference buttons.
-
-## Flow
-
-1. Configurator (InputSelector, YamlFileUploader, YamlFileEditor)
-2. Start Inference — Calls `startModelInference(inferenceConfig, outputPath, checkpointPath)`
-3. Uses `localStorage.inferenceConfig` and context for output/checkpoint paths
-4. Stop Inference — `stopModelInference`
-
-## Props
-
-- **`isInferring`**, **`setIsInferring`** — Lifted from parent; disables Start when running, Stop when not
-
-## Note
-
-- `context.uploadedYamlFile.name` passed to API (likely for compatibility); inference config comes from localStorage
diff --git a/server_api/chatbot/file_summaries/views/ModelTraining.md b/server_api/chatbot/file_summaries/views/ModelTraining.md
deleted file mode 100644
index 5777270..0000000
--- a/server_api/chatbot/file_summaries/views/ModelTraining.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# client/src/views/ModelTraining.js
-
-Model training view: Configurator + Start/Stop training buttons.
-
-## Flow
-
-1. Configurator (InputSelector, YamlFileUploader, YamlFileEditor)
-2. Start Training — Validates uploaded YAML, outputPath, logPath; calls `startModelTraining`
-3. Polls `getTrainingStatus` every 2s while training
-4. Stop Training — `stopModelTraining`
-
-## Validation
-
-- Requires `uploadedYamlFile`, `outputPath`, `logPath` before starting
-- Uses `localStorage.trainingConfig` or `context.trainingConfig`
-
-## Status Display
-
-Shows training status message (starting, monitoring, completed, error, stopped).
diff --git a/server_api/chatbot/file_summaries/views/Monitoring.md b/server_api/chatbot/file_summaries/views/Monitoring.md
deleted file mode 100644
index 4ec8b59..0000000
--- a/server_api/chatbot/file_summaries/views/Monitoring.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# client/src/views/Monitoring.js
-
-TensorBoard monitoring view. Fetches TensorBoard URL and displays it in an iframe.
-
-## API
-
-- `getTensorboardURL()` — Returns URL (default `http://localhost:6006/`)
-
-## UI
-
-- Full-width iframe, height 800px
-- Fetches URL on mount when not yet set
diff --git a/server_api/chatbot/file_summaries/views/ProofReading.md b/server_api/chatbot/file_summaries/views/ProofReading.md
deleted file mode 100644
index af3d209..0000000
--- a/server_api/chatbot/file_summaries/views/ProofReading.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# client/src/views/ProofReading.js
-
-SynAnno proofreading view: synapse list, Neuroglancer viewer, and proofreading controls.
-
-## Layout
-
-- **Left (15%)** — `SynapseList` (progress + clickable list)
-- **Center** — `NeuroglancerViewer` (project-based; `/api/synanno/ng-url/:projectId`)
-- **Right (15%)** — `ProofreadingControls`
-
-## API
-
-- `GET /api/projects/:projectId/synapses` — Fetch synapses
-- `GET /api/synanno/ng-url/:projectId` — Neuroglancer URL
-- `PUT /api/synapses/:id` — Update synapse (status, pre_neuron_id, post_neuron_id)
-
-## Keyboard Shortcuts
-
-- C — Correct
-- X — Incorrect
-- U — Unsure
-- Arrow Right — Next
-- Arrow Left — Previous
-- S — Save
-
-## States
-
-- Loading, empty (no synapses), main layout
diff --git a/server_api/chatbot/file_summaries/views/Views.md b/server_api/chatbot/file_summaries/views/Views.md
deleted file mode 100644
index add6405..0000000
--- a/server_api/chatbot/file_summaries/views/Views.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# client/src/views/Views.js
-
-Main view shell: horizontal menu tabs, content area, and optional AI chat drawer.
-
-## Tabs
-
-- File Management, Visualization, Model Training, Model Inference, Tensorboard, SynAnno, Worm Error Handling
-- Tabs can be shown/hidden via `WorkflowSelector` (first launch or "Change Views")
-- Only visited tabs render content (lazy)
-
-## Persistence
-
-- Reads `workflow_preference.json` from `/files` API to restore tab selection
-- Saves preferences via POST to `/files/upload` when user selects workflows
-- Polls `/health` until API ready before loading preferences
-
-## IPC (Electron)
-
-- `toggle-tab` — Show/hide tab by key
-- `change-views` — Open `WorkflowSelector` modal
-
-## Chat Drawer
-
-- Right-side Drawer with resizable width (280–800px)
-- Chatbot button in header; `Chatbot` component inside drawer
-- `destroyOnClose` so chat state resets on close
diff --git a/server_api/chatbot/file_summaries/views/Visualization.md b/server_api/chatbot/file_summaries/views/Visualization.md
deleted file mode 100644
index 82e7d53..0000000
--- a/server_api/chatbot/file_summaries/views/Visualization.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# client/src/views/Visualization.js
-
-Neuroglancer visualization view. Users select image and label, set scales, and open Neuroglancer viewers in tabs.
-
-## Props
-
-- **`viewers`**, **`setViewers`** — Lifted state; array of `{ key, title, viewer }` (viewer = URL)
-
-## Inputs
-
-- Image — `UnifiedFileInput`
-- Label — `UnifiedFileInput`
-- Scales (z,y,x) — Text input (default "30,6,6")
-
-## Flow
-
-1. User selects image/label, clicks "Visualize"
-2. `getNeuroglancerViewer(imagePath, labelPath, scalesArray)` → server returns viewer URL
-3. URL rewritten to use `localhost` instead of server host
-4. New viewer added to `viewers`; tab created
-5. Tabs are editable (close); Refresh button per tab
-
-## Empty State
-
-"InboxOutlined" icon + "Select an image and click Visualize to get started"
diff --git a/server_api/chatbot/file_summaries/views/WormErrorHandling.md b/server_api/chatbot/file_summaries/views/WormErrorHandling.md
deleted file mode 100644
index d91c305..0000000
--- a/server_api/chatbot/file_summaries/views/WormErrorHandling.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# client/src/views/WormErrorHandling.js
-
-Container for the Worm Error Handling workflow. Wraps `EHTool` and manages session state.
-
-## State
-
-- `ehToolSession` — Current session ID (passed to EHTool as `savedSessionId`)
-- `refreshTrigger` — Incremented when proofreading starts; forces EHTool refresh
-
-## Props to EHTool
-
-- `refreshTrigger`, `savedSessionId`, `onSessionChange`, `onStartProofreading`
diff --git a/server_api/chatbot/file_summaries/views/ehtool/ClassificationPanel.md b/server_api/chatbot/file_summaries/views/ehtool/ClassificationPanel.md
deleted file mode 100644
index 20626a5..0000000
--- a/server_api/chatbot/file_summaries/views/ehtool/ClassificationPanel.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# client/src/views/ehtool/ClassificationPanel.js
-
-Right-side panel for classifying selected layers in the detection workflow.
-
-## Props
-
-- **`selectedCount`** — Number of selected layers
-- **`onClassify`** — Callback with "correct" | "incorrect" | "unsure"
-- **`onSelectAll`** — Select all layers
-- **`onClearSelection`** — Clear selection
-
-## UI
-
-- Tag: "X layer(s) selected"
-- Buttons: Correct (C), Incorrect (X), Unsure (U) — disabled when none selected
-- Selection: Select All (Ctrl+A), Clear Selection
-- Keyboard shortcuts help section
diff --git a/server_api/chatbot/file_summaries/views/ehtool/DatasetLoader.md b/server_api/chatbot/file_summaries/views/ehtool/DatasetLoader.md
deleted file mode 100644
index ede5bf8..0000000
--- a/server_api/chatbot/file_summaries/views/ehtool/DatasetLoader.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# client/src/views/ehtool/DatasetLoader.js
-
-Form to load a dataset for the Error Handling Tool. Collects project name, dataset path, and optional mask path.
-
-## Props
-
-- **`onLoad`** — Callback `(datasetPath, maskPath, projectName)`
-- **`loading`** — Disables submit button
-
-## Form Fields
-
-- **Project Name** — Required (default "My Project")
-- **Dataset Path** — Required; `UnifiedFileInput` (file, directory, or glob)
-- **Mask Path** — Optional; `UnifiedFileInput`
-
-## Supported Formats
-
-- Single TIFF (2D or 3D)
-- Directory of images (PNG, JPG, TIFF)
-- Glob pattern (e.g. `*.tif`)
diff --git a/server_api/chatbot/file_summaries/views/ehtool/DetectionWorkflow.md b/server_api/chatbot/file_summaries/views/ehtool/DetectionWorkflow.md
deleted file mode 100644
index 34c995a..0000000
--- a/server_api/chatbot/file_summaries/views/ehtool/DetectionWorkflow.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# client/src/views/ehtool/DetectionWorkflow.js
-
-Main detection workflow for the Error Handling Tool. Load dataset → browse layers → classify → proofread incorrect layers.
-
-## Props
-
-- **`sessionId`**, **`setSessionId`** — Session state
-- **`refreshTrigger`** — Forces reload when changed
-
-## Flow
-
-1. **No session** — Show `DatasetLoader`
-2. **With session** — Three-panel layout:
-   - Left: `ProgressTracker` (stats, progress, "Proofread Incorrect", "Load New Dataset")
-   - Center: `LayerGrid` (paginated layers; click to inspect, checkbox to select)
-   - Right: `ClassificationPanel` (Correct/Incorrect/Unsure, Select All, Clear)
-3. **UnifiedImageEditor** — Modal for mask editing + classification when layer clicked
-
-## API
-
-- `POST /eh/detection/load` — Load dataset; returns session_id, project_name, total_layers
-- `GET /eh/detection/layers` — Layers with pagination, include_images
-- `GET /eh/detection/stats` — Progress stats
-- `POST /eh/detection/classify` — Classify selected layers
-
-## Keyboard Shortcuts
-
-- C — Correct (selected layers)
-- X — Incorrect
-- U — Unsure
-- Ctrl+A — Select all
-
-## Page Size
-
-12 layers per page (3x4 grid).
diff --git a/server_api/chatbot/file_summaries/views/ehtool/LayerGrid.md b/server_api/chatbot/file_summaries/views/ehtool/LayerGrid.md
deleted file mode 100644
index bc6a84d..0000000
--- a/server_api/chatbot/file_summaries/views/ehtool/LayerGrid.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# client/src/views/ehtool/LayerGrid.js
-
-Grid of layer cards with thumbnails, classification status, selection checkboxes, and pagination.
-
-## Props
-
-- **`layers`** — Array of layer objects (id, image_base64, mask_base64, layer_name, layer_index, classification)
-- **`selectedLayers`** — Array of selected layer IDs
-- **`onLayerSelect`** — Toggle selection (layerId)
-- **`onLayerClick`** — Open inspector/editor (layer)
-- **`currentPage`**, **`totalPages`**, **`onPageChange`**
-
-## UI
-
-- Cards with Badge.Ribbon showing classification (Correct/Incorrect/Unsure/Unreviewed)
-- Checkbox overlay for selection (click does not open editor)
-- Image + optional mask overlay (opacity 0.5)
-- Pagination at bottom (12 per page)
diff --git a/server_api/chatbot/file_summaries/views/ehtool/ProgressTracker.md b/server_api/chatbot/file_summaries/views/ehtool/ProgressTracker.md
deleted file mode 100644
index ac0715c..0000000
--- a/server_api/chatbot/file_summaries/views/ehtool/ProgressTracker.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# client/src/views/ehtool/ProgressTracker.js
-
-Left-side panel showing detection workflow progress and actions.
-
-## Props
-
-- **`stats`** — { progress_percent, reviewed, total, correct, incorrect, unsure, error }
-- **`projectName`**, **`totalLayers`**
-- **`onNewSession`** — Load new dataset
-- **`onStartProofreading`** — Start proofreading (opens first incorrect layer)
-
-## UI
-
-- Project Info card (name, layer count)
-- Progress bar (reviewed/total)
-- Classification Summary (Correct, Incorrect, Unsure, Unreviewed counts)
-- "Proofread Incorrect Layers" button (when incorrect > 0)
-- "Load New Dataset" button
diff --git a/server_api/chatbot/file_summaries/views/ehtool/ProofreadingEditor.md b/server_api/chatbot/file_summaries/views/ehtool/ProofreadingEditor.md
deleted file mode 100644
index 54afa39..0000000
--- a/server_api/chatbot/file_summaries/views/ehtool/ProofreadingEditor.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# client/src/views/ehtool/ProofreadingEditor.js
-
-Canvas-based image editor for mask correction. Paint, erase, pan, zoom, undo/redo.
-
-## Props
-
-- **`imageBase64`**, **`maskBase64`** — Image and mask data
-- **`onSave`** — Callback with base64 mask data
-- **`onNext`**, **`onPrevious`** — Layer navigation (optional)
-- **`currentLayer`**, **`totalLayers`**, **`layerName`**
-
-## Tools
-
-- **Paint (P)** — Draw white on mask
-- **Erase (E)** — Erase from mask
-- **Hand (H)** — Pan (or Ctrl+click / middle-click)
-
-## Features
-
-- Brush size (1–64) for paint/erase
-- Minimap with click-to-jump viewport
-- Undo/Redo (Ctrl+Z, Ctrl+Shift+Z, Ctrl+Y)
-- Show/Hide mask toggle
-- Zoom (wheel, buttons, Reset)
-- Custom brush cursor overlay
-- Ctrl+S to save
-
-## Keyboard
-
-- P, E, H — Switch tools
-- A / Arrow Left — Previous layer
-- D / Arrow Right — Next layer
diff --git a/server_api/chatbot/file_summaries/views/ehtool/UnifiedImageEditor.md b/server_api/chatbot/file_summaries/views/ehtool/UnifiedImageEditor.md
deleted file mode 100644
index d14f229..0000000
--- a/server_api/chatbot/file_summaries/views/ehtool/UnifiedImageEditor.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# client/src/views/ehtool/UnifiedImageEditor.js
-
-Modal combining mask editing (ProofreadingEditor) with quality classification. Used when user clicks a layer in the grid.
-
-## Props
-
-- **`visible`** — Whether modal is open
-- **`layer`** — Layer object (image_base64, mask_base64, layer_name, layer_index, id, classification)
-- **`sessionId`** — Current session
-- **`onClose`** — Close callback
-- **`onSaveSuccess`** — Called after save; typically triggers reload of layers/stats
-
-## Flow
-
-1. User edits mask in ProofreadingEditor
-2. User sets classification (Correct/Incorrect/Unsure) via radio buttons
-3. Save (Ctrl+S or button) → POST mask to `/eh/detection/mask`, POST classification to `/eh/detection/classify`
-4. onSaveSuccess → close
-
-## Keyboard
-
-- C, X, U — Set classification
-- Ctrl+S — Save
-- Escape — Close
diff --git a/server_api/chatbot/update_faiss.py b/server_api/chatbot/update_faiss.py
index 825ae42..15b9cab 100644
--- a/server_api/chatbot/update_faiss.py
+++ b/server_api/chatbot/update_faiss.py
@@ -1,38 +1,82 @@
-"""
-How to update faiss_index:
-    1. Delete the existing server_api/chatbot/file_summaries directory
-    2. Paste the following prompt into Cursor (or any AI IDE, in agent mode):
-
-       Create markdown files summarizing server_api/main.py and each file in client/src/*/** (including files in nested directories),
-       but don't create markdown files for index.js, utils.js, or any CSS files.
-       These markdown files will serve as the knowledge base for a RAG chatbot that helps end users navigate the frontend client.
-       Put these markdown files in a new server_api/chatbot/file_summaries directory.
-
-    3. Run this script:
-       python server_api/chatbot/update_faiss.py
-"""
+# How to update faiss_index:
+#   1. Edit the markdown files in server_api/chatbot/file_summaries/ as needed.
+#      These are end-user-focused guides (one per application page/feature) that
+#      serve as the knowledge base for the RAG chatbot.
+#   2. Run this script:
+#      python server_api/chatbot/update_faiss.py
+#
+# You can override the embeddings model and Ollama base URL via:
+#   - Environment variables: OLLAMA_EMBED_MODEL, OLLAMA_BASE_URL
+#   - CLI arguments: --model, --base-url
+import os
+import argparse
 from pathlib import Path
 
 from langchain_core.documents import Document
+from langchain_text_splitters import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import FAISS
 from langchain_ollama import OllamaEmbeddings
 
-script_directory = Path(__file__).parent.resolve()
-summaries_directory = script_directory / "file_summaries"
-faiss_directory = script_directory / "faiss_index"
-documents = []
-for md_file in summaries_directory.rglob("*.md"):
-    summary = md_file.read_text(encoding="utf-8")
-    relative_path = md_file.relative_to(summaries_directory)
-    documents.append(
-        Document(
-            page_content=summary,
-            metadata={"source": str(relative_path)},
+
+def main():
+    # Parse CLI arguments
+    parser = argparse.ArgumentParser(
+        description="Update FAISS index for RAG chatbot documentation search"
+    )
+    parser.add_argument(
+        "--model",
+        default=None,
+        help="Ollama embeddings model (default: from OLLAMA_EMBED_MODEL env or 'qwen3-embedding:8b')",
+    )
+    parser.add_argument(
+        "--base-url",
+        default=None,
+        help="Ollama base URL (default: from OLLAMA_BASE_URL env or 'http://localhost:11434')",
+    )
+    args = parser.parse_args()
+
+    # Use same defaults as build_chain() in chatbot.py
+    embed_model = args.model or os.getenv("OLLAMA_EMBED_MODEL", "qwen3-embedding:8b")
+    base_url = args.base_url or os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
+
+    print(f"Using embeddings model: {embed_model}")
+    print(f"Using Ollama base URL: {base_url}")
+
+    script_directory = Path(__file__).parent.resolve()
+    summaries_directory = script_directory / "file_summaries"
+    faiss_directory = script_directory / "faiss_index"
+
+    # Load full documents
+    documents = []
+    for md_file in summaries_directory.rglob("*.md"):
+        summary = md_file.read_text(encoding="utf-8")
+        relative_path = md_file.relative_to(summaries_directory)
+        documents.append(
+            Document(
+                page_content=summary,
+                metadata={"source": str(relative_path)},
+            )
         )
+
+    # Split into chunks for better embedding quality
+    text_splitter = RecursiveCharacterTextSplitter(
+        chunk_size=1000,
+        chunk_overlap=200,
+        add_start_index=True,
     )
-embeddings = OllamaEmbeddings(
-    model="mistral:latest", base_url="http://cscigpu08.bc.edu:11434"
-)
-vectorstore = FAISS.from_documents(documents, embeddings)
-faiss_directory.mkdir(parents=True, exist_ok=True)
-vectorstore.save_local(str(faiss_directory))
+    chunks = text_splitter.split_documents(documents)
+    print(f"Split {len(documents)} docs into {len(chunks)} chunks")
+    for c in chunks:
+        print(
+            f"  - {c.metadata['source']} (start={c.metadata.get('start_index', '?')}, {len(c.page_content)} chars)"
+        )
+
+    embeddings = OllamaEmbeddings(model=embed_model, base_url=base_url)
+    vectorstore = FAISS.from_documents(chunks, embeddings)
+    faiss_directory.mkdir(parents=True, exist_ok=True)
+    vectorstore.save_local(str(faiss_directory))
+    print(f"FAISS index saved with {vectorstore.index.ntotal} vectors")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/server_api/main.py b/server_api/main.py
index 39f69e6..62c3f50 100644
--- a/server_api/main.py
+++ b/server_api/main.py
@@ -27,22 +27,23 @@
     _chatbot_error = exc
 
 chain = None
-memory = None
+_reset_search = None
+_chat_history = []
 
 
 def _ensure_chatbot():
-    global chain, memory, _chatbot_error
-    if chain is not None and memory is not None:
+    global chain, _reset_search, _chatbot_error
+    if chain is not None and _reset_search is not None:
         return True
     if build_chain is None:
         return False
     try:
-        chain, memory = build_chain()
+        chain, _reset_search = build_chain()
         _chatbot_error = None
         return True
     except Exception as exc:  # pragma: no cover - runtime config issue
        chain = None
-        memory = None
+        _reset_search = None
        _chatbot_error = exc
        return False
 
@@ -517,9 +518,16 @@ async def chat_query(req: Request):
         raise HTTPException(status_code=503, detail=detail)
     body = await req.json()
     query = body.get("query")
-    result = chain.invoke({"messages": [{"role": "user", "content": query}]})
+    if not isinstance(query, str) or not query.strip():
+        raise HTTPException(status_code=400, detail="Query must be a non-empty string.")
+    if _reset_search is not None:
+        _reset_search()
+    all_messages = _chat_history + [{"role": "user", "content": query}]
+    result = chain.invoke({"messages": all_messages})
     messages = result.get("messages", [])
     response = messages[-1].content if messages else "No response generated"
+    _chat_history.append({"role": "user", "content": query})
+    _chat_history.append({"role": "assistant", "content": response})
     return {"response": response}
 
 
@@ -530,9 +538,10 @@ async def clear_chat():
         if "_chatbot_error" in globals():
             detail = f"{detail}: {_chatbot_error}"
         raise HTTPException(status_code=503, detail=detail)
-    if memory is not None:
-        memory.clear()
-    return {"message": "Chat history cleared"}
+    if _reset_search is not None:
+        _reset_search()
+    _chat_history.clear()
+    return {"message": "Chat session reset"}
 
 
 @app.get("/chat/status")
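
A quick way to sanity-check the rebuilt index is to load it back with the same embedding settings and run a retrieval query. A minimal sketch, not part of the diff: the file name is hypothetical, and it assumes the defaults shown above (`qwen3-embedding:8b` served by Ollama on `localhost:11434`, the index saved in `faiss_index/` next to the script) plus the opt-in flag that recent `langchain_community` releases require for deserializing the pickled index metadata.

```python
# check_faiss.py -- hypothetical helper, not part of this diff.
import os
from pathlib import Path

from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings

# Mirror the defaults used by update_faiss.py / build_chain().
embeddings = OllamaEmbeddings(
    model=os.getenv("OLLAMA_EMBED_MODEL", "qwen3-embedding:8b"),
    base_url=os.getenv("OLLAMA_BASE_URL", "http://localhost:11434"),
)
index_dir = Path("server_api/chatbot/faiss_index")
# load_local needs an explicit opt-in because FAISS docstore metadata is pickled.
vectorstore = FAISS.load_local(
    str(index_dir), embeddings, allow_dangerous_deserialization=True
)
for doc in vectorstore.similarity_search("How do I start model training?", k=3):
    print(doc.metadata["source"], "->", doc.page_content[:80])
```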
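The reworked `/chat/query` and `/chat/clear` handlers can be exercised end to end with a short client script. Another hedged sketch: the endpoint paths, payload shape, the 400 on blank queries, and the "Chat session reset" message all come from the hunks above, while the file name and port 4242 (the API server's default) are assumptions.

```python
# chat_smoke_test.py -- hypothetical client-side check for the chat endpoints.
import requests

BASE = "http://localhost:4242"  # assumed default API server port

# Blank queries are now rejected before reaching the chain.
r = requests.post(f"{BASE}/chat/query", json={"query": "   "})
assert r.status_code == 400, r.text

# Two turns: the server prepends the accumulated _chat_history to each new
# question, so the second answer can draw on the first exchange.
for q in ["What does the Files Manager do?", "How do I rename a file there?"]:
    r = requests.post(f"{BASE}/chat/query", json={"query": q})
    r.raise_for_status()
    print(q, "->", r.json()["response"][:120])

# Reset retriever state and clear the accumulated history.
print(requests.post(f"{BASE}/chat/clear").json())  # {"message": "Chat session reset"}
```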