diff --git a/.env.example b/.env.example index 391f0c463d..76f11bcd45 100644 --- a/.env.example +++ b/.env.example @@ -178,3 +178,4 @@ DATABASE_NAME=baserow # BASEROW_EMBEDDINGS_API_URL=http://embeddings # BASEROW_ENTERPRISE_ASSISTANT_LLM_MODEL=groq/openai/gpt-oss-120b # Needs GROQ_API_KEY env var set too +# BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE=0.3 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 301cbd89d9..3eeafddd67 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,6 +20,7 @@ env: IMAGE_REPO: ${{ github.repository }} CI_IMAGE_TAG_PREFIX: ci- DEVELOP_BRANCH_NAME: develop + REAL_GITHUB_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} jobs: check-build-and-publish: @@ -77,7 +78,7 @@ jobs: id: image run: | IMAGE_NAME="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend_dev" - IMAGE_TAG="${{ env.CI_IMAGE_TAG_PREFIX }}${{ github.sha }}" + IMAGE_TAG="${{ env.CI_IMAGE_TAG_PREFIX }}${{ env.REAL_GITHUB_SHA }}" FULL_IMAGE="${IMAGE_NAME}:${IMAGE_TAG}" echo "full=${FULL_IMAGE}" >> $GITHUB_OUTPUT @@ -109,7 +110,7 @@ jobs: cache-to: type=inline labels: | org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} - org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.revision=${{ env.REAL_GITHUB_SHA }} org.opencontainers.image.created=${{ github.event.head_commit.timestamp }} build-frontend: @@ -138,7 +139,7 @@ jobs: id: image run: | IMAGE_NAME="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend_dev" - IMAGE_TAG="${{ env.CI_IMAGE_TAG_PREFIX }}${{ github.sha }}" + IMAGE_TAG="${{ env.CI_IMAGE_TAG_PREFIX }}${{ env.REAL_GITHUB_SHA }}" FULL_IMAGE="${IMAGE_NAME}:${IMAGE_TAG}" echo "full=${FULL_IMAGE}" >> $GITHUB_OUTPUT @@ -170,7 +171,7 @@ jobs: cache-to: type=inline labels: | org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} - org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.revision=${{ env.REAL_GITHUB_SHA }} org.opencontainers.image.created=${{ github.event.head_commit.timestamp }} # ========================================================================== @@ -609,6 +610,7 @@ jobs: timeout-minutes: 60 runs-on: ubuntu-latest needs: + - detect-changes - build-backend - build-frontend - backend-lint @@ -910,12 +912,12 @@ jobs: file: backend/Dockerfile push: true tags: | - ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:ci-tested-${{ github.sha }} - cache-from: type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend_dev:ci-${{ github.sha }} + ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:ci-tested-${{ env.REAL_GITHUB_SHA }} + cache-from: type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend_dev:ci-${{ env.REAL_GITHUB_SHA }} cache-to: type=inline labels: | org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} - org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.revision=${{ env.REAL_GITHUB_SHA }} org.opencontainers.image.title=backend org.opencontainers.image.created=${{ github.event.head_commit.timestamp }} @@ -949,12 +951,12 @@ jobs: file: web-frontend/Dockerfile push: true tags: | - ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:ci-tested-${{ github.sha }} - cache-from: type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend_dev:ci-${{ github.sha }} + ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:ci-tested-${{ env.REAL_GITHUB_SHA }} + cache-from: type=registry,ref=${{ 
env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend_dev:ci-${{ env.REAL_GITHUB_SHA }} cache-to: type=inline labels: | org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} - org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.revision=${{ env.REAL_GITHUB_SHA }} org.opencontainers.image.title=web-frontend org.opencontainers.image.created=${{ github.event.head_commit.timestamp }} @@ -987,14 +989,14 @@ jobs: file: deploy/all-in-one/Dockerfile push: true build-args: | - FROM_BACKEND_IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:ci-tested-${{ github.sha }} - FROM_WEBFRONTEND_IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:ci-tested-${{ github.sha }} + FROM_BACKEND_IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:ci-tested-${{ env.REAL_GITHUB_SHA }} + FROM_WEBFRONTEND_IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:ci-tested-${{ env.REAL_GITHUB_SHA }} tags: | - ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:ci-tested-${{ github.sha }} + ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:ci-tested-${{ env.REAL_GITHUB_SHA }} cache-to: type=inline labels: | org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} - org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.revision=${{ env.REAL_GITHUB_SHA }} org.opencontainers.image.title=baserow org.opencontainers.image.created=${{ github.event.head_commit.timestamp }} @@ -1026,13 +1028,13 @@ jobs: file: deploy/cloudron/Dockerfile push: true build-args: | - FROM_IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:ci-tested-${{ github.sha }} + FROM_IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:ci-tested-${{ env.REAL_GITHUB_SHA }} tags: | - ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/cloudron:ci-tested-${{ github.sha }} + ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/cloudron:ci-tested-${{ env.REAL_GITHUB_SHA }} cache-to: type=inline labels: | org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} - org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.revision=${{ env.REAL_GITHUB_SHA }} org.opencontainers.image.title=cloudron org.opencontainers.image.created=${{ github.event.head_commit.timestamp }} @@ -1071,7 +1073,7 @@ jobs: - name: Create and push develop-latest image on Docker Hub run: | - SOURCE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:ci-tested-${{ github.sha }} + SOURCE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:ci-tested-${{ env.REAL_GITHUB_SHA }} TARGET=${{ env.RELEASE_DOCKER_REPOSITORY }}/backend:develop-latest echo "Publishing $SOURCE → $TARGET" docker buildx imagetools create -t $TARGET $SOURCE @@ -1111,7 +1113,7 @@ jobs: - name: Create and push develop-latest image on Docker Hub run: | - SOURCE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:ci-tested-${{ github.sha }} + SOURCE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:ci-tested-${{ env.REAL_GITHUB_SHA }} TARGET=${{ env.RELEASE_DOCKER_REPOSITORY }}/web-frontend:develop-latest echo "Publishing $SOURCE → $TARGET" docker buildx imagetools create -t $TARGET $SOURCE @@ -1151,7 +1153,7 @@ jobs: - name: Create and push develop-latest image on Docker Hub run: | - SOURCE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:ci-tested-${{ github.sha }} + SOURCE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:ci-tested-${{ env.REAL_GITHUB_SHA }} TARGET=${{ env.RELEASE_DOCKER_REPOSITORY }}/baserow:develop-latest echo "Publishing $SOURCE → $TARGET" docker buildx 
imagetools create -t $TARGET $SOURCE @@ -1191,7 +1193,7 @@ jobs: - name: Create and push develop-latest image on Docker Hub run: | - SOURCE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/cloudron:ci-tested-${{ github.sha }} + SOURCE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/cloudron:ci-tested-${{ env.REAL_GITHUB_SHA }} TARGET=${{ env.RELEASE_DOCKER_REPOSITORY }}/cloudron:develop-latest echo "Publishing $SOURCE → $TARGET" docker buildx imagetools create -t $TARGET $SOURCE @@ -1224,11 +1226,11 @@ jobs: run: | echo "🧩 Generating updated image references..." - BACKEND_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:ci-tested-${{ github.sha }}" - BACKEND_DEV_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend_dev:ci-${{ github.sha }}" - WEB_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:ci-tested-${{ github.sha }}" - WEB_DEV_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend_dev:ci-${{ github.sha }}" - ALL_IN_ONE_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:ci-tested-${{ github.sha }}" + BACKEND_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:ci-tested-${{ env.REAL_GITHUB_SHA }}" + BACKEND_DEV_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend_dev:ci-${{ env.REAL_GITHUB_SHA }}" + WEB_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:ci-tested-${{ env.REAL_GITHUB_SHA }}" + WEB_DEV_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend_dev:ci-${{ env.REAL_GITHUB_SHA }}" + ALL_IN_ONE_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:ci-tested-${{ env.REAL_GITHUB_SHA }}" echo "$BACKEND_IMAGE" > plugins/saas/backend/build_from_image.version echo "$BACKEND_DEV_IMAGE" > plugins/saas/backend/build_from_dev_image.version diff --git a/.github/workflows/publish-release-images.yml b/.github/workflows/publish-release-images.yml new file mode 100644 index 0000000000..c27980680c --- /dev/null +++ b/.github/workflows/publish-release-images.yml @@ -0,0 +1,318 @@ +name: Publish Release Images + +on: + push: + tags: + - "*" + +env: + REGISTRY: ghcr.io + IMAGE_REPO: ${{ github.repository }} + RELEASE_DOCKER_REGISTRY: ${{ secrets.RELEASE_DOCKER_REGISTRY }} + RELEASE_DOCKER_REPOSITORY: ${{ secrets.RELEASE_DOCKER_REPOSITORY }} + RELEASE_DOCKER_USERNAME: ${{ secrets.RELEASE_DOCKER_USERNAME }} + RELEASE_DOCKER_PASSWORD: ${{ secrets.RELEASE_DOCKER_PASSWORD }} + CI_TESTED_TAG_SUFFIX: ci-tested-${{ github.sha }} + +jobs: + build-backend-arm64-release-image: + name: Build backend arm64 image (GHCR) + runs-on: ubuntu-24.04-arm + permissions: + contents: read + packages: write + steps: + - name: Checkout code at tag + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push backend arm64 image to GHCR + uses: docker/build-push-action@v5 + with: + context: . 
+ file: backend/Dockerfile + push: true + platforms: linux/arm64 + tags: | + ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:${{ env.CI_TESTED_TAG_SUFFIX }}-arm64 + cache-from: | + type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:${{ env.CI_TESTED_TAG_SUFFIX }} + cache-to: type=inline + + publish-backend-release-tagged-image: + name: Publish backend:${{ github.ref_name }} (multi-arch) + runs-on: ubuntu-latest + needs: + - build-backend-arm64-release-image + permissions: + contents: read + packages: read + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Log in to Release Docker Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.RELEASE_DOCKER_REGISTRY }} + username: ${{ env.RELEASE_DOCKER_USERNAME }} + password: ${{ env.RELEASE_DOCKER_PASSWORD }} + + - name: Create multi-arch backend:${{ github.ref_name }} in release registry + run: | + AMD64=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:${{ env.CI_TESTED_TAG_SUFFIX }} + ARM64=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:${{ env.CI_TESTED_TAG_SUFFIX }}-arm64 + TARGET=${{ env.RELEASE_DOCKER_REPOSITORY }}/backend:${{ github.ref_name }} + + echo "Creating multi-arch image:" + echo " $TARGET" + echo "from:" + echo " $AMD64" + echo " $ARM64" + + docker buildx imagetools create -t "$TARGET" "$AMD64" "$ARM64" + + build-webfrontend-arm64-release-image: + name: Build web-frontend arm64 image (GHCR) + runs-on: ubuntu-24.04-arm + permissions: + contents: read + packages: write + steps: + - name: Checkout code at tag + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push web-frontend arm64 image to GHCR + uses: docker/build-push-action@v5 + with: + context: . 
+ file: web-frontend/Dockerfile + push: true + platforms: linux/arm64 + tags: | + ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:${{ env.CI_TESTED_TAG_SUFFIX }}-arm64 + cache-from: | + type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:${{ env.CI_TESTED_TAG_SUFFIX }} + cache-to: type=inline + + publish-webfrontend-release-tagged-image: + name: Publish web-frontend:${{ github.ref_name }} (multi-arch) + runs-on: ubuntu-latest + needs: + - build-webfrontend-arm64-release-image + permissions: + contents: read + packages: read + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Log in to Release Docker Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.RELEASE_DOCKER_REGISTRY }} + username: ${{ env.RELEASE_DOCKER_USERNAME }} + password: ${{ env.RELEASE_DOCKER_PASSWORD }} + + - name: Create multi-arch web-frontend:${{ github.ref_name }} in release registry + run: | + AMD64=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:${{ env.CI_TESTED_TAG_SUFFIX }} + ARM64=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:${{ env.CI_TESTED_TAG_SUFFIX }}-arm64 + TARGET=${{ env.RELEASE_DOCKER_REPOSITORY }}/web-frontend:${{ github.ref_name }} + + echo "Creating multi-arch image:" + echo " $TARGET" + echo "from:" + echo " $AMD64" + echo " $ARM64" + + docker buildx imagetools create --debug -t "$TARGET" "$AMD64" "$ARM64" + + build-all-in-one-arm64-release-image: + name: Build baserow (all-in-one) arm64 image (GHCR) + runs-on: ubuntu-24.04-arm + needs: + - build-backend-arm64-release-image + - build-webfrontend-arm64-release-image + permissions: + contents: read + packages: write + steps: + - name: Checkout code at tag + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push baserow arm64 image to GHCR + uses: docker/build-push-action@v5 + with: + context: . 
+ file: deploy/all-in-one/Dockerfile + push: true + platforms: linux/arm64 + tags: | + ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:${{ env.CI_TESTED_TAG_SUFFIX }}-arm64 + build-args: | + FROM_BACKEND_IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/backend:${{ env.CI_TESTED_TAG_SUFFIX }}-arm64 + FROM_WEBFRONTEND_IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/web-frontend:${{ env.CI_TESTED_TAG_SUFFIX }}-arm64 + cache-from: | + type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:${{ env.CI_TESTED_TAG_SUFFIX }} + cache-to: type=inline + + publish-all-in-one-release-tagged-image: + name: Publish baserow:${{ github.ref_name }} (multi-arch) + runs-on: ubuntu-latest + needs: + - build-all-in-one-arm64-release-image + permissions: + contents: read + packages: read + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Log in to Release Docker Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.RELEASE_DOCKER_REGISTRY }} + username: ${{ env.RELEASE_DOCKER_USERNAME }} + password: ${{ env.RELEASE_DOCKER_PASSWORD }} + + - name: Create multi-arch baserow:${{ github.ref_name }} in release registry + run: | + AMD64=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:${{ env.CI_TESTED_TAG_SUFFIX }} + ARM64=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:${{ env.CI_TESTED_TAG_SUFFIX }}-arm64 + TARGET=${{ env.RELEASE_DOCKER_REPOSITORY }}/baserow:${{ github.ref_name }} + + echo "Creating multi-arch image:" + echo " $TARGET" + echo "from:" + echo " $AMD64" + echo " $ARM64" + + docker buildx imagetools create -t "$TARGET" "$AMD64" "$ARM64" + + build-cloudron-arm64-release-image: + name: Build cloudron arm64 image (GHCR) + runs-on: ubuntu-24.04-arm + needs: + - build-all-in-one-arm64-release-image + permissions: + contents: read + packages: write + steps: + - name: Checkout code at tag + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push cloudron arm64 image to GHCR + uses: docker/build-push-action@v5 + with: + context: . 
+ file: deploy/cloudron/Dockerfile + push: true + platforms: linux/arm64 + tags: | + ${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/cloudron:${{ env.CI_TESTED_TAG_SUFFIX }}-arm64 + build-args: | + FROM_IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/baserow:${{ env.CI_TESTED_TAG_SUFFIX }}-arm64 + cache-from: | + type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/cloudron:${{ env.CI_TESTED_TAG_SUFFIX }} + cache-to: type=inline + + publish-cloudron-release-tagged-image: + name: Publish cloudron:${{ github.ref_name }} (multi-arch) + runs-on: ubuntu-latest + needs: + - build-cloudron-arm64-release-image + permissions: + contents: read + packages: read + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Log in to Release Docker Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.RELEASE_DOCKER_REGISTRY }} + username: ${{ env.RELEASE_DOCKER_USERNAME }} + password: ${{ env.RELEASE_DOCKER_PASSWORD }} + + - name: Create multi-arch cloudron:${{ github.ref_name }} in release registry + run: | + AMD64=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/cloudron:${{ env.CI_TESTED_TAG_SUFFIX }} + ARM64=${{ env.REGISTRY }}/${{ env.IMAGE_REPO }}/cloudron:${{ env.CI_TESTED_TAG_SUFFIX }}-arm64 + TARGET=${{ env.RELEASE_DOCKER_REPOSITORY }}/cloudron:${{ github.ref_name }} + + echo "Creating multi-arch image:" + echo " $TARGET" + echo "from:" + echo " $AMD64" + echo " $ARM64" + + docker buildx imagetools create -t "$TARGET" "$AMD64" "$ARM64" diff --git a/backend/Dockerfile b/backend/Dockerfile index cba144ada6..a1aed0b6fb 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -70,7 +70,10 @@ ENV PIP_CACHE_DIR=/tmp/baserow_pip_cache # hadolint ignore=SC1091,DL3042,DL3013 RUN --mount=type=cache,mode=777,target=$PIP_CACHE_DIR,uid=$UID,gid=$GID . /baserow/venv/bin/activate \ && pip3 install --no-cache-dir --upgrade pip setuptools==78.1.1 \ - && pip3 install -r /baserow/requirements/base.txt + && pip3 install --no-compile -r /baserow/requirements/base.txt \ + && find /baserow/venv -type d -name "__pycache__" -prune -exec rm -rf '{}' + \ + && find /baserow/venv -type f \( -name "*.pyc" -o -name "*.pyo" \) -delete \ + && find /baserow/venv -type f \( -name "*.c" -o -name "*.h" \) -delete # Build a dev_deps stage which also has the dev dependencies for use by the dev layer. FROM base AS dev_deps diff --git a/backend/requirements/base.in b/backend/requirements/base.in index 356bcc0f99..8ae549aae6 100644 --- a/backend/requirements/base.in +++ b/backend/requirements/base.in @@ -88,4 +88,4 @@ httpcore==1.0.9 # Pinned to address vulnerability. 
genson==1.3.0 pyotp==2.9.0 qrcode==8.2 -udspy==0.1.6 +udspy==0.1.7 diff --git a/backend/requirements/base.txt b/backend/requirements/base.txt index 53b51bb15a..bb595f63e4 100644 --- a/backend/requirements/base.txt +++ b/backend/requirements/base.txt @@ -675,7 +675,7 @@ tzdata==2025.2 # -r base.in # django-celery-beat # kombu -udspy==0.1.6 +udspy==0.1.7 # via -r base.in unicodecsv==0.14.1 # via -r base.in diff --git a/backend/src/baserow/api/search/urls.py b/backend/src/baserow/api/search/urls.py index a218b71064..cb24d54b1f 100644 --- a/backend/src/baserow/api/search/urls.py +++ b/backend/src/baserow/api/search/urls.py @@ -1,17 +1,13 @@ from django.urls import path from baserow.api.search.views import WorkspaceSearchView -from baserow.core.feature_flags import FF_WORKSPACE_SEARCH, feature_flag_is_enabled app_name = "baserow.api.search" -urlpatterns = [] - -if feature_flag_is_enabled(FF_WORKSPACE_SEARCH): - urlpatterns = [ - path( - "workspace/<int:workspace_id>/", - WorkspaceSearchView.as_view(), - name="workspace_search", - ), - ] +urlpatterns = [ + path( + "workspace/<int:workspace_id>/", + WorkspaceSearchView.as_view(), + name="workspace_search", + ), +] diff --git a/backend/src/baserow/contrib/builder/locale/en/LC_MESSAGES/django.po b/backend/src/baserow/contrib/builder/locale/en/LC_MESSAGES/django.po index 7a00f027a4..717d594c98 100644 --- a/backend/src/baserow/contrib/builder/locale/en/LC_MESSAGES/django.po +++ b/backend/src/baserow/contrib/builder/locale/en/LC_MESSAGES/django.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-08-06 12:24+0000\n" +"POT-Creation-Date: 2025-11-17 15:17+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" "Language-Team: LANGUAGE <LL@li.org>\n" @@ -46,7 +46,7 @@ msgstr "" msgid "Last name" msgstr "" -#: src/baserow/contrib/builder/data_providers/data_provider_types.py:605 +#: src/baserow/contrib/builder/data_providers/data_provider_types.py:619 #, python-format msgid "%(user_source_name)s member" msgstr "" diff --git a/backend/src/baserow/contrib/database/api/field_rules/urls.py b/backend/src/baserow/contrib/database/api/field_rules/urls.py index 06651968b4..d8ed5f2b8f 100644 --- a/backend/src/baserow/contrib/database/api/field_rules/urls.py +++ b/backend/src/baserow/contrib/database/api/field_rules/urls.py @@ -1,24 +1,20 @@ from django.urls import re_path -from baserow.core.feature_flags import FF_DATE_DEPENDENCY, feature_flag_is_enabled +from .views import FieldRulesView, FieldRuleView, InvalidRowsView app_name = "baserow.contrib.database.api.field_rules" -urlpatterns = [] -if feature_flag_is_enabled(FF_DATE_DEPENDENCY): - from .views import FieldRulesView, FieldRuleView, InvalidRowsView - - urlpatterns += [ - re_path(r"^(?P<table_id>[0-9]+)/$", FieldRulesView.as_view(), name="list"), - re_path( - r"^(?P<table_id>[0-9]+)/rule/(?P<rule_id>[0-9]+)/$", - FieldRuleView.as_view(), - name="item", - ), - re_path( - r"^(?P<table_id>[0-9]+)/invalid-rows/$", - InvalidRowsView.as_view(), - name="invalid_rows", - ), - ] +urlpatterns = [ + re_path(r"^(?P<table_id>[0-9]+)/$", FieldRulesView.as_view(), name="list"), + re_path( + r"^(?P<table_id>[0-9]+)/rule/(?P<rule_id>[0-9]+)/$", + FieldRuleView.as_view(), + name="item", + ), + re_path( + r"^(?P<table_id>[0-9]+)/invalid-rows/$", + InvalidRowsView.as_view(), + name="invalid_rows", + ), +] diff --git a/backend/src/baserow/contrib/database/field_rules/handlers.py b/backend/src/baserow/contrib/database/field_rules/handlers.py index 2fa5e9f10f..1ef1917454 100644 --- a/backend/src/baserow/contrib/database/field_rules/handlers.py +++
b/backend/src/baserow/contrib/database/field_rules/handlers.py @@ -15,7 +15,6 @@ from baserow.contrib.database.table.cache import clear_generated_model_cache from baserow.contrib.database.table.models import GeneratedTableModel, Table from baserow.core.db import specific_iterator -from baserow.core.feature_flags import FF_DATE_DEPENDENCY, feature_flag_is_enabled from .collector import FieldRuleCollector from .exceptions import FieldRuleTableMismatch, NoRuleError @@ -50,8 +49,6 @@ def has_field_rules(self) -> bool: Returns `True` if the table contains active field rules. """ - if not feature_flag_is_enabled(FF_DATE_DEPENDENCY): - return False if not self.table.field_rules_validity_column_added: return False return bool(self.applicable_rules_with_types) diff --git a/backend/src/baserow/core/feature_flags.py b/backend/src/baserow/core/feature_flags.py index 06a6918236..3650627218 100644 --- a/backend/src/baserow/core/feature_flags.py +++ b/backend/src/baserow/core/feature_flags.py @@ -2,9 +2,6 @@ from baserow.core.exceptions import FeatureDisabledException -FF_ASSISTANT = "assistant" -FF_WORKSPACE_SEARCH = "workspace-search" -FF_DATE_DEPENDENCY = "date_dependency" FF_ENABLE_ALL = "*" diff --git a/backend/src/baserow/core/locale/en/LC_MESSAGES/django.po b/backend/src/baserow/core/locale/en/LC_MESSAGES/django.po index 8c5f145c01..3c42d63ebd 100644 --- a/backend/src/baserow/core/locale/en/LC_MESSAGES/django.po +++ b/backend/src/baserow/core/locale/en/LC_MESSAGES/django.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-10-13 19:58+0000\n" +"POT-Creation-Date: 2025-11-17 15:17+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -468,6 +468,27 @@ msgid "" "Item of type \"%(item_type)s\" (%(item_id)s) has been restored from trash" msgstr "" +#: src/baserow/core/two_factor_auth/actions.py:19 +msgid "Configure two-factor authentication" +msgstr "" + +#: src/baserow/core/two_factor_auth/actions.py:21 +#, python-format +msgid "" +"User \"%(user_email)s\" (%(user_id)s) configured %(provider_type)s (enabled " +"%(is_enabled)s) two-factor authentication." +msgstr "" + +#: src/baserow/core/two_factor_auth/actions.py:71 +msgid "Disable two-factor authentication" +msgstr "" + +#: src/baserow/core/two_factor_auth/actions.py:72 +#, python-format +msgid "" +"User \"%(user_email)s\" (%(user_id)s) disabled two-factor authentication." 
+msgstr "" + #: src/baserow/core/user/actions.py:25 msgid "Create User" msgstr "" diff --git a/backend/src/baserow/locale/en/LC_MESSAGES/django.po b/backend/src/baserow/locale/en/LC_MESSAGES/django.po index 72129cf0b9..67b9120a1e 100755 --- a/backend/src/baserow/locale/en/LC_MESSAGES/django.po +++ b/backend/src/baserow/locale/en/LC_MESSAGES/django.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-10-13 19:58+0000\n" +"POT-Creation-Date: 2025-11-17 15:17+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -54,71 +54,63 @@ msgstr "" msgid "Local Baserow" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:32 +#: src/baserow/contrib/automation/nodes/actions.py:25 msgid "Create automation node" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:33 +#: src/baserow/contrib/automation/nodes/actions.py:26 #, python-format msgid "Node (%(node_id)s) created" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:104 +#: src/baserow/contrib/automation/nodes/actions.py:90 msgid "Update automation node" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:105 +#: src/baserow/contrib/automation/nodes/actions.py:91 #, python-format msgid "Node (%(node_id)s) updated" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:173 +#: src/baserow/contrib/automation/nodes/actions.py:159 msgid "Delete automation node" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:174 +#: src/baserow/contrib/automation/nodes/actions.py:160 #, python-format msgid "Node (%(node_id)s) deleted" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:231 -msgid "Order nodes" -msgstr "" - -#: src/baserow/contrib/automation/nodes/actions.py:232 -msgid "Node order changed" -msgstr "" - -#: src/baserow/contrib/automation/nodes/actions.py:300 +#: src/baserow/contrib/automation/nodes/actions.py:217 msgid "Duplicate automation node" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:301 +#: src/baserow/contrib/automation/nodes/actions.py:218 #, python-format msgid "Node (%(node_id)s) duplicated" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:384 +#: src/baserow/contrib/automation/nodes/actions.py:289 msgid "Replace automation node" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:386 +#: src/baserow/contrib/automation/nodes/actions.py:291 #, python-format msgid "" "Node (%(node_id)s) changed from a type of %(original_node_type)s to " "%(node_type)s" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:491 +#: src/baserow/contrib/automation/nodes/actions.py:377 msgid "Moved automation node" msgstr "" -#: src/baserow/contrib/automation/nodes/actions.py:492 +#: src/baserow/contrib/automation/nodes/actions.py:378 #, python-format msgid "Node (%(node_id)s) moved" msgstr "" -#: src/baserow/contrib/automation/nodes/node_types.py:176 +#: src/baserow/contrib/automation/nodes/node_types.py:260 msgid "Branch" msgstr "" @@ -213,18 +205,22 @@ msgstr "" msgid "Widget \"%(widget_title)s\" (%(widget_id)s) deleted" msgstr "" -#: src/baserow/contrib/integrations/core/service_types.py:1103 +#: src/baserow/contrib/integrations/core/service_types.py:1067 msgid "Branch taken" msgstr "" -#: src/baserow/contrib/integrations/core/service_types.py:1108 +#: src/baserow/contrib/integrations/core/service_types.py:1072 msgid "Label" msgstr "" -#: src/baserow/contrib/integrations/core/service_types.py:1110 +#: 
src/baserow/contrib/integrations/core/service_types.py:1074 msgid "The label of the branch that matched the condition." msgstr "" -#: src/baserow/contrib/integrations/core/service_types.py:1402 +#: src/baserow/contrib/integrations/core/service_types.py:1418 msgid "Triggered at" msgstr "" + +#: src/baserow/contrib/integrations/slack/service_types.py:166 +msgid "OK" +msgstr "" diff --git a/changelog/entries/unreleased/bug/4268_fix_synchronised_button_loading_state_in_tables.json b/changelog/entries/unreleased/bug/4268_fix_synchronised_button_loading_state_in_tables.json new file mode 100644 index 0000000000..865bb14a03 --- /dev/null +++ b/changelog/entries/unreleased/bug/4268_fix_synchronised_button_loading_state_in_tables.json @@ -0,0 +1,9 @@ +{ + "type": "bug", + "message": "Fix synchronised button loading state in tables", + "issue_origin": "github", + "issue_number": 4268, + "domain": "builder", + "bullet_points": [], + "created_at": "2025-11-17" +} \ No newline at end of file diff --git a/changelog/entries/unreleased/feature/3826_workspace_search.json b/changelog/entries/unreleased/feature/3826_workspace_search.json new file mode 100644 index 0000000000..b9d7cb3e8b --- /dev/null +++ b/changelog/entries/unreleased/feature/3826_workspace_search.json @@ -0,0 +1,9 @@ +{ + "type": "feature", + "message": "Workspace search", + "issue_origin": "github", + "issue_number": 3826, + "domain": "database", + "bullet_points": [], + "created_at": "2025-11-17" +} \ No newline at end of file diff --git a/changelog/entries/unreleased/feature/3829_date_dependencies_in_table.json b/changelog/entries/unreleased/feature/3829_date_dependencies_in_table.json new file mode 100644 index 0000000000..bf0b3f2a11 --- /dev/null +++ b/changelog/entries/unreleased/feature/3829_date_dependencies_in_table.json @@ -0,0 +1,9 @@ +{ + "type": "feature", + "message": "Date dependencies in table", + "issue_origin": "github", + "issue_number": 3829, + "domain": "database", + "bullet_points": [], + "created_at": "2025-11-17" +} \ No newline at end of file diff --git a/changelog/entries/unreleased/feature/introduce_kuma_baserow_ai_assistant.json b/changelog/entries/unreleased/feature/introduce_kuma_baserow_ai_assistant.json new file mode 100644 index 0000000000..c8a9ddc084 --- /dev/null +++ b/changelog/entries/unreleased/feature/introduce_kuma_baserow_ai_assistant.json @@ -0,0 +1,9 @@ +{ + "type": "feature", + "message": "Introduced Kuma, an AI-powered assistant to help you manage your workspace.", + "issue_origin": "github", + "issue_number": 3676, + "domain": "core", + "bullet_points": [], + "created_at": "2025-11-17" +} diff --git a/docker-compose.local-build.yml b/docker-compose.local-build.yml index 08c1325cb4..f72d660393 100644 --- a/docker-compose.local-build.yml +++ b/docker-compose.local-build.yml @@ -167,6 +167,7 @@ x-backend-variables: BASEROW_PREMIUM_GROUPED_AGGREGATE_SERVICE_MAX_SERIES: BASEROW_PREMIUM_GROUPED_AGGREGATE_SERVICE_MAX_AGG_BUCKETS: BASEROW_ENTERPRISE_ASSISTANT_LLM_MODEL: + BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE: BASEROW_EMBEDDINGS_API_URL: services: diff --git a/docker-compose.no-caddy.yml b/docker-compose.no-caddy.yml index 85a8bdda70..6ee987c896 100644 --- a/docker-compose.no-caddy.yml +++ b/docker-compose.no-caddy.yml @@ -185,6 +185,7 @@ x-backend-variables: BASEROW_PREMIUM_GROUPED_AGGREGATE_SERVICE_MAX_SERIES: BASEROW_PREMIUM_GROUPED_AGGREGATE_SERVICE_MAX_AGG_BUCKETS: BASEROW_ENTERPRISE_ASSISTANT_LLM_MODEL: + BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE: BASEROW_EMBEDDINGS_API_URL: services: 
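Note: the `BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE` variable plumbed through `.env.example` and the compose files here is consumed as `settings.BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE` in the `assistant.py` changes further down. The Django settings entry itself is not part of this diff; a minimal sketch of how it would typically be parsed, assuming a `float` cast and the default of 0 stated in the docs hunk below:

```python
import os

# Assumed parsing - the real settings module is not shown in this diff.
# A default of "0" keeps responses deterministic, matching the documented
# default for the assistant.
BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE = float(
    os.getenv("BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE", "0")
)
```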
diff --git a/docker-compose.yml b/docker-compose.yml index 2006a8d005..34c4374a9a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -234,6 +234,7 @@ x-backend-variables: BASEROW_PREMIUM_GROUPED_AGGREGATE_SERVICE_MAX_SERIES: BASEROW_PREMIUM_GROUPED_AGGREGATE_SERVICE_MAX_AGG_BUCKETS: BASEROW_ENTERPRISE_ASSISTANT_LLM_MODEL: + BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE: BASEROW_EMBEDDINGS_API_URL: BASEROW_OAUTH_BACKEND_URL: BASEROW_TOTP_ISSUER_NAME: diff --git a/docs/installation/ai-assistant.md b/docs/installation/ai-assistant.md index 9a24ce89a3..e5c7be4676 100644 --- a/docs/installation/ai-assistant.md +++ b/docs/installation/ai-assistant.md @@ -21,8 +21,17 @@ Set the model you want, restart Baserow, and let migrations run. # Required BASEROW_ENTERPRISE_ASSISTANT_LLM_MODEL=openai/gpt-5-mini OPENAI_API_KEY=your_api_key + +# Optional - adjust LLM temperature (default: 0) +BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE=0 ``` +**About temperature:** +- Controls randomness in LLM responses (0.0 to 2.0) +- **Default: 0** (deterministic, consistent responses - recommended for production) +- Higher values (e.g., 0.7-1.0) = more creative/varied responses +- Lower values (e.g., 0-0.3) = more focused/consistent responses + ## 3) Provider presets Choose **one** provider block and set its variables. diff --git a/enterprise/backend/src/baserow_enterprise/api/assistant/views.py b/enterprise/backend/src/baserow_enterprise/api/assistant/views.py index e1b50cee76..1987e6f224 100644 --- a/enterprise/backend/src/baserow_enterprise/api/assistant/views.py +++ b/enterprise/backend/src/baserow_enterprise/api/assistant/views.py @@ -22,9 +22,11 @@ from baserow.api.serializers import get_example_pagination_serializer_class from baserow.api.sessions import set_client_undo_redo_action_group_id from baserow.core.exceptions import UserNotInWorkspace, WorkspaceDoesNotExist -from baserow.core.feature_flags import FF_ASSISTANT, feature_flag_is_enabled from baserow.core.handler import CoreHandler -from baserow_enterprise.assistant.assistant import set_assistant_cancellation_key +from baserow_enterprise.assistant.assistant import ( + check_lm_ready_or_raise, + set_assistant_cancellation_key, +) from baserow_enterprise.assistant.exceptions import ( AssistantChatDoesNotExist, AssistantChatMessagePredictionDoesNotExist, @@ -99,8 +101,6 @@ class AssistantChatsView(APIView): } ) def get(self, request: Request, query_params) -> Response: - feature_flag_is_enabled(FF_ASSISTANT, raise_if_disabled=True) - workspace_id = query_params["workspace_id"] workspace = CoreHandler().get_workspace(workspace_id) @@ -147,8 +147,6 @@ class AssistantChatView(APIView): } ) def post(self, request: Request, chat_uuid: str, data) -> StreamingHttpResponse: - feature_flag_is_enabled(FF_ASSISTANT, raise_if_disabled=True) - ui_context = UIContext.from_validate_request(request, data["ui_context"]) workspace_id = ui_context.workspace.id workspace = CoreHandler().get_workspace(workspace_id) @@ -159,6 +157,7 @@ def post(self, request: Request, chat_uuid: str, data) -> StreamingHttpResponse: context=workspace, ) + check_lm_ready_or_raise() handler = AssistantHandler() chat, _ = handler.get_or_create_chat(request.user, workspace, chat_uuid) @@ -174,7 +173,6 @@ def post(self, request: Request, chat_uuid: str, data) -> StreamingHttpResponse: chat.user.profile.timezone = ui_context.timezone assistant = handler.get_assistant(chat) - assistant.check_llm_ready_or_raise() human_message = HumanMessage(content=data["content"], ui_context=ui_context) async def 
stream_assistant_messages(): @@ -226,8 +224,6 @@ def _stream_assistant_message(self, message: AssistantMessageUnion) -> str: } ) def get(self, request: Request, chat_uuid: str) -> Response: - feature_flag_is_enabled(FF_ASSISTANT, raise_if_disabled=True) - handler = AssistantHandler() chat = handler.get_chat(request.user, chat_uuid) @@ -265,8 +261,6 @@ def get(self, request: Request, chat_uuid: str) -> Response: } ) def delete(self, request: Request, chat_uuid: str) -> Response: - feature_flag_is_enabled(FF_ASSISTANT, raise_if_disabled=True) - handler = AssistantHandler() chat = handler.get_chat(request.user, chat_uuid) @@ -307,8 +301,6 @@ class AssistantChatMessageFeedbackView(APIView): } ) def put(self, request: Request, message_id: int, data) -> Response: - feature_flag_is_enabled(FF_ASSISTANT, raise_if_disabled=True) - handler = AssistantHandler() message = handler.get_chat_message_by_id(request.user, message_id) try: diff --git a/enterprise/backend/src/baserow_enterprise/apps.py b/enterprise/backend/src/baserow_enterprise/apps.py index 38944c16e2..3541f6b026 100755 --- a/enterprise/backend/src/baserow_enterprise/apps.py +++ b/enterprise/backend/src/baserow_enterprise/apps.py @@ -312,13 +312,7 @@ def ready(self): from baserow_enterprise.assistant.tools import ( CreateBuildersToolType, - CreateFieldsToolType, - CreateTablesToolType, - CreateViewFiltersToolType, - CreateViewsToolType, - CreateWorkflowsToolType, GenerateDatabaseFormulaToolType, - GetRowsToolsToolType, GetTablesSchemaToolType, ListBuildersToolType, ListRowsToolType, @@ -326,7 +320,11 @@ def ready(self): ListViewsToolType, ListWorkflowsToolType, NavigationToolType, + RowsToolFactoryToolType, SearchDocsToolType, + TableAndFieldsToolFactoryToolType, + ViewsToolFactoryToolType, + WorkflowToolFactoryToolType, ) from baserow_enterprise.assistant.tools.registries import ( assistant_tool_registry, @@ -338,18 +336,16 @@ def ready(self): assistant_tool_registry.register(ListBuildersToolType()) assistant_tool_registry.register(CreateBuildersToolType()) assistant_tool_registry.register(ListTablesToolType()) - assistant_tool_registry.register(CreateTablesToolType()) assistant_tool_registry.register(GetTablesSchemaToolType()) - assistant_tool_registry.register(CreateFieldsToolType()) + assistant_tool_registry.register(TableAndFieldsToolFactoryToolType()) assistant_tool_registry.register(GenerateDatabaseFormulaToolType()) assistant_tool_registry.register(ListRowsToolType()) - assistant_tool_registry.register(GetRowsToolsToolType()) + assistant_tool_registry.register(RowsToolFactoryToolType()) assistant_tool_registry.register(ListViewsToolType()) - assistant_tool_registry.register(CreateViewsToolType()) - assistant_tool_registry.register(CreateViewFiltersToolType()) + assistant_tool_registry.register(ViewsToolFactoryToolType()) assistant_tool_registry.register(ListWorkflowsToolType()) - assistant_tool_registry.register(CreateWorkflowsToolType()) + assistant_tool_registry.register(WorkflowToolFactoryToolType()) # The signals must always be imported last because they use the registries # which need to be filled first. 
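Note on the readiness check: the views hunk above replaces the per-assistant `assistant.check_llm_ready_or_raise()` call with a module-level `check_lm_ready_or_raise()`, which the `assistant.py` hunk below caches with `functools.lru_cache(maxsize=1)` so the probe runs once per process rather than once per chat. A standalone sketch of that caching behaviour (the probe body is illustrative):

```python
from functools import lru_cache

@lru_cache(maxsize=1)
def check_ready() -> None:
    # Stand-in for the real LLM probe call.
    print("probing the model...")

check_ready()  # probes once
check_ready()  # cached, no second probe
# lru_cache only caches successful calls: if the probe raises, nothing is
# cached and the next call retries, so a transient provider outage does not
# permanently mark the model as unavailable.
```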
@@ -426,7 +422,7 @@ def sync_assistant_knowledge_base(sender, **kwargs): from baserow_enterprise.assistant.tasks import ( sync_assistant_knowledge_base as sync_assistant_knowledge_base_task, ) - from baserow_enterprise.assistant.tools.search_docs.handler import ( + from baserow_enterprise.assistant.tools.search_user_docs.handler import ( KnowledgeBaseHandler, ) diff --git a/enterprise/backend/src/baserow_enterprise/assistant/assistant.py b/enterprise/backend/src/baserow_enterprise/assistant/assistant.py index 15857e53a2..3b3bb99b5c 100644 --- a/enterprise/backend/src/baserow_enterprise/assistant/assistant.py +++ b/enterprise/backend/src/baserow_enterprise/assistant/assistant.py @@ -1,10 +1,11 @@ from dataclasses import dataclass from functools import lru_cache -from typing import Any, AsyncGenerator, Callable, TypedDict +from typing import Any, AsyncGenerator, Awaitable, Callable, Tuple, TypedDict from django.conf import settings from django.core.cache import cache from django.utils import translation +from django.utils.translation import gettext as _ import udspy from udspy.callback import BaseCallback @@ -19,7 +20,7 @@ from baserow_enterprise.assistant.tools.registries import assistant_tool_registry from .models import AssistantChat, AssistantChatMessage, AssistantChatPrediction -from .prompts import ASSISTANT_SYSTEM_PROMPT +from .signatures import ChatSignature, RequestRouter from .types import ( AiMessage, AiMessageChunk, @@ -118,47 +119,6 @@ def on_tool_end( self.extend_sources(outputs["sources"]) -class ChatSignature(udspy.Signature): - __doc__ = f"{ASSISTANT_SYSTEM_PROMPT}\n TASK INSTRUCTIONS: \n" - - question: str = udspy.InputField() - context: str = udspy.InputField( - description="Context and facts extracted from the history to help answer the question." - ) - ui_context: dict[str, Any] | None = udspy.InputField( - default=None, - description=( - "The context the user is currently in. " - "It contains information about the user, the workspace, open table, view, etc." - "Whenever make sense, use it to ground your answer." - ), - ) - answer: str = udspy.OutputField() - - -class QuestionContextSummarizationSignature(udspy.Signature): - """ - Extract relevant facts from conversation history that provide context for answering - the current question. Do not answer the question or modify it - only extract and - summarize the relevant historical facts that will help in decision-making. - """ - - question: str = udspy.InputField( - description="The current user question that needs context from history." - ) - previous_messages: list[str] = udspy.InputField( - description="Conversation history as alternating user/assistant messages." - ) - facts: str = udspy.OutputField( - description=( - "Relevant facts extracted from the conversation history as a concise " - "paragraph. Include only information that provides necessary context for " - "answering the question. Do not answer the question itself, do not modify " - "the question, and do not include irrelevant details." - ) - ) - - def get_assistant_cancellation_key(chat_uuid: str) -> str: """ Get the Redis cache key for cancellation tracking. @@ -182,27 +142,69 @@ def set_assistant_cancellation_key(chat_uuid: str, timeout: int = 300) -> None: cache.set(cache_key, True, timeout=timeout) +def get_lm_client( + model: str | None = None, +) -> udspy.LM: + """ + Returns a udspy.LM client configured with the specified model or the default model. + + :param model: The language model to use. If None, the default model from settings + will be used.
+ :return: A udspy.LM instance. + """ + + return udspy.LM(model=model or settings.BASEROW_ENTERPRISE_ASSISTANT_LLM_MODEL) + + +@lru_cache(maxsize=1) +def check_lm_ready_or_raise() -> None: + """ + Checks if the configured LLM is ready by making a test call. Raises + AssistantModelNotSupportedError if the model is not supported or accessible. + """ + + lm = get_lm_client() + try: + lm("Respond in JSON: {'response': 'ok'}") + except Exception as e: + raise AssistantModelNotSupportedError( + f"The model '{lm.model}' is not supported or accessible: {e}" + ) + + class Assistant: def __init__(self, chat: AssistantChat): self._chat = chat self._user = chat.user self._workspace = chat.workspace - self._init_lm_client() + self._lm_client = get_lm_client() self._init_assistant() - def _init_lm_client(self): - lm_model = settings.BASEROW_ENTERPRISE_ASSISTANT_LLM_MODEL - self._lm_client = udspy.LM(model=lm_model) - def _init_assistant(self): + self.history = None self.tool_helpers = self.get_tool_helpers() - tools = assistant_tool_registry.list_all_usable_tools( - self._user, self._workspace, self.tool_helpers - ) + tools = [ + t if isinstance(t, udspy.Tool) else udspy.Tool(t) + for t in assistant_tool_registry.list_all_usable_tools( + self._user, self._workspace, self.tool_helpers + ) + ] self.callbacks = AssistantCallbacks(self.tool_helpers) - self._assistant = udspy.ReAct(ChatSignature, tools=tools, max_iters=20) - self.history: list[str] = [] + + module_kwargs = { + "temperature": settings.BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE, + "response_format": {"type": "json_object"}, + } + + self.search_user_docs_tool = next( + (tool for tool in tools if tool.name == "search_user_docs"), None + ) + self.agent_tools = tools + self._request_router = udspy.ChainOfThought(RequestRouter, **module_kwargs) + self._assistant = udspy.ReAct( + ChatSignature, tools=self.agent_tools, max_iters=20, **module_kwargs + ) async def acreate_chat_message( self, @@ -277,7 +279,7 @@ def list_chat_messages( ) return list(reversed(messages)) - async def aload_chat_history(self, limit=30): + async def afetch_chat_history(self, limit=30): """ Loads the chat history into a udspy.History object. It only loads complete message pairs (human + AI). 
The history will be in chronological order and must @@ -287,6 +289,7 @@ - :return: None + :return: The loaded chat history as a udspy.History. """ + history = udspy.History() last_saved_messages: list[AssistantChatMessage] = [ msg async for msg in self._chat.messages.order_by("-created_on")[:limit] ] @@ -301,18 +304,11 @@ ): continue - self.history.append(f"Human: {first_message.content}") - ai_answer = last_saved_messages.pop() - self.history.append(f"AI: {ai_answer.content}") + history.add_user_message(first_message.content) + assistant_answer = last_saved_messages.pop() + history.add_assistant_message(assistant_answer.content) - @lru_cache(maxsize=1) - def check_llm_ready_or_raise(self): - try: - self._lm_client("Say ok if you can read this.") - except Exception as e: - raise AssistantModelNotSupportedError( - f"The model '{self._lm_client.model}' is not supported or accessible: {e}" - ) + return history def get_tool_helpers(self) -> ToolHelpers: def update_status_localized(status: str): @@ -330,51 +326,58 @@ def update_status_localized(status: str): navigate_to=unsafe_navigate_to, ) - async def _generate_chat_title( - self, user_message: HumanMessage, ai_msg: AiMessage - ) -> str: + async def _generate_chat_title(self, user_message: str) -> str: """ - Generates a title for the chat based on the user message and AI response. + Generates a title for the chat based on the user message. + + :param user_message: The latest user message in the chat. + :return: The generated chat title. """ title_generator = udspy.Predict( udspy.Signature.from_string( - "user_message, ai_response -> chat_title", - "Create a short title for the following chat conversation.", + "user_message -> chat_title", + "Create a short title for the following user request.", ) ) rsp = await title_generator.aforward( - user_message=user_message.content, - ai_response=ai_msg.content[:300], + user_message=user_message, ) return rsp.chat_title async def _acreate_ai_message_response( self, human_msg: HumanMessage, - final_prediction: udspy.Prediction, - sources: list[str], + prediction: udspy.Prediction, ) -> AiMessage: + """ + Creates and saves an AI chat message response based on the prediction. Stores + the prediction in AssistantChatPrediction, linking it to the human message, so + it can be referenced later to provide feedback. + + :param human_msg: The human message instance. + :param prediction: The udspy.Prediction instance containing the AI response. + :return: The created AiMessage instance to return to the user.
+ """ + + sources = self.callbacks.sources ai_msg = await self.acreate_chat_message( AssistantChatMessage.Role.AI, - final_prediction.answer, + prediction.answer, artifacts={"sources": sources}, action_group_id=get_client_undo_redo_action_group_id(self._user), ) + await AssistantChatPrediction.objects.acreate( human_message=human_msg, ai_response=ai_msg, - prediction={ - "model": self._lm_client.model, - "trajectory": final_prediction.trajectory, - "reasoning": final_prediction.reasoning, - }, + prediction={k: v for k, v in prediction.items() if k != "module"}, ) # Yield final complete message return AiMessage( id=ai_msg.id, - content=final_prediction.answer, + content=prediction.answer, sources=sources, can_submit_feedback=True, ) @@ -401,51 +404,48 @@ def _check_cancellation(self, cache_key: str, message_id: str) -> None: cache.delete(cache_key) raise AssistantMessageCancelled(message_id=message_id) - async def _summarize_context_from_history(self, question: str) -> str: + async def get_router_stream( + self, message: HumanMessage + ) -> AsyncGenerator[Any, None]: """ - Extract relevant facts from chat history to provide context for the question or - return an empty string if there is no history. + Returns an async generator that streams the router's response to a user - :param question: The current user question that needs context from history. - :return: A string containing relevant facts from the conversation history. + :param message: The current user message that needs context from history. + :return: An async generator that yields stream events. """ - if not self.history: - return "" + self.history = await self.afetch_chat_history() - predictor = udspy.Predict(QuestionContextSummarizationSignature) - result = await predictor.aforward( - question=question, - previous_messages=self.history, + return self._request_router.astream( + question=message.content, + conversation_history=RequestRouter.format_conversation_history( + self.history + ), ) - return result.facts - async def _process_stream_event( + async def _process_router_stream( self, event: Any, human_msg: AssistantChatMessage, - human_message: HumanMessage, - stream_reasoning: bool, - ) -> tuple[list[AssistantMessageUnion], bool]: + ) -> Tuple[list[AssistantMessageUnion], bool, udspy.Prediction | None]: """ - Process a single event from the output stream. + Process a single event from the smart router output stream. :param event: The event to process. :param human_msg: The human message instance. - :param human_message: The human message data. - :param stream_reasoning: Whether reasoning streaming is enabled. - :return: a tuple of (messages_to_yield, updated_stream_reasoning_flag). + :return: a tuple of (messages_to_yield, prediction). 
""" messages = [] + prediction = None if isinstance(event, (AiThinkingMessage, AiNavigationMessage)): messages.append(event) - return messages, True # Enable reasoning streaming + return messages, prediction # Stream the final answer if isinstance(event, udspy.OutputStreamChunk): - if event.field_name == "answer": + if event.field_name == "answer" and event.content.strip(): messages.append( AiMessageChunk( content=event.content, @@ -454,28 +454,126 @@ async def _process_stream_event( ) elif isinstance(event, udspy.Prediction): - # sub-module predictions contain reasoning steps - if "next_thought" in event and stream_reasoning: - messages.append(AiReasoningChunk(content=event.next_thought)) + if hasattr(event, "routing_decision"): + prediction = event + + if getattr(event, "routing_decision", None) == "delegate_to_agent": + messages.append(AiThinkingMessage(content=_("Thinking..."))) + elif getattr(event, "routing_decision", None) == "search_user_docs": + if self.search_user_docs_tool is not None: + await self.search_user_docs_tool(question=event.search_query) + else: + messages.append( + AiMessage( + content=_( + "I wanted to search the documentation for you, " + "but the search tool isn't currently available.\n\n" + "To enable documentation search, you'll need to set up " + "the local knowledge base. \n\n" + "You can find setup instructions at: https://baserow.io/user-docs" + ), + sources=[], + ) + ) + elif getattr(event, "answer", None): + ai_msg = await self._acreate_ai_message_response(human_msg, event) + messages.append(ai_msg) - # final prediction contains the answer to the user question - elif event.module is self._assistant: - ai_msg = await self._acreate_ai_message_response( - human_msg, event, self.callbacks.sources + return messages, prediction + + async def _process_agent_stream( + self, + event: Any, + human_msg: AssistantChatMessage, + ) -> Tuple[list[AssistantMessageUnion], udspy.Prediction | None]: + """ + Process a single event from the output stream. + + :param event: The event to process. + :param human_msg: The human message instance. + :return: a tuple of (messages_to_yield, prediction). + """ + + messages = [] + prediction = None + + if isinstance(event, (AiThinkingMessage, AiNavigationMessage)): + messages.append(event) + return messages, prediction + + # Stream the final answer + if isinstance(event, udspy.OutputStreamChunk): + if ( + event.field_name == "answer" + and event.module is self._assistant.extract_module + ): + messages.append( + AiMessageChunk( + content=event.content, + sources=self.callbacks.sources, + ) ) + + elif isinstance(event, udspy.Prediction): + # final prediction contains the answer to the user question + if event.module is self._assistant: + prediction = event + ai_msg = await self._acreate_ai_message_response(human_msg, prediction) messages.append(ai_msg) - # Generate chat title if needed - if not self._chat.title: - chat_title = await self._generate_chat_title(human_message, ai_msg) - messages.append(ChatTitleMessage(content=chat_title)) - self._chat.title = chat_title - await self._chat.asave(update_fields=["title", "updated_on"]) + elif reasoning := getattr(event, "next_thought", None): + messages.append(AiReasoningChunk(content=reasoning)) + + return messages, prediction - return messages, stream_reasoning + def get_agent_stream( + self, message: HumanMessage, extracted_context: str + ) -> AsyncGenerator[Any, None]: + """ + Returns an async generator that streams the ReAct agent's response to a user + message. 
+ + :param message: The message from the user. + :param extracted_context: Context extracted from the chat history by the router. + :return: An async generator that yields stream events. + """ + + ui_context = message.ui_context.format() if message.ui_context else None + + return self._assistant.astream( + question=message.content, + context=extracted_context, + ui_context=ui_context, + ) + + async def _process_stream( + self, + human_msg: AssistantChatMessage, + stream: AsyncGenerator[Any, None], + process_event_func: Callable[ + [Any, AssistantChatMessage], + Awaitable[Tuple[list[AssistantMessageUnion], udspy.Prediction | None]], + ], + ) -> AsyncGenerator[Tuple[AssistantMessageUnion, udspy.Prediction | None], None]: + chunk_count = 0 + cancellation_key = self._get_cancellation_cache_key() + message_id = str(human_msg.id) + + async for event in stream: + # Periodically check for cancellation + chunk_count += 1 + if chunk_count % 10 == 0: + self._check_cancellation(cancellation_key, message_id) + + messages, prediction = await process_event_func(event, human_msg) + + if messages: # Don't return responses if cancelled + self._check_cancellation(cancellation_key, message_id) + + for msg in messages: + yield msg, prediction async def astream_messages( - self, human_message: HumanMessage + self, message: HumanMessage ) -> AsyncGenerator[AssistantMessageUnion, None]: """ Streams the response to a user message. @@ -483,47 +581,44 @@ - :param human_message: The message from the user. + :param message: The message from the user. :return: An async generator that yields the response messages. """ + + human_msg = await self.acreate_chat_message( + AssistantChatMessage.Role.HUMAN, + message.content, + ) + with udspy.settings.context( lm=self._lm_client, callbacks=[*udspy.settings.callbacks, self.callbacks], ): - if self.history is None: - await self.aload_chat_history() - - context_from_history = await self._summarize_context_from_history( - human_message.content - ) - - output_stream = self._assistant.astream( - question=human_message.content, - context=context_from_history, - ui_context=human_message.ui_context.model_dump_json(exclude_none=True), - ) - - human_msg = await self.acreate_chat_message( - AssistantChatMessage.Role.HUMAN, human_message.content - ) - - cache_key = self._get_cancellation_cache_key() message_id = str(human_msg.id) yield AiStartedMessage(message_id=message_id) - # Flag to wait for the first step in the reasoning to start streaming it - stream_reasoning = False - chunk_count = 0 + router_stream = await self.get_router_stream(message) + routing_decision, extracted_context = None, "" - async for event in output_stream: - # Periodically check for cancellation - chunk_count += 1 - if chunk_count % 10 == 0: - self._check_cancellation(cache_key, message_id) - - messages, stream_reasoning = await self._process_stream_event( - event, human_msg, human_message, stream_reasoning + async for msg, prediction in self._process_stream( + human_msg, router_stream, self._process_router_stream + ): + if prediction is not None: + routing_decision = prediction.routing_decision + extracted_context = prediction.extracted_context + yield msg + + if routing_decision == "delegate_to_agent": + agent_stream = self.get_agent_stream( + message, + extracted_context=extracted_context, ) - if messages: # Don't return responses if cancelled - self._check_cancellation(cache_key, message_id) + async for msg, __ in self._process_stream( + human_msg, agent_stream, self._process_agent_stream + ): + yield msg - for msg in messages: - yield msg + # Generate chat title if needed + if not self._chat.title: + chat_title = await 
self._generate_chat_title(human_msg.content) + self._chat.title = chat_title + await self._chat.asave(update_fields=["title", "updated_on"]) + yield ChatTitleMessage(content=chat_title) diff --git a/enterprise/backend/src/baserow_enterprise/assistant/prompts.py b/enterprise/backend/src/baserow_enterprise/assistant/prompts.py index 0c4f0eaa54..7e41db1843 100644 --- a/enterprise/backend/src/baserow_enterprise/assistant/prompts.py +++ b/enterprise/backend/src/baserow_enterprise/assistant/prompts.py @@ -57,29 +57,47 @@ • **Publishing**: Requires at least one configured action """ -ASSISTANT_SYSTEM_PROMPT = ( +AGENT_LIMITATIONS = """ +## LIMITATIONS + +### CANNOT CREATE: +• User accounts, workspaces +• Applications, pages +• Dashboards, widgets +• Snapshots, webhooks, integrations +• Roles, permissions + +### CANNOT UPDATE/MODIFY: +• User, workspace, or integration settings +• Roles, permissions +• Applications, pages +• Dashboards, widgets + +### CANNOT DELETE: +• Users, workspaces +• Roles, permissions +• Applications, pages +• Dashboards, widgets +""" + +ASSISTANT_SYSTEM_PROMPT_BASE = ( f""" You are Kuma, an AI expert for Baserow (open-source no-code platform). ## YOUR KNOWLEDGE 1. **Core concepts** (below) -2. **Detailed docs** - use search_docs tool to search when needed +2. **Detailed docs** - use search_user_docs tool to search when needed 3. **API specs** - guide users to "{settings.PUBLIC_BACKEND_URL}/api/schema.json" 4. **Official website** - "https://baserow.io" +5. **Community support** - "https://community.baserow.io" +6. **Direct support** - for Advanced/Enterprise plan users -## HOW TO HELP +## ANSWER FORMATTING GUIDELINES • Use American English spelling and grammar -• Be clear, concise, and actionable -• For troubleshooting: ask for error messages or describe expected vs actual results -• **NEVER** fabricate answers or URLs. Acknowledge when you can't be sure. -• Use the tools whenever possible. Fallback to search_docs and provide instruction only when it's not possible to fulfill the request. Ground answers in the documentation. -• When finished, briefly suggest one or more logical next steps only if they use tools you have access to and directly builds on what was just done. - -## FORMATTING (CRITICAL) • Only use Markdown (bold, italics, lists, code blocks) -• Prefer lists in explanations. Numbered lists for steps; bulleted for others +• Prefer lists in explanations. Numbered lists for steps; bulleted for others. • Use code blocks for examples, commands, snippets -• EXCEPTION: When showing database schema or query results, tables are acceptable +• Be concise and clear in your response ## BASEROW CONCEPTS """ @@ -88,3 +106,107 @@ + APPLICATION_BUILDER_CONCEPTS + AUTOMATION_BUILDER_CONCEPTS ) + +AGENT_SYSTEM_PROMPT = ( + ASSISTANT_SYSTEM_PROMPT_BASE + + """ +**CRITICAL:** You MUST use your action tools to fulfill the request, loading additional tools if needed. + +### YOUR TOOLS: +- **Action tools**: Navigate, list databases, tables, fields, views, filters, workflows, rows, etc. +- **Tool loaders**: Load additional specialized tools (e.g., load_rows_tools, load_views_tools). Use them to access capabilities not currently available. 
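
The tool-loader idea the prompt describes can be pictured as a registry that grows at runtime: calling a loader makes new callables visible to the agent on its next step. A toy illustration follows; every name in it is invented for the example and is not part of the assistant's real tool set.

```python
# Toy registry illustrating the loader pattern; all names are invented for
# this example and are not the assistant's real tools.
from typing import Callable

tools: dict[str, Callable[..., object]] = {}


def update_rows(table_id: int, rows: list[dict]) -> str:
    return f"updated {len(rows)} rows in table {table_id}"


def load_rows_tools() -> str:
    # A loader registers new tools and reports what became available.
    tools["update_rows"] = update_rows
    return "New tools are now available: update_rows"


print(load_rows_tools())
print(tools["update_rows"](42, [{"status": "Todo"}]))
```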
+ +**IMPORTANT - HOW TO UNDERSTAND YOUR TOOLS:** +- Read each tool's NAME, DESCRIPTION, and ARGUMENTS carefully +- Tool names and descriptions tell you what they do (e.g., "list_tables", "create_rows_in_table_X") +- Arguments show what inputs they need +- **NEVER use search_user_docs to learn about tools** - it contains end-user documentation, NOT information about which tools to use or how to call them +- Inspect available tools directly to decide what to use + +### HOW TO WORK: +1. **Use action tools** to accomplish the user's goal +2. **If a needed tool isn't available**, call a tool loader to load it (e.g., if you need to create a field but don't have the tool, load field creation tools) +3. **Keep using tools** until the goal is reached or you confirm NO tool can help and NO tool loader can provide the needed tool + +### EXAMPLE - CORRECT USE OF TOOL LOADERS: +**User request:** "Change all 'Done' tasks to 'Todo'" + +**CORRECT approach:** +✓ Step 1: Identify that Tasks is a table in the open database, and status is the field to update +✓ Step 2: Notice you need to update rows but don't have the tool +✓ Step 3: Call the row tool loader (e.g., `load_rows_tools` for table X, requesting update capabilities) +✓ Step 4: Use the newly loaded `update_rows` tool to update the rows +✓ Step 5: Complete the task + +**CRITICAL:** Before giving up, ALWAYS check if a tool loader can provide the necessary tools to complete the task. + +### IF YOU CANNOT COMPLETE THE REQUEST: +If you've exhausted all available tools and loaders and cannot complete the task, offer: "I wasn't able to complete this using my available tools. Would you like me to search the documentation for instructions on how to do this manually?" + +### YOUR PRIORITY: +1. **First**: Use action tools to complete the request +2. **If tool missing**: Try loading it with a tool loader (scan all available loaders) +3. **If truly unable**: Explain the issue and offer to search documentation (never provide instructions from memory) + +The router determined this requires action. You were chosen because the user wants you to DO something, not provide information. + +Be aware of your limitations. If users ask for something outside your capabilities, finish immediately, explain what you can and cannot do based on the limitations below, and offer to search the documentation for further help. +""" + + AGENT_LIMITATIONS + + """ +### TASK INSTRUCTIONS: +""" +) + + +REQUEST_ROUTER_PROMPT = ( + ASSISTANT_SYSTEM_PROMPT_BASE + + """ +Route based on what the user wants YOU to do: + +**delegate_to_agent** (DEFAULT) - User wants YOU to perform an action +- Commands/requests for YOU: "Create...", "Delete...", "Update...", "Add...", "Show me...", "List...", "Find..." +- Vague/unclear requests +- Anything not explicitly asking for instructions + +**search_user_docs** - User wants to learn HOW TO do something themselves +- ONLY when explicitly asking for instructions: "How do I...", "How can I...", "What are the steps to..." +- ONLY when asking for explanations: "What is...", "What does... mean", "Explain..." +- NOT for action requests even if phrased as questions + +## Critical Rules +- "Create X" → delegate_to_agent (action request for YOU) +- "How do I create X?" 
→ search_user_docs (asking for instructions)
+- When uncertain → delegate_to_agent
+
+## Output Requirements
+**delegate_to_agent:**
+- extracted_context: Comprehensive details from conversation history (IDs, names, actions, specs)
+- search_query: empty
+
+**search_user_docs:**
+- search_query: Clear question using Baserow terminology and the answer language if not English
+- extracted_context: empty
+
+## Examples
+
+**Example 1 - delegate_to_agent (action):**
+question: "Create a calendar view"
+→ routing_decision: "delegate_to_agent"
+→ search_query: ""
+→ extracted_context: "User wants to create a calendar view."
+
+**Example 2 - search_user_docs (instructions):**
+question: "How do I create a calendar view?"
+→ routing_decision: "search_user_docs"
+→ search_query: "How to create a calendar view in Baserow"
+→ extracted_context: ""
+
+**Example 3 - delegate_to_agent (with history):**
+question: "Assign them to Bob"
+conversation_history: ["[0] (user): Show urgent tasks", "[1] (assistant): Found 5 tasks in table 'Tasks' (ID: 123)"]
+→ routing_decision: "delegate_to_agent"
+→ search_query: ""
+→ extracted_context: "User wants to assign urgent tasks to Bob. Tasks in table 'Tasks' (ID: 123). Found 5 urgent tasks."
+"""
+)
diff --git a/enterprise/backend/src/baserow_enterprise/assistant/signatures.py b/enterprise/backend/src/baserow_enterprise/assistant/signatures.py
new file mode 100644
index 0000000000..89bf3d0dd1
--- /dev/null
+++ b/enterprise/backend/src/baserow_enterprise/assistant/signatures.py
@@ -0,0 +1,67 @@
+from typing import Literal
+
+import udspy
+
+from .prompts import AGENT_SYSTEM_PROMPT, REQUEST_ROUTER_PROMPT
+
+
+class ChatSignature(udspy.Signature):
+    __doc__ = AGENT_SYSTEM_PROMPT
+
+    question: str = udspy.InputField()
+    context: str = udspy.InputField(
+        description="Context and facts extracted from the history to help answer the question."
+    )
+    ui_context: str | None = udspy.InputField(
+        default=None,
+        description=(
+            "The JSON serialized context the user is currently in. "
+            "It contains information about the user, the timezone, the workspace, etc. "
+            "Whenever it makes sense, use it to ground your answer."
+        ),
+    )
+    answer: str = udspy.OutputField()
+
+
+class RequestRouter(udspy.Signature):
+    __doc__ = REQUEST_ROUTER_PROMPT
+
+    question: str = udspy.InputField(desc="The current user question to route")
+    conversation_history: list[str] = udspy.InputField(
+        desc="Previous messages formatted as '[index] (role): content', ordered chronologically"
+    )
+
+    routing_decision: Literal[
+        "delegate_to_agent", "search_user_docs"
+    ] = udspy.OutputField(
+        desc="Must be one of: 'delegate_to_agent' or 'search_user_docs'"
+    )
+    extracted_context: str = udspy.OutputField(
+        desc=(
+            "Relevant context extracted from conversation history. "
+            "The agent won't see the full history, only the question and this extracted context. "
+            "Always fill with comprehensive details (IDs, names, actions, specifications). "
+            "Be verbose - include all relevant information to help understand the request."
+        ),
+    )
+    search_query: str = udspy.OutputField(
+        desc=(
+            "The search query in English to use with search_user_docs if routing_decision='search_user_docs'. "
+            "Should be a clear, well-formulated question using Baserow terminology. "
+            "Empty string if routing_decision='delegate_to_agent'. "
+            "If the question is in another language, make sure to mention in which "
+            "language the answer should be."
+ ) + ) + + @classmethod + def format_conversation_history(cls, history: udspy.History) -> list[str]: + """ + Format the conversation history into a list of strings for the signature. + """ + + formatted_history = [] + for i, msg in enumerate(history.messages): + formatted_history.append(f"[{i}] ({msg['role']}): {msg['content']}") + + return formatted_history diff --git a/enterprise/backend/src/baserow_enterprise/assistant/tools/__init__.py b/enterprise/backend/src/baserow_enterprise/assistant/tools/__init__.py index ac6006dbb9..4ac95ed235 100644 --- a/enterprise/backend/src/baserow_enterprise/assistant/tools/__init__.py +++ b/enterprise/backend/src/baserow_enterprise/assistant/tools/__init__.py @@ -2,4 +2,4 @@ from .core.tools import * # noqa: F401, F403 from .database.tools import * # noqa: F401, F403 from .navigation.tools import * # noqa: F401, F403 -from .search_docs.tools import * # noqa: F401, F403 +from .search_user_docs.tools import * # noqa: F401, F403 diff --git a/enterprise/backend/src/baserow_enterprise/assistant/tools/automation/__init__.py b/enterprise/backend/src/baserow_enterprise/assistant/tools/automation/__init__.py index ca44da1dd7..ace1c221c3 100644 --- a/enterprise/backend/src/baserow_enterprise/assistant/tools/automation/__init__.py +++ b/enterprise/backend/src/baserow_enterprise/assistant/tools/automation/__init__.py @@ -1,6 +1,6 @@ -from .tools import CreateWorkflowsToolType, ListWorkflowsToolType +from .tools import ListWorkflowsToolType, WorkflowToolFactoryToolType __all__ = [ "ListWorkflowsToolType", - "CreateWorkflowsToolType", + "WorkflowToolFactoryToolType", ] diff --git a/enterprise/backend/src/baserow_enterprise/assistant/tools/automation/tools.py b/enterprise/backend/src/baserow_enterprise/assistant/tools/automation/tools.py index 11cc03b38e..6046ca5ffa 100644 --- a/enterprise/backend/src/baserow_enterprise/assistant/tools/automation/tools.py +++ b/enterprise/backend/src/baserow_enterprise/assistant/tools/automation/tools.py @@ -4,6 +4,8 @@ from django.db import transaction from django.utils.translation import gettext as _ +import udspy + from baserow.contrib.automation.workflows.service import AutomationWorkflowService from baserow.core.models import Workspace from baserow_enterprise.assistant.tools.registries import AssistantToolType @@ -47,13 +49,9 @@ def list_workflows(automation_id: int) -> dict[str, Any]: return list_workflows -def get_create_workflows_tool( +def get_workflow_tool_factory( user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers" ) -> Callable[[int, list[WorkflowCreate]], dict[str, list[dict]]]: - """ - Create new workflows. - """ - def create_workflows( automation_id: int, workflows: list[WorkflowCreate] ) -> dict[str, Any]: @@ -100,7 +98,35 @@ def create_workflows( return {"created_workflows": created} - return create_workflows + def load_workflow_automation_tools(): + """ + TOOL LOADER: Loads tools to manage workflows in an automation. + + After calling this loader, you will have access to: + - create_workflows: Create workflows with triggers, actions, and routers + + Use this when you need to create workflows in an automation but don't have the tool. + """ # noqa: W505 + + @udspy.module_callback + def _load_workflow_automation_tools(context): + nonlocal user, workspace, tool_helpers + + observation = ["New tools are now available.\n"] + + create_tool = udspy.Tool(create_workflows) + new_tools = [create_tool] + observation.append( + "- Use `create_workflows` to create workflows in an automation." 
+            )
+
+            # Re-initialize the module with the new tools for the next iteration
+            context.module.init_module(tools=context.module._tools + new_tools)
+            return "\n".join(observation)
+
+        return _load_workflow_automation_tools
+
+    return load_workflow_automation_tools
 
 
 # ============================================================================
@@ -116,9 +142,9 @@ def get_tool(cls, user, workspace, tool_helpers):
         return get_list_workflows_tool(user, workspace, tool_helpers)
 
 
-class CreateWorkflowsToolType(AssistantToolType):
-    type = "create_workflows"
+class WorkflowToolFactoryToolType(AssistantToolType):
+    type = "workflow_tool_factory"
 
     @classmethod
     def get_tool(cls, user, workspace, tool_helpers):
-        return get_create_workflows_tool(user, workspace, tool_helpers)
+        return get_workflow_tool_factory(user, workspace, tool_helpers)
diff --git a/enterprise/backend/src/baserow_enterprise/assistant/tools/database/tools.py b/enterprise/backend/src/baserow_enterprise/assistant/tools/database/tools.py
index 3d3cfc53ec..54e8b03994 100644
--- a/enterprise/backend/src/baserow_enterprise/assistant/tools/database/tools.py
+++ b/enterprise/backend/src/baserow_enterprise/assistant/tools/database/tools.py
@@ -171,13 +171,32 @@ def get_tool(
         return get_tables_schema_tool(user, workspace, tool_helpers)
 
 
-def get_create_tables_tool(
+def get_table_and_fields_tools_factory(
     user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers"
 ) -> Callable[[list[TableItemCreate]], list[dict[str, Any]]]:
-    """
-    Returns a function that creates a set of tables in a given database the user has
-    access to
-    """
+    def create_fields(
+        table_id: int, fields: list[AnyFieldItemCreate]
+    ) -> dict[str, Any]:
+        """
+        Creates fields in the specified table.
+
+        - Choose the most appropriate field type for each field.
+        - Field names must be unique within a table: check existing names
+          when needed and skip duplicates.
+        - For link_row fields, ensure the linked table already exists in
+          the same database; create it first if needed.
+        """
+
+        nonlocal user, workspace, tool_helpers
+
+        if not fields:
+            return {"created_fields": []}
+
+        table = utils.filter_tables(user, workspace).get(id=table_id)
+
+        with transaction.atomic():
+            created_fields = utils.create_fields(user, table, fields, tool_helpers)
+        return {"created_fields": [field.model_dump() for field in created_fields]}
 
     def create_tables(
         database_id: int, tables: list[TableItemCreate], add_sample_rows: bool = True
@@ -276,64 +295,48 @@ def create_tables(
             "notes": notes,
         }
 
-    return create_tables
-
-
-class CreateTablesToolType(AssistantToolType):
-    type = "create_tables"
-
-    @classmethod
-    def get_tool(
-        cls, user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers"
-    ) -> Callable[[Any], Any]:
-        return get_create_tables_tool(user, workspace, tool_helpers)
-
+    def load_table_and_fields_tools():
+        """
+        TOOL LOADER: Loads table and field creation tools for a database.
 
-def get_create_fields_tool(
-    user: AbstractUser,
-    workspace: Workspace,
-    tool_helpers: "ToolHelpers",
-) -> Callable[[int, list[AnyFieldItemCreate]], list[dict[str, Any]]]:
-    """
-    Returns a function that creates fields in a given table the user has access to
-    in the current workspace.
- """ + After calling this loader, you will have access to: + - create_tables: Create new tables in a database with fields and sample rows + - create_fields: Add new fields to an existing table - def create_fields( - table_id: int, fields: list[AnyFieldItemCreate] - ) -> list[AnyFieldItem]: + Use this when you need to create tables or add fields but don't have the tools. """ - Creates fields in the specified table. - - Choose the most appropriate field type for each field. - - Field names must be unique within a table: check existing names - when needed and skip duplicates. - - For link_row fields, ensure the linked table already exists in - the same database; create it first if needed. - """ + @udspy.module_callback + def _load_table_and_fields_tools(context): + nonlocal user, workspace, tool_helpers - nonlocal user, workspace, tool_helpers + observation = ["New tools are now available.\n"] - if not fields: - return [] + create_tool = udspy.Tool(create_tables) + new_tools = [create_tool] + observation.append("- Use `create_tables` to create tables in a database.") - table = utils.filter_tables(user, workspace).get(id=table_id) + create_fields_tool = udspy.Tool(create_fields) + new_tools.append(create_fields_tool) + observation.append("- Use `create_fields` to create fields in a table.") - with transaction.atomic(): - created_fields = utils.create_fields(user, table, fields, tool_helpers) - return {"created_fields": [field.model_dump() for field in created_fields]} + # Re-initialize the module with the new tools for the next iteration + context.module.init_module(tools=context.module._tools + new_tools) + return "\n".join(observation) + + return _load_table_and_fields_tools - return create_fields + return load_table_and_fields_tools -class CreateFieldsToolType(AssistantToolType): - type = "create_fields" +class TableAndFieldsToolFactoryToolType(AssistantToolType): + type = "table_and_fields_tool_factory" @classmethod def get_tool( cls, user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers" ) -> Callable[[Any], Any]: - return get_create_fields_tool(user, workspace, tool_helpers) + return get_table_and_fields_tools_factory(user, workspace, tool_helpers) def get_list_rows_tool( @@ -347,7 +350,7 @@ def get_list_rows_tool( def list_rows( table_id: int, offset: int = 0, - limit: int = 10, + limit: int = 20, field_ids: list[int] | None = None, ) -> list[dict[str, Any]]: """ @@ -395,34 +398,43 @@ def get_tool( return get_list_rows_tool(user, workspace, tool_helpers) -def get_rows_meta_tool( +def get_rows_tools_factory( user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers", ) -> Callable[[int, list[dict[str, Any]]], list[Any]]: - """ - Returns a meta-tool that, given a table ID, returns an observation that says that - new tools are available and a list of tools to create, update and delete rows - in that table. - """ - - def get_rows_tools( + def load_rows_tools( table_ids: list[int], operations: list[Literal["create", "update", "delete"]], ) -> Tuple[str, list[Callable[[Any], Any]]]: """ - Generates row operation tools for specified tables. Required before: - create/update/delete rows. + TOOL LOADER: Loads row manipulation tools for specified tables. + Make sure to have the correct table IDs. 
+ + After calling this loader, you will have access to table-specific tools: + - create_rows_in_table_X: Create new rows in table X + - update_rows_in_table_X: Update existing rows in table X by their IDs + - delete_rows_in_table_X: Delete rows from table X by their IDs + + Use this when you need to create, update, or delete rows but don't have + the tools. + Call with the table IDs and desired operations (create/update/delete). """ @udspy.module_callback - def load_rows_tools(context): + def _load_rows_tools(context): nonlocal user, workspace, tool_helpers - observation = ["New tools are now available.\n"] + tables = utils.filter_tables(user, workspace).filter(id__in=table_ids) + if not tables: + observation = [ + "No valid tables found for the given IDs. ", + "Make sure the table IDs are correct.", + ] + return "\n".join(observation) new_tools = [] - tables = utils.filter_tables(user, workspace).filter(id__in=table_ids) + observation = ["New tools are now available.\n"] for table in tables: table_tools = utils.get_table_rows_tools( user, workspace, tool_helpers, table @@ -455,19 +467,19 @@ def load_rows_tools(context): context.module.init_module(tools=context.module._tools + new_tools) return "\n".join(observation) - return load_rows_tools + return _load_rows_tools - return get_rows_tools + return load_rows_tools -class GetRowsToolsToolType(AssistantToolType): - type = "get_rows_tools" +class RowsToolFactoryToolType(AssistantToolType): + type = "rows_tool_factory" @classmethod def get_tool( cls, user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers" ) -> Callable[[Any], Any]: - return get_rows_meta_tool(user, workspace, tool_helpers) + return get_rows_tools_factory(user, workspace, tool_helpers) def get_list_views_tool( @@ -523,13 +535,46 @@ def get_tool( return get_list_views_tool(user, workspace, tool_helpers) -def get_create_views_tool( +def get_views_tool_factory( user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers" ) -> Callable[[int, list[str]], list[str]]: - """ - Returns a function that creates views in a given table the user has access to - in the current workspace. - """ + def create_view_filters( + view_filters: list[ViewFiltersArgs], + ) -> list[AnyViewFilterItem]: + """ + Creates filters in the specified views. + """ + + nonlocal user, workspace, tool_helpers + + if not view_filters: + return [] + + created_view_filters = [] + for vf in view_filters: + orm_view = utils.get_view(user, vf.view_id) + tool_helpers.update_status( + _("Creating filters in %(view_name)s...") % {"view_name": orm_view.name} + ) + + fields = {f.id: f for f in orm_view.table.field_set.all()} + created_filters = [] + with transaction.atomic(): + for filter in vf.filters: + try: + orm_filter = utils.create_view_filter( + user, orm_view, fields, filter + ) + except ValueError as e: + logger.warning(f"Skipping filter creation: {e}") + continue + + created_filters.append({"id": orm_filter.id, **filter.model_dump()}) + created_view_filters.append( + {"view_id": vf.view_id, "filters": created_filters} + ) + + return {"created_view_filters": created_view_filters} def create_views( table_id: int, views: list[AnyViewItemCreate] @@ -540,7 +585,7 @@ def create_views( - Choose the most appropriate view type for each view. - View names must be unique within a table: check existing names when needed and - skip duplicates. + avoid duplicates. 
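One plausible way to honor the uniqueness rule just stated, sketched as a plain helper (illustrative only, not the handler's actual implementation):

```python
# Illustrative only: suffix a proposed view name until it no longer collides
# with existing names in the table.
def unique_name(proposed: str, existing: set[str]) -> str:
    if proposed not in existing:
        return proposed
    n = 2
    while f"{proposed} {n}" in existing:
        n += 1
    return f"{proposed} {n}"


print(unique_name("Pending payments", {"Pending payments"}))  # Pending payments 2
```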
""" nonlocal user, workspace, tool_helpers @@ -584,80 +629,51 @@ def create_views( return {"created_views": created_views} - return create_views - - -class CreateViewsToolType(AssistantToolType): - type = "create_views" - - @classmethod - def get_tool( - cls, user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers" - ) -> Callable[[Any], Any]: - return get_create_views_tool(user, workspace, tool_helpers) + def load_views_tools(): + """ + TOOL LOADER: Loads tools to manage views and filters + (grid, gallery, form, kanban, calendar and timeline). + After calling this loader, you will be able to: + - create_views: Create grid, gallery, form, kanban, calendar and timeline views + - create_view_filters: Create filters for specific views to filter rows -def get_create_view_filters_tool( - user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers" -) -> Callable[[int, list[str]], list[str]]: - """ - Returns a function that creates views in a given table the user has access to - in the current workspace. - """ - - def create_view_filters( - view_filters: list[ViewFiltersArgs], - ) -> list[AnyViewFilterItem]: + Use this when you need to create views or filters but don't have the tools yet. """ - Creates filters in the specified view. - - Choose the most appropriate filter for each view. - - Filter names must be unique within a view: check existing names - when needed and skip duplicates. - """ + @udspy.module_callback + def _load_views_tools(context): + nonlocal user, workspace, tool_helpers - nonlocal user, workspace, tool_helpers + observation = ["New tools are now available.\n"] - if not view_filters: - return [] + create_tool = udspy.Tool(create_views) + new_tools = [create_tool] + observation.append("- Use `create_views` to create views.") - created_view_filters = [] - for vf in view_filters: - orm_view = utils.get_view(user, vf.view_id) - tool_helpers.update_status( - _("Creating filters in %(view_name)s...") % {"view_name": orm_view.name} + create_filters_tool = udspy.Tool(create_view_filters) + new_tools.append(create_filters_tool) + observation.append( + "- Use `create_view_filters` to create filters in views." 
) - fields = {f.id: f for f in orm_view.table.field_set.all()} - created_filters = [] - with transaction.atomic(): - for filter in vf.filters: - try: - orm_filter = utils.create_view_filter( - user, orm_view, fields, filter - ) - except ValueError as e: - logger.warning(f"Skipping filter creation: {e}") - continue - - created_filters.append({"id": orm_filter.id, **filter.model_dump()}) - created_view_filters.append( - {"view_id": vf.view_id, "filters": created_filters} - ) + # Re-initialize the module with the new tools for the next iteration + context.module.init_module(tools=context.module._tools + new_tools) + return "\n".join(observation) - return {"created_view_filters": created_view_filters} + return _load_views_tools - return create_view_filters + return load_views_tools -class CreateViewFiltersToolType(AssistantToolType): - type = "create_view_filters" +class ViewsToolFactoryToolType(AssistantToolType): + type = "views_tool_factory" @classmethod def get_tool( cls, user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers" ) -> Callable[[Any], Any]: - return get_create_view_filters_tool(user, workspace, tool_helpers) + return get_views_tool_factory(user, workspace, tool_helpers) def get_formula_type_tool( diff --git a/enterprise/backend/src/baserow_enterprise/assistant/tools/database/types/table.py b/enterprise/backend/src/baserow_enterprise/assistant/tools/database/types/table.py index ae49ebc265..3703e877bc 100644 --- a/enterprise/backend/src/baserow_enterprise/assistant/tools/database/types/table.py +++ b/enterprise/backend/src/baserow_enterprise/assistant/tools/database/types/table.py @@ -42,17 +42,20 @@ class TableItem(BaseTableItem): class ListTablesFilterArg(BaseModel): database_ids: list[int] | None = Field( - ..., description="A list of database_ids to filter. None to exclude this filter" + default=None, + description="A list of database_ids to filter. None to exclude this filter", ) database_names: list[str] | None = Field( - ..., + default=None, description="A list of database_names to filter. None to exclude this filter", ) table_ids: list[int] | None = Field( - ..., description="A list of table ids to filter. None to exclude this filter" + default=None, + description="A list of table ids to filter. None to exclude this filter", ) table_names: list[str] | None = Field( - ..., description="A list of table names to filter. None to exclude this filter" + default=None, + description="A list of table names to filter. None to exclude this filter", ) def to_orm_filter(self) -> Q: diff --git a/enterprise/backend/src/baserow_enterprise/assistant/tools/database/types/views.py b/enterprise/backend/src/baserow_enterprise/assistant/tools/database/types/views.py index 3395bd782a..e28a7438f4 100644 --- a/enterprise/backend/src/baserow_enterprise/assistant/tools/database/types/views.py +++ b/enterprise/backend/src/baserow_enterprise/assistant/tools/database/types/views.py @@ -21,7 +21,7 @@ class ViewItemCreate(BaseModel): description="A sensible name for the view (i.e. 'Pending payments', 'Completed tasks', etc.).", ) public: bool = Field( - ..., + default=False, description="Whether the view is publicly accessible. False unless specified.", ) @@ -50,11 +50,11 @@ def from_django_orm(cls, orm_view: Type[BaserowView]) -> "ViewItem": class GridFieldOption(BaseModel): field_id: int = Field(...) width: int = Field( - ..., + default=200, description="The width of the field in the grid view. 
Default is 200.", ) hidden: bool = Field( - ..., + default=False, description="Whether the field is hidden in the grid view. Default is False.", ) @@ -152,7 +152,7 @@ def from_django_orm(cls, orm_view: CalendarView) -> "CalendarViewItem": class BaseGalleryViewItem(ViewItemCreate): type: Literal["gallery"] = Field(..., description="A gallery view.") cover_field_id: int | None = Field( - None, + default=None, description=( "The ID of the field to use for the gallery cover image. Must be a file field. None if no file field is available." ), @@ -239,43 +239,42 @@ class FormFieldOption(BaseModel): field_id: int = Field(..., description="The ID of the field.") name: str = Field(..., description="The name to show for the field in the form.") description: str = Field( - ..., description="The description to show for the field in the form." + default="", description="The description to show for the field in the form." ) required: bool = Field( - ..., description="Whether the field is required in the form. Default is True." + default=True, + description="Whether the field is required in the form. Default is True.", ) order: int = Field(..., description="The order of the field in the form.") class BaseFormViewItem(ViewItemCreate): type: Literal["form"] = Field(..., description="A form view.") - title: str = Field(..., description="The title of the form. Can be empty.") - description: str = Field( - ..., description="The description of the form. Can be empty." - ) + title: str = Field(..., description="The title of the form.") + description: str = Field(..., description="The description of the form.") submit_button_label: str = Field( - ..., description="The label of the submit button. Default is 'Submit'." + default="Submit", description="The label of the submit button." ) receive_notification_on_submit: bool = Field( - ..., + default=False, description=( - "Whether to receive an email notification when the form is submitted. Default is False." + "Whether to receive an email notification when the form is submitted." ), ) submit_action: Literal["MESSAGE", "REDIRECT"] = Field( - ..., - description="The action to perform when the form is submitted. Default is 'MESSAGE'.", + default="MESSAGE", + description="The action to perform when the form is submitted.", ) submit_action_message: str = Field( - ..., + default="", description=( - "The message to display when the form is submitted and the action is 'MESSAGE'. Default is empty." + "The message to display when the form is submitted and the action is 'MESSAGE'." ), ) submit_action_redirect_url: str = Field( - ..., + default="", description=( - "The URL to redirect to when the form is submitted and the action is 'REDIRECT'. Default is empty." + "The URL to redirect to when the form is submitted and the action is 'REDIRECT'." ), ) diff --git a/enterprise/backend/src/baserow_enterprise/assistant/tools/database/utils.py b/enterprise/backend/src/baserow_enterprise/assistant/tools/database/utils.py index c7d13c8a06..8cb96df372 100644 --- a/enterprise/backend/src/baserow_enterprise/assistant/tools/database/utils.py +++ b/enterprise/backend/src/baserow_enterprise/assistant/tools/database/utils.py @@ -472,7 +472,7 @@ def _update_rows( ) update_rows_tool = udspy.Tool( func=_update_rows, - name=f"update_rows_in_table_{table.id}_by_row_ids", + name=f"update_rows_in_table_{table.id}", description=f"Updates existing rows in the table {table.name} (ID: {table.id}), identified by their row IDs. 
Max 20 at a time.", args={ "rows": { @@ -504,7 +504,7 @@ def _delete_rows(row_ids: list[int]) -> str: delete_rows_tool = udspy.Tool( func=_delete_rows, - name=f"delete_rows_in_table_{table.id}_by_row_ids", + name=f"delete_rows_in_table_{table.id}", description=f"Deletes rows in the table {table.name} (ID: {table.id}). Max 20 at a time.", args={ "row_ids": { diff --git a/enterprise/backend/src/baserow_enterprise/assistant/tools/search_docs/tools.py b/enterprise/backend/src/baserow_enterprise/assistant/tools/search_docs/tools.py deleted file mode 100644 index 619902489e..0000000000 --- a/enterprise/backend/src/baserow_enterprise/assistant/tools/search_docs/tools.py +++ /dev/null @@ -1,103 +0,0 @@ -from typing import TYPE_CHECKING, Annotated, Any, Callable - -from django.contrib.auth.models import AbstractUser -from django.utils.translation import gettext as _ - -import udspy - -from baserow.core.models import Workspace -from baserow_enterprise.assistant.tools.registries import AssistantToolType - -from .handler import KnowledgeBaseHandler - -if TYPE_CHECKING: - from baserow_enterprise.assistant.assistant import ToolHelpers - -MAX_SOURCES = 3 - - -class SearchDocsSignature(udspy.Signature): - """ - Search the Baserow documentation for relevant information to answer user questions. - Never fabricate answers or URLs. Always copy instructions exactly as they appear in - the documentation, without rephrasing. - """ - - question: str = udspy.InputField() - context: list[str] = udspy.InputField() - response: str = udspy.OutputField() - sources: list[str] = udspy.OutputField( - desc=f"List of unique and relevant source URLs. Max {MAX_SOURCES}." - ) - reliability: float = udspy.OutputField( - desc=( - "The reliability score of the response, from 0 to 1. " - "1 means the answer is fully supported by the provided context. " - "0 means the answer is not supported by the provided context." - ) - ) - - -class SearchDocsRAG(udspy.Module): - def __init__(self): - self.rag = udspy.ChainOfThought(SearchDocsSignature) - - def forward(self, question: str, *args, **kwargs): - context = KnowledgeBaseHandler().search(question, num_results=7) - return self.rag(context=context, question=question) - - -def get_search_docs_tool( - user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers" -) -> Callable[[str], dict[str, Any]]: - """ - Returns a function that searches the Baserow documentation for a given query. - """ - - def search_docs( - question: Annotated[ - str, "The English version of the user question, using Baserow vocabulary." - ] - ) -> dict[str, Any]: - """ - Search Baserow documentation for relevant information. Make sure the question - is in English and uses Baserow-specific terminology to get the best results. 
- """ - - nonlocal tool_helpers - - tool_helpers.update_status(_("Exploring the knowledge base...")) - - search_tool = SearchDocsRAG() - answer = search_tool(question=question) - # Somehow sources can be objects with an "url" attribute instead of strings, - # let's fix that - fixed_sources = [] - for src in answer.sources[:MAX_SOURCES]: - if isinstance(src, str): - fixed_sources.append(src) - elif isinstance(src, dict) and "url" in src: - fixed_sources.append(src["url"]) - - return { - "response": answer.response, - "sources": fixed_sources, - "reliability": answer.reliability, - } - - return search_docs - - -class SearchDocsToolType(AssistantToolType): - type = "search_docs" - - def can_use( - self, user: AbstractUser, workspace: Workspace, *args, **kwargs - ) -> bool: - return KnowledgeBaseHandler().can_search() - - @classmethod - def get_tool( - cls, user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers" - ) -> Callable[[Any], Any]: - return get_search_docs_tool(user, workspace, tool_helpers) diff --git a/enterprise/backend/src/baserow_enterprise/assistant/tools/search_docs/__init__.py b/enterprise/backend/src/baserow_enterprise/assistant/tools/search_user_docs/__init__.py similarity index 100% rename from enterprise/backend/src/baserow_enterprise/assistant/tools/search_docs/__init__.py rename to enterprise/backend/src/baserow_enterprise/assistant/tools/search_user_docs/__init__.py diff --git a/enterprise/backend/src/baserow_enterprise/assistant/tools/search_docs/handler.py b/enterprise/backend/src/baserow_enterprise/assistant/tools/search_user_docs/handler.py similarity index 97% rename from enterprise/backend/src/baserow_enterprise/assistant/tools/search_docs/handler.py rename to enterprise/backend/src/baserow_enterprise/assistant/tools/search_user_docs/handler.py index 2596548d2e..b554c57af6 100644 --- a/enterprise/backend/src/baserow_enterprise/assistant/tools/search_docs/handler.py +++ b/enterprise/backend/src/baserow_enterprise/assistant/tools/search_user_docs/handler.py @@ -102,21 +102,18 @@ def embed_knowledge_chunks( return self.embed_texts([chunk.content for chunk in chunks]) - def query(self, query: str, num_results: int = 10) -> list[str]: + def query(self, query: str, num_results: int = 10) -> list[KnowledgeBaseChunk]: """ Retrieve the most relevant document chunks for the given query. It vectorizes the query and performs a similarity search using the vector field. :param query: The text query to search for :param num_results: The number of results to return - :return: A list of document chunk contents matching the query + :return: A list of KnowledgeBaseChunk instances matching the query """ (vector_query,) = self.embed_texts([query]) - results = self.raw_query(vector_query, num_results=num_results) - response = [res.content for res in results] - - return response + return self.raw_query(vector_query, num_results=num_results) def raw_query( self, query_vector: list[float], num_results: int = 10 @@ -133,6 +130,7 @@ def raw_query( KnowledgeBaseChunk.objects.filter( source_document__status=KnowledgeBaseDocument.Status.READY, ) + .select_related("source_document") .alias( distance=L2Distance(KnowledgeBaseChunk.VECTOR_FIELD_NAME, query_vector) ) @@ -185,13 +183,13 @@ def can_search(self) -> bool: ).exists() ) - def search(self, query: str, num_results=10) -> list[str]: + def search(self, query: str, num_results=10) -> list[KnowledgeBaseChunk]: """ Retrieve the most relevant knowledge chunks for the given query. 
        :param query: The text query to search for
         :param num_results: The number of results to return
-        :return: A list of document chunk contents matching the query
+        :return: A list of KnowledgeBaseChunk instances matching the query
         """
 
         return self.vector_handler.query(query, num_results=num_results)
diff --git a/enterprise/backend/src/baserow_enterprise/assistant/tools/search_user_docs/tools.py b/enterprise/backend/src/baserow_enterprise/assistant/tools/search_user_docs/tools.py
new file mode 100644
index 0000000000..b7b7480ccd
--- /dev/null
+++ b/enterprise/backend/src/baserow_enterprise/assistant/tools/search_user_docs/tools.py
@@ -0,0 +1,156 @@
+from typing import TYPE_CHECKING, Annotated, Any, Callable
+
+from django.contrib.auth.models import AbstractUser
+from django.utils.translation import gettext as _
+
+import udspy
+from asgiref.sync import sync_to_async
+
+from baserow.core.models import Workspace
+from baserow_enterprise.assistant.models import KnowledgeBaseChunk
+from baserow_enterprise.assistant.tools.registries import AssistantToolType
+
+from .handler import KnowledgeBaseHandler
+
+if TYPE_CHECKING:
+    from baserow_enterprise.assistant.assistant import ToolHelpers
+
+
+class SearchDocsSignature(udspy.Signature):
+    """
+    Given a user question and the relevant documentation chunks as context, provide an
+    accurate and concise answer along with a reliability score. If the documentation
+    provides instructions or URLs, include them in the answer. If the answer is not
+    found in the context, respond with "Nothing found in the documentation."
+
+    Never fabricate answers or URLs.
+    """
+
+    question: str = udspy.InputField()
+    context: dict[str, str] = udspy.InputField(
+        desc="A mapping of source URLs to content."
+    )
+
+    answer: str = udspy.OutputField()
+    sources: list[str] = udspy.OutputField(
+        desc=(
+            "A list of source URLs as strings used to generate the answer, "
+            "picked from the provided context keys, in order of importance."
+        )
+    )
+    reliability: float = udspy.OutputField(
+        desc=(
+            "The reliability score of the answer, from 0 to 1. "
+            "1 means the answer is fully supported by the provided context. "
+            "0 means the answer is not supported by the provided context."
+        )
+    )
+
+    @classmethod
+    def format_context(cls, chunks: list[KnowledgeBaseChunk]) -> dict[str, str]:
+        """
+        Formats the retrieved chunks as a mapping from source URL to combined
+        content for the signature.
+
+        :param chunks: The list of knowledge base chunks.
+        :return: A dictionary mapping source URLs to their combined content.
+        """
+
+        context = {}
+        for chunk in chunks:
+            url = chunk.source_document.source_url
+            content = chunk.content
+            if url not in context:
+                context[url] = content
+            else:
+                context[url] += "\n" + content
+
+        return context
+
+
+def get_search_user_docs_tool(
+    user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers"
+) -> Callable[[str], dict[str, Any]]:
+    """
+    Returns a function that searches the Baserow documentation for a given query.
+    """
+
+    async def search_user_docs(
+        question: Annotated[
+            str, "The English version of the user question, using Baserow vocabulary."
+        ]
+    ) -> dict[str, Any]:
+        """
+        Search Baserow documentation to provide instructions and information for USERS.
+
+        This tool provides end-user documentation explaining Baserow features and how
+        users can use them manually through the UI.
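For reference, the merging behavior of `SearchDocsSignature.format_context` on a small example; `Chunk` below is a flattened stand-in for `KnowledgeBaseChunk`, whose real URL lives at `source_document.source_url`.

```python
# "Chunk" flattens the real model, which reaches the URL via
# chunk.source_document.source_url.
from dataclasses import dataclass


@dataclass
class Chunk:
    source_url: str
    content: str


def format_context(chunks: list[Chunk]) -> dict[str, str]:
    context: dict[str, str] = {}
    for chunk in chunks:
        if chunk.source_url in context:
            context[chunk.source_url] += "\n" + chunk.content
        else:
            context[chunk.source_url] = chunk.content
    return context


print(format_context([
    Chunk("https://baserow.io/user-docs/views", "Grid views show rows."),
    Chunk("https://baserow.io/user-docs/views", "Form views collect input."),
]))
# {'https://baserow.io/user-docs/views': 'Grid views show rows.\nForm views collect input.'}
```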
+        It does NOT contain information about:
+        - Which tools/functions the agent should use
+        - How to use agent tools or loaders
+        - Agent-specific implementation details
+
+        Use this ONLY when the user explicitly asks for instructions on how to do
+        something themselves, or wants to learn about Baserow features.
+
+        Make sure the question is in English and uses Baserow-specific terminology
+        to get the best results.
+        """
+
+        nonlocal tool_helpers
+
+        tool_helpers.update_status(_("Exploring the knowledge base..."))
+
+        @sync_to_async
+        def _search(question: str) -> list[KnowledgeBaseChunk]:
+            chunks = KnowledgeBaseHandler().search(question)
+            return list(chunks)
+
+        searcher = udspy.ChainOfThought(SearchDocsSignature)
+        relevant_chunks = await _search(question)
+        prediction = await searcher.aexecute(
+            question=question,
+            context=SearchDocsSignature.format_context(relevant_chunks),
+            stream=True,
+        )
+
+        sources = []
+        available_urls = {chunk.source_document.source_url for chunk in relevant_chunks}
+        for url in prediction.sources:
+            # somehow LLMs sometimes return sources as objects
+            if isinstance(url, dict) and "url" in url:
+                url = url["url"]
+
+            if not isinstance(url, str):
+                continue
+
+            if url in available_urls and url not in sources:
+                sources.append(url)
+
+        # If for any reason the model wasn't able to return sources correctly, fill them
+        # from the available URLs.
+        if not sources:
+            sources = list(available_urls)
+
+        return {
+            "answer": prediction.answer,
+            "reliability": prediction.reliability,
+            "sources": sources,
+        }
+
+    return search_user_docs
+
+
+class SearchDocsToolType(AssistantToolType):
+    type = "search_user_docs"
+
+    def can_use(
+        self, user: AbstractUser, workspace: Workspace, *args, **kwargs
+    ) -> bool:
+        return KnowledgeBaseHandler().can_search()
+
+    @classmethod
+    def get_tool(
+        cls, user: AbstractUser, workspace: Workspace, tool_helpers: "ToolHelpers"
+    ) -> Callable[[Any], Any]:
+        return get_search_user_docs_tool(user, workspace, tool_helpers)
diff --git a/enterprise/backend/src/baserow_enterprise/assistant/types.py b/enterprise/backend/src/baserow_enterprise/assistant/types.py
index 9d6337a055..8e4f068687 100644
--- a/enterprise/backend/src/baserow_enterprise/assistant/types.py
+++ b/enterprise/backend/src/baserow_enterprise/assistant/types.py
@@ -90,6 +90,9 @@ def from_validate_request(cls, request, ui_context_data) -> "UIContext":
         user_context = UserUIContext.from_user(request.user)
         return cls(user=user_context, **ui_context_data)
 
+    def format(self) -> str:
+        return self.model_dump_json(exclude_none=True)
+
 
 class AssistantMessageType(StrEnum):
     HUMAN = "human"
diff --git a/enterprise/backend/src/baserow_enterprise/config/settings/settings.py b/enterprise/backend/src/baserow_enterprise/config/settings/settings.py
index 02856687ff..63105b068d 100644
--- a/enterprise/backend/src/baserow_enterprise/config/settings/settings.py
+++ b/enterprise/backend/src/baserow_enterprise/config/settings/settings.py
@@ -80,3 +80,6 @@ def setup(settings):
     settings.BASEROW_ENTERPRISE_ASSISTANT_LLM_MODEL = os.getenv(
         "BASEROW_ENTERPRISE_ASSISTANT_LLM_MODEL", ""
     )
+    settings.BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE = float(
+        os.getenv("BASEROW_ENTERPRISE_ASSISTANT_LLM_TEMPERATURE", "") or 0.3
+    )
diff --git a/enterprise/backend/src/baserow_enterprise/locale/en/LC_MESSAGES/django.po b/enterprise/backend/src/baserow_enterprise/locale/en/LC_MESSAGES/django.po
index f53de93b37..627e5114c3 100644
--- a/enterprise/backend/src/baserow_enterprise/locale/en/LC_MESSAGES/django.po
+++
b/enterprise/backend/src/baserow_enterprise/locale/en/LC_MESSAGES/django.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-10-17 14:14+0000\n" +"POT-Creation-Date: 2025-11-17 15:17+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -18,97 +18,146 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#: src/baserow_enterprise/assistant/tools/database/tools.py:63 -msgid "Listing databases..." +#: src/baserow_enterprise/assistant/assistant.py:461 +msgid "Thinking..." msgstr "" -#: src/baserow_enterprise/assistant/tools/database/tools.py:136 +#: src/baserow_enterprise/assistant/assistant.py:469 +msgid "" +"I wanted to search the documentation for you, but the search tool isn't " +"currently available.\n" +"\n" +"To enable documentation search, you'll need to set up the local knowledge " +"base. \n" +"\n" +"You can find setup instructions at: https://baserow.io/user-docs" +msgstr "" + +#: src/baserow_enterprise/assistant/tools/automation/tools.py:38 +msgid "Listing workflows..." +msgstr "" + +#: src/baserow_enterprise/assistant/tools/automation/utils.py:283 #, python-format -msgid "Listing tables in %(database_names)s..." +msgid "Creating workflow '%(name)s'..." msgstr "" -#: src/baserow_enterprise/assistant/tools/database/tools.py:195 +#: src/baserow_enterprise/assistant/tools/automation/utils.py:296 #, python-format -msgid "Inspecting %(table_names)s schema..." +msgid "Creating trigger '%(label)s'..." msgstr "" -#: src/baserow_enterprise/assistant/tools/database/tools.py:238 +#: src/baserow_enterprise/assistant/tools/automation/utils.py:316 #, python-format -msgid "Creating database %(database_name)s..." +msgid "Creating node '%(label)s'..." msgstr "" -#: src/baserow_enterprise/assistant/tools/database/tools.py:303 +#: src/baserow_enterprise/assistant/tools/automation/utils.py:381 +#, python-format +msgid "Generating formulas for node '%(label)s'..." +msgstr "" + +#: src/baserow_enterprise/assistant/tools/core/tools.py:44 +#, python-format +msgid "Listing %(builder_types)ss..." +msgstr "" + +#: src/baserow_enterprise/assistant/tools/core/tools.py:103 +#, python-format +msgid "Creating %(builder_type)s %(builder_name)s..." +msgstr "" + +#: src/baserow_enterprise/assistant/tools/database/tools.py:92 +#, python-format +msgid "Listing tables in %(database_names)s..." +msgstr "" + +#: src/baserow_enterprise/assistant/tools/database/tools.py:151 +#, python-format +msgid "Inspecting %(table_names)s schema..." +msgstr "" + +#: src/baserow_enterprise/assistant/tools/database/tools.py:230 #, python-format msgid "Creating table %(table_name)s..." msgstr "" -#: src/baserow_enterprise/assistant/tools/database/tools.py:346 +#: src/baserow_enterprise/assistant/tools/database/tools.py:272 msgid "Preparing example rows for these new tables..." msgstr "" -#: src/baserow_enterprise/assistant/tools/database/tools.py:468 +#: src/baserow_enterprise/assistant/tools/database/tools.py:368 #, python-format msgid "Listing rows in %(table_name)s " msgstr "" -#: src/baserow_enterprise/assistant/tools/database/tools.py:593 +#: src/baserow_enterprise/assistant/tools/database/tools.py:506 #, python-format msgid "Listing views in %(table_name)s..." 
msgstr "" -#: src/baserow_enterprise/assistant/tools/database/tools.py:656 +#: src/baserow_enterprise/assistant/tools/database/tools.py:557 #, python-format -msgid "Creating %(view_type)s view %(view_name)s" +msgid "Creating filters in %(view_name)s..." msgstr "" -#: src/baserow_enterprise/assistant/tools/database/tools.py:723 +#: src/baserow_enterprise/assistant/tools/database/tools.py:602 #, python-format -msgid "Creating filters in %(view_name)s..." +msgid "Creating %(view_type)s view %(view_name)s" +msgstr "" + +#: src/baserow_enterprise/assistant/tools/database/tools.py:781 +msgid "Generating formula..." msgstr "" -#: src/baserow_enterprise/assistant/tools/database/utils.py:119 +#: src/baserow_enterprise/assistant/tools/database/utils.py:127 #, python-format msgid "Creating field %(field_name)s..." msgstr "" -#: src/baserow_enterprise/assistant/tools/database/utils.py:409 +#: src/baserow_enterprise/assistant/tools/database/utils.py:417 #, python-format msgid "Creating rows in %(table_name)s " msgstr "" -#: src/baserow_enterprise/assistant/tools/database/utils.py:450 +#: src/baserow_enterprise/assistant/tools/database/utils.py:458 #, python-format msgid "Updating rows in %(table_name)s " msgstr "" -#: src/baserow_enterprise/assistant/tools/database/utils.py:489 +#: src/baserow_enterprise/assistant/tools/database/utils.py:497 #, python-format msgid "Deleting rows in %(table_name)s " msgstr "" -#: src/baserow_enterprise/assistant/tools/navigation/tools.py:34 +#: src/baserow_enterprise/assistant/tools/navigation/tools.py:39 #, python-format msgid "Navigating to %(location)s..." msgstr "" -#: src/baserow_enterprise/assistant/tools/search_docs/tools.py:44 +#: src/baserow_enterprise/assistant/tools/search_user_docs/tools.py:102 msgid "Exploring the knowledge base..." 
msgstr "" -#: src/baserow_enterprise/assistant/types.py:176 +#: src/baserow_enterprise/assistant/types.py:220 #, python-format msgid "table %(table_name)s" msgstr "" -#: src/baserow_enterprise/assistant/types.py:187 +#: src/baserow_enterprise/assistant/types.py:232 #, python-format msgid "view %(view_name)s" msgstr "" -#: src/baserow_enterprise/assistant/types.py:194 +#: src/baserow_enterprise/assistant/types.py:239 msgid "home" msgstr "" +#: src/baserow_enterprise/assistant/types.py:249 +#, python-format +msgid "workflow %(workflow_name)s" +msgstr "" + #: src/baserow_enterprise/audit_log/job_types.py:36 msgid "User Email" msgstr "" diff --git a/enterprise/backend/src/baserow_enterprise/management/commands/sync_knowledge_base.py b/enterprise/backend/src/baserow_enterprise/management/commands/sync_knowledge_base.py index acb5d265c0..d2654191aa 100644 --- a/enterprise/backend/src/baserow_enterprise/management/commands/sync_knowledge_base.py +++ b/enterprise/backend/src/baserow_enterprise/management/commands/sync_knowledge_base.py @@ -1,6 +1,8 @@ from django.core.management.base import BaseCommand -from baserow_enterprise.assistant.tools.search_docs.handler import KnowledgeBaseHandler +from baserow_enterprise.assistant.tools.search_user_docs.handler import ( + KnowledgeBaseHandler, +) class Command(BaseCommand): diff --git a/enterprise/backend/tests/baserow_enterprise_tests/api/assistant/test_assistant_views.py b/enterprise/backend/tests/baserow_enterprise_tests/api/assistant/test_assistant_views.py index 43f19fe12c..87740e6f12 100644 --- a/enterprise/backend/tests/baserow_enterprise_tests/api/assistant/test_assistant_views.py +++ b/enterprise/backend/tests/baserow_enterprise_tests/api/assistant/test_assistant_views.py @@ -220,10 +220,15 @@ def test_cannot_send_message_without_valid_workspace( @pytest.mark.django_db() @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def test_send_message_creates_chat_if_not_exists( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, ): """Test that sending a message creates a chat if it doesn't exist""" @@ -282,10 +287,15 @@ async def mock_astream(human_message): @pytest.mark.django_db @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def test_send_message_streams_response( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, ): """Test that the endpoint streams AI responses properly""" @@ -865,10 +875,15 @@ def test_get_messages_includes_human_sentiment_when_feedback_exists( @pytest.mark.django_db @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def test_send_message_streams_sources_from_tools( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, 
): """Test that sources from tool calls are included in streamed responses""" @@ -954,10 +969,15 @@ async def mock_astream(human_message): @pytest.mark.django_db @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def test_send_message_streams_thinking_messages_during_tool_execution( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, ): """Test that thinking messages are streamed during tool execution""" @@ -1039,10 +1059,15 @@ async def mock_astream(human_message): @pytest.mark.django_db @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def test_send_message_generates_chat_title_on_first_message( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, ): """Test that a chat title is generated and streamed on the first message""" @@ -1109,10 +1134,15 @@ async def mock_astream(human_message): @pytest.mark.django_db @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def test_send_message_does_not_generate_title_on_subsequent_messages( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, ): """Test that chat title is NOT regenerated on subsequent messages""" @@ -1179,10 +1209,15 @@ async def mock_astream(human_message): @pytest.mark.django_db @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def test_send_message_handles_ai_error_in_streaming( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, ): """Test that AI errors are properly streamed to the client""" @@ -1252,10 +1287,15 @@ async def mock_astream(human_message): @pytest.mark.django_db @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def test_send_message_with_minimal_ui_context( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, ): """Test sending message with minimal UI context (workspace only)""" @@ -1318,10 +1358,15 @@ async def mock_astream(human_message): @pytest.mark.django_db @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def 
test_send_message_with_database_builder_context( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, ): """ Test sending message with database builder context @@ -1398,10 +1443,15 @@ async def mock_astream(human_message): @pytest.mark.django_db @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def test_send_message_with_application_builder_context( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, ): """ Test sending message with application builder context @@ -1504,10 +1554,15 @@ def test_send_message_ui_context_validation_missing_workspace( @pytest.mark.django_db @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def test_send_message_with_automation_context( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, ): """Test sending message with automation builder context""" @@ -1575,10 +1630,15 @@ async def mock_astream(human_message): @pytest.mark.django_db @override_settings(DEBUG=True) +@patch("baserow_enterprise.api.assistant.views.check_lm_ready_or_raise") @patch("baserow_enterprise.assistant.handler.Assistant") @patch("baserow_enterprise.api.assistant.views.AssistantHandler") def test_send_message_with_dashboard_context( - mock_handler_class, mock_assistant_class, api_client, enterprise_data_fixture + mock_handler_class, + mock_assistant_class, + mock_check_lm, + api_client, + enterprise_data_fixture, ): """Test sending message with dashboard context""" diff --git a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant.py b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant.py index 55eec28adc..7da32e04a5 100644 --- a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant.py +++ b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant.py @@ -8,7 +8,7 @@ - Generates and persists chat titles appropriately - Adapts its signature based on chat state """ -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock, Mock, patch from django.core.cache import cache @@ -83,7 +83,7 @@ def test_on_tool_end_extracts_sources_from_outputs(self): # Mock tool instance and inputs tool_instance = MagicMock() - tool_instance.name = "search_docs" + tool_instance.name = "search_user_docs" inputs = {"query": "test"} # Register tool call @@ -203,22 +203,31 @@ def test_aload_chat_history_formats_as_question_answer_pairs( ) assistant = Assistant(chat) - async_to_sync(assistant.aload_chat_history)() + assistant.history = async_to_sync(assistant.afetch_chat_history)() # History should contain user/assistant message pairs assert assistant.history is not None - assert len(assistant.history) == 4 + assert len(assistant.history.messages) == 4 # First pair - assert assistant.history[0] == "Human: What is Baserow?" 
- assert assistant.history[1] == "AI: Baserow is a no-code database platform." + assert assistant.history.messages[0] == { + "role": "user", + "content": "What is Baserow?", + } + assert assistant.history.messages[1] == { + "role": "assistant", + "content": "Baserow is a no-code database platform.", + } # Second pair - assert assistant.history[2] == "Human: How do I create a table?" - assert ( - assistant.history[3] - == "AI: You can create a table by clicking the + button." - ) + assert assistant.history.messages[2] == { + "role": "user", + "content": "How do I create a table?", + } + assert assistant.history.messages[3] == { + "role": "assistant", + "content": "You can create a table by clicking the + button.", + } def test_aload_chat_history_respects_limit(self, enterprise_data_fixture): """Test that history loading respects the limit parameter""" @@ -241,10 +250,12 @@ def test_aload_chat_history_respects_limit(self, enterprise_data_fixture): ) assistant = Assistant(chat) - async_to_sync(assistant.aload_chat_history)(limit=6) # Last 6 messages + assistant.history = async_to_sync(assistant.afetch_chat_history)( + limit=6 + ) # Last 6 messages # Should only load the most recent 6 messages (3 pairs) - assert len(assistant.history) == 6 + assert len(assistant.history.messages) == 6 def test_aload_chat_history_handles_incomplete_pairs(self, enterprise_data_fixture): """ @@ -271,22 +282,117 @@ def test_aload_chat_history_handles_incomplete_pairs(self, enterprise_data_fixtu ) assistant = Assistant(chat) - async_to_sync(assistant.aload_chat_history)() + assistant.history = async_to_sync(assistant.afetch_chat_history)() # Should only include the complete pair (2 messages: user + assistant) - assert len(assistant.history) == 2 - assert assistant.history[0] == "Human: Question 1" - assert assistant.history[1] == "AI: Answer 1" + assert len(assistant.history.messages) == 2 + assert assistant.history.messages[0] == { + "role": "user", + "content": "Question 1", + } + assert assistant.history.messages[1] == { + "role": "assistant", + "content": "Answer 1", + } + + @patch("udspy.ReAct.astream") + @patch("udspy.LM") + def test_history_is_passed_to_astream_as_context( + self, mock_lm, mock_react_astream, enterprise_data_fixture + ): + """ + Test that chat history is loaded correctly and passed to the agent as context + """ + + user = enterprise_data_fixture.create_user() + workspace = enterprise_data_fixture.create_workspace(user=user) + chat = AssistantChat.objects.create( + user=user, workspace=workspace, title="Test Chat" + ) + + # Create conversation history (2 complete pairs) + AssistantChatMessage.objects.create( + chat=chat, role=AssistantChatMessage.Role.HUMAN, content="What is Baserow?" 
+ ) + AssistantChatMessage.objects.create( + chat=chat, + role=AssistantChatMessage.Role.AI, + content="Baserow is a no-code database", + ) + AssistantChatMessage.objects.create( + chat=chat, + role=AssistantChatMessage.Role.HUMAN, + content="How do I create a table?", + ) + AssistantChatMessage.objects.create( + chat=chat, + role=AssistantChatMessage.Role.AI, + content="Click the Create Table button", + ) + + assistant = Assistant(chat) + + # Mock the router stream to delegate to agent with extracted context + def mock_router_stream_factory(*args, **kwargs): + # Verify conversation history is passed to router + assert kwargs["conversation_history"] == [ + "[0] (user): What is Baserow?", + "[1] (assistant): Baserow is a no-code database", + "[2] (user): How do I create a table?", + "[3] (assistant): Click the Create Table button", + ] + + async def _stream(): + yield Prediction( + routing_decision="delegate_to_agent", + extracted_context="User wants to add a view to their table", + search_query="", + ) + + return _stream() + + # Patch the instance method + assistant._request_router.astream = Mock(side_effect=mock_router_stream_factory) + + # Mock the agent stream + def mock_agent_stream_factory(*args, **kwargs): + # Verify extracted context is passed to agent + assert kwargs["context"] == "User wants to add a view to their table" + + async def _stream(): + yield OutputStreamChunk( + module=None, + field_name="answer", + delta="Answer", + content="Answer", + is_complete=False, + ) + yield Prediction(answer="Answer", trajectory=[], reasoning="") + + return _stream() + + mock_react_astream.side_effect = mock_agent_stream_factory + mock_lm.return_value.model = "test-model" + + message = HumanMessage(content="How to add a view?") + + # Consume the stream to trigger assertions + async def consume_stream(): + async for _ in assistant.astream_messages(message): + pass + + async_to_sync(consume_stream)() @pytest.mark.django_db class TestAssistantMessagePersistence: """Test that messages are persisted correctly during streaming""" + @patch("udspy.ChainOfThought.astream") @patch("udspy.ReAct.astream") @patch("udspy.LM") def test_astream_messages_persists_human_message( - self, mock_lm, mock_astream, enterprise_data_fixture + self, mock_lm, mock_react_astream, mock_cot_astream, enterprise_data_fixture ): """Test that human messages are persisted to database before streaming""" @@ -296,8 +402,18 @@ def test_astream_messages_persists_human_message( user=user, workspace=workspace, title="Test Chat" ) - # Mock the streaming - async def mock_stream(*args, **kwargs): + # Mock the router stream + async def mock_router_stream(*args, **kwargs): + yield Prediction( + routing_decision="delegate_to_agent", + extracted_context="", + search_query="", + ) + + mock_cot_astream.return_value = mock_router_stream() + + # Mock the agent streaming + async def mock_agent_stream(*args, **kwargs): # Yield a simple response yield OutputStreamChunk( module=None, @@ -308,7 +424,7 @@ async def mock_stream(*args, **kwargs): ) yield Prediction(answer="Hello", trajectory=[], reasoning="") - mock_astream.return_value = mock_stream() + mock_react_astream.return_value = mock_agent_stream() # Configure mock LM to return a serializable model name mock_lm.return_value.model = "test-model" @@ -338,10 +454,11 @@ async def consume_stream(): ).first() assert saved_message.content == "Test message" + @patch("udspy.ChainOfThought.astream") @patch("udspy.ReAct.astream") @patch("udspy.LM") def 
test_astream_messages_persists_ai_message_with_sources( - self, mock_lm, mock_astream, enterprise_data_fixture + self, mock_lm, mock_react_astream, mock_cot_astream, enterprise_data_fixture ): """Test that AI messages are persisted with sources in artifacts""" @@ -356,8 +473,18 @@ def test_astream_messages_persists_ai_message_with_sources( assistant = Assistant(chat) - # Mock the streaming with a Prediction at the end - async def mock_stream(*args, **kwargs): + # Mock the router stream + async def mock_router_stream(*args, **kwargs): + yield Prediction( + routing_decision="delegate_to_agent", + extracted_context="", + search_query="", + ) + + mock_cot_astream.return_value = mock_router_stream() + + # Mock the agent streaming with a Prediction at the end + async def mock_agent_stream(*args, **kwargs): yield OutputStreamChunk( module=None, field_name="answer", @@ -372,7 +499,7 @@ async def mock_stream(*args, **kwargs): reasoning="", ) - mock_astream.return_value = mock_stream() + mock_react_astream.return_value = mock_agent_stream() ui_context = UIContext( workspace=WorkspaceUIContext(id=workspace.id, name=workspace.name), user=UserUIContext(id=user.id, name=user.first_name, email=user.email), @@ -394,12 +521,14 @@ async def consume_stream(): ).count() assert ai_messages == 1 + @patch("udspy.ChainOfThought.astream") @patch("udspy.ReAct.astream") @patch("udspy.Predict") def test_astream_messages_persists_chat_title( self, mock_predict_class, - mock_astream, + mock_react_astream, + mock_cot_astream, enterprise_data_fixture, ): """Test that chat titles are persisted to the database""" @@ -420,8 +549,18 @@ async def mock_title_aforward(*args, **kwargs): assistant = Assistant(chat) - # Mock streaming - async def mock_stream(*args, **kwargs): + # Mock the router stream + async def mock_router_stream(*args, **kwargs): + yield Prediction( + routing_decision="delegate_to_agent", + extracted_context="", + search_query="", + ) + + mock_cot_astream.return_value = mock_router_stream() + + # Mock agent streaming + async def mock_agent_stream(*args, **kwargs): yield OutputStreamChunk( module=None, field_name="answer", @@ -433,7 +572,7 @@ async def mock_stream(*args, **kwargs): module=assistant._assistant, answer="Hello", trajectory=[], reasoning="" ) - mock_astream.return_value = mock_stream() + mock_react_astream.return_value = mock_agent_stream() ui_context = UIContext( workspace=WorkspaceUIContext(id=workspace.id, name=workspace.name), user=UserUIContext(id=user.id, name=user.first_name, email=user.email), @@ -458,10 +597,11 @@ async def consume_stream(): class TestAssistantStreaming: """Test streaming behavior of the Assistant""" + @patch("udspy.ChainOfThought.astream") @patch("udspy.ReAct.astream") @patch("udspy.LM") def test_astream_messages_yields_answer_chunks( - self, mock_lm, mock_astream, enterprise_data_fixture + self, mock_lm, mock_react_astream, mock_cot_astream, enterprise_data_fixture ): """Test that answer chunks are yielded during streaming""" @@ -471,17 +611,29 @@ def test_astream_messages_yields_answer_chunks( user=user, workspace=workspace, title="Test Chat" ) - # Mock streaming - async def mock_stream(*args, **kwargs): + # Mock the router stream + async def mock_router_stream(*args, **kwargs): + yield Prediction( + routing_decision="delegate_to_agent", + extracted_context="", + search_query="", + ) + + mock_cot_astream.return_value = mock_router_stream() + + assistant = Assistant(chat) + + # Mock agent streaming + async def mock_agent_stream(*args, **kwargs): yield OutputStreamChunk( 
- module=None, + module=assistant._assistant.extract_module, field_name="answer", delta="Hello", content="Hello", is_complete=False, ) yield OutputStreamChunk( - module=None, + module=assistant._assistant.extract_module, field_name="answer", delta=" world", content="Hello world", @@ -489,20 +641,14 @@ async def mock_stream(*args, **kwargs): ) yield Prediction(answer="Hello world", trajectory=[], reasoning="") - mock_astream.return_value = mock_stream() + mock_react_astream.return_value = mock_agent_stream() # Configure mock LM to return a serializable model name mock_lm.return_value.model = "test-model" - assistant = Assistant(chat) - ui_context = UIContext( - workspace=WorkspaceUIContext(id=workspace.id, name=workspace.name), - user=UserUIContext(id=user.id, name=user.first_name, email=user.email), - ) - async def consume_stream(): chunks = [] - human_message = HumanMessage(content="Test", ui_context=ui_context) + human_message = HumanMessage(content="Test") async for msg in assistant.astream_messages(human_message): if isinstance(msg, AiMessageChunk): chunks.append(msg) @@ -515,12 +661,14 @@ async def consume_stream(): assert chunks[0].content == "Hello" assert chunks[1].content == "Hello world" + @patch("udspy.ChainOfThought.astream") @patch("udspy.ReAct.astream") @patch("udspy.Predict") def test_astream_messages_yields_title_chunks( self, mock_predict_class, - mock_astream, + mock_react_astream, + mock_cot_astream, enterprise_data_fixture, ): """Test that title chunks are yielded for new chats""" @@ -541,8 +689,18 @@ async def mock_title_aforward(*args, **kwargs): assistant = Assistant(chat) - # Mock streaming - async def mock_stream(*args, **kwargs): + # Mock the router stream + async def mock_router_stream(*args, **kwargs): + yield Prediction( + routing_decision="delegate_to_agent", + extracted_context="", + search_query="", + ) + + mock_cot_astream.return_value = mock_router_stream() + + # Mock agent streaming + async def mock_agent_stream(*args, **kwargs): yield OutputStreamChunk( module=None, field_name="answer", @@ -557,15 +715,11 @@ async def mock_stream(*args, **kwargs): reasoning="", ) - mock_astream.return_value = mock_stream() - ui_context = UIContext( - workspace=WorkspaceUIContext(id=workspace.id, name=workspace.name), - user=UserUIContext(id=user.id, name=user.first_name, email=user.email), - ) + mock_react_astream.return_value = mock_agent_stream() async def consume_stream(): title_messages = [] - human_message = HumanMessage(content="Test", ui_context=ui_context) + human_message = HumanMessage(content="Test") async for msg in assistant.astream_messages(human_message): if isinstance(msg, ChatTitleMessage): title_messages.append(msg) @@ -577,10 +731,11 @@ async def consume_stream(): assert len(title_messages) == 1 assert title_messages[0].content == "Title" + @patch("udspy.ChainOfThought.astream") @patch("udspy.ReAct.astream") @patch("udspy.LM") def test_astream_messages_yields_thinking_messages( - self, mock_lm, mock_astream, enterprise_data_fixture + self, mock_lm, mock_react_astream, mock_cot_astream, enterprise_data_fixture ): """Test that thinking messages from tools are yielded""" @@ -590,11 +745,23 @@ def test_astream_messages_yields_thinking_messages( user=user, workspace=workspace, title="Test Chat" ) - # Mock streaming - async def mock_stream(*args, **kwargs): - yield AiThinkingMessage(content="thinking") + # Mock the router stream + async def mock_router_stream(*args, **kwargs): + yield Prediction( + routing_decision="delegate_to_agent", + extracted_context="", 
+ search_query="", + ) + + mock_cot_astream.return_value = mock_router_stream() + + assistant = Assistant(chat) + + # Mock the agent streaming + async def mock_agent_stream(*args, **kwargs): + yield AiThinkingMessage(content="still thinking...") yield OutputStreamChunk( - module=None, + module=assistant._assistant.extract_module, field_name="answer", delta="Answer", content="Answer", @@ -602,12 +769,11 @@ async def mock_stream(*args, **kwargs): ) yield Prediction(answer="Answer", trajectory=[], reasoning="") - mock_astream.return_value = mock_stream() + mock_react_astream.return_value = mock_agent_stream() # Configure mock LM to return a serializable model name mock_lm.return_value.model = "test-model" - assistant = Assistant(chat) ui_context = UIContext( workspace=WorkspaceUIContext(id=workspace.id, name=workspace.name), user=UserUIContext(id=user.id, name=user.first_name, email=user.email), @@ -624,8 +790,9 @@ async def consume_stream(): thinking_messages = async_to_sync(consume_stream)() # Should receive thinking messages - assert len(thinking_messages) == 1 - assert thinking_messages[0].content == "thinking" + assert len(thinking_messages) == 2 + assert thinking_messages[0].content == "Thinking..." + assert thinking_messages[1].content == "still thinking..." @pytest.mark.django_db @@ -887,10 +1054,11 @@ def test_check_cancellation_does_nothing_when_no_flag( # Should not raise assistant._check_cancellation(cache_key, "msg123") + @patch("udspy.ChainOfThought.astream") @patch("udspy.ReAct.astream") @patch("udspy.LM") def test_astream_messages_yields_ai_started_message( - self, mock_lm, mock_astream, enterprise_data_fixture + self, mock_lm, mock_react_astream, mock_cot_astream, enterprise_data_fixture ): """Test that astream_messages yields AiStartedMessage at the beginning""" @@ -900,8 +1068,18 @@ def test_astream_messages_yields_ai_started_message( user=user, workspace=workspace, title="Test" ) - # Mock the streaming - async def mock_stream(*args, **kwargs): + # Mock the router stream + async def mock_router_stream(*args, **kwargs): + yield Prediction( + routing_decision="delegate_to_agent", + extracted_context="", + search_query="", + ) + + mock_cot_astream.return_value = mock_router_stream() + + # Mock the agent streaming + async def mock_agent_stream(*args, **kwargs): yield OutputStreamChunk( module=None, field_name="answer", @@ -911,15 +1089,11 @@ async def mock_stream(*args, **kwargs): ) yield Prediction(answer="Hello there!", trajectory=[], reasoning="") - mock_astream.return_value = mock_stream() + mock_react_astream.return_value = mock_agent_stream() mock_lm.return_value.model = "test-model" assistant = Assistant(chat) - ui_context = UIContext( - workspace=WorkspaceUIContext(id=workspace.id, name=workspace.name), - user=UserUIContext(id=user.id, name=user.first_name, email=user.email), - ) - human_message = HumanMessage(content="Hello", ui_context=ui_context) + human_message = HumanMessage(content="Hello") # Collect messages async def collect_messages(): @@ -935,10 +1109,11 @@ async def collect_messages(): assert isinstance(messages[0], AiStartedMessage) assert messages[0].message_id is not None + @patch("udspy.ChainOfThought.astream") @patch("udspy.ReAct.astream") @patch("udspy.LM") def test_astream_messages_checks_cancellation_periodically( - self, mock_lm, mock_astream, enterprise_data_fixture + self, mock_lm, mock_react_astream, mock_cot_astream, enterprise_data_fixture ): """Test that astream_messages checks for cancellation every 10 chunks""" @@ -948,8 +1123,18 @@ def 
test_astream_messages_checks_cancellation_periodically( user=user, workspace=workspace, title="Test" ) + # Mock the router stream + async def mock_router_stream(*args, **kwargs): + yield Prediction( + routing_decision="delegate_to_agent", + extracted_context="", + search_query="", + ) + + mock_cot_astream.return_value = mock_router_stream() + # Mock the stream to return many chunks - enough to trigger check at 10 - async def mock_stream(*args, **kwargs): + async def mock_agent_stream(*args, **kwargs): # Yield 15 chunks - cancellation check happens at chunk 10 for i in range(15): yield OutputStreamChunk( @@ -961,7 +1146,7 @@ async def mock_stream(*args, **kwargs): ) yield Prediction(answer="Complete response", trajectory=[], reasoning="") - mock_astream.return_value = mock_stream() + mock_react_astream.return_value = mock_agent_stream() mock_lm.return_value.model = "test-model" assistant = Assistant(chat) diff --git a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_automation_workflow_tools.py b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_automation_workflow_tools.py index c0138b7e2f..1e8ee8f8b0 100644 --- a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_automation_workflow_tools.py +++ b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_automation_workflow_tools.py @@ -1,12 +1,15 @@ +from unittest.mock import Mock + import pytest +from udspy.module.callbacks import ModuleContext, is_module_callback from baserow.contrib.automation.workflows.handler import AutomationWorkflowHandler from baserow.core.formula import resolve_formula from baserow.core.formula.registries import formula_runtime_function_registry from baserow.core.formula.types import BASEROW_FORMULA_MODE_ADVANCED from baserow_enterprise.assistant.tools.automation.tools import ( - get_create_workflows_tool, get_list_workflows_tool, + get_workflow_tool_factory, ) from baserow_enterprise.assistant.tools.automation.types import ( CreateRowActionCreate, @@ -94,8 +97,25 @@ def test_create_workflows(data_fixture): database = data_fixture.create_database_application(user=user, workspace=workspace) table = data_fixture.create_database_table(user=user, database=database) - tool = get_create_workflows_tool(user, workspace, fake_tool_helpers) - result = tool( + factory = get_workflow_tool_factory(user, workspace, fake_tool_helpers) + assert callable(factory) + + tools_upgrade = factory() + assert is_module_callback(tools_upgrade) + + mock_module = Mock() + mock_module._tools = [] + mock_module.init_module = Mock() + tools_upgrade(ModuleContext(module=mock_module)) + assert mock_module.init_module.called + + added_tools = mock_module.init_module.call_args[1]["tools"] + create_workflows_tool = next( + (tool for tool in added_tools if tool.name == "create_workflows"), None + ) + assert create_workflows_tool is not None + + result = create_workflows_tool.func( automation_id=automation.id, workflows=[ WorkflowCreate( @@ -143,8 +163,25 @@ def test_create_multiple_workflows(data_fixture): database = data_fixture.create_database_application(user=user, workspace=workspace) table = data_fixture.create_database_table(user=user, database=database) - tool = get_create_workflows_tool(user, workspace, fake_tool_helpers) - result = tool( + factory = get_workflow_tool_factory(user, workspace, fake_tool_helpers) + assert callable(factory) + + tools_upgrade = factory() + assert is_module_callback(tools_upgrade) + + mock_module = Mock() + 
mock_module._tools = [] + mock_module.init_module = Mock() + tools_upgrade(ModuleContext(module=mock_module)) + assert mock_module.init_module.called + + added_tools = mock_module.init_module.call_args[1]["tools"] + create_workflows_tool = next( + (tool for tool in added_tools if tool.name == "create_workflows"), None + ) + assert create_workflows_tool is not None + + result = create_workflows_tool.func( automation_id=automation.id, workflows=[ WorkflowCreate( @@ -248,8 +285,25 @@ def test_create_workflow_with_row_triggers_and_actions(data_fixture, trigger, ac table.pk = 999 # To match the action's table_id table.save() - tool = get_create_workflows_tool(user, workspace, fake_tool_helpers) - result = tool( + factory = get_workflow_tool_factory(user, workspace, fake_tool_helpers) + assert callable(factory) + + tools_upgrade = factory() + assert is_module_callback(tools_upgrade) + + mock_module = Mock() + mock_module._tools = [] + mock_module.init_module = Mock() + tools_upgrade(ModuleContext(module=mock_module)) + assert mock_module.init_module.called + + added_tools = mock_module.init_module.call_args[1]["tools"] + create_workflows_tool = next( + (tool for tool in added_tools if tool.name == "create_workflows"), None + ) + assert create_workflows_tool is not None + + result = create_workflows_tool.func( automation_id=automation.id, workflows=[ WorkflowCreate( @@ -286,8 +340,25 @@ def test_create_row_action_with_field_ids(data_fixture): text_field = data_fixture.create_text_field(table=table, name="Name") number_field = data_fixture.create_number_field(table=table, name="Age") - tool = get_create_workflows_tool(user, workspace, fake_tool_helpers) - result = tool( + factory = get_workflow_tool_factory(user, workspace, fake_tool_helpers) + assert callable(factory) + + tools_upgrade = factory() + assert is_module_callback(tools_upgrade) + + mock_module = Mock() + mock_module._tools = [] + mock_module.init_module = Mock() + tools_upgrade(ModuleContext(module=mock_module)) + assert mock_module.init_module.called + + added_tools = mock_module.init_module.call_args[1]["tools"] + create_workflows_tool = next( + (tool for tool in added_tools if tool.name == "create_workflows"), None + ) + assert create_workflows_tool is not None + + result = create_workflows_tool.func( automation_id=automation.id, workflows=[ WorkflowCreate( @@ -340,8 +411,25 @@ def test_update_row_action_with_row_id_and_field_ids(data_fixture): table = data_fixture.create_database_table(user=user, database=database) text_field = data_fixture.create_text_field(table=table, name="Status") - tool = get_create_workflows_tool(user, workspace, fake_tool_helpers) - result = tool( + factory = get_workflow_tool_factory(user, workspace, fake_tool_helpers) + assert callable(factory) + + tools_upgrade = factory() + assert is_module_callback(tools_upgrade) + + mock_module = Mock() + mock_module._tools = [] + mock_module.init_module = Mock() + tools_upgrade(ModuleContext(module=mock_module)) + assert mock_module.init_module.called + + added_tools = mock_module.init_module.call_args[1]["tools"] + create_workflows_tool = next( + (tool for tool in added_tools if tool.name == "create_workflows"), None + ) + assert create_workflows_tool is not None + + result = create_workflows_tool.func( automation_id=automation.id, workflows=[ WorkflowCreate( @@ -395,8 +483,25 @@ def test_delete_row_action_with_row_id(data_fixture): database = data_fixture.create_database_application(user=user, workspace=workspace) table = 
data_fixture.create_database_table(user=user, database=database) - tool = get_create_workflows_tool(user, workspace, fake_tool_helpers) - result = tool( + factory = get_workflow_tool_factory(user, workspace, fake_tool_helpers) + assert callable(factory) + + tools_upgrade = factory() + assert is_module_callback(tools_upgrade) + + mock_module = Mock() + mock_module._tools = [] + mock_module.init_module = Mock() + tools_upgrade(ModuleContext(module=mock_module)) + assert mock_module.init_module.called + + added_tools = mock_module.init_module.call_args[1]["tools"] + create_workflows_tool = next( + (tool for tool in added_tools if tool.name == "create_workflows"), None + ) + assert create_workflows_tool is not None + + result = create_workflows_tool.func( automation_id=automation.id, workflows=[ WorkflowCreate( @@ -449,8 +554,25 @@ def test_router_node_with_required_conditions(data_fixture): database = data_fixture.create_database_application(user=user, workspace=workspace) table = data_fixture.create_database_table(user=user, database=database) - tool = get_create_workflows_tool(user, workspace, fake_tool_helpers) - result = tool( + factory = get_workflow_tool_factory(user, workspace, fake_tool_helpers) + assert callable(factory) + + tools_upgrade = factory() + assert is_module_callback(tools_upgrade) + + mock_module = Mock() + mock_module._tools = [] + mock_module.init_module = Mock() + tools_upgrade(ModuleContext(module=mock_module)) + assert mock_module.init_module.called + + added_tools = mock_module.init_module.call_args[1]["tools"] + create_workflows_tool = next( + (tool for tool in added_tools if tool.name == "create_workflows"), None + ) + assert create_workflows_tool is not None + + result = create_workflows_tool.func( automation_id=automation.id, workflows=[ WorkflowCreate( diff --git a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_rows_tools.py b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_rows_tools.py index 4b83e36489..004477d9e6 100644 --- a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_rows_tools.py +++ b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_rows_tools.py @@ -6,7 +6,7 @@ from baserow.contrib.database.rows.handler import RowHandler from baserow_enterprise.assistant.tools.database.tools import ( get_list_rows_tool, - get_rows_meta_tool, + get_rows_tools_factory, ) from .utils import fake_tool_helpers @@ -211,7 +211,7 @@ def test_create_rows(data_fixture): table = res["table_a"] tool_helpers = fake_tool_helpers - meta_tool = get_rows_meta_tool(user, workspace, tool_helpers) + meta_tool = get_rows_tools_factory(user, workspace, tool_helpers) assert callable(meta_tool) tools_upgrade = meta_tool([table.id], ["create"]) @@ -277,7 +277,7 @@ def test_update_rows(data_fixture): table = res["table_a"] tool_helpers = fake_tool_helpers - meta_tool = get_rows_meta_tool(user, workspace, tool_helpers) + meta_tool = get_rows_tools_factory(user, workspace, tool_helpers) assert callable(meta_tool) tools_upgrade = meta_tool([table.id], ["update"]) assert is_module_callback(tools_upgrade) @@ -291,7 +291,7 @@ def test_update_rows(data_fixture): added_tools = mock_module.init_module.call_args[1]["tools"] added_tools_names = [tool.name for tool in added_tools] assert len(added_tools) == 1 - assert f"update_rows_in_table_{table.id}_by_row_ids" in added_tools_names + assert f"update_rows_in_table_{table.id}" in added_tools_names table_model = 
table.get_model() assert table_model.objects.count() == 3 @@ -371,7 +371,7 @@ def test_delete_rows(data_fixture): table = res["table_a"] tool_helpers = fake_tool_helpers - meta_tool = get_rows_meta_tool(user, workspace, tool_helpers) + meta_tool = get_rows_tools_factory(user, workspace, tool_helpers) assert callable(meta_tool) tools_upgrade = meta_tool([table.id], ["delete"]) @@ -384,7 +384,7 @@ def test_delete_rows(data_fixture): added_tools = mock_module.init_module.call_args[1]["tools"] added_tools_names = [tool.name for tool in added_tools] assert len(added_tools) == 1 - assert f"delete_rows_in_table_{table.id}_by_row_ids" in added_tools_names + assert f"delete_rows_in_table_{table.id}" in added_tools_names delete_table_rows = added_tools[0] table_model = table.get_model() diff --git a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_table_tools.py b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_table_tools.py index d065bdefac..67c0786ec1 100644 --- a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_table_tools.py +++ b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_table_tools.py @@ -1,15 +1,16 @@ -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock, Mock, patch import pytest +from udspy.module.callbacks import ModuleContext, is_module_callback from baserow.contrib.database.fields.models import FormulaField from baserow.contrib.database.formula.registries import formula_function_registry from baserow.contrib.database.table.models import Table from baserow.test_utils.helpers import AnyInt from baserow_enterprise.assistant.tools.database.tools import ( - get_create_tables_tool, get_generate_database_formula_tool, get_list_tables_tool, + get_table_and_fields_tools_factory, ) from baserow_enterprise.assistant.tools.database.types import ( BooleanFieldItemCreate, @@ -187,8 +188,29 @@ def test_create_simple_table_tool(data_fixture): workspace=workspace, name="Database 1" ) - tool = get_create_tables_tool(user, workspace, fake_tool_helpers) - response = tool( + factory = get_table_and_fields_tools_factory(user, workspace, fake_tool_helpers) + assert callable(factory) + + tools_upgrade = factory() + assert is_module_callback(tools_upgrade) + + mock_module = Mock() + mock_module._tools = [] + mock_module.init_module = Mock() + tools_upgrade(ModuleContext(module=mock_module)) + assert mock_module.init_module.called + + added_tools = mock_module.init_module.call_args[1]["tools"] + assert len(added_tools) == 2 # create_tables and create_fields + + # Find the create_tables tool + create_tables_tool = next( + (tool for tool in added_tools if tool.name == "create_tables"), None + ) + assert create_tables_tool is not None + + # Call the underlying function directly (not through udspy.Tool wrapper) + response = create_tables_tool.func( database_id=database.id, tables=[ TableItemCreate( @@ -220,7 +242,27 @@ def test_create_complex_table_tool(data_fixture): ) table = data_fixture.create_database_table(database=database, name="Table 1") - tool = get_create_tables_tool(user, workspace, fake_tool_helpers) + factory = get_table_and_fields_tools_factory(user, workspace, fake_tool_helpers) + assert callable(factory) + + tools_upgrade = factory() + assert is_module_callback(tools_upgrade) + + mock_module = Mock() + mock_module._tools = [] + mock_module.init_module = Mock() + tools_upgrade(ModuleContext(module=mock_module)) + assert 
mock_module.init_module.called + + added_tools = mock_module.init_module.call_args[1]["tools"] + assert len(added_tools) == 2 # create_tables and create_fields + + # Find the create_tables tool + create_tables_tool = next( + (tool for tool in added_tools if tool.name == "create_tables"), None + ) + assert create_tables_tool is not None + primary_field = TextFieldItemCreate(type="text", name="Name") fields = [ LongTextFieldItemCreate( @@ -282,7 +324,8 @@ def test_create_complex_table_tool(data_fixture): name="Attachments", ), ] - response = tool( + # Call the underlying function directly (not through udspy.Tool wrapper) + response = create_tables_tool.func( database_id=database.id, tables=[ TableItemCreate( diff --git a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_views_tools.py b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_views_tools.py index 25ec2b3a16..bf3e940d08 100644 --- a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_views_tools.py +++ b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_database_views_tools.py @@ -1,10 +1,12 @@ +from unittest.mock import Mock + import pytest +from udspy.module.callbacks import ModuleContext, is_module_callback from baserow.contrib.database.views.models import View, ViewFilter from baserow_enterprise.assistant.tools.database.tools import ( - get_create_view_filters_tool, - get_create_views_tool, get_list_views_tool, + get_views_tool_factory, ) from baserow_enterprise.assistant.tools.database.types import ( BooleanIsViewFilterItemCreate, @@ -40,6 +42,52 @@ from .utils import fake_tool_helpers +def get_create_views_tool(user, workspace): + """Helper to get the create_views tool from the factory""" + + factory = get_views_tool_factory(user, workspace, fake_tool_helpers) + assert callable(factory) + + tools_upgrade = factory() + assert is_module_callback(tools_upgrade) + + mock_module = Mock() + mock_module._tools = [] + mock_module.init_module = Mock() + tools_upgrade(ModuleContext(module=mock_module)) + assert mock_module.init_module.called + + added_tools = mock_module.init_module.call_args[1]["tools"] + create_views_tool = next( + (tool for tool in added_tools if tool.name == "create_views"), None + ) + assert create_views_tool is not None + return create_views_tool + + +def get_create_view_filters_tool(user, workspace): + """Helper to get the create_view_filters tool from the factory""" + + factory = get_views_tool_factory(user, workspace, fake_tool_helpers) + assert callable(factory) + + tools_upgrade = factory() + assert is_module_callback(tools_upgrade) + + mock_module = Mock() + mock_module._tools = [] + mock_module.init_module = Mock() + tools_upgrade(ModuleContext(module=mock_module)) + assert mock_module.init_module.called + + added_tools = mock_module.init_module.call_args[1]["tools"] + create_filters_tool = next( + (tool for tool in added_tools if tool.name == "create_view_filters"), None + ) + assert create_filters_tool is not None + return create_filters_tool + + @pytest.mark.django_db def test_list_views_tool(data_fixture): user = data_fixture.create_user() @@ -77,8 +125,8 @@ def test_create_grid_view(data_fixture): database = data_fixture.create_database_application(workspace=workspace) table = data_fixture.create_database_table(database=database) - tool = get_create_views_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_views_tool(user, workspace) + response = 
tool.func( table_id=table.id, views=[ GridViewItemCreate( @@ -100,8 +148,8 @@ def test_create_kanban_view(data_fixture): table = data_fixture.create_database_table(database=database) single_select = data_fixture.create_single_select_field(table=table, name="Status") - tool = get_create_views_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_views_tool(user, workspace) + response = tool.func( table_id=table.id, views=[ KanbanViewItemCreate( @@ -126,8 +174,8 @@ def test_create_calendar_view(data_fixture): table = data_fixture.create_database_table(database=database) date_field = data_fixture.create_date_field(table=table, name="Date") - tool = get_create_views_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_views_tool(user, workspace) + response = tool.func( table_id=table.id, views=[ CalendarViewItemCreate( @@ -152,8 +200,8 @@ def test_create_gallery_view(data_fixture): table = data_fixture.create_database_table(database=database) file_field = data_fixture.create_file_field(table=table, name="Files") - tool = get_create_views_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_views_tool(user, workspace) + response = tool.func( table_id=table.id, views=[ GalleryViewItemCreate( @@ -179,8 +227,8 @@ def test_create_timeline_view(data_fixture): start_date = data_fixture.create_date_field(table=table, name="Start Date") end_date = data_fixture.create_date_field(table=table, name="End Date") - tool = get_create_views_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_views_tool(user, workspace) + response = tool.func( table_id=table.id, views=[ TimelineViewItemCreate( @@ -206,8 +254,8 @@ def test_create_form_view(data_fixture): table = data_fixture.create_database_table(database=database) field = data_fixture.create_text_field(table=table, name="Name", primary=True) - tool = get_create_views_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_views_tool(user, workspace) + response = tool.func( table_id=table.id, views=[ FormViewItemCreate( @@ -249,8 +297,8 @@ def test_create_text_equal_filter(data_fixture): field = data_fixture.create_text_field(table=table, name="Name") view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -278,8 +326,8 @@ def test_create_text_not_equal_filter(data_fixture): field = data_fixture.create_text_field(table=table) view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -291,7 +339,7 @@ def test_create_text_not_equal_filter(data_fixture): value="test", ) ], - ) + ), ] ) @@ -308,8 +356,8 @@ def test_create_text_contains_filter(data_fixture): field = data_fixture.create_text_field(table=table) view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -321,7 +369,7 @@ def test_create_text_contains_filter(data_fixture): value="test", ) ], - ) + ), ] ) @@ -338,8 +386,8 @@ def 
test_create_text_not_contains_filter(data_fixture): field = data_fixture.create_text_field(table=table) view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -351,7 +399,7 @@ def test_create_text_not_contains_filter(data_fixture): value="test", ) ], - ) + ), ] ) @@ -371,8 +419,8 @@ def test_create_number_equal_filter(data_fixture): field = data_fixture.create_number_field(table=table) view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -381,7 +429,7 @@ def test_create_number_equal_filter(data_fixture): field_id=field.id, type="number", operator="equal", value=42.0 ) ], - ) + ), ] ) @@ -398,8 +446,8 @@ def test_create_number_not_equal_filter(data_fixture): field = data_fixture.create_number_field(table=table) view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -411,7 +459,7 @@ def test_create_number_not_equal_filter(data_fixture): value=42.0, ) ], - ) + ), ] ) @@ -428,8 +476,8 @@ def test_create_number_higher_than_filter(data_fixture): field = data_fixture.create_number_field(table=table) view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -443,7 +491,7 @@ def test_create_number_higher_than_filter(data_fixture): ) ], ) - ] + ], ) assert len(response["created_view_filters"]) == 1 @@ -461,8 +509,8 @@ def test_create_number_lower_than_filter(data_fixture): field = data_fixture.create_number_field(table=table) view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -475,7 +523,7 @@ def test_create_number_lower_than_filter(data_fixture): or_equal=False, ) ], - ) + ), ] ) @@ -493,8 +541,8 @@ def test_create_date_equal_filter(data_fixture): field = data_fixture.create_date_field(table=table) view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -507,7 +555,7 @@ def test_create_date_equal_filter(data_fixture): mode="exact_date", ) ], - ) + ), ] ) @@ -524,8 +572,8 @@ def test_create_date_not_equal_filter(data_fixture): field = data_fixture.create_date_field(table=table) view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -538,7 +586,7 @@ def test_create_date_not_equal_filter(data_fixture): mode="today", ) ], - ) + ), ] ) @@ -557,8 +605,8 @@ def 
test_create_date_after_filter(data_fixture): field = data_fixture.create_date_field(table=table) view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -572,7 +620,7 @@ def test_create_date_after_filter(data_fixture): or_equal=False, ) ], - ) + ), ] ) @@ -591,8 +639,8 @@ def test_create_date_before_filter(data_fixture): field = data_fixture.create_date_field(table=table) view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -606,7 +654,7 @@ def test_create_date_before_filter(data_fixture): or_equal=True, ) ], - ) + ), ] ) @@ -628,8 +676,8 @@ def test_create_single_select_is_any_of_filter(data_fixture): data_fixture.create_select_option(field=field, value="Option 2") view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -641,7 +689,7 @@ def test_create_single_select_is_any_of_filter(data_fixture): value=["Option 1", "Option 2"], ) ], - ) + ), ] ) @@ -661,8 +709,8 @@ def test_create_single_select_is_none_of_filter(data_fixture): data_fixture.create_select_option(field=field, value="Bad Option") view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -674,7 +722,7 @@ def test_create_single_select_is_none_of_filter(data_fixture): value=["Bad Option"], ) ], - ) + ), ] ) @@ -694,8 +742,8 @@ def test_create_boolean_is_true_filter(data_fixture): field = data_fixture.create_boolean_field(table=table, name="Active") view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -704,7 +752,7 @@ def test_create_boolean_is_true_filter(data_fixture): field_id=field.id, type="boolean", operator="is", value=True ) ], - ) + ), ] ) @@ -721,8 +769,8 @@ def test_create_boolean_is_false_filter(data_fixture): field = data_fixture.create_boolean_field(table=table, name="Active") view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -731,7 +779,7 @@ def test_create_boolean_is_false_filter(data_fixture): field_id=field.id, type="boolean", operator="is", value=False ) ], - ) + ), ] ) @@ -751,8 +799,8 @@ def test_create_multiple_select_is_any_of_filter(data_fixture): data_fixture.create_select_option(field=field, value="Tag 2") view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -764,7 +812,7 @@ def 
test_create_multiple_select_is_any_of_filter(data_fixture): value=["Tag 1", "Tag 2"], ) ], - ) + ), ] ) @@ -784,8 +832,8 @@ def test_create_multiple_select_is_none_of_filter(data_fixture): data_fixture.create_select_option(field=field, value="Bad Tag") view = data_fixture.create_grid_view(table=table) - tool = get_create_view_filters_tool(user, workspace, fake_tool_helpers) - response = tool( + tool = get_create_view_filters_tool(user, workspace) + response = tool.func( [ ViewFiltersArgs( view_id=view.id, @@ -797,10 +845,9 @@ def test_create_multiple_select_is_none_of_filter(data_fixture): value=["Bad Tag"], ) ], - ) + ), ] ) - assert len(response["created_view_filters"]) == 1 assert ViewFilter.objects.filter( view=view, field=field, type="multiple_select_has_not" diff --git a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_knowledge_retrieval_handler.py b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_knowledge_retrieval_handler.py index 7297760d40..97d62c37ef 100644 --- a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_knowledge_retrieval_handler.py +++ b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_assistant_knowledge_retrieval_handler.py @@ -9,7 +9,7 @@ KnowledgeBaseChunk, KnowledgeBaseDocument, ) -from baserow_enterprise.assistant.tools.search_docs.handler import ( +from baserow_enterprise.assistant.tools.search_user_docs.handler import ( BaserowEmbedder, KnowledgeBaseHandler, VectorHandler, @@ -27,7 +27,7 @@ def test_returns_list_of_vectors(self): # Mock the httpxClient where it's used in the handler module with patch( - "baserow_enterprise.assistant.tools.search_docs.handler.httpxClient" + "baserow_enterprise.assistant.tools.search_user_docs.handler.httpxClient" ) as mock_client: mock_client_instance = mock_client.return_value mock_post_response = mock_client_instance.post.return_value @@ -56,7 +56,7 @@ def test_returns_embeddings_with_correct_dimensions(self): # Mock the httpxClient where it's used in the handler module with patch( - "baserow_enterprise.assistant.tools.search_docs.handler.httpxClient" + "baserow_enterprise.assistant.tools.search_user_docs.handler.httpxClient" ) as mock_client: mock_client_instance = mock_client.return_value mock_post_response = mock_client_instance.post.return_value @@ -82,7 +82,7 @@ def test_pads_smaller_dimensions_with_zeros(self): # Mock the httpxClient where it's used in the handler module with patch( - "baserow_enterprise.assistant.tools.search_docs.handler.httpxClient" + "baserow_enterprise.assistant.tools.search_user_docs.handler.httpxClient" ) as mock_client: # Mock the httpxClient.post call with smaller dimensions small_dimension = 512 @@ -111,7 +111,7 @@ def test_raises_error_on_larger_dimensions(self): # Mock the httpxClient where it's used in the handler module with patch( - "baserow_enterprise.assistant.tools.search_docs.handler.httpxClient" + "baserow_enterprise.assistant.tools.search_user_docs.handler.httpxClient" ) as mock_client: # Mock the httpxClient.post call with larger dimensions large_dimension = DEFAULT_EMBEDDING_DIMENSIONS + 100 @@ -255,7 +255,7 @@ def test_retrieve_knowledge_chunks_empty_store(self, knowledge_handler): """Test knowledge retrieval when vector store is empty""" results = knowledge_handler.search("database query") - assert results == [] + assert list(results) == [] def test_retrieve_knowledge_chunks_with_data( self, knowledge_handler, sample_documents_with_chunks @@ -267,7 +267,10 @@ def 
test_retrieve_knowledge_chunks_with_data( # The chunks are already in the database and available for search # Query for database-related content - results = knowledge_handler.search("database fundamentals", num_results=5) + results = [ + ch.content + for ch in knowledge_handler.search("database fundamentals", num_results=5) + ] assert len(results) > 0 assert any( @@ -352,7 +355,9 @@ def test_search_orders_by_l2_distance(self, knowledge_handler): # Search with a query that will be embedded as [1.0, 0.0, 0.0, ...] # (our MockEmbeddings returns this for "database" queries) - results = knowledge_handler.search("database", num_results=3) + results = [ + ch.content for ch in knowledge_handler.search("database", num_results=3) + ] # Results should be ordered by distance (closest first) assert len(results) == 3 @@ -405,7 +410,9 @@ def test_search_l2_distance_with_different_vectors(self, knowledge_handler): index=2, ) - results = knowledge_handler.search("database", num_results=3) + results = [ + ch.content for ch in knowledge_handler.search("database", num_results=3) + ] # Should be ordered: distance 0, sqrt(0.5), sqrt(2) assert len(results) == 3 @@ -491,7 +498,7 @@ def test_handler_with_default_vector_store(self): """Test handler creation with default vector store""" with patch( - "baserow_enterprise.assistant.tools.search_docs.handler.VectorHandler" + "baserow_enterprise.assistant.tools.search_user_docs.handler.VectorHandler" ) as mock_vector_handler: handler = KnowledgeBaseHandler() diff --git a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_sync_knowledge_base.py b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_sync_knowledge_base.py index e6eddb0e28..e89edee9fc 100644 --- a/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_sync_knowledge_base.py +++ b/enterprise/backend/tests/baserow_enterprise_tests/assistant/test_sync_knowledge_base.py @@ -10,7 +10,9 @@ KnowledgeBaseChunk, KnowledgeBaseDocument, ) -from baserow_enterprise.assistant.tools.search_docs.handler import KnowledgeBaseHandler +from baserow_enterprise.assistant.tools.search_user_docs.handler import ( + KnowledgeBaseHandler, +) @pytest.fixture diff --git a/enterprise/web-frontend/modules/baserow_enterprise/components/dateDependency/DateDependencyMenuItem.vue b/enterprise/web-frontend/modules/baserow_enterprise/components/dateDependency/DateDependencyMenuItem.vue index b563462b27..c4ba42c660 100644 --- a/enterprise/web-frontend/modules/baserow_enterprise/components/dateDependency/DateDependencyMenuItem.vue +++ b/enterprise/web-frontend/modules/baserow_enterprise/components/dateDependency/DateDependencyMenuItem.vue @@ -1,6 +1,6 @@